Example #1
0
def AddAndroidTestArgs(parser):
  """Register args which are specific to Android test commands.

  Args:
    parser: An argparse parser used to add arguments that follow a command in
        the CLI.
  """
  # The following args are common to all Android test types.

  parser.add_argument(
      '--app',
      category=base.COMMONLY_USED_FLAGS,
      help='The path to the application binary file. The path may be in the '
      'local filesystem or in Google Cloud Storage using gs:// notation. '
      'Android App Bundles are specified as .aab, all other files are assumed '
      'to be APKs.')
  parser.add_argument(
      '--app-package',
      # Deprecated: kept only so existing invocations keep working while the
      # warning steers users away from it.
      action=actions.DeprecationAction(
          '--app-package',
          warn=('The `--app-package` flag is deprecated and should no longer '
                'be used. By default, the correct application package name is '
                'parsed from the APK manifest.')),
      help='The Java package of the application under test (default: extracted '
      'from the APK manifest).')
  parser.add_argument(
      '--auto-google-login',
      action='store_true',
      # default=None (not False) keeps the flag tri-state, so "not specified"
      # is distinguishable from an explicit --no-auto-google-login.
      default=None,
      help='Automatically log into the test device using a preconfigured '
      'Google account before beginning the test. Enabled by default, use '
      '--no-auto-google-login to disable.')
  parser.add_argument(
      '--directories-to-pull',
      type=arg_parsers.ArgList(),
      metavar='DIR_TO_PULL',
      help='A list of paths that will be copied from the device\'s storage to '
      'the designated results bucket after the test is complete. These must be '
      'absolute paths under `/sdcard` or `/data/local/tmp` (for example, '
      '`--directories-to-pull /sdcard/tempDir1,/data/local/tmp/tempDir2`). '
      'Path names are restricted to the characters ```a-zA-Z0-9_-./+```. '
      'The paths `/sdcard` and `/data` will be made available and treated as '
      'implicit path substitutions. E.g. if `/sdcard` on a particular device '
      'does not map to external storage, the system will replace it with the '
      'external storage path prefix for that device.')
  parser.add_argument(
      '--environment-variables',
      type=arg_parsers.ArgDict(),
      metavar='KEY=VALUE',
      help="""\
      A comma-separated, key=value map of environment variables and their
      desired values. The environment variables are mirrored as extra options to
      the `am instrument -e KEY1 VALUE1 ...` command and passed to your test
      runner (typically AndroidJUnitRunner). Examples:

      Break test cases into four shards and run only the first shard:

      ```
      --environment-variables numShards=4,shardIndex=0
      ```

      Enable code coverage and provide a directory to store the coverage
      results when using Android Test Orchestrator (`--use-orchestrator`):

      ```
      --environment-variables clearPackageData=true,coverage=true,coverageFilePath=/sdcard/
      ```

      Enable code coverage and provide a file path to store the coverage
      results when *not* using Android Test Orchestrator
      (`--no-use-orchestrator`):

      ```
      --environment-variables coverage=true,coverageFile=/sdcard/coverage.ec
      ```

      Note: If you need to embed a comma into a `VALUE` string, please refer to
      `gcloud topic escaping` for ways to change the default list delimiter.
      """)
  parser.add_argument(
      '--obb-files',
      # Android supports at most one "main" and one "patch" OBB file, hence
      # the max_length=2 bound.
      type=arg_parsers.ArgList(min_length=1, max_length=2),
      metavar='OBB_FILE',
      help='A list of one or two Android OBB file names which will be copied '
      'to each test device before the tests will run (default: None). Each '
      'OBB file name must conform to the format as specified by Android (e.g. '
      '[main|patch].0300110.com.example.android.obb) and will be installed '
      'into <shared-storage>/Android/obb/<package-name>/ on the test device.')
  parser.add_argument(
      '--performance-metrics',
      action='store_true',
      # Tri-state default, same rationale as --auto-google-login above.
      default=None,
      help='Monitor and record performance metrics: CPU, memory, network usage,'
      ' and FPS (game-loop only). Enabled by default, use '
      '--no-performance-metrics to disable.')
  parser.add_argument(
      '--results-history-name',
      help='The history name for your test results (an arbitrary string label; '
      'default: the application\'s label from the APK manifest). All tests '
      'which use the same history name will have their results grouped '
      'together in the Firebase console in a time-ordered test history list.')
  parser.add_argument(
      '--type',
      category=base.COMMONLY_USED_FLAGS,
      choices=['instrumentation', 'robo', 'game-loop'],
      help='The type of test to run.')

  # The following args are specific to Android instrumentation tests.

  parser.add_argument(
      '--test',
      category=base.COMMONLY_USED_FLAGS,
      help='The path to the binary file containing instrumentation tests. The '
      'given path may be in the local filesystem or in Google Cloud Storage '
      'using a URL beginning with `gs://`.')
  parser.add_argument(
      '--test-package',
      # Deprecated: mirrors the --app-package deprecation above.
      action=actions.DeprecationAction(
          '--test-package',
          warn=('The `--test-package` flag is deprecated and should no longer '
                'be used. By default, the correct test package name is '
                'parsed from the APK manifest.')),
      category=ANDROID_INSTRUMENTATION_TEST,
      help='The Java package name of the instrumentation test (default: '
      'extracted from the APK manifest).')
  parser.add_argument(
      '--test-runner-class',
      category=ANDROID_INSTRUMENTATION_TEST,
      help='The fully-qualified Java class name of the instrumentation test '
      'runner (default: the last name extracted from the APK manifest).')
  parser.add_argument(
      '--test-targets',
      category=ANDROID_INSTRUMENTATION_TEST,
      type=arg_parsers.ArgList(min_length=1),
      metavar='TEST_TARGET',
      help="""\
      A list of one or more test target filters to apply (default: run all test
      targets). Each target filter must be fully qualified with the package
      name, class name, or test annotation desired. Any test filter supported by
      `am instrument -e ...` is supported. See
       https://developer.android.com/reference/android/support/test/runner/AndroidJUnitRunner
       for more information. Examples:

         * `--test-targets "package com.my.package.name"`
         * `--test-targets "notPackage com.package.to.skip"`
         * `--test-targets "class com.foo.ClassName"`
         * `--test-targets "notClass com.foo.ClassName#testMethodToSkip"`
         * `--test-targets "annotation com.foo.AnnotationToRun"`
         * `--test-targets "size large notAnnotation com.foo.AnnotationToSkip"`
      """)
  parser.add_argument(
      '--use-orchestrator',
      category=ANDROID_INSTRUMENTATION_TEST,
      action='store_true',
      # Tri-state default, same rationale as --auto-google-login above.
      default=None,
      help='Whether each test runs in its own Instrumentation instance with '
      'the Android Test Orchestrator (default: Orchestrator is not used, same '
      'as specifying --no-use-orchestrator). Orchestrator is only compatible '
      'with AndroidJUnitRunner v1.0 or higher. See '
      'https://developer.android.com/training/testing/junit-runner.html'
      '#using-android-test-orchestrator for more information about Android '
      'Test Orchestrator.')

  # The following args are specific to Android Robo tests.

  parser.add_argument(
      '--robo-directives',
      metavar='TYPE:RESOURCE_NAME=INPUT',
      category=ANDROID_ROBO_TEST,
      type=arg_parsers.ArgDict(),
      help='A comma-separated (`<type>:<key>=<value>`) map of '
      '`robo_directives` that you can use to customize the behavior of Robo '
      'test. The `type` specifies the action type of the directive, which may '
      'take on values `click`, `text` or `ignore`. If no `type` is provided, '
      '`text` will be used by default. Each key should be the Android resource '
      'name of a target UI element and each value should be the text input for '
      'that element. Values are only permitted for `text` type elements, so no '
      'value should be specified for `click` and `ignore` type elements.'
      '\n\n'
      'To provide custom login credentials for your app, use'
      '\n\n'
      '    --robo-directives text:username_resource=username,'
      'text:password_resource=password'
      '\n\n'
      'To instruct Robo to click on the sign-in button, use'
      '\n\n'
      '    --robo-directives click:sign_in_button='
      '\n\n'
      'To instruct Robo to ignore any UI elements with resource names which '
      'equal or start with the user-defined value, use'
      '\n\n'
      '  --robo-directives ignore:ignored_ui_element_resource_name='
      '\n\n'
      'To learn more about Robo test and robo_directives, see '
      'https://firebase.google.com/docs/test-lab/android/command-line#custom_login_and_text_input_with_robo_test.'
      '\n\n'
      'Caution: You should only use credentials for test accounts that are not '
      'associated with real users.')

  # The following args are specific to Android game-loop tests.

  parser.add_argument(
      '--scenario-numbers',
      metavar='int',
      type=arg_parsers.ArgList(element_type=int, min_length=1, max_length=1024),
      category=ANDROID_GAME_LOOP_TEST,
      help='A list of game-loop scenario numbers which will be run as part of '
      'the test (default: all scenarios). A maximum of 1024 scenarios may be '
      'specified in one test matrix, but the maximum number may also be '
      'limited by the overall test *--timeout* setting.')

  parser.add_argument(
      '--scenario-labels',
      metavar='LABEL',
      type=arg_parsers.ArgList(min_length=1),
      category=ANDROID_GAME_LOOP_TEST,
      help='A list of game-loop scenario labels (default: None). '
      'Each game-loop scenario may be labeled in the APK manifest file with '
      'one or more arbitrary strings, creating logical groupings (e.g. '
      'GPU_COMPATIBILITY_TESTS). If *--scenario-numbers* and '
      '*--scenario-labels* are specified together, Firebase Test Lab will '
      'first execute each scenario from *--scenario-numbers*. It will then '
      'expand each given scenario label into a list of scenario numbers marked '
      'with that label, and execute those scenarios.')
Example #2
0
def _Args(parser):
  """Register flags for this command.

  Args:
    parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
  """
  parser.add_argument('name', help='The name of this cluster.')
  # Timeout in seconds for operation
  parser.add_argument(
      '--timeout',
      type=int,
      default=1800,
      help=argparse.SUPPRESS)
  flags.AddAsyncFlag(parser)
  parser.add_argument(
      '--num-nodes',
      type=arg_parsers.BoundedInt(1),
      help='The number of nodes to be created in each of the cluster\'s zones.',
      default=3)
  parser.add_argument(
      '--additional-zones',
      type=arg_parsers.ArgList(min_length=1),
      metavar='ZONE',
      help="""\
The set of additional zones in which the specified node footprint should be
replicated. All zones must be in the same region as the cluster's primary zone.
If additional-zones is not specified, all nodes will be in the cluster's primary
zone.

Note that `NUM_NODES` nodes will be created in each zone, such that if you
specify `--num-nodes=4` and choose one additional zone, 8 nodes will be created.

Multiple locations can be specified, separated by commas. For example:

  $ {command} example-cluster --zone us-central1-a --additional-zones us-central1-b,us-central1-c
""")
  parser.add_argument(
      '--machine-type', '-m',
      help='The type of machine to use for nodes. Defaults to n1-standard-1.')
  parser.add_argument(
      '--subnetwork',
      help='The name of the Google Compute Engine subnetwork '
      '(https://cloud.google.com/compute/docs/subnetworks) to which the '
      'cluster is connected. If specified, the cluster\'s network must be a '
      '"custom subnet" network.'
      # BUG FIX: this used to concatenate an empty string '' here, which ran
      # the two sentences together in the rendered help text
      # ("...network.Can not be used..."). Use a paragraph break instead.
      '\n\n'
      'Can not be used with the "--create-subnetwork" option.')
  parser.add_argument(
      '--disable-addons',
      type=arg_parsers.ArgList(
          choices=[api_adapter.INGRESS, api_adapter.HPA,
                   api_adapter.DASHBOARD]),
      help='List of cluster addons to disable. Options are {0}'.format(
          ', '.join(
              [api_adapter.INGRESS, api_adapter.HPA, api_adapter.DASHBOARD])))
  parser.add_argument(
      '--network',
      help='The Compute Engine Network that the cluster will connect to. '
      'Google Container Engine will use this network when creating routes '
      'and firewalls for the clusters. Defaults to the \'default\' network.')
  parser.add_argument(
      '--cluster-ipv4-cidr',
      help='The IP address range for the pods in this cluster in CIDR '
      'notation (e.g. 10.0.0.0/14).  Prior to Kubernetes version 1.7.0 '
      'this must be a subset of 10.0.0.0/8; however, starting with version '
      '1.7.0 can be any RFC 1918 IP range.')
  parser.add_argument(
      '--password',
      help='The password to use for cluster auth. Defaults to a '
      'server-specified randomly-generated string.')
  parser.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      # Note: {{command}} is doubled because this template goes through
      # str.format() below; it renders as the literal {command} placeholder.
      help="""\
Specifies scopes for the node instances. The project's default
service account is used. Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/devstorage.read_only

  $ {{command}} example-cluster --scopes bigquery,storage-rw,compute-ro

Multiple SCOPEs can specified, separated by commas. The scopes
necessary for the cluster to function properly (compute-rw, storage-ro),
are always added, even if not explicitly specified.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(
    aliases=compute_constants.ScopesForHelp(),
    scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))
  parser.add_argument(
      '--enable-cloud-endpoints',
      action='store_true',
      default=True,
      help='Automatically enable Google Cloud Endpoints to take advantage of '
      'API management features.')
  parser.add_argument(
      '--enable-cloud-logging',
      action='store_true',
      default=True,
      help='Automatically send logs from the cluster to the '
      'Google Cloud Logging API.')
  # NOTE(review): redundant with default=True above; kept to preserve the
  # original parser side effects exactly.
  parser.set_defaults(enable_cloud_logging=True)
  parser.add_argument(
      '--enable-cloud-monitoring',
      action='store_true',
      default=True,
      help='Automatically send metrics from pods in the cluster to the '
      'Google Cloud Monitoring API. VM metrics will be collected by Google '
      'Compute Engine regardless of this setting.')
  # NOTE(review): redundant with default=True above, as for cloud logging.
  parser.set_defaults(enable_cloud_monitoring=True)
  parser.add_argument(
      '--disk-size',
      type=int,
      help='Size in GB for node VM boot disks. Defaults to 100GB.')
  parser.add_argument(
      '--username', '-u',
      help='The user name to use for cluster auth.',
      default='admin')
  parser.add_argument(
      '--max-nodes-per-pool',
      type=arg_parsers.BoundedInt(100, api_adapter.MAX_NODES_PER_POOL),
      help='The maximum number of nodes to allocate per default initial node '
      'pool. Container Engine will automatically create enough nodes pools '
      'such that each node pool contains less than '
      '--max-nodes-per-pool nodes. Defaults to {nodes} nodes, but can be set '
      'as low as 100 nodes per pool on initial create.'.format(
          nodes=api_adapter.MAX_NODES_PER_POOL))
  flags.AddImageTypeFlag(parser, 'cluster')
  flags.AddNodeLabelsFlag(parser)
  flags.AddTagsFlag(parser, """\
Applies the given Compute Engine tags (comma separated) on all nodes in the new
node-pool. Example:

  $ {command} example-cluster --tags=tag1,tag2

New nodes, including ones created by resize or recreate, will have these tags
on the Compute Engine API instance object and can be used in firewall rules.
See https://cloud.google.com/sdk/gcloud/reference/compute/firewall-rules/create
for examples.
""")
  flags.AddClusterVersionFlag(parser)
  flags.AddDiskTypeFlag(parser, suppressed=True)
  parser.display_info.AddFormat(util.CLUSTERS_FORMAT)
""")
# Shared flag definitions for ML job commands; each is added to a parser by
# the individual commands that need it.

# Positional: the name of the job to operate on.
JOB_NAME = base.Argument('job', help='Name of the job.')
# Required: the Python module to execute.
MODULE_NAME = base.Argument('--module-name',
                            required=True,
                            help='Name of the module to run')
# Optional: local path to the Python package containing the training code.
PACKAGE_PATH = base.Argument('--package-path',
                             help="""\
Path to a Python package to build. This should point to a directory containing
the Python source for the job. It will be built using setuptools (which must be
installed) using its *parent* directory as context. If the parent directory
contains a `setup.py` file, the build will use that; otherwise, it will use a
simple built-in one.
""")
# Optional: pre-built package archives (local paths or gs:// URLs).
PACKAGES = base.Argument('--packages',
                         default=[],
                         type=arg_parsers.ArgList(),
                         metavar='PACKAGE',
                         help="""\
Path to Python archives used for training. These can be local paths
(absolute or relative), in which case they will be uploaded to the Cloud
Storage bucket given by `--staging-bucket`, or Cloud Storage URLs
(`gs://bucket-name/path/to/package.tar.gz`).
""")


def GetJobDirFlag(upload_help=True):
    """Get base.Argument() for `--job-dir`."""
    help_ = """\
A {dir_type} in which to store training outputs and other data
needed for training.
Example #4
0
def AddUpdateArgs(parser, include_beta=False, include_alpha=False):
    """Add args to the parser for subnet update.

    Only one of the mutually exclusive "updated field" flags may be given per
    invocation; beta/alpha-only flags are registered conditionally.

    Args:
      parser: The argparse parser.
      include_beta: Include beta functionality.
      include_alpha: Include alpha functionality.
    """
    # A subnet update changes exactly one field at a time.
    updated_field = parser.add_mutually_exclusive_group()

    updated_field.add_argument(
        '--enable-private-ip-google-access',
        # StoreTrueFalseAction provides both --flag and --no-flag spellings.
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public ip address.'))

    updated_field.add_argument('--add-secondary-ranges',
                               type=arg_parsers.ArgDict(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Adds secondary IP ranges to the subnetwork for use in IP aliasing.

      For example, `--add-secondary-ranges range1=192.168.64.0/24` adds
      a secondary range 192.168.64.0/24 with name range1.

      * `RANGE_NAME` - Name of the secondary range.
      * `RANGE` - `IP range in CIDR format.`
      """)

    updated_field.add_argument('--remove-secondary-ranges',
                               type=arg_parsers.ArgList(min_length=1),
                               action='append',
                               metavar='PROPERTY=VALUE',
                               help="""\
      Removes secondary ranges from the subnetwork.

      For example, `--remove-secondary-ranges range2,range3` removes the
      secondary ranges with names range2 and range3.
      """)

    updated_field.add_argument(
        '--enable-flow-logs',
        action=arg_parsers.StoreTrueFalseAction,
        help=(
            'Enable/disable VPC flow logging for this subnet. More information '
            'for VPC flow logs can be found at '
            'https://cloud.google.com/vpc/docs/using-flow-logs.'))

    if include_beta:
        # Beta-only logging flags need the beta API's message types.
        messages = apis.GetMessagesModule('compute',
                                          compute_api.COMPUTE_BETA_API_VERSION)

        AddLoggingAggregationInterval(parser, messages)

        parser.add_argument('--logging-flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)

        AddLoggingMetadata(parser, messages)

    if include_alpha:
        # Alpha-only flags need the alpha API's message types.
        messages = apis.GetMessagesModule(
            'compute', compute_api.COMPUTE_ALPHA_API_VERSION)
        updated_field.add_argument(
            '--role',
            choices={'ACTIVE': 'The ACTIVE subnet that is currently used.'},
            # Normalize user input (e.g. "active" or "ACTIVE") to the
            # API's enum spelling before the choices check.
            type=lambda x: x.replace('-', '_').upper(),
            help=(
                'The role is set to ACTIVE to update a BACKUP reserved '
                'address range to\nbe the new ACTIVE address range. Note '
                'that the only supported value for\nthis flag is ACTIVE since '
                'setting an address range to BACKUP is not\nsupported. '
                '\n\nThis field is only valid when updating a reserved IP '
                'address range used\nfor the purpose of Internal HTTP(S) Load '
                'Balancer.'))

        parser.add_argument('--drain-timeout',
                            type=arg_parsers.Duration(lower_bound='0s'),
                            default='0s',
                            help="""\
        The time period for draining traffic from Internal HTTP(S) Load Balancer
        proxies that are assigned addresses in the current ACTIVE subnetwork.
        For example, ``1h'', ``60m'' and ``3600s'' each specify a duration of
        1 hour for draining the traffic. Longer times reduce the number of
        proxies that are draining traffic at any one time, and so improve
        the availability of proxies for load balancing. The drain timeout is
        only applicable when the [--role=ACTIVE] flag is being used.
        """)

        AddLoggingAggregationIntervalAlpha(parser, messages)

        parser.add_argument('--flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)

        AddLoggingMetadataAlpha(parser, messages)

        updated_field.add_argument(
            '--enable-private-ipv6-access',
            action=arg_parsers.StoreTrueFalseAction,
            help=('Enable/disable private IPv6 access for the subnet.'))

        GetPrivateIpv6GoogleAccessTypeFlagMapper(
            messages).choice_arg.AddToParser(updated_field)
Example #5
0
def _Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
          to capture some information, but behaves like an ArgumentParser.
    """
    parser.add_argument('name', help='The name of this cluster.')
    # Timeout in seconds for operation
    parser.add_argument('--timeout',
                        type=int,
                        default=1800,
                        help=argparse.SUPPRESS)
    flags.AddClustersWaitAndAsyncFlags(parser)
    parser.add_argument(
        '--num-nodes',
        type=arg_parsers.BoundedInt(1),
        help=
        'The number of nodes to be created in each of the cluster\'s zones.',
        default=3)
    parser.add_argument('--additional-zones',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='ZONE',
                        help="""\
The set of additional zones in which the specified node footprint should be
replicated. All zones must be in the same region as the cluster's primary zone.
If additional-zones is not specified, all nodes will be in the cluster's primary
zone.

Note that `NUM_NODES` nodes will be created in each zone, such that if you
specify `--num-nodes=4` and choose one additional zone, 8 nodes will be created.

Multiple locations can be specified, separated by commas. For example:

  $ {command} example-cluster --zone us-central1-a --additional-zones us-central1-b,us-central1-c
""")
    parser.add_argument(
        '--machine-type',
        '-m',
        help='The type of machine to use for nodes. Defaults to '
        'server-specified')
    parser.add_argument(
        '--subnetwork',
        help='The name of the Google Compute Engine subnetwork '
        '(https://cloud.google.com/compute/docs/subnetworks) to which the '
        'cluster is connected. If specified, the cluster\'s network must be a '
        '"custom subnet" network. Specification of subnetworks is an '
        'alpha feature, and requires that the '
        'Google Compute Engine alpha API be enabled.')
    parser.add_argument(
        '--disable-addons',
        type=arg_parsers.ArgList(
            choices=[api_adapter.INGRESS, api_adapter.HPA]),
        help='List of cluster addons to disable. Options are {0}'.format(
            ', '.join([api_adapter.INGRESS, api_adapter.HPA])))
    parser.add_argument(
        '--network',
        help='The Compute Engine Network that the cluster will connect to. '
        'Google Container Engine will use this network when creating routes '
        'and firewalls for the clusters. Defaults to the \'default\' network.')
    parser.add_argument(
        '--cluster-ipv4-cidr',
        help='The IP address range for the pods in this cluster in CIDR '
        'notation (e.g. 10.0.0.0/14). Due to kube-proxy limitations, this range '
        'must be a subset of the 10.0.0.0/8 space. Defaults to server-specified'
    )
    parser.add_argument(
        '--password',
        help='The password to use for cluster auth. Defaults to a '
        'server-specified randomly-generated string.')
    parser.add_argument(
        '--scopes',
        type=arg_parsers.ArgList(min_length=1),
        metavar='SCOPE',
        # {{command}} is doubled because the template goes through str.format()
        # below; it renders as the literal {command} placeholder.
        help="""\
Specifies scopes for the node instances. The project's default
service account is used. Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/devstorage.read_only

  $ {{command}} example-cluster --scopes bigquery,storage-rw,compute-ro

Multiple SCOPEs can specified, separated by commas. The scopes
necessary for the cluster to function properly (compute-rw, storage-ro),
are always added, even if not explicitly specified.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========
""".format(aliases='\n'.join(
            ','.join(value)
            # BUG FIX: was SCOPES.iteritems(), which is Python-2-only and
            # raises AttributeError on Python 3; items() behaves the same
            # under sorted() on both versions.
            for value in sorted(compute_constants.SCOPES.items()))))
    parser.add_argument(
        '--enable-cloud-endpoints',
        action='store_true',
        default=True,
        help='Automatically enable Google Cloud Endpoints to take advantage of '
        'API management features.')
    parser.add_argument('--enable-cloud-logging',
                        action='store_true',
                        default=True,
                        help='Automatically send logs from the cluster to the '
                        'Google Cloud Logging API.')
    # NOTE(review): redundant with default=True above; kept to preserve the
    # original parser side effects exactly.
    parser.set_defaults(enable_cloud_logging=True)
    parser.add_argument(
        '--enable-cloud-monitoring',
        action='store_true',
        default=True,
        help='Automatically send metrics from pods in the cluster to the '
        'Google Cloud Monitoring API. VM metrics will be collected by Google '
        'Compute Engine regardless of this setting.')
    # NOTE(review): redundant with default=True above, as for cloud logging.
    parser.set_defaults(enable_cloud_monitoring=True)
    parser.add_argument(
        '--disk-size',
        type=int,
        help='Size in GB for node VM boot disks. Defaults to 100GB.')
    parser.add_argument('--username',
                        '-u',
                        help='The user name to use for cluster auth.',
                        default='admin')
    parser.add_argument(
        '--max-nodes-per-pool',
        type=arg_parsers.BoundedInt(100, api_adapter.MAX_NODES_PER_POOL),
        help='The maximum number of nodes to allocate per default initial node '
        'pool. Container engine will automatically create enough nodes pools '
        'such that each node pool contains less than '
        '--max-nodes-per-pool nodes. Defaults to {nodes} nodes, but can be set '
        'as low as 100 nodes per pool on initial create.'.format(
            nodes=api_adapter.MAX_NODES_PER_POOL))
    # Hidden flag: accepted but not shown in help output.
    parser.add_argument('--tags',
                        help=argparse.SUPPRESS,
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='TAGS')
    flags.AddImageTypeFlag(parser, 'cluster')
    flags.AddNodeLabelsFlag(parser)
Example #6
0
def AddMigStatefulFlagsForInstanceConfigs(parser, for_update=False):
    """Adding stateful flags for creating and updating instance configs.

    Args:
      parser: The argparse parser to register flags on.
      for_update: If True, register the update-command flag variants
          (`--update-stateful-disk`, `--update-stateful-metadata`, plus the
          `--remove-stateful-*` flags); otherwise register the create-time
          variants (`--stateful-disk`, `--stateful-metadata`).
    """
    parser.add_argument('--instance',
                        required=True,
                        help="""
        URI to existing or non existing instance.

        Name - last part of URI - will be preserved for existing per instance
        configs.

        For zonal managed instance groups there is no need to specify the whole
        URI to the instance - for this case instance name can be applied instead
        of URI.
      """)

    # The disk flag's help text is assembled from a shared base plus
    # create/update-specific paragraphs, so the same flag definition can
    # serve both command variants.
    stateful_disks_help = STATEFUL_DISKS_HELP + """
      Besides preserving disks already attached to the instance by specifying
      only device names, user have an option to attach (and preserve) other
      existing persistent disk(s) to the given instance.

      The same disk can be attached to many instances but only in read-only
      mode.
      """
    if for_update:
        stateful_disks_help += """
      Use this argument multiple times to update multiple disks.

      If stateful disk with given `device-name` exists in current instance
      config, its properties will be replaced by the newly provided ones. In
      other case new stateful disk definition will be added to the instance
      config.
      """
        stateful_disk_argument_name = '--update-stateful-disk'
    else:
        stateful_disks_help += """
      Use this argument multiple times to attach more disks.
      """
        stateful_disk_argument_name = '--stateful-disk'
    stateful_disks_help += """
      *device-name*::: Name under which disk is or will be attached.

      *source*::: Optional argument used to specify URI of existing persistent
      disk to attach under specified `device-name`.

      *mode*::: Specifies the mode of the disk to attach. Supported options are
      `ro` for read-only and `rw` for read-write. If omitted when source is
      specified, `rw` is used as a default.
      """ + AUTO_DELETE_ARG_HELP
    parser.add_argument(
        stateful_disk_argument_name,
        type=arg_parsers.ArgDict(
            spec={
                'device-name':
                str,
                'source':
                str,
                'mode':
                str,
                'auto-delete':
                # Validator is parameterized with the flag name so its error
                # messages reference the flag actually used.
                AutoDeleteFlag.ValidatorWithFlagName(
                    stateful_disk_argument_name)
            }),
        action='append',
        help=stateful_disks_help,
    )
    if for_update:
        parser.add_argument(
            '--remove-stateful-disks',
            metavar='DEVICE_NAME',
            type=arg_parsers.ArgList(min_length=1),
            help=('List all device names which should be removed from current '
                  'instance config.'),
        )

    # Metadata flags follow the same create/update naming pattern as disks.
    if for_update:
        stateful_metadata_argument_name = '--update-stateful-metadata'
    else:
        stateful_metadata_argument_name = '--stateful-metadata'
    stateful_metadata_help = """
      Additional metadata to be made available to the guest operating system
      on top of the metadata defined in the instance template.

      Stateful metadata may be used to define a key/value pair specific for
      the one given instance to differentiate it from the other instances in
      the managed instance group.

      Stateful metadata have priority over the metadata defined in the
      instance template. It means that stateful metadata defined for the keys
      already existing in the instance template override their values.

      Each metadata entry is a key/value pair separated by an equals sign.
      Metadata keys must be unique and less than 128 bytes in length. Multiple
      entries can be passed to this flag, e.g.,
      ``{argument_name} key-1=value-1,key-2=value-2,key-3=value-3''.
  """.format(argument_name=stateful_metadata_argument_name)
    if for_update:
        stateful_metadata_help += """
      If stateful metadata with the given key exists in current instance config,
      its value will be overridden with the newly provided one. If the key does
      not exist in the current instance config, a new key/value pair will be
      added.
    """
    parser.add_argument(stateful_metadata_argument_name,
                        type=arg_parsers.ArgDict(min_length=1),
                        default={},
                        action=arg_parsers.StoreOnceAction,
                        metavar='KEY=VALUE',
                        help=stateful_metadata_help)
    if for_update:
        parser.add_argument(
            '--remove-stateful-metadata',
            metavar='KEY',
            type=arg_parsers.ArgList(min_length=1),
            help=(
                'List all stateful metadata keys which should be removed from '
                'current instance config.'),
        )
def AddTagsFlag(parser, help_text):
    """Register the `--tags` list flag on the given parser.

    Args:
      parser: The argparse parser to add the flag to.
      help_text: Help string to attach to the flag.
    """
    parser.add_argument(
        '--tags',
        metavar='TAG',
        type=arg_parsers.ArgList(min_length=1),
        help=help_text)
Example #8
0
# --level: severity filter; defaults to 'any'. Allowed values come from
# logs_util.LOG_LEVELS.
LEVEL = base.Argument(
    '--level',
    help='Filter entries with severity equal to or higher than a given level.',
    required=False,
    default='any',
    choices=logs_util.LOG_LEVELS)

# --logs: comma-separated list of log names to read from; defaults to
# logs_util.DEFAULT_LOGS.
LOGS = base.Argument(
    '--logs',
    help=('Filter entries from a particular set of logs. Must be a '
          'comma-separated list of log names (request_log, stdout, stderr, '
          'etc).'),
    required=False,
    default=logs_util.DEFAULT_LOGS,
    metavar='APP_LOG',
    type=arg_parsers.ArgList(min_length=1))

# --server: hidden from help output (argparse.SUPPRESS).
SERVER_FLAG = base.Argument('--server', help=argparse.SUPPRESS)

# --service/-s: optional filter to a single service.
SERVICE = base.Argument(
    '--service', '-s', help='Limit to specific service.', required=False)

# --version/-v: optional filter to a single version.
VERSION = base.Argument(
    '--version', '-v', help='Limit to specific version.', required=False)


def AddServiceVersionSelectArgs(parser, short_flags=False):
  """Add arguments to a parser for selecting service and version.

  Args:
    parser: An argparse.ArgumentParser.
Example #9
0
  def AddArgument(self,
                  parser,
                  mutex_group=None,
                  operation_type='operate on',
                  cust_metavar=None):
    """Add this set of arguments to argparse parser.

    Args:
      parser: The argparse parser to register arguments on.
      mutex_group: Optional mutually exclusive group; when given, the primary
          name argument is added to it instead of directly to parser.
      operation_type: Verb used in generated help text and forwarded to the
          zone/region flag helpers.
      cust_metavar: Optional custom metavar; defaults to the upper-cased
          argument name.
    """

    params = dict(
        metavar=cust_metavar if cust_metavar else self.name.upper(),
        completer=self.completer,
    )

    # Help text priority: detailed help > short help > a generated default.
    if self._detailed_help:
      params['help'] = self._detailed_help
    elif self._short_help:
      params['help'] = self._short_help
    else:
      # int(self.plural) + 1 evaluates to 2 when plural is truthy, which makes
      # text.Pluralize pick the plural form of the resource name.
      params['help'] = 'Name{} of the {} to {}.'.format(
          's' if self.plural else '',
          text.Pluralize(
              int(self.plural) + 1, self.resource_name or '',
              self.custom_plural),
          operation_type)

    # Flags ('--...') express multiplicity via required= and an ArgList type;
    # positionals express the same via nargs ('+' required plural,
    # '*' optional plural, '?' optional singular).
    if self.name_arg.startswith('--'):
      params['required'] = self.required
      if self.plural:
        params['type'] = arg_parsers.ArgList(min_length=1)
    else:
      if self.required:
        if self.plural:
          params['nargs'] = '+'
      else:
        params['nargs'] = '*' if self.plural else '?'

    (mutex_group or parser).add_argument(self.name_arg, **params)

    # When the resource supports several scopes, their flags must be
    # mutually exclusive; otherwise register directly on the parser.
    if len(self.scopes) > 1:
      scope = parser.add_mutually_exclusive_group()
    else:
      scope = parser

    if compute_scope.ScopeEnum.ZONE in self.scopes:
      AddZoneFlag(
          scope,
          flag_prefix=self.scopes.flag_prefix,
          resource_type=self.resource_name,
          operation_type=operation_type,
          explanation=self._zone_explanation,
          hidden=self._zone_hidden,
          plural=self.plural,
          custom_plural=self.custom_plural)

    if compute_scope.ScopeEnum.REGION in self.scopes:
      AddRegionFlag(
          scope,
          flag_prefix=self.scopes.flag_prefix,
          resource_type=self.resource_name,
          operation_type=operation_type,
          explanation=self._region_explanation,
          hidden=self._region_hidden,
          plural=self.plural,
          custom_plural=self.custom_plural)

    # Build a grammatically correct subject ('X is' / 'Xs are') for the
    # global flag's help text below.
    if not self.plural:
      resource_mention = '{} is'.format(self.resource_name)
    elif self.plural and not self.custom_plural:
      resource_mention = '{}s are'.format(self.resource_name)
    else:
      resource_mention = '{} are'.format(self.custom_plural)
    if compute_scope.ScopeEnum.GLOBAL in self.scopes and len(self.scopes) > 1:
      scope.add_argument(
          self.scopes[compute_scope.ScopeEnum.GLOBAL].flag,
          action='store_true',
          default=None,
          help='If set, the {0} global.'
          .format(resource_mention))
Example #10
0
def _Args(parser):
    """Common arguments to add-path-matcher commands for each release track."""
    parser.add_argument(
        '--description',
        help='An optional, textual description for the path matcher.')

    parser.add_argument(
        '--path-matcher-name',
        required=True,
        help='The name to assign to the path matcher.')

    parser.add_argument(
        '--path-rules',
        default={},
        metavar='PATH=SERVICE',
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to services.')

    # A new path matcher is tied either to a brand-new host rule or to an
    # existing one, never both.
    host_rule_group = parser.add_mutually_exclusive_group()
    host_rule_group.add_argument(
        '--new-hosts',
        metavar='NEW_HOST',
        type=arg_parsers.ArgList(min_length=1),
        help=('If specified, a new host rule with the given hosts is created '
              'and the path matcher is tied to the new host rule.'))
    host_rule_group.add_argument(
        '--existing-host',
        help="""\
      An existing host rule to tie the new path matcher to. Although
      host rules can contain more than one host, only a single host
      is needed to uniquely identify the host rule.
      """)

    parser.add_argument(
        '--delete-orphaned-path-matcher',
        default=False,
        action='store_true',
        help=('If provided and a path matcher is orphaned as a result of this '
              'command, the command removes the orphaned path matcher instead '
              'of failing.'))

    # Exactly one default destination must be chosen.
    default_group = parser.add_mutually_exclusive_group(required=True)
    default_group.add_argument(
        '--default-service',
        help=('A backend service that will be used for requests that the path '
              'matcher cannot match. Exactly one of --default-service or '
              '--default-backend-bucket is required.'))
    default_group.add_argument(
        '--default-backend-bucket',
        help=('A backend bucket that will be used for requests that the path '
              'matcher cannot match. Exactly one of --default-service or '
              '--default-backend-bucket is required.'))

    parser.add_argument(
        '--backend-service-path-rules',
        default={},
        metavar='PATH=SERVICE',
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to services.')
    parser.add_argument(
        '--backend-bucket-path-rules',
        default={},
        metavar='PATH=BUCKET',
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to backend buckets.')
Example #11
0
    def Args(parser):
        """Registers the flags for creating a VPN tunnel."""

        parser.add_argument(
            '--description',
            help='An optional, textual description for the target VPN tunnel.')

        ike_version_arg = parser.add_argument(
            '--ike-version',
            type=int,
            choices=[1, 2],
            help='Internet Key Exchange protocol version number.')
        ike_version_arg.detailed_help = """\
        Internet Key Exchange protocol version number.
        Valid options are 1 and 2.  Default is 2.
        """

        parser.add_argument(
            '--peer-address',
            required=True,
            help='A valid IP-v4 address representing the remote tunnel endpoint'
        )

        # TODO(user) Add other group members
        shared_secret_arg = parser.add_argument(
            '--shared-secret',
            required=True,
            type=ValidateSimpleSharedSecret,
            help='A shared secret consisting of printable characters')
        shared_secret_arg.detailed_help = (
            'A shared secret consisting of printable characters.  Valid '
            'arguments match the regular expression ' +
            _PRINTABLE_CHARS_PATTERN)

        parser.add_argument(
            '--target-vpn-gateway',
            required=True,
            help='A reference to a target vpn gateway')

        # Hidden legacy flag.
        parser.add_argument(
            '--ike-networks',
            type=arg_parsers.ArgList(min_length=1),
            action=arg_parsers.FloatingListValuesCatcher(),
            help=argparse.SUPPRESS)

        parser.add_argument(
            '--local-traffic-selector',
            metavar='CIDR',
            type=arg_parsers.ArgList(min_length=1),
            action=arg_parsers.FloatingListValuesCatcher(),
            help=('Traffic selector is an agreement between IKE peers to permit '
                  'traffic through a tunnel if the traffic matches a specified pair'
                  ' of local and remote addresses.\n\n'
                  'local_traffic_selector allows to configure the local addresses '
                  'that are permitted. The value should be a comma separated list '
                  'of CIDR formatted strings. '
                  'Example: 192.168.0.0/16,10.0.0.0/24.'))

        flags.AddRegionFlag(
            parser, resource_type='VPN Tunnel', operation_type='create')

        parser.add_argument('name', help='The name of the VPN tunnel.')
    def Args(parser):
        """Registers update-sink flags on top of the base Update.Args flags."""
        # Inherit the common update flags first.
        Update.Args(parser)
        # Cloud DLP settings; omitted flags leave the current values unchanged.
        dlp_group = parser.add_argument_group(help=(
            'Settings for Cloud DLP enabled sinks. If any of these arguments '
            'are omitted they are unchanged.'))
        dlp_group.add_argument(
            '--dlp-inspect-template',
            help=
            ('Relative path to a Cloud DLP inspection template resource. For '
             'example "projects/my-project/inspectTemplates/my-template" or '
             '"organizations/my-org/inspectTemplates/my-template".'))
        dlp_group.add_argument(
            '--dlp-deidentify-template',
            help=('Relative path to a Cloud DLP de-identification template '
                  'resource. For example '
                  '"projects/my-project/deidentifyTemplates/my-template" or '
                  '"organizations/my-org/deidentifyTemplates/my-template".'))

        # BigQuery export settings.
        bigquery_group = parser.add_argument_group(
            help='Settings for sink exporting data to BigQuery.')
        bigquery_group.add_argument(
            '--use-partitioned-tables',
            action='store_true',
            help=
            ('If specified, use BigQuery\'s partitioned tables. By default, '
             'Logging creates dated tables based on the log entries\' '
             'timestamps, e.g. \'syslog_20170523\'. Partitioned tables remove '
             'the suffix and special query syntax '
             '(https://cloud.google.com/bigquery/docs/'
             'querying-partitioned-tables) must be used.'))

        # Exclusion management: clear all, remove by name, or append new ones.
        parser.add_argument('--clear-exclusions',
                            action='store_true',
                            help=('Remove all logging exclusions.'))
        parser.add_argument(
            '--remove-exclusions',
            type=arg_parsers.ArgList(),
            metavar='EXCLUSION ID',
            help=('Specify the name of the Logging exclusion(s) to delete.'))
        parser.add_argument(
            '--add-exclusions',
            action='append',
            type=arg_parsers.ArgDict(spec={
                'name': str,
                'description': str,
                'filter': str,
                'disabled': bool
            },
                                     required_keys=['name', 'filter']),
            help=
            ('Add an exclusion filter for a log entry that is not to be '
             'exported. This flag can be repeated.\n\n'
             'The `name` and `filter` attributes are required. The following '
             'keys are accepted:\n\n'
             '*name*::: An identifier, such as "load-balancer-exclusion". '
             'Identifiers are limited to 100 characters and can include only '
             'letters, digits, underscores, hyphens, and periods.\n\n'
             '*description*::: A description of this exclusion.\n\n'
             '*filter*::: An advanced log filter that matches the log entries '
             'to be excluded.\n\n'
             '*disabled*::: If this exclusion should be disabled and not '
             'exclude the log entries.'))

        parser.add_argument('--description', help='Description of the sink.')

        parser.add_argument(
            '--disabled',
            action='store_true',
            help=('Disable the sink. Disabled sinks do not export logs.'))
Example #13
0
def _CommonArgs(parser):
    """Register flags common to all tracks.

    Args:
      parser: The argparse parser to add arguments to.
    """
    instances_flags.AddTagsArgs(parser)
    base.ASYNC_FLAG.AddToParser(parser)
    labels_util.AddCreateLabelsFlags(parser)
    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')
    parser.add_argument('name', help='The name of this cluster.')
    parser.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')
    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    parser.add_argument('--image', hidden=True)
    parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    # --network and --subnet are mutually exclusive.
    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    # Typo fix: "mutally" -> "mutually" in user-visible help text.
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--zone',
        '-z',
        help='The compute zone (e.g. us-central1-a) for the cluster.',
        action=actions.StoreProperty(properties.VALUES.compute.zone))
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help='The maximum duration of each initialization action.')
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    # Grammar fix below: "can specified" -> "can be specified".
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes necessary for the cluster to function properly are always
added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP))

    # Each boot-disk size may be given either via the deprecated *-gb flag or
    # the newer BinarySize flag, but not both.
    master_boot_disk = parser.add_mutually_exclusive_group()
    worker_boot_disk = parser.add_mutually_exclusive_group()

    # Deprecated, to be removed at a future date.
    master_boot_disk.add_argument('--master-boot-disk-size-gb',
                                  type=int,
                                  hidden=True)
    worker_boot_disk.add_argument('--worker-boot-disk-size-gb',
                                  type=int,
                                  hidden=True)

    # Shared help text for the master/worker boot-disk size flags.
    boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
    master_boot_disk.add_argument(
        '--master-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)
    worker_boot_disk.add_argument(
        '--worker-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)

    parser.add_argument('--preemptible-worker-boot-disk-size',
                        type=arg_parsers.BinarySize(lower_bound='10GB'),
                        help="""\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """)
Example #14
0
 def Args(parser):
     """Adds args and flags to the parser.

     Args:
       parser: An argparse parser for the command.
     """
     # TODO(b/35705305): move common flags to command_lib.sql.flags
     parser.add_argument(
         '--activation-policy',
         required=False,
         choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
         help=(
             'The activation policy for this instance. This specifies when '
             'the instance should be activated and is applicable only when '
             'the instance state is RUNNABLE.'))
     parser.add_argument(
         '--assign-ip',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help='The instance must be assigned an IP address.')
     # Either set the authorized GAE app list or clear it, never both.
     gae_apps_group = parser.add_mutually_exclusive_group()
     gae_apps_group.add_argument(
         '--authorized-gae-apps',
         type=arg_parsers.ArgList(min_length=1),
         metavar='APP',
         required=False,
         help=(
             'First Generation instances only. List of IDs for App Engine '
             'applications running in the Standard environment that '
             'can access this instance.'))
     gae_apps_group.add_argument(
         '--clear-gae-apps',
         required=False,
         action='store_true',
         help=
         ('Specified to clear the list of App Engine apps that can access '
          'this instance.'))
     # Either set the authorized network list or clear it, never both.
     networks_group = parser.add_mutually_exclusive_group()
     networks_group.add_argument(
         '--authorized-networks',
         type=arg_parsers.ArgList(min_length=1),
         metavar='NETWORK',
         required=False,
         help=(
             'The list of external networks that are allowed to connect to '
             'the instance. Specified in CIDR notation, also known as '
             '\'slash\' notation (e.g. 192.168.100.0/24).'))
     networks_group.add_argument(
         '--clear-authorized-networks',
         required=False,
         action='store_true',
         help=
         ('Clear the list of external networks that are allowed to connect '
          'to the instance.'))
     # Either set a backup start time or disable backups, never both.
     backups_group = parser.add_mutually_exclusive_group()
     backups_group.add_argument(
         '--backup-start-time',
         required=False,
         help=('The start time of daily backups, specified in the 24 hour '
               'format - HH:MM, in the UTC timezone.'))
     backups_group.add_argument(
         '--no-backup',
         required=False,
         action='store_true',
         help='Specified if daily backup should be disabled.')
     # Either set database flags or clear them, never both.
     database_flags_group = parser.add_mutually_exclusive_group()
     database_flags_group.add_argument(
         '--database-flags',
         type=arg_parsers.ArgDict(min_length=1),
         metavar='FLAG=VALUE',
         required=False,
         help=
         ('A comma-separated list of database flags to set on the '
          'instance. Use an equals sign to separate flag name and value. '
          'Flags without values, like skip_grant_tables, can be written '
          'out without a value after, e.g., `skip_grant_tables=`. Use '
          'on/off for booleans. View the Instance Resource API for allowed '
          'flags. (e.g., `--database-flags max_allowed_packet=55555,'
          'skip_grant_tables=,log_output=1`)'))
     database_flags_group.add_argument(
         '--clear-database-flags',
         required=False,
         action='store_true',
         help=('Clear the database flags set on the instance. '
               'WARNING: Instance will be restarted.'))
     parser.add_argument(
         '--enable-bin-log',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=
         ('Enable binary log. If backup configuration is disabled, binary '
          'log should be disabled as well.'))
     parser.add_argument(
         '--follow-gae-app',
         required=False,
         help=(
             'First Generation instances only. The App Engine app '
             'this instance should follow. It must be in the same region as '
             'the instance. WARNING: Instance may be restarted.'))
     parser.add_argument(
         '--gce-zone',
         required=False,
         help=('The preferred Compute Engine zone (e.g. us-central1-a, '
               'us-central1-b, etc.). WARNING: Instance may be restarted.'))
     parser.add_argument('instance',
                         completion_resource='sql.instances',
                         help='Cloud SQL instance ID.')
     parser.add_argument(
         '--pricing-plan',
         '-p',
         required=False,
         choices=['PER_USE', 'PACKAGE'],
         help=('First Generation instances only. The pricing plan for this '
               'instance.'))
     parser.add_argument('--replication',
                         required=False,
                         choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
                         help='The type of replication this instance uses.')
     parser.add_argument(
         '--require-ssl',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=
         ('mysqld should default to \'REQUIRE X509\' for users connecting '
          'over IP.'))
     parser.add_argument(
         '--tier',
         '-t',
         required=False,
         help=
         ('The tier for this instance. For Second Generation instances, '
          'TIER is the instance\'s machine type (e.g., db-n1-standard-1). '
          'For PostgreSQL instances, only shared-core machine types '
          '(e.g., db-f1-micro) apply. A complete list of tiers is '
          'available here: https://cloud.google.com/sql/pricing. WARNING: '
          'Instance will be restarted.'))
     parser.add_argument(
         '--enable-database-replication',
         action='store_true',
         default=None,  # Tri-valued: None => don't change the setting.
         help=(
             'Enable database replication. Applicable only for read replica '
             'instance(s). WARNING: Instance will be restarted.'))
     parser.add_argument('--async',
                         action='store_true',
                         help='Do not wait for the operation to complete.')
     parser.add_argument(
         '--diff',
         action='store_true',
         help='Show what changed as a result of the update.')
Example #15
0
    def Args(parser):
        """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
        # The pipeline may be given as a file, or (v2alpha1 only) built from
        # --docker-image and --command-line.
        parser.add_argument(
            '--pipeline-file',
            help=
            '''A YAML or JSON file containing a v2alpha1 or v1alpha2 Pipeline
          object. See
[](https://cloud.google.com/genomics/reference/rest/v2alpha1/pipelines#Pipeline)
''')

        parser.add_argument(
            '--docker-image',
            category=base.COMMONLY_USED_FLAGS,
            default=CLOUD_SDK_IMAGE,
            help=
            '''v2alpha1 only. A docker image to run. Requires --command-line to
            be specified and cannot be used with --pipeline-file.''')

        parser.add_argument(
            '--command-line',
            category=base.COMMONLY_USED_FLAGS,
            help=
            '''v2alpha1 only. Command line to run with /bin/sh in the specified
            docker image. Cannot be used with --pipeline-file.''')

        # Pipeline parameters: literal inputs, file-backed inputs, and outputs.
        parser.add_argument(
            '--inputs',
            category=base.COMMONLY_USED_FLAGS,
            metavar='NAME=VALUE',
            type=arg_parsers.ArgDict(),
            action=arg_parsers.UpdateAction,
            help='''Map of input PipelineParameter names to values.
            Used to pass literal parameters to the pipeline, and to specify
            input files in Google Cloud Storage that will have a localCopy
            made. Specified as a comma-separated list: --inputs
            file=gs://my-bucket/in.txt,name=hello''')

        parser.add_argument(
            '--inputs-from-file',
            category=base.COMMONLY_USED_FLAGS,
            metavar='NAME=FILE',
            type=arg_parsers.ArgDict(),
            action=arg_parsers.UpdateAction,
            help='''Map of input PipelineParameter names to values.
            Used to pass literal parameters to the pipeline where values come
            from local files; this can be used to send large pipeline input
            parameters, such as code, data, or configuration values.
            Specified as a comma-separated list:
            --inputs-from-file script=myshellscript.sh,pyfile=mypython.py''')

        parser.add_argument(
            '--outputs',
            category=base.COMMONLY_USED_FLAGS,
            metavar='NAME=VALUE',
            type=arg_parsers.ArgDict(),
            action=arg_parsers.UpdateAction,
            help='''Map of output PipelineParameter names to values.
            Used to specify output files in Google Cloud Storage that will be
            made from a localCopy. Specified as a comma-separated list:
            --outputs ref=gs://my-bucket/foo,ref2=gs://my-bucket/bar''')

        parser.add_argument(
            '--logging',
            category=base.COMMONLY_USED_FLAGS,
            help=
            '''The location in Google Cloud Storage to which the pipeline logs
            will be copied. Can be specified as a fully qualified directory
            path, in which case logs will be output with a unique identifier
            as the filename in that directory, or as a fully specified path,
            which must end in `.log`, in which case that path will be
            used. Stdout and stderr logs from the run are also generated and
            output as `-stdout.log` and `-stderr.log`.''')

        labels_util.AddCreateLabelsFlags(parser)

        # Resource overrides: these take precedence over the pipeline file.
        parser.add_argument(
            '--memory',
            category=base.COMMONLY_USED_FLAGS,
            type=float,
            help='''The number of GB of RAM needed to run the pipeline. Overrides
             any value specified in the pipeline-file.''')

        parser.add_argument(
            '--cpus',
            category=base.COMMONLY_USED_FLAGS,
            type=int,
            help='''The minimum number of CPUs to run the pipeline. Overrides
             any value specified in the pipeline-file.''')

        parser.add_argument(
            '--disk-size',
            category=base.COMMONLY_USED_FLAGS,
            default=None,
            help=
            '''The disk size(s) in GB, specified as a comma-separated list of
            pairs of disk name and size. For example:
            --disk-size "name:size,name2:size2".
            Overrides any values specified in the pipeline-file.''')

        parser.add_argument(
            '--preemptible',
            category=base.COMMONLY_USED_FLAGS,
            action='store_true',
            help='''Whether to use a preemptible VM for this pipeline. The
            "resource" section of the pipeline-file must also set preemptible
            to "true" for this flag to take effect.''')

        parser.add_argument('--run-id',
                            hidden=True,
                            help='THIS ARGUMENT NEEDS HELP TEXT.')

        parser.add_argument(
            '--service-account-email',
            default='default',
            help='''The service account used to run the pipeline. If unspecified,
            defaults to the Compute Engine service account for your project.'''
        )

        parser.add_argument(
            '--service-account-scopes',
            metavar='SCOPE',
            type=arg_parsers.ArgList(),
            default=[],
            help=
            '''List of additional scopes to be made available for this service
             account. The following scopes are always requested for v1alpha2
             requests:

             https://www.googleapis.com/auth/compute
             https://www.googleapis.com/auth/devstorage.full_control
             https://www.googleapis.com/auth/genomics
             https://www.googleapis.com/auth/logging.write
             https://www.googleapis.com/auth/monitoring.write

             For v2alpha1 requests, only the following scopes are always
             requested:

             https://www.googleapis.com/auth/devstorage.read_write
             https://www.googleapis.com/auth/genomics''')

        # Placement: zones and regions fall back to the pipeline definition,
        # then to the local client configuration defaults.
        parser.add_argument(
            '--zones',
            metavar='ZONE',
            type=arg_parsers.ArgList(),
            help='''List of Compute Engine zones the pipeline can run in.

If no zones are specified with the zones flag, then zones in the
pipeline definition file will be used.

If no zones are specified in the pipeline definition, then the
default zone in your local client configuration is used.

If you have no default zone, then v1alpha2 pipelines may run in any zone.  For
v2alpha1 pipelines at least one zone or region must be specified.

For more information on default zones, see
https://cloud.google.com/compute/docs/gcloud-compute/#set_default_zone_and_region_in_your_local_client'''
        )

        parser.add_argument(
            '--regions',
            metavar='REGION',
            type=arg_parsers.ArgList(),
            help=
            '''v2alpha1 only. List of Compute Engine regions the pipeline can
            run in.

If no regions are specified with the regions flag, then regions in the
pipeline definition file will be used.

If no regions are specified in the pipeline definition, then the
default region in your local client configuration is used.

At least one region or region must be specified.

For more information on default regions, see
https://cloud.google.com/compute/docs/gcloud-compute/#set_default_zone_and_region_in_your_local_client'''
        )
Example #16
0
def _CommonArgs(parser,
                include_physical_block_size_support: bool = False,
                vss_erase_enabled: bool = False,
                source_instant_snapshot_enabled: bool = False,
                support_pd_interface: bool = False,
                support_user_licenses: bool = False):
    """Add arguments used for parsing in all command tracks.

    Registers the disk-creation flags shared by every release track; the
    boolean switches gate flags that only some tracks expose.

    Args:
      parser: The parser to register the disk-creation flags on.
      include_physical_block_size_support: If True, also register the
        `--physical-block-size` flag.
      vss_erase_enabled: If True, also register the erase-VSS-signature flag
        (via flags.AddEraseVssSignature).
      source_instant_snapshot_enabled: Forwarded to _SourceArgs; controls
        which source flags are registered there.
      support_pd_interface: If True, also register the `--interface` flag.
      support_user_licenses: If True, also register the `--user-licenses` flag.
    """
    Create.disks_arg.AddArgument(parser, operation_type='create')
    parser.add_argument(
        '--description',
        help='An optional, textual description for the disks being created.')

    # The default sizes in the help text are interpolated from the
    # per-disk-type map so the documentation stays in sync with the
    # actual server defaults.
    parser.add_argument(
        '--size',
        type=arg_parsers.BinarySize(lower_bound='10GB',
                                    suggested_binary_size_scales=[
                                        'GB', 'GiB', 'TB', 'TiB', 'PiB', 'PB'
                                    ]),
        help="""\
        Size of the disks. The value must be a whole
        number followed by a size unit of ``GB'' for gigabyte, or ``TB''
        for terabyte. If no size unit is specified, GB is
        assumed. For example, ``10GB'' will produce 10 gigabyte
        disks. Disk size must be a multiple of 1 GB. Limit your boot disk size
        to 2TB to account for MBR partition table limitations. If disk size is
        not specified, the default size of {}GB for pd-standard disks, {}GB for
        pd-balanced disks, {}GB for pd-ssd disks, and {}GB for pd-extreme will
        be used. For details about disk size limits, refer to:
        https://cloud.google.com/compute/docs/disks
        """.format(
            constants.DEFAULT_DISK_SIZE_GB_MAP[
                constants.DISK_TYPE_PD_STANDARD],
            constants.DEFAULT_DISK_SIZE_GB_MAP[
                constants.DISK_TYPE_PD_BALANCED],
            constants.DEFAULT_DISK_SIZE_GB_MAP[constants.DISK_TYPE_PD_SSD],
            constants.DEFAULT_DISK_SIZE_GB_MAP[
                constants.DISK_TYPE_PD_EXTREME]))

    parser.add_argument('--type',
                        completer=completers.DiskTypesCompleter,
                        help="""\
      Specifies the type of disk to create. To get a
      list of available disk types, run `gcloud compute disk-types list`.
      The default disk type is pd-standard.
      """)

    if support_pd_interface:
        parser.add_argument('--interface',
                            help="""\
        Specifies the disk interface to use for attaching this disk. Valid values
        are `SCSI` and `NVME`. The default is `SCSI`.
        """)

    # Default list-output columns for the create command.
    parser.display_info.AddFormat(
        'table(name, zone.basename(), sizeGb, type.basename(), status)')

    parser.add_argument(
        '--licenses',
        type=arg_parsers.ArgList(),
        metavar='LICENSE',
        help=(
            'A list of URIs to license resources. The provided licenses will '
            'be added onto the created disks to indicate the licensing and '
            'billing policies.'))

    # Source flags (image/snapshot/etc.) are registered by a shared helper.
    _SourceArgs(parser, source_instant_snapshot_enabled)

    disks_flags.AddProvisionedIopsFlag(parser, arg_parsers, constants)

    if support_user_licenses:
        parser.add_argument(
            '--user-licenses',
            type=arg_parsers.ArgList(),
            metavar='LICENSE',
            help=('List of URIs to license resources. User-provided licenses '
                  'can be edited after disk is created.'))

    # Customer-supplied encryption key and labels flags are shared helpers.
    csek_utils.AddCsekKeyArgs(parser)
    labels_util.AddCreateLabelsFlags(parser)

    if include_physical_block_size_support:
        parser.add_argument('--physical-block-size',
                            choices=['4096', '16384'],
                            default='4096',
                            help="""\
Physical block size of the persistent disk in bytes.
Valid values are 4096(default) and 16384.
""")
    if vss_erase_enabled:
        flags.AddEraseVssSignature(parser, resource='a source snapshot')

    resource_flags.AddResourcePoliciesArgs(parser, 'added to', 'disk')
Example #17
0
def ArgsForClusterRef(parser, beta=False):
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    if beta:
        image_parser = parser.add_mutually_exclusive_group()
        # TODO(b/73291743): Add external doc link to --image
        image_parser.add_argument(
            '--image',
            metavar='IMAGE',
            help='The full custom image URI or the custom image name that '
            'will be used to create a cluster.')
        image_parser.add_argument(
            '--image-version',
            metavar='VERSION',
            help='The image version to use for the cluster. Defaults to the '
            'latest version.')
    else:
        # In GA the custom-image flag exists but is hidden, so it is not
        # registered in a mutex group with --image-version.
        parser.add_argument(
            '--image',
            hidden=True,
            help='The full image URI to use with the cluster. Overrides '
            '--image-version.')
        parser.add_argument(
            '--image-version',
            metavar='VERSION',
            help='The image version to use for the cluster. Defaults to the '
            'latest version.')

    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      [format="csv",options="header"]
      |========
      Number of Masters,Cluster Mode
      1,Standard
      3,High Availability
      |========
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes necessary for the cluster to function properly are always
added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n'.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP,
           scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))

    # Each "-gb" flag is mutually exclusive with its BinarySize successor.
    master_boot_disk_size = parser.add_mutually_exclusive_group()
    worker_boot_disk_size = parser.add_mutually_exclusive_group()

    # Deprecated, to be removed at a future date.
    master_boot_disk_size.add_argument(
        '--master-boot-disk-size-gb',
        action=actions.DeprecationAction(
            '--master-boot-disk-size-gb',
            warn=(
                'The `--master-boot-disk-size-gb` flag is deprecated. '
                'Use `--master-boot-disk-size` flag with "GB" after value.')),
        type=int,
        hidden=True,
        help='Use `--master-boot-disk-size` flag with "GB" after value.')
    worker_boot_disk_size.add_argument(
        '--worker-boot-disk-size-gb',
        action=actions.DeprecationAction(
            '--worker-boot-disk-size-gb',
            warn=(
                'The `--worker-boot-disk-size-gb` flag is deprecated. '
                'Use `--worker-boot-disk-size` flag with "GB" after value.')),
        type=int,
        hidden=True,
        help='Use `--worker-boot-disk-size` flag with "GB" after value.')

    boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
    master_boot_disk_size.add_argument(
        '--master-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)
    worker_boot_disk_size.add_argument(
        '--worker-boot-disk-size',
        type=arg_parsers.BinarySize(lower_bound='10GB'),
        help=boot_disk_size_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-size',
                        type=arg_parsers.BinarySize(lower_bound='10GB'),
                        help=boot_disk_size_detailed_help)

    # Args that are visible only in Beta track
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """,
                        hidden=not beta)

    if beta:
        boot_disk_type_detailed_help = """\
        The type of the boot disk. The value must be ``pd-standard'' or
        ``pd-ssd''.
        """
        parser.add_argument('--master-boot-disk-type',
                            help=boot_disk_type_detailed_help)
        parser.add_argument('--worker-boot-disk-type',
                            help=boot_disk_type_detailed_help)
        parser.add_argument('--preemptible-worker-boot-disk-type',
                            help=boot_disk_type_detailed_help)
Example #18
0
def AddCustomAdvertisementArgs(parser, resource_str):
    """Adds common arguments for setting/updating custom advertisements.

    Args:
      parser: The argparse parser to register the advertisement flags on.
      resource_str: Human-readable resource name (e.g. 'router') interpolated
        into the flag help texts.
    """

    parser.add_argument(
        '--advertisement-mode',
        choices=_MODE_CHOICES,
        # Normalize user input so lowercase mode names are accepted.
        type=lambda mode: mode.upper(),
        metavar='MODE',
        help="""The new advertisement mode for this {0}.""".format(
            resource_str))

    parser.add_argument(
        '--advertisement-groups',
        type=arg_parsers.ArgList(choices=_GROUP_CHOICES,
                                 element_type=lambda group: group.upper()),
        metavar='GROUP',
        help="""The list of pre-defined groups of IP ranges to dynamically
              advertise on this {0}. This list can only be specified in
              custom advertisement mode.""".format(resource_str))

    parser.add_argument(
        '--advertisement-ranges',
        # allow_key_only: a bare CIDR without "=DESC" is a valid entry.
        type=arg_parsers.ArgDict(allow_key_only=True),
        metavar='CIDR_RANGE=DESC',
        help="""The list of individual IP ranges, in CIDR format, to dynamically
              advertise on this {0}. Each IP range can (optionally) be given a
              text description DESC. For example, to advertise a specific range,
              use `--advertisement-ranges=192.168.10.0/24`.  To store a
              description with the range, use
              `--advertisement-ranges=192.168.10.0/24=my-networks`. This list
              can only be specified in custom advertisement mode.""".format(
            resource_str))

    # Incremental add/remove flags cannot be combined with each other.
    incremental_args = parser.add_mutually_exclusive_group(required=False)

    incremental_args.add_argument(
        '--add-advertisement-groups',
        type=arg_parsers.ArgList(choices=_GROUP_CHOICES,
                                 element_type=lambda group: group.upper()),
        metavar='GROUP',
        help=
        """A list of pre-defined groups of IP ranges to dynamically advertise
              on this {0}. This list is appended to any existing advertisements.
              This field can only be specified in custom advertisement mode."""
        .format(resource_str))

    incremental_args.add_argument(
        '--remove-advertisement-groups',
        type=arg_parsers.ArgList(choices=_GROUP_CHOICES,
                                 element_type=lambda group: group.upper()),
        metavar='GROUP',
        help="""A list of pre-defined groups of IP ranges to remove from dynamic
              advertisement on this {0}. Each group in the list must exist in
              the current set of custom advertisements. This field can only be
              specified in custom advertisement mode.""".format(resource_str))

    incremental_args.add_argument(
        '--add-advertisement-ranges',
        type=arg_parsers.ArgDict(allow_key_only=True),
        metavar='CIDR_RANGE=DESC',
        help="""A list of individual IP ranges, in CIDR format, to dynamically
              advertise on this {0}. This list is appended to any existing
              advertisements. Each IP range can (optionally) be given a text
              description DESC. For example, to advertise a specific range, use
              `--add-advertisement-ranges=192.168.10.0/24`.  To store a
              description with the range, use
              `--add-advertisement-ranges=192.168.10.0/24=my-networks`. This
              list can only be specified in custom advertisement mode.""".format(
            resource_str))

    incremental_args.add_argument(
        '--remove-advertisement-ranges',
        type=arg_parsers.ArgList(),
        metavar='CIDR_RANGE',
        help="""A list of individual IP ranges, in CIDR format, to remove from
              dynamic advertisement on this {0}. Each IP range in the list must
              exist in the current set of custom advertisements. This field can
              only be specified in custom advertisement mode.""".format(
            resource_str))
def AddNodeIdentityFlags(parser, example_target, new_behavior=True):
    """Adds node identity flags to the given parser.

  Node identity flags are --scopes, --[no-]enable-cloud-endpoints (deprecated),
  and --service-account.  --service-account is mutually exclusive with the
  others.  --[no-]enable-cloud-endpoints is not allowed if property
  container/new_scopes_behavior is set to true, and is removed completely if
  new_behavior is set to true.

  Args:
    parser: A given parser.
    example_target: the target for the command, e.g. mycluster.
    new_behavior: Use new (alpha & beta) behavior: remove
    --[no-]enable-cloud-endpoints.
  """
    # --service-account is mutually exclusive with the scopes-related flags.
    node_identity_group = parser.add_group(
        mutex=True, help='Options to specify the node identity.')
    scopes_group = node_identity_group.add_group(help='Scopes options.')

    if new_behavior:
        track_help = """
Unless container/new_scopes_behavior property is true, compute-rw and storage-ro
are always added, even if not explicitly specified, and --enable-cloud-endpoints
(by default) adds service-control and service-management scopes.

If container/new_scopes_behavior property is true, none of the above scopes are
added (though storage-ro, service-control, and service-management are all
included in the default scopes).  In a future release, this will be the default
behavior.
"""
    else:
        track_help = ''
    scopes_group.add_argument('--scopes',
                              type=arg_parsers.ArgList(),
                              metavar='SCOPE',
                              default='gke-default',
                              help="""\
Specifies scopes for the node instances. The project's default service account
is used. Examples:

    $ {{command}} {example_target} --scopes=https://www.googleapis.com/auth/devstorage.read_only

    $ {{command}} {example_target} --scopes=bigquery,storage-rw,compute-ro

Multiple SCOPEs can be specified, separated by commas.  logging-write and/or
monitoring are added unless Cloud Logging and/or Cloud Monitoring are disabled
(see --enable-cloud-logging and --enable-cloud-monitoring for more info).
{track_help}
SCOPE can be either the full URI of the scope or an alias. Available aliases
are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(aliases=compute_constants.ScopesForHelp(),
           scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES,
           example_target=example_target,
           track_help=track_help))

    cloud_endpoints_help_text = """\
Automatically enable Google Cloud Endpoints to take advantage of API management
features by adding service-control and service-management scopes.

If --no-enable-cloud-endpoints is set, remove service-control and
service-management scopes, even if they are implicitly (via default) or
explicitly set via --scopes.

--[no-]enable-cloud-endpoints is not allowed if container/new_scopes_behavior
property is set to true.
"""
    scopes_group.add_argument(
        '--enable-cloud-endpoints',
        action=actions.DeprecationAction(
            '--[no-]enable-cloud-endpoints',
            warn='Flag --[no-]enable-cloud-endpoints is deprecated and will be '
            'removed in a future release.  Scopes necessary for Google Cloud '
            'Endpoints are now included in the default set and may be '
            'excluded using --scopes.',
            # removed=True makes using the flag an error rather than a warning.
            removed=new_behavior,
            action='store_true'),
        default=True,
        help=cloud_endpoints_help_text)

    sa_help_text = """\
The Google Cloud Platform Service Account to be used by the node VMs.  If a \
service account is specified, the cloud-platform scope is used. If no Service \
Account is specified, the project default service account is used.
"""
    node_identity_group.add_argument('--service-account', help=sa_help_text)
Example #20
0
def GenerateFlag(field, attributes, fix_bools=True, category=None):
  """Generates a flag for a single field in a message.

  Args:
    field: The apitools field object.
    attributes: yaml_command_schema.Argument, The attributes to use to
      generate the arg.
    fix_bools: True to generate boolean flags as switches that take a value or
      False to just generate them as regular string flags.
    category: The help category to put the flag in.

  Raises:
    ArgumentGenerationError: When an argument could not be generated from the
      API field.

  Returns:
    calliope.base.Argument, The generated argument.
  """
  variant = field.variant if field else None
  t = attributes.type or TYPES.get(variant, None)

  choices = None
  if attributes.choices is not None:
    choice_map = {c.arg_value: c.help_text for c in attributes.choices}
    # If help text is provided, give a choice map. Otherwise, just use the
    # choice values.
    choices = (choice_map if any(choice_map.values())
               else sorted(choice_map.keys()))
  elif variant == messages.Variant.ENUM:
    choices = [EnumNameToChoice(name) for name in sorted(field.type.names())]

  action = attributes.action
  if t == bool and fix_bools and not action:
    # For boolean flags, we want to create a flag with action 'store_true'
    # rather than a flag that takes a value and converts it to a boolean. Only
    # do this if not using a custom action.
    action = 'store_true'
  # Default action is store if one was not provided.
  action = action or 'store'

  # pylint: disable=g-explicit-bool-comparison, only an explicit False should
  # override this, None just means to do the default.
  repeated = (field and field.repeated) and attributes.repeated != False

  if repeated:
    if action != 'store':
      raise ArgumentGenerationError(
          field.name,
          'The field is repeated but is using a custom action. You might'
          ' want to set repeated: False in your arg spec.')
    if t:
      # A special ArgDict wrapper type was given, bind it to the message so it
      # can generate the message from the key/value pairs.
      if isinstance(t, RepeatedMessageBindableType):
        action = t.Action()
        t = t.GenerateType(field.type)
      # If a simple type was provided, just use a list of that type (even if it
      # is a message). The type function will be responsible for converting to
      # the correct value. If type is an ArgList or ArgDict, don't try to wrap
      # it.
      elif not isinstance(t, arg_parsers.ArgList):
        t = arg_parsers.ArgList(element_type=t, choices=choices)
        # Don't register the choices on the argparse arg because it is validated
        # by the ArgList.
        choices = None
  elif isinstance(t, RepeatedMessageBindableType):
    raise ArgumentGenerationError(
        field.name, 'The given type can only be used on repeated fields.')

  if field and not t and action == 'store' and not attributes.processor:
    # The type is unknown and there is no custom action or processor, we don't
    # know what to do with this.
    raise ArgumentGenerationError(
        field.name, 'The field is of an unknown type. You can specify a type '
                    'function or a processor to manually handle this argument.')

  name = attributes.arg_name
  arg = base.Argument(
      name if attributes.is_positional else '--' + name,
      category=category if not attributes.is_positional else None,
      action=action,
      completer=attributes.completer,
      help=attributes.help_text,
      hidden=attributes.hidden,
  )
  if attributes.default is not None:
    arg.kwargs['default'] = attributes.default
  if action != 'store_true':
    # For this special action type, it won't accept a bunch of the common
    # kwargs, so we can only add them if not generating a boolean flag.
    metavar = attributes.metavar or name
    arg.kwargs['metavar'] = resource_property.ConvertToAngrySnakeCase(
        metavar.replace('-', '_'))
    arg.kwargs['type'] = t
    arg.kwargs['choices'] = choices

  if not attributes.is_positional:
    arg.kwargs['required'] = attributes.required
  return arg
Example #21
0
def ArgsForClusterRef(parser,
                      beta=False,
                      include_deprecated=True,
                      include_ttl_config=False):
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
    include_ttl_config: whether to include Scheduled Delete(TTL) args
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    flags.AddZoneFlag(parser, short_flags=include_deprecated)
    flags.AddComponentFlag(parser)

    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    image_parser = parser.add_mutually_exclusive_group()
    # TODO(b/73291743): Add external doc link to --image
    image_parser.add_argument(
        '--image',
        metavar='IMAGE',
        help='The full custom image URI or the custom image name that '
        'will be used to create a cluster.')
    image_parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument('--bucket',
                        help="""\
      The Google Cloud Storage bucket to use by default to stage job
      dependencies, miscellaneous config files, and job driver console output
      when using this cluster.
      """)

    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutally
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument('--num-preemptible-worker-local-ssds',
                        type=int,
                        help="""\
      The number of local SSDs to attach to each preemptible worker in
      a cluster.
      """)
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      Number of Masters | Cluster Mode
      --- | ---
      1 | Standard
      3 | High Availability
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        action=arg_parsers.UpdateAction,
                        default={},
                        metavar='PREFIX:PROPERTY=VALUE',
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

Prefix | File | Purpose of file
--- | --- | ---
capacity-scheduler | capacity-scheduler.xml | Hadoop YARN Capacity Scheduler configuration
core | core-site.xml | Hadoop general configuration
distcp | distcp-default.xml | Hadoop Distributed Copy configuration
hadoop-env | hadoop-env.sh | Hadoop specific environment variables
hdfs | hdfs-site.xml | Hadoop HDFS configuration
hive | hive-site.xml | Hive configuration
mapred | mapred-site.xml | Hadoop MapReduce configuration
mapred-env | mapred-env.sh | Hadoop MapReduce specific environment variables
pig | pig.properties | Pig configuration
spark | spark-defaults.conf | Spark configuration
spark-env | spark-env.sh | Spark specific environment variables
yarn | yarn-site.xml | Hadoop YARN configuration
yarn-env | yarn-env.sh | Hadoop YARN specific environment variables

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

  {minimum_scopes}

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

  {additional_scopes}

If you want to enable all scopes use the 'cloud-platform' scope.

{scopes_help}
""".format(minimum_scopes='\n  '.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n  '.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           scopes_help=compute_helpers.SCOPES_HELP))

    if include_deprecated:
        _AddDiskArgsDeprecated(parser)
    else:
        _AddDiskArgs(parser)

    # --no-address is an exception to the no negative-flag style guideline to be
    # consistent with gcloud compute instances create --no-address
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

    boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
    parser.add_argument('--master-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)

    if include_ttl_config:
        parser.add_argument('--max-idle',
                            type=arg_parsers.Duration(),
                            hidden=not (beta),
                            help="""\
          The duration before cluster is auto-deleted after last job completes,
          such as "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

        auto_delete_group = parser.add_mutually_exclusive_group()
        auto_delete_group.add_argument('--max-age',
                                       type=arg_parsers.Duration(),
                                       hidden=not (beta),
                                       help="""\
          The lifespan of the cluster before it is auto-deleted, such as
          "2h" or "1d".
          See $ gcloud topic datetimes for information on duration formats.
          """)

        auto_delete_group.add_argument('--expiration-time',
                                       type=arg_parsers.Datetime.Parse,
                                       hidden=not (beta),
                                       help="""\
          The time when cluster will be auto-deleted, such as
          "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
          information on time formats.
          """)

    AddKerberosGroup(parser)
Example #22
0
def AddMatrixArgs(parser):
    """Register the repeatable args which define the axes for a test matrix.

  Args:
    parser: An argparse parser used to add arguments that follow a command
        in the CLI.
  """
    # Preferred device specification: a repeatable --device flag taking
    # DIMENSION=VALUE pairs (model/version/locale/orientation).
    parser.add_argument('--device',
                        category=base.COMMONLY_USED_FLAGS,
                        type=arg_parsers.ArgDict(min_length=1),
                        action='append',
                        metavar='DIMENSION=VALUE',
                        help="""\
      A list of ``DIMENSION=VALUE'' pairs which specify a target device to test
      against. This flag may be repeated to specify multiple devices. The four
      device dimensions are: *model*, *version*, *locale*, and
      *orientation*. If any dimensions are omitted, they will use a default
      value. The default value can be found with the list command for each
      dimension, `$ {parent_command} <dimension> list`.
      *--device* is now the preferred way to specify test devices and may not
      be used in conjunction with *--device-ids*, *--os-version-ids*,
      *--locales*, or *--orientations*. Omitting all of the preceding
      dimension-related flags will run tests against a single device using
      defaults for all four device dimensions.

      Examples:\n
      ```
      --device model=Nexus6
      --device version=23,orientation=portrait
      --device model=shamu,version=22,locale=zh_CN,orientation=landscape
      ```
      """)
    # Deprecated per-dimension flags below; superseded by --device.
    parser.add_argument(
        '--device-ids',
        '-d',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='MODEL_ID',
        help='The list of MODEL_IDs to test against (default: one device model '
        'determined by the Firebase Test Lab device catalog; see TAGS listed '
        'by the `$ {parent_command} devices list` command).')
    parser.add_argument(
        '--os-version-ids',
        '-v',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='OS_VERSION_ID',
        help='The list of OS_VERSION_IDs to test against (default: a version ID '
        'determined by the Firebase Test Lab device catalog).')
    parser.add_argument(
        '--locales',
        '-l',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='LOCALE',
        help='The list of LOCALEs to test against (default: a single locale '
        'determined by the Firebase Test Lab device catalog).')
    parser.add_argument(
        '--orientations',
        '-o',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1,
                                 max_length=2,
                                 choices=arg_validate.ORIENTATION_LIST),
        completer=arg_parsers.GetMultiCompleter(OrientationsCompleter),
        metavar='ORIENTATION',
        help='The device orientation(s) to test against (default: portrait).')
Example #23
0
def AddWorkerpoolArgs(parser, update=False):
  """Register the argparse flags for creating or updating a workerpool.

  Args:
    parser: An argparse.ArgumentParser-like object.
    update: If true, use the version of the flags for updating a workerpool.
      Otherwise, use the version for creating a workerpool.

  Returns:
    The parser argument with workerpool flags added in.
  """
  if update:
    action = 'update'
    config_file_desc = _UPDATE_FILE_DESC
  else:
    action = 'create'
    config_file_desc = _CREATE_FILE_DESC
  # The pool is configured either from a file or from individual flags,
  # never both at once.
  file_or_flags_group = parser.add_mutually_exclusive_group(required=True)
  file_or_flags_group.add_argument(
      '--config-from-file',
      help=config_file_desc,
  )
  flag_group = file_or_flags_group.add_argument_group(
      'Command-line flags to configure the WorkerPool:')
  flag_group.add_argument(
      'WORKER_POOL',
      help='The WorkerPool to {}.'.format(action),
  )
  flag_group.add_argument(
      '--worker-count',
      help=('Total number of workers to be created across all requested '
            'regions.'),
  )
  if update:
    # On update, regions are edited incrementally (add/remove/clear).
    region_group = flag_group.add_argument_group(help="""\
Update the Cloud region or regions in which the Workerpool is located.
To overwrite regions, use --clear-regions followed by --add-regions in the same
command.
Choices: us-central1, us-west1, us-east1, and us-east4.
""")
    region_group.add_argument(
        '--add-regions',
        type=arg_parsers.ArgList(),
        metavar='REGION',
        help='Add regions, separated by comma.',
    )
    region_group.add_argument(
        '--clear-regions',
        action='store_true',
        help='Remove all regions.',
    )
    region_group.add_argument(
        '--remove-regions',
        type=arg_parsers.ArgList(),
        metavar='REGION',
        help='Remove regions, separated by comma.',
    )
  else:
    # On create, the full region list is supplied in one flag.
    flag_group.add_argument(
        '--regions',
        type=arg_parsers.ArgList(),
        metavar='REGION',
        help="""\
The Cloud region or regions in which to create the WorkerPool.

Choices: us-central1, us-west1, us-east1, us-east4.
""")
  worker_group = flag_group.add_argument_group(
      'Configuration to be used for creating workers in the WorkerPool:')
  worker_group.add_argument(
      '--worker-machine-type',
      help="""\
Machine Type of the worker, such as n1-standard-1.

See https://cloud.google.com/compute/docs/machine-types.

If left blank, Cloud Build will use a standard unspecified machine to create the
worker pool.

`--worker-machine-type` is overridden if you specify a different machine type
using `--machine-type` during `gcloud builds submit`.
""")
  worker_group.add_argument(
      '--worker-disk-size',
      type=arg_parsers.BinarySize(lower_bound='100GB'),
      help="""\
Size of the disk attached to the worker.

If not given, Cloud Build will use a standard disk size. `--worker-disk-size` is
overridden if you specify a different disk size using `--disk-size` during
`gcloud builds submit`.
""")
  # Network flags are all-or-none; empty strings select the defaults.
  network_subgroup = worker_group.add_argument_group(help="""\
The network definition used to create the worker.

If all of these flags are unused, the workers will be created in the
WorkerPool's project on the default network. You cannot specify just one of
these flags: it is all or none. However, you can set them to the empty string in
order to use the default settings.
""")
  network_subgroup.add_argument(
      '--worker-network-project',
      help="""\
ID of the project containing the given network and subnet.

The workerpool's project is used if empty string.
""")
  network_subgroup.add_argument(
      '--worker-network-name',
      help="""\
Network on which the workers are created.

`default` network is used if empty string.
""")
  network_subgroup.add_argument(
      '--worker-network-subnet',
      help="""\
Subnet on which the workers are created.

`default` subnet is used if empty string.
""")
  worker_group.add_argument(
      '--worker-tag',
      help="""\
The tag applied to the worker, and the same tag used by the firewall rule.

It is used to identify the Cloud Build workers among other VMs. The default
value for tag is `worker`.
""")
  return parser
Example #24
0
def AddAndroidTestArgs(parser):
    """Register args which are specific to Android test commands.

  Args:
    parser: An argparse parser used to add arguments that follow a command in
        the CLI.
  """
    # Args shared by all Android test types (instrumentation and robo).
    parser.add_argument(
        '--app-package',
        help=
        'The Java package of the application under test (default: extracted '
        'from the APK manifest).')
    # NOTE(review): action='store_true' combined with default=True means this
    # flag is always True and cannot be disabled from the command line —
    # confirm that is intended (a tri-valued default=None would permit
    # negation).
    parser.add_argument(
        '--auto-google-login',
        action='store_true',
        default=True,
        help='Automatically log into the test device using a preconfigured '
        'Google account before beginning the test.')
    parser.add_argument(
        '--directories-to-pull',
        type=arg_parsers.ArgList(),
        metavar='DIR_TO_PULL',
        help='A list of paths that will be copied from the device\'s storage to '
        'the designated results bucket after the test is complete. (For example '
        '--directories-to-pull /sdcard/tempDir1,/data/tempDir2)')
    parser.add_argument(
        '--environment-variables',
        type=arg_parsers.ArgDict(),
        metavar='KEY=VALUE',
        help='A comma-separated, key=value, map of environment variables and '
        'their desired values. The environment variables passed here will '
        'be mirrored on to the adb run command. For example, specify '
        '--environment-variables '
        'coverage=true,coverageFile="/sdcard/tempDir/coverage.ec" to enable code '
        'coverage and provide a file path to store the coverage results.')
    parser.add_argument(
        '--obb-files',
        type=arg_parsers.ArgList(min_length=1, max_length=2),
        metavar='OBB_FILE',
        help='A list of one or two Android OBB file names which will be copied '
        'to each test device before the tests will run (default: None). Each '
        'OBB file name must conform to the format as specified by Android (e.g. '
        '[main|patch].0300110.com.example.android.obb) and will be installed '
        'into <shared-storage>/Android/obb/<package-name>/ on the test device.'
    )

    # The following args are specific to Android instrumentation tests.

    parser.add_argument(
        '--test',
        category=base.COMMONLY_USED_FLAGS,
        help='The path to the binary file containing instrumentation tests. The '
        'given path may be in the local filesystem or in Google Cloud Storage '
        'using gs:// notation.')
    parser.add_argument(
        '--test-package',
        category=ANDROID_INSTRUMENTATION_TEST,
        help='The Java package name of the instrumentation test (default: '
        'extracted from the APK manifest).')
    parser.add_argument(
        '--test-runner-class',
        category=ANDROID_INSTRUMENTATION_TEST,
        help='The fully-qualified Java class name of the instrumentation test '
        'runner (default: the last name extracted from the APK manifest).')
    parser.add_argument(
        '--test-targets',
        category=ANDROID_INSTRUMENTATION_TEST,
        type=arg_parsers.ArgList(min_length=1),
        metavar='TEST_TARGET',
        help='A list of one or more instrumentation test targets to be run '
        '(default: all targets). Each target must be fully qualified with the '
        'package name or class name, in one of these formats:\n'
        '* "package package_name"\n'
        '* "class package_name.class_name"\n'
        '* "class package_name.class_name#method_name".')

    # The following args are specific to Android Robo tests.

    parser.add_argument(
        '--max-steps',
        metavar='int',
        category=ANDROID_ROBO_TEST,
        type=arg_validate.NONNEGATIVE_INT_PARSER,
        help='The maximum number of steps/actions a robo test can execute '
        '(default: no limit).')
    parser.add_argument(
        '--max-depth',
        metavar='int',
        category=ANDROID_ROBO_TEST,
        type=arg_validate.POSITIVE_INT_PARSER,
        help='The maximum depth of the traversal stack a robo test can explore. '
        'Needs to be at least 2 to make Robo explore the app beyond the first '
        'activity (default: 50).')
    parser.add_argument(
        '--app-initial-activity',
        category=ANDROID_ROBO_TEST,
        help='The initial activity used to start the app during a robo test.')
    parser.add_argument(
        '--robo-directives',
        metavar='TYPE:RESOURCE_NAME=INPUT',
        category=ANDROID_ROBO_TEST,
        type=arg_parsers.ArgDict(),
        help='A comma-separated (`<type>:<key>=<value>`) map of '
        '`robo_directives` that you can use to customize the behavior of Robo '
        'test. The `type` specifies the action type of the directive, which may '
        'take on values `click` or `text`. If no `type` is provided, `text` will '
        'be used by default. Each key should be the Android resource name of a '
        'target UI element and each value should be the text input for that '
        'element. Values are only permitted for `text` type elements, so no '
        'value should be specified for `click` type elements. For example, use'
        '\n\n'
        '    --robo-directives text:username_resource=username,'
        'text:password_resource=password'
        '\n\n'
        'to provide custom login credentials for your app, or'
        '\n\n'
        '    --robo-directives click:sign_in_button='
        '\n\n'
        'to instruct Robo to click on the sign in button. To learn more about '
        'Robo test and robo_directives, see '
        'https://firebase.google.com/docs/test-lab/command-line'
        '#custom_login_and_text_input_with_robo_test.'
        '\n\n'
        'Caution: You should only use credentials for test accounts that are not '
        'associated with real users.')
Example #25
0
def _Args(parser):
  """Add arguments for route creation.

  Args:
    parser: An argparse parser used to add the route-creation flags.
  """

  parser.add_argument(
      '--description',
      help='An optional, textual description for the route.')

  parser.add_argument(
      '--network',
      default='default',
      help='Specifies the network to which the route will be applied.')

  tags = parser.add_argument(
      '--tags',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      default=[],
      metavar='TAG',
      help='Identifies the set of instances that this route will apply to.')
  tags.detailed_help = """\
      Identifies the set of instances that this route will apply to. If no
      tags are provided, the route will apply to all instances in the network.
      """

  destination_range = parser.add_argument(
      '--destination-range',
      required=True,
      help=('The destination range of outgoing packets that the route will '
            'apply to.'))
  destination_range.detailed_help = """\
      The destination range of outgoing packets that the route will
      apply to. To match all traffic, use ``0.0.0.0/0''.
      """

  # Fixed user-visible typo: 'specifity' -> 'specificity' (both the short
  # help and the detailed help).
  priority = parser.add_argument(
      '--priority',
      default=1000,
      help=('Specifies the priority of this route relative to other routes '
            'with the same specificity.'),
      type=int)
  priority.detailed_help = """\
      Specifies the priority of this route relative to other routes
      with the same specificity. The lower the value, the higher the
      priority.
      """

  # Exactly one next-hop flavor must be provided.
  next_hop = parser.add_mutually_exclusive_group(required=True)

  _AddGaHops(next_hop)

  next_hop_instance_zone = parser.add_argument(
      '--next-hop-instance-zone',
      help='The zone of the next hop instance.',
      action=actions.StoreProperty(properties.VALUES.compute.zone))
  next_hop_instance_zone.detailed_help = ("""\
      The zone of the next hop instance.
      """ + constants.ZONE_PROPERTY_EXPLANATION)

  next_hop_vpn_tunnel_region = parser.add_argument(
      '--next-hop-vpn-tunnel-region',
      help='The region of the next hop vpn tunnel.')
  next_hop_vpn_tunnel_region.detailed_help = ("""\
     The region of the next hop vpn tunnel.
     """ + constants.REGION_PROPERTY_EXPLANATION)

  parser.add_argument(
      'name',
      help='The name to assign to the route.')
Example #26
0
    def Args(cls, parser):
        """Args is called by calliope to gather arguments for this command.

    Please add arguments in alphabetical order except for no- or a clear-
    pair for that argument which can follow the argument itself.
    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
        parser.add_argument(
            '--activation-policy',
            required=False,
            choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
            help=
            'The activation policy for this instance. This specifies when the '
            'instance should be activated and is applicable only when the '
            'instance state is RUNNABLE.')
        parser.add_argument(
            '--assign-ip',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help='The instance must be assigned an IP address.')
        # Either set the list of authorized GAE apps or clear it, not both.
        gae_apps_group = parser.add_mutually_exclusive_group()
        gae_apps_group.add_argument(
            '--authorized-gae-apps',
            type=arg_parsers.ArgList(min_length=1),
            metavar='APP',
            required=False,
            action=arg_parsers.FloatingListValuesCatcher(),
            help='A list of App Engine app IDs that can access this instance.')
        gae_apps_group.add_argument(
            '--clear-gae-apps',
            required=False,
            action='store_true',
            help=
            ('Specified to clear the list of App Engine apps that can access '
             'this instance.'))
        # Either set the list of authorized networks or clear it, not both.
        networks_group = parser.add_mutually_exclusive_group()
        networks_group.add_argument(
            '--authorized-networks',
            type=arg_parsers.ArgList(min_length=1),
            metavar='NETWORK',
            required=False,
            action=arg_parsers.FloatingListValuesCatcher(),
            help=
            'The list of external networks that are allowed to connect to the '
            'instance. Specified in CIDR notation, also known as \'slash\' '
            'notation (e.g. 192.168.100.0/24).')
        networks_group.add_argument(
            '--clear-authorized-networks',
            required=False,
            action='store_true',
            help=
            'Clear the list of external networks that are allowed to connect '
            'to the instance.')
        # Either configure daily backups or disable them, not both.
        backups_group = parser.add_mutually_exclusive_group()
        backups_group.add_argument(
            '--backup-start-time',
            required=False,
            help=
            'The start time of daily backups, specified in the 24 hour format '
            '- HH:MM, in the UTC timezone.')
        backups_group.add_argument(
            '--no-backup',
            required=False,
            action='store_true',
            help='Specified if daily backup should be disabled.')
        # Either set database flags or clear them, not both.
        database_flags_group = parser.add_mutually_exclusive_group()
        database_flags_group.add_argument(
            '--database-flags',
            type=arg_parsers.ArgDict(min_length=1),
            metavar='FLAG=VALUE',
            required=False,
            action=arg_parsers.FloatingListValuesCatcher(),
            help=
            'A comma-separated list of database flags to set on the instance. '
            'Use an equals sign to separate flag name and value. Flags without '
            'values, like skip_grant_tables, can be written out without a value '
            'after, e.g., `skip_grant_tables=`. Use on/off for '
            'booleans. View the Instance Resource API for allowed flags. '
            '(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
            'log_output=1`)')
        database_flags_group.add_argument(
            '--clear-database-flags',
            required=False,
            action='store_true',
            help='Clear the database flags set on the instance. '
            'WARNING: Instance will be restarted.')
        parser.add_argument(
            '--enable-bin-log',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help=
            'Enable binary log. If backup configuration is disabled, binary '
            'log should be disabled as well.')
        parser.add_argument(
            '--follow-gae-app',
            required=False,
            help='The App Engine app this instance should follow. It must be in '
            'the same region as the instance. '
            'WARNING: Instance may be restarted.')
        parser.add_argument(
            '--gce-zone',
            required=False,
            help='The preferred Compute Engine zone (e.g. us-central1-a, '
            'us-central1-b, etc.). '
            'WARNING: Instance may be restarted.')
        # Positional: the Cloud SQL instance to operate on.
        parser.add_argument('instance',
                            completion_resource='sql.instances',
                            help='Cloud SQL instance ID.')
        parser.add_argument('--pricing-plan',
                            '-p',
                            required=False,
                            choices=['PER_USE', 'PACKAGE'],
                            help='The pricing plan for this instance.')
        parser.add_argument('--replication',
                            required=False,
                            choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
                            help='The type of replication this instance uses.')
        parser.add_argument(
            '--require-ssl',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help=
            'mysqld should default to \'REQUIRE X509\' for users connecting '
            'over IP.')
        parser.add_argument(
            '--tier',
            '-t',
            required=False,
            help='The tier of service for this instance, for example D0, D1. '
            'WARNING: Instance will be restarted.')
        parser.add_argument(
            '--enable-database-replication',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help='Enable database replication. Applicable only '
            'for read replica instance(s). WARNING: Instance will be restarted.'
        )
        # NOTE(review): argparse will derive dest 'async', a Python 3 keyword;
        # callers presumably read it via getattr(args, 'async') — confirm.
        parser.add_argument('--async',
                            action='store_true',
                            help='Do not wait for the operation to complete.')
        parser.add_argument(
            '--diff',
            action='store_true',
            help='Show what changed as a result of the update.')
Example #27
0
def _AddPrePostStepArguments(parser):
  """Add pre-/post-patch step setting flags.

  Registers four argument groups on `parser`, one for each combination of
  (pre|post) x (Linux|Windows). Each group carries an `--...-executable`
  flag naming a script file to run and an `--...-success-codes` flag
  listing additional exit codes treated as success. The generated flag
  names, metavars, and help text are identical to the previous hand-written
  (quadruplicated) registrations.

  Args:
    parser: An argparse parser used to add the patch-step flags.
  """
  # OS-specific sentence about the expected script file format, pre-wrapped
  # to preserve the original help-text line breaks exactly.
  script_notes = {
      'linux': ('If the file contains a shell script,\n'
                '      include the shebang line.'),
      'windows': ('If the file contains a PowerShell\n'
                  '      script, include the .ps1 file extension.'),
  }
  # Registration order matches the original: pre-linux, post-linux,
  # pre-windows, post-windows.
  for os_key, os_display in [('linux', 'Linux'), ('windows', 'Windows')]:
    for when, timing_clause in [('pre', 'before an OS patch begins'),
                                ('post', 'after an OS patch completes')]:
      group = parser.add_group(
          help='{}-patch step settings for {} machines:'.format(
              when.capitalize(), os_display))
      group.add_argument(
          '--{}-patch-{}-executable'.format(when, os_key),
          help="""\
      A set of commands to run on a {os} machine {timing}.
      Commands must be supplied in a file. {note}

      The path to the file must be supplied in one of the following formats:

      An absolute path of the file on the local filesystem.

      A URI for a Google Cloud Storage object with a generation number.
      """.format(os=os_display,
                 timing=timing_clause,
                 note=script_notes[os_key]),
      )
      group.add_argument(
          '--{}-patch-{}-success-codes'.format(when, os_key),
          type=arg_parsers.ArgList(element_type=int),
          metavar='{}_PATCH_{}_SUCCESS_CODES'.format(when.upper(),
                                                     os_key.upper()),
          help="""\
      Additional exit codes that the executable can return to indicate a
      successful run. The default exit code for success is 0.""",
      )
Example #28
0
def AddUpdatableArgs(parser,
                     compute_messages,
                     default_protocol='HTTP',
                     default_timeout='30s'):
  """Registers the mutable top-level flags for backend services.

  Args:
    parser: The argparse parser to register the flags on.
    compute_messages: The Compute API messages module; used to enumerate the
        valid --protocol choices from the BackendService message.
    default_protocol: Default value for the --protocol flag.
    default_timeout: Default value for the --timeout flag, as a duration
        string.
  """
  parser.add_argument(
      '--description',
      help='An optional, textual description for the backend service.')

  http_hc_arg = parser.add_argument(
      '--http-health-checks',
      metavar='HTTP_HEALTH_CHECK',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      help=('Specifies a list of HTTP health check objects for checking '
            'the health of the backend service.'))
  http_hc_arg.detailed_help = """\
      Specifies a list of HTTP health check objects for checking the health
      of the backend service.
      """

  https_hc_arg = parser.add_argument(
      '--https-health-checks',
      metavar='HTTPS_HEALTH_CHECK',
      type=arg_parsers.ArgList(min_length=1),
      action=arg_parsers.FloatingListValuesCatcher(),
      help=('Specifies a list of HTTPS health check objects for checking '
            'the health of the backend service.'))
  https_hc_arg.detailed_help = """\
      Specifies a list of HTTPS health check objects for checking the health
      of the backend service.
      """

  timeout_arg = parser.add_argument(
      '--timeout',
      type=arg_parsers.Duration(),
      default=default_timeout,
      help=('The amount of time to wait for a backend to respond to a request '
            'before considering the request failed.'))
  timeout_arg.detailed_help = """\
      The amount of time to wait for a backend to respond to a request
      before considering the request failed. For example, specifying
      ``10s'' will give backends 10 seconds to respond to
      requests. Valid units for this flag are ``s'' for seconds, ``m''
      for minutes, and ``h'' for hours.
      """

  # TODO(user): Remove port once port_name is in use. b/16486110
  parser.add_argument(
      '--port',
      type=int,
      help=('The TCP port to use when connecting to the backend. --port is '
            'being deprecated in favor of --port-name.'))

  port_name_arg = parser.add_argument(
      '--port-name',
      help=('A user-defined port name used to resolve which port to use on '
            'each backend.'))
  port_name_arg.detailed_help = """\
      The name of a service that has been added to an instance group
      in this backend. Instance group services map a name to a port
      number which is used by the load balancing service.
      Only one ``port-name'' may be added to a backend service, and that
      name must exist as a service on all instance groups that are a
      part of this backend service. The port number associated with the
      name may differ between instances. If you do not specify
      this flag, your instance groups must have a service named ``http''
      configured. See also
      `gcloud compute instance-groups set-named-ports --help`.
      """

  parser.add_argument(
      '--protocol',
      type=lambda value: value.upper(),
      choices=ProtocolOptions(compute_messages.BackendService),
      default=default_protocol,
      help='The protocol for incoming requests.')
Example #29
0
def _Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
          to capture some information, but behaves like an ArgumentParser.
    """
    parser.add_argument('name', help='The name of this cluster.')
    parser.add_argument(
        '--no-wait',
        dest='wait',
        action='store_false',
        help='Return after issuing create request without polling the operation'
        ' for completion.')
    parser.add_argument('--num-nodes',
                        type=int,
                        help='The number of nodes in the cluster.',
                        default=3)
    parser.add_argument(
        '--machine-type',
        '-m',
        help='The type of machine to use for workers. Defaults to '
        'server-specified')
    parser.add_argument(
        '--network',
        help='The Compute Engine Network that the cluster will connect to. '
        'Google Container Engine will use this network when creating routes '
        'and firewalls for the clusters. Defaults to the \'default\' network.')
    parser.add_argument(
        '--container-ipv4-cidr',
        help='The IP addresses of the container pods in this cluster in CIDR '
        'notation (e.g. 10.0.0.0/14). Defaults to server-specified')
    parser.add_argument(
        '--password',
        help='The password to use for cluster auth. Defaults to a '
        'randomly-generated string.')
    parser.add_argument(
        '--scopes',
        type=arg_parsers.ArgList(min_length=1),
        metavar='SCOPE',
        action=arg_parsers.FloatingListValuesCatcher(),
        help="""\
Specifies scopes for the node instances. The project's default
service account is used. Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/devstorage.read_only

  $ {{command}} example-cluster --scopes bigquery,storage-rw,compute-ro

Multiple SCOPEs can specified, separated by commas. The scopes
necessary for the cluster to function properly (compute-rw, devstorage-ro),
are always added, even if not explicitly specified.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

Alias,URI
{aliases}
""".format(aliases='\n        '.join(
            ','.join(value)
            # Use .items() instead of the Python 2-only .iteritems() so this
            # works on Python 3 as well; sorted() yields the same ordering
            # either way.
            for value in sorted(constants.SCOPES.items()))))
Example #30
0
def _Args(parser, support_next_hop_ilb=False):
    """Registers the flags used when creating a route.

    Args:
      parser: The argparse parser to register the flags on.
      support_next_hop_ilb: If True, also registers the --next-hop-ilb and
          --next-hop-ilb-region flags.
    """
    parser.add_argument(
        '--description',
        help='An optional, textual description for the route.')

    parser.add_argument(
        '--network',
        default='default',
        help='Specifies the network to which the route will be applied.')

    parser.add_argument(
        '--tags',
        metavar='TAG',
        default=[],
        type=arg_parsers.ArgList(min_length=1),
        help="""\
      Identifies the set of instances that this route will apply to. If no
      tags are provided, the route will apply to all instances in the network.
      """)

    parser.add_argument(
        '--destination-range',
        required=True,
        help="""\
      The destination range of outgoing packets that the route will
      apply to. To match all traffic, use ``0.0.0.0/0''.
      """)

    parser.add_argument(
        '--priority',
        type=int,
        default=1000,
        help="""\
      Specifies the priority of this route relative to other routes
      with the same specificity. The lower the value, the higher the
      priority.
      """)

    # Exactly one next-hop flag must be supplied.
    hop_group = parser.add_mutually_exclusive_group(required=True)
    _AddGaHops(hop_group)

    parser.add_argument(
        '--next-hop-instance-zone',
        action=actions.StoreProperty(properties.VALUES.compute.zone),
        help=('The zone of the next hop instance. ' +
              instance_flags.ZONE_PROPERTY_EXPLANATION))

    parser.add_argument(
        '--next-hop-vpn-tunnel-region',
        help=('The region of the next hop vpn tunnel. ' +
              compute_flags.REGION_PROPERTY_EXPLANATION))

    if support_next_hop_ilb:
        hop_group.add_argument(
            '--next-hop-ilb',
            help="""\
        The target forwarding rule that will receive forwarded traffic. This
        can only be used when the destination_range is a public (non-RFC 1918)
        IP CIDR range. Requires --load-balancing-scheme=INTERNAL on the
        corresponding forwarding rule.
        """)
        parser.add_argument(
            '--next-hop-ilb-region',
            help=('The region of the next hop forwarding rule. ' +
                  compute_flags.REGION_PROPERTY_EXPLANATION))

    parser.display_info.AddCacheUpdater(completers.RoutesCompleter)