def _Args(parser):
    """Common arguments to add-path-matcher commands for each release track."""
    parser.add_argument(
        '--description',
        help='An optional, textual description for the path matcher.')

    parser.add_argument(
        '--path-matcher-name',
        required=True,
        help='The name to assign to the path matcher.')

    parser.add_argument(
        '--path-rules',
        metavar='PATH=SERVICE',
        default={},
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to services.')

    # The path matcher is attached either to a brand-new host rule or to an
    # already-existing one, never both.
    host_rule = parser.add_mutually_exclusive_group()
    host_rule.add_argument(
        '--new-hosts',
        metavar='NEW_HOST',
        type=arg_parsers.ArgList(min_length=1),
        help=('If specified, a new host rule with the given hosts is created '
              'and the path matcher is tied to the new host rule.'))
    host_rule.add_argument(
        '--existing-host',
        help="""\
      An existing host rule to tie the new path matcher to. Although
      host rules can contain more than one host, only a single host
      is needed to uniquely identify the host rule.
      """)

    parser.add_argument(
        '--delete-orphaned-path-matcher',
        default=False,
        action='store_true',
        help=('If provided and a path matcher is orphaned as a result of this '
              'command, the command removes the orphaned path matcher instead '
              'of failing.'))

    # Exactly one default destination (service or bucket) must be supplied.
    default_group = parser.add_mutually_exclusive_group(required=True)
    default_group.add_argument(
        '--default-service',
        help=('A backend service that will be used for requests that the path '
              'matcher cannot match. Exactly one of --default-service or '
              '--default-backend-bucket is required.'))
    default_group.add_argument(
        '--default-backend-bucket',
        help=('A backend bucket that will be used for requests that the path '
              'matcher cannot match. Exactly one of --default-service or '
              '--default-backend-bucket is required.'))

    parser.add_argument(
        '--backend-service-path-rules',
        metavar='PATH=SERVICE',
        default={},
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to services.')
    parser.add_argument(
        '--backend-bucket-path-rules',
        metavar='PATH=BUCKET',
        default={},
        type=arg_parsers.ArgDict(min_length=1),
        help='Rules for mapping request paths to backend buckets.')
# Example #2
def AddMigStatefulFlagsForInstanceConfigs(parser, for_update=False):
    """Adding stateful flags for creating and updating instance configs.

    Args:
      parser: The argparse parser to add the flags to.
      for_update: bool. When True, registers the update-flavored flags
        (--update-stateful-disk, --update-stateful-metadata) together with the
        corresponding --remove-stateful-* flags; otherwise registers the
        creation-flavored flags (--stateful-disk, --stateful-metadata).
    """
    parser.add_argument('--instance',
                        required=True,
                        help="""
        URI/name of an existing instance in the managed instance group.
      """)

    # Base help text shared by both the create and update flavors of the flag.
    stateful_disks_help = STATEFUL_DISKS_HELP + """
      You can also attach and preserve disks, not defined in the group's
      instance template, to a given instance.

      The same disk can be attached to more than one instance but only in
      read-only mode.
      """
    if for_update:
        stateful_disks_help += """
      Use this argument multiple times to update multiple disks.

      If stateful disk with given `device-name` exists in current instance
      config, its properties will be replaced by the newly provided ones. In
      other case new stateful disk definition will be added to the instance
      config.
      """
        stateful_disk_argument_name = '--update-stateful-disk'
    else:
        stateful_disks_help += """
      Use this argument multiple times to attach and preserve multiple disks.
      """
        stateful_disk_argument_name = '--stateful-disk'
    stateful_disks_help += """
      *device-name*::: Name under which disk is or will be attached.

      *source*::: Optional argument used to specify the URI of an existing
      persistent disk to attach under specified `device-name`.

      *mode*::: Specifies the mode of the disk to attach. Supported options are
      `ro` for read-only and `rw` for read-write. If omitted when source is
      specified, `rw` is used as a default. `mode` can only be specified if
      `source` is given.
      """ + AUTO_DELETE_ARG_HELP
    parser.add_argument(
        stateful_disk_argument_name,
        type=arg_parsers.ArgDict(
            spec={
                'device-name': str,
                'source': str,
                'mode': str,
                # The validator's error message names the flag it belongs to.
                'auto-delete':
                    AutoDeleteFlag.ValidatorWithFlagName(
                        stateful_disk_argument_name),
            }),
        action='append',
        help=stateful_disks_help,
    )
    # Removal flags only make sense when updating an existing config.
    if for_update:
        parser.add_argument(
            '--remove-stateful-disks',
            metavar='DEVICE_NAME',
            type=arg_parsers.ArgList(min_length=1),
            help='List all device names to remove from the instance\'s config.',
        )

    if for_update:
        stateful_metadata_argument_name = '--update-stateful-metadata'
    else:
        stateful_metadata_argument_name = '--stateful-metadata'
    stateful_metadata_help = """
      Additional metadata to be made available to the guest operating system
      in addition to the metadata defined in the instance template.

      Stateful metadata may be used to define a key/value pair specific for
      the one given instance to differentiate it from the other instances in
      the managed instance group.

      Stateful metadata have priority over the metadata defined in the
      instance template. This means that stateful metadata that is defined for a
      key that already exists in the instance template overrides the instance
      template value.

      Each metadata entry is a key/value pair separated by an equals sign.
      Metadata keys must be unique and less than 128 bytes in length. Multiple
      entries can be passed to this flag, e.g.,
      ``{argument_name} key-1=value-1,key-2=value-2,key-3=value-3''.
  """.format(argument_name=stateful_metadata_argument_name)
    if for_update:
        stateful_metadata_help += """
      If stateful metadata with the given key exists in current instance config,
      its value will be overridden with the newly provided one. If the key does
      not exist in the current instance config, a new key/value pair will be
      added.
    """
    parser.add_argument(stateful_metadata_argument_name,
                        type=arg_parsers.ArgDict(min_length=1),
                        default={},
                        action=arg_parsers.StoreOnceAction,
                        metavar='KEY=VALUE',
                        help=stateful_metadata_help)
    if for_update:
        parser.add_argument(
            '--remove-stateful-metadata',
            metavar='KEY',
            type=arg_parsers.ArgList(min_length=1),
            # BUG FIX: a space was missing between the two concatenated string
            # fragments, rendering "...from theinstance's config." in help.
            help=('List all stateful metadata keys to remove from the '
                  'instance\'s config.'),
        )
# Example #3
def _AddMutuallyExclusiveArgs(mutex_group, release_track):
    """Add all arguments that need to be mutually exclusive from each other.

    Args:
      mutex_group: An argparse mutually-exclusive group to register flags on.
      release_track: base.ReleaseTrack; decides which addons are updatable
        (ISTIO is only offered on ALPHA and BETA).
    """
    mutex_group.add_argument(
        '--monitoring-service',
        help='The monitoring service to use for the cluster. Options '
        'are: "monitoring.googleapis.com" (the Google Cloud Monitoring '
        'service),  "none" (no metrics will be exported from the cluster)')

    # The two previous copies of --update-addons differed only in whether
    # ISTIO was included; build the addon list once and register a single
    # flag. List order matches the original help text: HPA, INGRESS,
    # DASHBOARD, [ISTIO], NETWORK_POLICY.
    addons = [api_adapter.HPA, api_adapter.INGRESS, api_adapter.DASHBOARD]
    if release_track in [base.ReleaseTrack.BETA, base.ReleaseTrack.ALPHA]:
        addons.append(api_adapter.ISTIO)
    addons.append(api_adapter.NETWORK_POLICY)
    mutex_group.add_argument(
        '--update-addons',
        type=arg_parsers.ArgDict(
            spec={addon: _ParseAddonDisabled for addon in addons}),
        dest='disable_addons',
        metavar='ADDON=ENABLED|DISABLED',
        help='Cluster addons to enable or disable. Options are\n' + '\n'.join(
            '{}=ENABLED|DISABLED'.format(addon) for addon in addons))

    mutex_group.add_argument(
        '--generate-password',
        action='store_true',
        default=None,
        help='Ask the server to generate a secure password and use that as the '
        'basic auth password, keeping the existing username.')
    mutex_group.add_argument(
        '--set-password',
        action='store_true',
        default=None,
        help='Set the basic auth password to the specified value, keeping the '
        'existing username.')

    flags.AddBasicAuthFlags(mutex_group, None, None)
# Example #4
def AddMigStatefulFlagsForInstanceConfigs(parser, for_update=False):
  """Adding stateful flags for creating and updating instance configs.

  Args:
    parser: The argparse parser to add the flags to.
    for_update: bool. When True, registers the update-flavored flags
      (--update-stateful-disk, --update-stateful-metadata) together with the
      corresponding --remove-stateful-* flags; otherwise registers the
      creation-flavored flags (--stateful-disk, --stateful-metadata).
  """
  parser.add_argument(
      '--instance',
      required=True,
      help="""
        URI to existing or non existing instance.

        Name - last part of URI - will be preserved for existing per instance
        configs.

        For zonal managed instance groups there is no need to specify the whole
        URI to the instance - for this case instance name can be applied instead
        of URI.
      """)

  # Base help text shared by the create and update flavors of the disk flag.
  stateful_disks_help = STATEFUL_DISKS_HELP + """
      Besides preserving disks already attached to the instance by specifying
      only device names, user have an option to attach (and preserve) other
      existing persistent disk(s) to the given instance.

      The same disk can be attached to many instances but only in read-only
      mode.
      """
  # Flag name and the mode-specific paragraph depend on create vs. update.
  if for_update:
    stateful_disks_help += """
      Use this argument multiple times to update multiple disks.

      If stateful disk with given `device-name` exists in current instance
      config, its properties will be replaced by the newly provided ones. In
      other case new stateful disk definition will be added to the instance
      config.
      """
    stateful_disk_argument_name = '--update-stateful-disk'
  else:
    stateful_disks_help += """
      Use this argument multiple times to attach more disks.
      """
    stateful_disk_argument_name = '--stateful-disk'
  stateful_disks_help += """
      *device-name*::: Name under which disk is or will be attached.

      *source*::: Optional argument used to specify URI of existing persistent
      disk to attach under specified `device-name`.

      *mode*::: Specifies the mode of the disk to attach. Supported options are
      `ro` for read-only and `rw` for read-write. If omitted when source is
      specified, `rw` is used as a default.
      """
  parser.add_argument(
      stateful_disk_argument_name,
      type=arg_parsers.ArgDict(spec={
          'device-name': str,
          'source': str,
          'mode': str,
      }),
      action='append',
      help=stateful_disks_help,
  )
  # Removal flags only make sense when updating an existing config.
  if for_update:
    parser.add_argument(
        '--remove-stateful-disks',
        metavar='DEVICE_NAME',
        type=arg_parsers.ArgList(min_length=1),
        help=('List all device names which should be removed from current '
              'instance config.'),
    )

  if for_update:
    stateful_metadata_argument_name = '--update-stateful-metadata'
  else:
    stateful_metadata_argument_name = '--stateful-metadata'
  stateful_metadata_help = """
      Additional metadata to be made available to the guest operating system
      on top of the metadata defined in the instance template.

      Stateful metadata may be used to define a key/value pair specific for
      the one given instance to differentiate it from the other instances in
      the managed instance group.

      Stateful metadata have priority over the metadata defined in the
      instance template. It means that stateful metadata defined for the keys
      already existing in the instance template override their values.

      Each metadata entry is a key/value pair separated by an equals sign.
      Metadata keys must be unique and less than 128 bytes in length. Multiple
      entries can be passed to this flag, e.g.,
      ``{argument_name} key-1=value-1,key-2=value-2,key-3=value-3''.
  """.format(argument_name=stateful_metadata_argument_name)
  if for_update:
    stateful_metadata_help += """
      If stateful metadata with the given key exists in current instance config,
      its value will be overridden with the newly provided one. If the key does
      not exist in the current instance config, a new key/value pair will be
      added.
    """
  # StoreOnceAction forbids passing the flag more than once.
  parser.add_argument(
      stateful_metadata_argument_name,
      type=arg_parsers.ArgDict(min_length=1),
      default={},
      action=arg_parsers.StoreOnceAction,
      metavar='KEY=VALUE',
      help=stateful_metadata_help)
  if for_update:
    parser.add_argument(
        '--remove-stateful-metadata',
        metavar='KEY',
        type=arg_parsers.ArgList(min_length=1),
        help=('List all stateful metadata keys which should be removed from '
              'current instance config.'),
    )
# Example #5
def AddUpdateArgs(parser, include_alpha_logging,
                  include_l7_internal_load_balancing,
                  include_private_ipv6_access_alpha,
                  include_private_ipv6_access_beta):
  """Add args to the parser for subnet update.

  Args:
    parser: The argparse parser.
    include_alpha_logging: Include alpha-specific logging args.
    include_l7_internal_load_balancing: Include Internal HTTP(S) LB args.
    include_private_ipv6_access_alpha: Include alpha Private Ipv6 Access args.
    include_private_ipv6_access_beta: Include beta Private Ipv6 Access args.
  """
  # Messages for the GA API surface; rebound to the alpha/beta surfaces below
  # when those feature sets are requested.
  messages = apis.GetMessagesModule('compute',
                                    compute_api.COMPUTE_GA_API_VERSION)

  # Only one of the fields in this group may be updated per invocation.
  updated_field = parser.add_mutually_exclusive_group()

  updated_field.add_argument(
      '--enable-private-ip-google-access',
      action=arg_parsers.StoreTrueFalseAction,
      help=('Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public ip address.'))

  updated_field.add_argument(
      '--add-secondary-ranges',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      metavar='PROPERTY=VALUE',
      help="""\
      Adds secondary IP ranges to the subnetwork for use in IP aliasing.

      For example, `--add-secondary-ranges range1=192.168.64.0/24` adds
      a secondary range 192.168.64.0/24 with name range1.

      * `RANGE_NAME` - Name of the secondary range.
      * `RANGE` - `IP range in CIDR format.`
      """)

  updated_field.add_argument(
      '--remove-secondary-ranges',
      type=arg_parsers.ArgList(min_length=1),
      action='append',
      metavar='PROPERTY=VALUE',
      help="""\
      Removes secondary ranges from the subnetwork.

      For example, `--remove-secondary-ranges range2,range3` removes the
      secondary ranges with names range2 and range3.
      """)

  updated_field.add_argument(
      '--enable-flow-logs',
      action=arg_parsers.StoreTrueFalseAction,
      help=('Enable/disable VPC flow logging for this subnet. More information '
            'for VPC flow logs can be found at '
            'https://cloud.google.com/vpc/docs/using-flow-logs.'))

  AddLoggingAggregationInterval(parser, messages)
  parser.add_argument(
      '--logging-flow-sampling',
      type=arg_parsers.BoundedFloat(lower_bound=0.0, upper_bound=1.0),
      help="""\
      Can only be specified if VPC flow logging for this subnetwork is
      enabled. The value of the field must be in [0, 1]. Set the sampling rate
      of VPC flow logs within the subnetwork where 1.0 means all collected
      logs are reported and 0.0 means no logs are reported. Default is 0.5
      which means half of all collected logs are reported.
      """)

  # Alpha-only logging flags. NOTE: this rebinds `messages` to the alpha
  # surface for the deprecated/alpha helpers below.
  if include_alpha_logging:
    messages = apis.GetMessagesModule('compute',
                                      compute_api.COMPUTE_ALPHA_API_VERSION)
    AddLoggingAggregationIntervalDeprecated(parser, messages)
    parser.add_argument(
        '--flow-sampling',
        type=arg_parsers.BoundedFloat(lower_bound=0.0, upper_bound=1.0),
        help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)
    AddLoggingMetadataDeprecated(parser, messages)

    parser.add_argument(
        '--logging-filter-expr',
        help="""\
        Can only be specified if VPC flow logs for this subnetwork is enabled.
        Export filter used to define which VPC flow logs should be logged.
        """)
    AddLoggingMetadataAlpha(parser, messages)
    parser.add_argument(
        '--logging-metadata-fields',
        type=arg_parsers.ArgList(),
        metavar='METADATA_FIELD',
        default=None,
        help="""\
        Can only be specified if VPC flow logs for this subnetwork is enabled
        and "metadata" is set to CUSTOM_METADATA. The custom list of metadata
        fields that should be added to reported VPC flow logs.
        """)
  else:
    AddLoggingMetadata(parser, messages)

  # Internal HTTP(S) Load Balancing flags.
  if include_l7_internal_load_balancing:
    updated_field.add_argument(
        '--role',
        choices={'ACTIVE': 'The ACTIVE subnet that is currently used.'},
        # Normalize e.g. "active" / "Active" to the enum spelling "ACTIVE".
        type=lambda x: x.replace('-', '_').upper(),
        help=('The role is set to ACTIVE to update a BACKUP reserved '
              'address range to\nbe the new ACTIVE address range. Note '
              'that the only supported value for\nthis flag is ACTIVE since '
              'setting an address range to BACKUP is not\nsupported. '
              '\n\nThis field is only valid when updating a reserved IP '
              'address range used\nfor the purpose of Internal HTTP(S) Load '
              'Balancer.'))
    parser.add_argument(
        '--drain-timeout',
        type=arg_parsers.Duration(lower_bound='0s'),
        default='0s',
        help="""\
        The time period for draining traffic from Internal HTTP(S) Load Balancer
        proxies that are assigned addresses in the current ACTIVE subnetwork.
        For example, ``1h'', ``60m'' and ``3600s'' each specify a duration of
        1 hour for draining the traffic. Longer times reduce the number of
        proxies that are draining traffic at any one time, and so improve
        the availability of proxies for load balancing. The drain timeout is
        only applicable when the [--role=ACTIVE] flag is being used.
        """)

  # Private IPv6 access flags; alpha gets the full flag set, beta only the
  # access-type mapper. Each branch rebinds `messages` to its own surface.
  if include_private_ipv6_access_alpha:
    messages = apis.GetMessagesModule('compute',
                                      compute_api.COMPUTE_ALPHA_API_VERSION)
    updated_field.add_argument(
        '--enable-private-ipv6-access',
        action=arg_parsers.StoreTrueFalseAction,
        help=('Enable/disable private IPv6 access for the subnet.'))
    update_private_ipv6_access_field = updated_field.add_argument_group()
    GetPrivateIpv6GoogleAccessTypeFlagMapperAlpha(
        messages).choice_arg.AddToParser(update_private_ipv6_access_field)
    update_private_ipv6_access_field.add_argument(
        '--private-ipv6-google-access-service-accounts',
        default=None,
        metavar='EMAIL',
        type=arg_parsers.ArgList(min_length=0),
        help="""\
        The service accounts can be used to selectively turn on Private IPv6
        Google Access only on the VMs primary service account matching the
        value.

        Setting this will override the existing Private IPv6 Google Access
        service accounts for the subnetwork.
        The following will clear the existing Private IPv6 Google Access service
        accounts:

        $ {command} MY-SUBNET --private-ipv6-google-access-service-accounts ''
        """)
  elif include_private_ipv6_access_beta:
    messages = apis.GetMessagesModule('compute',
                                      compute_api.COMPUTE_BETA_API_VERSION)
    update_private_ipv6_access_field = updated_field.add_argument_group()
    GetPrivateIpv6GoogleAccessTypeFlagMapperBeta(
        messages).choice_arg.AddToParser(update_private_ipv6_access_field)
# Example #6
def AddMatrixArgs(parser):
    """Register the repeatable args which define the axes for a test matrix.

    Args:
      parser: An argparse parser used to add arguments that follow a command
          in the CLI.
    """
    parser.add_argument('--device',
                        category=base.COMMONLY_USED_FLAGS,
                        type=arg_parsers.ArgDict(min_length=1),
                        action='append',
                        metavar='DIMENSION=VALUE',
                        help="""\
      A list of ``DIMENSION=VALUE'' pairs which specify a target device to test
      against. This flag may be repeated to specify multiple devices. The four
      device dimensions are: *model*, *version*, *locale*, and *orientation*. If
      any dimensions are omitted, they will use a default value. The default
      value, and all possible values, for each dimension can be found with the
      ``list'' command for that dimension, such as `$ {parent_command} models
      list`. *--device* is now the preferred way to specify test devices and may
      not be used in conjunction with *--device-ids*, *--os-version-ids*,
      *--locales*, or *--orientations*. Omitting all of the preceding
      dimension-related flags will run tests against a single device using
      defaults for all four device dimensions.

      Examples:\n
      ```
      --device model=Nexus6
      --device version=23,orientation=portrait
      --device model=shamu,version=22,locale=zh_CN,orientation=default
      ```
      """)
    # The following four flags are the deprecated way to select devices;
    # --device supersedes them and cannot be combined with them.
    parser.add_argument(
        '--device-ids',
        '-d',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='MODEL_ID',
        help='The list of MODEL_IDs to test against (default: one device model '
        'determined by the Firebase Test Lab device catalog; see TAGS listed '
        'by the `$ {parent_command} devices list` command).')
    parser.add_argument(
        '--os-version-ids',
        '-v',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='OS_VERSION_ID',
        help='The list of OS_VERSION_IDs to test against (default: a version ID '
        'determined by the Firebase Test Lab device catalog).')
    parser.add_argument(
        '--locales',
        '-l',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1),
        metavar='LOCALE',
        help='The list of LOCALEs to test against (default: a single locale '
        'determined by the Firebase Test Lab device catalog).')
    parser.add_argument(
        '--orientations',
        '-o',
        category=DEPRECATED_DEVICE_DIMENSIONS,
        type=arg_parsers.ArgList(min_length=1,
                                 max_length=2,
                                 choices=arg_validate.ORIENTATION_LIST),
        completer=arg_parsers.GetMultiCompleter(OrientationsCompleter),
        metavar='ORIENTATION',
        help='The device orientation(s) to test against (default: portrait). '
        'Specifying \'default\' will pick the preferred orientation '
        'for the app.')
# Example #7
  def Args(parser):
    """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
          to capture some information, but behaves like an ArgumentParser.
    """
    source = parser.add_mutually_exclusive_group()
    source.add_argument(
        'source',
        nargs='?',
        default='.',  # By default, the current directory is used.
        help='The location of the source to build. The location can be a '
        'directory on a local disk or a gzipped archive file (.tar.gz) in '
        'Google Cloud Storage. If the source is a local directory, this '
        'command skips the files specified in the `.gcloudignore` file. If a '
        '`.gitignore` file is present in the local source directory, gcloud '
        'will use a Git-compatible `.gcloudignore` file that respects your '
        '.gitignored files. The global `.gitignore` is not respected. For more '
        'information on `.gcloudignore`, see `$gcloud topic gcloudignore`.',
    )
    source.add_argument(
        '--no-source',
        action='store_true',
        help='Specify that no source should be uploaded with this build.')

    parser.add_argument(
        '--gcs-source-staging-dir',
        help='A directory in Google Cloud Storage to copy the source used for '
        'staging the build. If the specified bucket does not exist, Cloud '
        'Build will create one. If you don\'t set this field, '
        '```gs://[PROJECT_ID]_cloudbuild/source``` is used.',
    )
    parser.add_argument(
        '--gcs-log-dir',
        help='A directory in Google Cloud Storage to hold build logs. If this '
        'field is not set, '
        '```gs://[PROJECT_NUMBER].cloudbuild-logs.googleusercontent.com/``` '
        'will be created and used.',
    )
    parser.add_argument(
        '--timeout',
        help='Maximum time a build is run before it is failed as `TIMEOUT`. It '
        'is specified as a duration; for example, "2h15m5s" is two hours, '
        'fifteen minutes, and five seconds. If you don\'t specify a unit, '
        'seconds is assumed. For example, "10" is 10 seconds.',
        action=actions.StoreProperty(properties.VALUES.container.build_timeout),
    )

    Submit._machine_type_flag_map.choice_arg.AddToParser(parser)

    parser.add_argument(
        '--disk-size',
        type=arg_parsers.BinarySize(lower_bound='100GB', upper_bound='1TB'),
        help='Machine disk size (GB) to run the build.',
    )
    parser.add_argument(
        '--substitutions',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(),
        help="""\
Parameters to be substituted in the build specification.

For example (using some nonsensical substitution keys; all keys must begin with
an underscore):

    $ gcloud container builds submit . \\
        --config config.yaml \\
        --substitutions _FAVORITE_COLOR=blue,_NUM_CANDIES=10

This will result in a build where every occurrence of ```${_FAVORITE_COLOR}```
in certain fields is replaced by "blue", and similarly for ```${_NUM_CANDIES}```
and "10".

Only the following built-in variables can be specified with the
`--substitutions` flag: REPO_NAME, BRANCH_NAME, TAG_NAME, REVISION_ID,
COMMIT_SHA, SHORT_SHA.

For more details, see:
https://cloud.google.com/cloud-build/docs/api/build-requests#substitutions
""")

    build_config = parser.add_mutually_exclusive_group(required=True)
    build_config.add_argument(
        '--tag',
        '-t',
        help='The tag to use with a "docker build" image creation. '
        'Cloud Build will run a remote "docker build -t '
        '$TAG .", where $TAG is the tag provided by this flag. The tag '
        'must be in the gcr.io/* or *.gcr.io/* namespaces. Specify a tag '
        'if you want Cloud Build to build using a Dockerfile '
        'instead of a build config file. If you specify a tag in this '
        'command, your source must include a Dockerfile. For instructions '
        'on building using a Dockerfile see '
        'https://cloud.google.com/cloud-build/docs/quickstart-docker.',
    )
    build_config.add_argument(
        '--config',
        help='The .yaml or .json file to use for build configuration.',
    )
    base.ASYNC_FLAG.AddToParser(parser)
    parser.display_info.AddFormat("""
          table(
            id,
            createTime.date('%Y-%m-%dT%H:%M:%S%Oz', undefined='-'),
            duration(start=startTime,end=finishTime,precision=0,calendar=false,undefined="  -").slice(2:).join(""):label=DURATION,
            build_source(undefined="-"):label=SOURCE,
            build_images(undefined="-"):label=IMAGES,
            status
          )
        """)
    # Do not try to create a URI to update the cache.
    parser.display_info.AddCacheUpdater(None)
# Example #8
def AddFileShareArg(parser,
                    api_version,
                    include_snapshot_flags=False,
                    include_backup_flags=False,
                    required=True):
  """Adds a --file-share flag to the given parser.

  Args:
    parser: argparse parser.
    api_version: filestore_client api version.
    include_snapshot_flags: bool, whether to include --source-snapshot flags.
    include_backup_flags: bool, whether to include --source-backup flags.
    required: bool, passthrough to parser.add_argument.
  """
  # The alpha and beta API surfaces document an identical --file-share spec,
  # so the (long) help text is defined once and shared by both versions
  # instead of being duplicated.
  non_v1_help = """
File share configuration for an instance. Specifying both `name` and `capacity`
is required.

*capacity*::: The desired capacity of the volume in GB or TB units. If no capacity
unit is specified, GB is assumed. Acceptable instance capacities for each tier are as follows:
* BASIC_HDD: 1TB-63.9TB in 1GB increments or its multiples.
* BASIC_SSD: 2.5TB-63.9TB in 1GB increments or its multiples.
* HIGH_SCALE_SSD: 60TB-320TB in 10TB increments or its multiples.

*name*::: The desired logical name of the volume.

*nfs-export-options*::: The NfsExportOptions for the Cloud Filestore instance file share.
Configuring NfsExportOptions is optional.
Use the `--flags-file` flag to specify the path to a JSON or YAML configuration file that contains the required NfsExportOptions flags.

*ip-ranges*::: A list of IPv4 addresses or CIDR ranges that are allowed to mount the file share.
IPv4 addresses format: {octet 1}.{octet 2}.{octet 3}.{octet 4}.
CIDR range format: {octet 1}.{octet 2}.{octet 3}.{octet 4}/{mask size}.
Overlapping IP ranges, even across NfsExportOptions, are not allowed and will return an error.
The limit of IP ranges/addresses for each FileShareConfig among all NfsExportOptions is 64 per instance.

*access-mode*::: The type of access allowed for the specified IP-addresses or CIDR ranges.
READ_ONLY: Allows only read requests on the exported file share.
READ_WRITE: Allows both read and write requests on the exported file share.
The default setting is READ_WRITE.

*squash-mode*::: Enables or disables root squash for the specified
IP addresses or CIDR ranges.
NO_ROOT_SQUASH: Disables root squash to allow root access on the exported file share.
ROOT_SQUASH. Enables root squash to remove root access on the exported file share.
The default setting is NO_ROOT_SQUASH.

*anon_uid*::: An integer that represents the user ID of anonymous users.
Anon_uid may only be set when squash_mode is set to ROOT_SQUASH.
If NO_ROOT_SQUASH is specified, an error will be returned.
The default value is 65534.

*anon_gid*::: An integer that represents the group ID of anonymous groups.
Anon_gid may only be set when squash_mode is set to ROOT_SQUASH.
If NO_ROOT_SQUASH is specified, an error will be returned.
The default value is 65534.
"""
  file_share_help = {
      filestore_client.V1_API_VERSION: """\
File share configuration for an instance.  Specifying both `name` and `capacity`
is required.
*capacity*::: The desired capacity of the volume. The capacity must be a whole
number followed by a capacity unit such as ``TB'' for terabyte. If no capacity
unit is specified, GB is assumed. The minimum capacity for a standard instance
is 1TB. The minimum capacity for a premium instance is 2.5TB.
*name*::: The desired logical name of the volume.
""",
      filestore_client.ALPHA_API_VERSION: non_v1_help,
      filestore_client.BETA_API_VERSION: non_v1_help,
  }
  source_snapshot_help = """\

*source-snapshot*::: The name of the snapshot to restore from. Supported for BASIC instances only.

*source-snapshot-region*::: The region of the source snapshot. If
unspecified, it is assumed that the Filestore snapshot is local and
instance-zone will be used.
"""
  source_backup_help = """\

*source-backup*::: The name of the backup to restore from.

*source-backup-region*::: The region of the source backup.
"""

  # Start from the version-specific ArgDict spec and optionally extend it
  # with the snapshot/backup restore keys.
  spec = FILE_SHARE_API_VER[api_version].copy()
  if include_backup_flags:
    spec['source-backup'] = str
    spec['source-backup-region'] = str
  if include_snapshot_flags:
    spec['source-snapshot'] = str
    spec['source-snapshot-region'] = str

  # Use a distinct name for the selected text rather than rebinding (and
  # shadowing) the help dict itself.
  help_text = file_share_help[api_version]
  parser.add_argument(
      '--file-share',
      type=arg_parsers.ArgDict(spec=spec, required_keys=['name', 'capacity']),
      required=required,
      help=help_text +
      (source_snapshot_help if include_snapshot_flags else '') +
      (source_backup_help if include_backup_flags else ''))
예제 #9
0
      machineType: n1-highmem-2
    replicaCount: 1
    containerSpec:
      imageUri: gcr.io/ucaip-test/ucaip-training-test
      args:
      - port=8500
      command:
      - start""")

WORKER_POOL_SPEC = base.Argument('--worker-pool-spec',
                                 action='append',
                                 type=arg_parsers.ArgDict(
                                     spec={
                                         'replica-count': int,
                                         'machine-type': str,
                                         'container-image-uri': str,
                                         'python-image-uri': str,
                                         'python-module': str,
                                     },
                                     required_keys=['machine-type']),
                                 metavar='WORKER_POOL_SPEC',
                                 help="""
Define the worker pool configuration used by the custom job. You can specify multiple
worker pool specs in order to create a custom job with multiple worker pools.

The spec can contain the following fields, which are listed with corresponding
fields in the WorkerPoolSpec API message:

*machine-type*::: (Required): machineSpec.machineType
*replica-count*::: replicaCount
*container-image-uri*::: containerSpec.imageUri
예제 #10
0
def ArgsForClusterRef(parser):
  """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
  """
  labels_util.AddCreateLabelsFlags(parser)
  instances_flags.AddTagsArgs(parser)
  # 30m is backend timeout + 5m for safety buffer.
  util.AddTimeoutFlag(parser, default='35m')
  parser.add_argument(
      '--metadata',
      type=arg_parsers.ArgDict(min_length=1),
      action='append',
      default=None,
      help=('Metadata to be made available to the guest operating system '
            'running on the instances'),
      metavar='KEY=VALUE')

  parser.add_argument(
      '--num-workers',
      type=int,
      help='The number of worker nodes in the cluster. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--num-preemptible-workers',
      type=int,
      help='The number of preemptible worker nodes in the cluster.')
  parser.add_argument(
      '--master-machine-type',
      help='The type of machine to use for the master. Defaults to '
      'server-specified.')
  parser.add_argument(
      '--worker-machine-type',
      help='The type of machine to use for workers. Defaults to '
      'server-specified.')
  parser.add_argument('--image', hidden=True)
  parser.add_argument(
      '--image-version',
      metavar='VERSION',
      help='The image version to use for the cluster. Defaults to the '
      'latest version.')
  parser.add_argument(
      '--bucket',
      help='The Google Cloud Storage bucket to use with the Google Cloud '
      'Storage connector. A bucket is auto created when this parameter is '
      'not specified.')

  # --network and --subnet are mutually exclusive ways to place the cluster.
  netparser = parser.add_mutually_exclusive_group()
  netparser.add_argument(
      '--network',
      help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
  netparser.add_argument(
      '--subnet',
      help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
  parser.add_argument(
      '--num-worker-local-ssds',
      type=int,
      help='The number of local SSDs to attach to each worker in a cluster.')
  parser.add_argument(
      '--num-master-local-ssds',
      type=int,
      help='The number of local SSDs to attach to the master in a cluster.')
  parser.add_argument(
      '--initialization-actions',
      type=arg_parsers.ArgList(min_length=1),
      metavar='CLOUD_STORAGE_URI',
      help=('A list of Google Cloud Storage URIs of '
            'executables to run on each node in the cluster.'))
  parser.add_argument(
      '--initialization-action-timeout',
      type=arg_parsers.Duration(),
      metavar='TIMEOUT',
      default='10m',
      help='The maximum duration of each initialization action.')
  parser.add_argument(
      '--properties',
      type=arg_parsers.ArgDict(),
      metavar='PREFIX:PROPERTY=VALUE',
      default={},
      help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

[format="csv",options="header"]
|========
Prefix,Target Configuration File
core,core-site.xml
hdfs,hdfs-site.xml
mapred,mapred-site.xml
yarn,yarn-site.xml
hive,hive-site.xml
pig,pig.properties
spark,spark-defaults.conf
|========

""")
  parser.add_argument(
      '--service-account',
      help='The Google Cloud IAM service account to be authenticated as.')
  parser.add_argument(
      '--scopes',
      type=arg_parsers.ArgList(min_length=1),
      metavar='SCOPE',
      help="""\
Specifies scopes for the node instances. The project's default service account
is used. Multiple SCOPEs can be specified, separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following scopes necessary for the cluster to function properly are always
added, even if not explicitly specified:

[format="csv"]
|========
{minimum_scopes}
|========

If this flag is not specified the following default scopes are also included:

[format="csv"]
|========
{additional_scopes}
|========

If you want to enable all scopes use the 'cloud-platform' scope.

SCOPE can be either the full URI of the scope or an alias.
Available aliases are:

[format="csv",options="header"]
|========
Alias,URI
{aliases}
|========

{scope_deprecation_msg}
""".format(
    minimum_scopes='\n'.join(constants.MINIMUM_SCOPE_URIS),
    additional_scopes='\n'.join(constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
    aliases=compute_helpers.SCOPE_ALIASES_FOR_HELP,
    scope_deprecation_msg=compute_constants.DEPRECATED_SCOPES_MESSAGES))

  master_boot_disk = parser.add_mutually_exclusive_group()
  worker_boot_disk = parser.add_mutually_exclusive_group()

  # Deprecated, to be removed at a future date.
  master_boot_disk.add_argument(
      '--master-boot-disk-size-gb',
      type=int,
      hidden=True)
  worker_boot_disk.add_argument(
      '--worker-boot-disk-size-gb',
      type=int,
      hidden=True)

  boot_disk_size_detailed_help = """\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """
  master_boot_disk.add_argument(
      '--master-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)
  worker_boot_disk.add_argument(
      '--worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help=boot_disk_size_detailed_help)

  # Not part of the mutually exclusive groups above: preemptible workers have
  # no deprecated *-size-gb variant.
  parser.add_argument(
      '--preemptible-worker-boot-disk-size',
      type=arg_parsers.BinarySize(lower_bound='10GB'),
      help="""\
      The size of the boot disk. The value must be a
      whole number followed by a size unit of ``KB'' for kilobyte, ``MB''
      for megabyte, ``GB'' for gigabyte, or ``TB'' for terabyte. For example,
      ``10GB'' will produce a 10 gigabyte disk. The minimum size a boot disk
      can have is 10 GB. Disk size must be a multiple of 1 GB.
      """)
예제 #11
0
def _GetDeviceCredentialFlags(combine_flags=True, only_modifiable=False):
    """Generates credentials-related flags.

    Args:
      combine_flags: bool, if True return a single combined `--public-key`
        ArgDict flag built from the individual flags; otherwise return the
        individual flags themselves.
      only_modifiable: bool, if True omit the flags (`--path`, `--type`) that
        cannot be modified after creation.

    Returns:
      A list of base.Argument objects.
    """
    flags = []
    if not only_modifiable:
        flags.extend([
            base.Argument(
                '--path',
                required=True,
                type=str,
                help='The path on disk to the file containing the key.'),
            base.ChoiceArgument('--type',
                                choices=_VALID_KEY_TYPES,
                                required=True,
                                help_str='The type of the key.')
        ])
    flags.append(
        base.Argument('--expiration-time',
                      type=arg_parsers.Datetime.Parse,
                      help=('The expiration time for the key. See '
                            '$ gcloud topic datetimes for information on '
                            'time formats.')))
    if not combine_flags:
        return flags

    # Fold the individual flags into one repeated --public-key ArgDict flag,
    # deriving both the ArgDict spec and the sub-argument help text from each
    # flag's own metadata so the two cannot drift apart.
    sub_argument_help = []
    spec = {}
    for flag in flags:
        name = flag.name.lstrip('-')
        required = flag.kwargs.get('required')
        choices = flag.kwargs.get('choices')
        choices_str = ''
        if choices:
            choices_str = ', '.join(map('`{}`'.format, sorted(choices)))
            choices_str = ' One of [{}].'.format(choices_str)
        help_ = flag.kwargs['help']
        spec[name] = flag.kwargs['type']
        sub_argument_help.append(
            '* *{name}*: {required}.{choices} {help}'.format(
                name=name,
                required=('Required' if required else 'Optional'),
                choices=choices_str,
                help=help_))
    key_type_help = []
    for key_type, description in reversed(sorted(_VALID_KEY_TYPES.items())):
        key_type_help.append('* `{}`: {}'.format(key_type, description))
    flag = base.Argument(
        '--public-key',
        dest='public_keys',
        metavar='path=PATH,type=TYPE,[expiration-time=EXPIRATION-TIME]',
        type=arg_parsers.ArgDict(spec=spec),
        action='append',
        help="""\
Specify a public key.

Supports four key types:

{key_type_help}

The key specification is given via the following sub-arguments:

{sub_argument_help}

For example:

  --public-key \\
      path=/path/to/id_rsa.pem,type=RSA_PEM,expiration-time=2017-01-01T00:00-05

This flag may be provided multiple times to provide multiple keys (maximum 3).
""".format(key_type_help='\n'.join(key_type_help),
           sub_argument_help='\n'.join(sub_argument_help)))
    return [flag]
예제 #12
0
  def Args(parser):
    """Args is called by calliope to gather arguments for this command.

    Please add arguments in alphabetical order except for no- or a clear-
    pair for that argument which can follow the argument itself.
    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
    """
    # TODO(b/35705305): move common flags to command_lib.sql.flags
    # Shared --async flag registered via the common base module.
    base.ASYNC_FLAG.AddToParser(parser)
    parser.add_argument(
        '--activation-policy',
        required=False,
        choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
        default=None,
        help='The activation policy for this instance. This specifies when the '
        'instance should be activated and is applicable only when the '
        'instance state is RUNNABLE. More information on activation policies '
        'can be found here: https://cloud.google.com/sql/faq#activation_policy')
    parser.add_argument(
        '--assign-ip',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Specified if the instance must be assigned an IP address.')
    parser.add_argument(
        '--authorized-gae-apps',
        type=arg_parsers.ArgList(min_length=1),
        metavar='APP',
        required=False,
        default=[],
        help='List of App Engine app IDs that can access this instance.')
    parser.add_argument(
        '--authorized-networks',
        type=arg_parsers.ArgList(min_length=1),
        metavar='NETWORK',
        required=False,
        default=[],
        help='The list of external networks that are allowed to connect to the'
        ' instance. Specified in CIDR notation, also known as \'slash\' '
        'notation (e.g. 192.168.100.0/24).')
    parser.add_argument(
        '--backup-start-time',
        required=False,
        help='The start time of daily backups, specified in the 24 hour format '
        '- HH:MM, in the UTC timezone.')
    # Daily backup on by default; presumably calliope generates a --no-backup
    # variant for store_true flags to turn it off -- confirm.
    parser.add_argument(
        '--backup',
        required=False,
        action='store_true',
        default=True,
        help='Enables daily backup.')
    parser.add_argument(
        '--database-version',
        required=False,
        default='MYSQL_5_6',
        help='The database engine type and version. Can be MYSQL_5_5, '
        'MYSQL_5_6, or MYSQL_5_7.')
    parser.add_argument(
        '--enable-bin-log',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Specified if binary log should be enabled. If backup '
        'configuration is disabled, binary log must be disabled as well.')
    parser.add_argument(
        '--follow-gae-app',
        required=False,
        help='The App Engine app this instance should follow. It must be in '
        'the same region as the instance.')
    parser.add_argument(
        '--gce-zone',
        required=False,
        help='The preferred Compute Engine zone (e.g. us-central1-a, '
        'us-central1-b, etc.).')
    # Positional argument: the ID of the Cloud SQL instance.
    parser.add_argument(
        'instance',
        help='Cloud SQL instance ID.')
    parser.add_argument(
        '--master-instance-name',
        required=False,
        help='Name of the instance which will act as master in the replication '
        'setup. The newly created instance will be a read replica of the '
        'specified master instance.')
    # Hidden flag: suppressed from generated help output.
    parser.add_argument(
        '--on-premises-host-port',
        required=False,
        help=argparse.SUPPRESS)
    parser.add_argument(
        '--pricing-plan',
        '-p',
        required=False,
        choices=['PER_USE', 'PACKAGE'],
        default='PER_USE',
        help='The pricing plan for this instance.')
    # TODO(b/31989340): add remote completion
    parser.add_argument(
        '--region',
        required=False,
        default='us-central',
        help='The regional location '
        '(e.g. asia-east1, us-east1). See the full list of regions at '
        'https://cloud.google.com/sql/docs/instance-locations.')
    parser.add_argument(
        '--replication',
        required=False,
        choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
        default=None,
        help='The type of replication this instance uses.')
    parser.add_argument(
        '--require-ssl',
        required=False,
        action='store_true',
        default=None,  # Tri-valued: None => don't change the setting.
        help='Specified if users connecting over IP must use SSL.')
    parser.add_argument(
        '--tier',
        '-t',
        required=False,
        help='The tier for first generation Cloud SQL instances, for example '
        ' D0, D1, D2. A complete list of tiers is available here: '
        'https://cloud.google.com/sql/pricing#packages')
    parser.add_argument(
        '--database-flags',
        type=arg_parsers.ArgDict(min_length=1),
        metavar='FLAG=VALUE',
        required=False,
        help='A comma-separated list of database flags to set on the instance. '
        'Use an equals sign to separate flag name and value. Flags without '
        'values, like skip_grant_tables, can be written out without a value '
        'after, e.g., `skip_grant_tables=`. Use on/off for '
        'booleans. View the Instance Resource API for allowed flags. '
        '(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
        'log_output=1`)')
예제 #13
0
    'characters are allowed.')

# Error message shown when a label value fails validation.
VALUE_FORMAT_ERROR = (
    'Only hyphens (-), underscores (_), lowercase characters, and numbers are '
    'allowed. International characters are allowed.')

# Validators wrap the IsValidLabel* predicates so argument parsing reports the
# corresponding *_FORMAT_ERROR message on invalid input.
KEY_FORMAT_VALIDATOR = arg_parsers.CustomFunctionValidator(
    IsValidLabelKey, KEY_FORMAT_ERROR)

VALUE_FORMAT_VALIDATOR = arg_parsers.CustomFunctionValidator(
    IsValidLabelValue, VALUE_FORMAT_ERROR)

# --labels flag for create commands: parses KEY=VALUE pairs into a dict,
# validating each key and value with the validators above.
CREATE_LABELS_FLAG = base.Argument(
    '--labels',
    metavar='KEY=VALUE',
    type=arg_parsers.ArgDict(
        key_type=KEY_FORMAT_VALIDATOR, value_type=VALUE_FORMAT_VALIDATOR),
    action=arg_parsers.UpdateAction,
    help='A list of label KEY=VALUE pairs to add.')


def _GetUpdateLabelsFlag(extra_message):
  """Builds the --update-labels flag, appending extra_message to its help."""
  labels_type = arg_parsers.ArgDict(
      key_type=KEY_FORMAT_VALIDATOR, value_type=VALUE_FORMAT_VALIDATOR)
  help_text = """\
      A list of label KEY=VALUE pairs to update. If a label exists its value
      is modified, otherwise a new label is created.""" + extra_message
  return base.Argument(
      '--update-labels',
      metavar='KEY=VALUE',
      type=labels_type,
      action=arg_parsers.UpdateAction,
      help=help_text)
예제 #14
0
def AddDeviceCredentialFlagsToParser(parser,
                                     combine_flags=True,
                                     only_modifiable=False):
    """Get credentials-related flags.

  Adds one of the following:

    * --public-key path=PATH,type=TYPE,expiration-time=EXPIRATION_TIME
    * --path=PATH --type=TYPE --expiration-time=EXPIRATION_TIME

  depending on the value of combine_flags.

  Args:
    parser: argparse parser to which to add these flags.
    combine_flags: bool, whether to combine these flags into one --public-key
      flag or to leave them separate.
    only_modifiable: bool, whether to include all flags or just those that can
      be modified after creation.
  """
    flags = []
    if not only_modifiable:
        # --path and --type identify the key itself and are skipped when only
        # post-creation-modifiable flags are requested.
        flags.extend([
            base.Argument(
                '--path',
                required=True,
                type=str,
                help='The path on disk to the file containing the key.'),
            base.Argument('--type',
                          required=True,
                          type=_KeyTypeValidator,
                          choices=_VALID_KEY_TYPES,
                          help='The type of the key.')
        ])
    flags.append(
        base.Argument('--expiration-time',
                      type=arg_parsers.Datetime.Parse,
                      help=('The expiration time for the key in ISO 8601 '
                            '(ex. `2017-01-01T00:00Z`) format.')))
    if combine_flags:
        # Fold the individual flags into one repeated --public-key ArgDict
        # flag, building both its spec and sub-argument help from each flag's
        # own metadata.
        sub_argument_help = []
        spec = {}
        for flag in flags:
            name = flag.name.lstrip('-')
            required = flag.kwargs.get('required')
            choices = flag.kwargs.get('choices')
            choices_str = ''
            if choices:
                choices_str = ', '.join(map('`{}`'.format, sorted(choices)))
                choices_str = ' One of [{}].'.format(choices_str)
            help_ = flag.kwargs['help']
            spec[name] = flag.kwargs['type']
            sub_argument_help.append(
                '* *{name}*: {required}.{choices} {help}'.format(
                    name=name,
                    required=('Required' if required else 'Optional'),
                    choices=choices_str,
                    help=help_))
        key_type_help = []
        # NOTE(review): this help says "two key types" while the related
        # _GetDeviceCredentialFlags help says "four"; the real list comes
        # from _VALID_KEY_TYPES -- confirm which wording is accurate.
        for key_type, description in reversed(sorted(
                _VALID_KEY_TYPES.items())):
            key_type_help.append('* `{}`: {}'.format(key_type, description))
        base.Argument(
            '--public-key',
            dest='public_keys',
            metavar='path=PATH,type=TYPE,[expiration-time=EXPIRATION-TIME]',
            type=arg_parsers.ArgDict(spec=spec),
            action='append',
            help="""\
Specify a public key.

Supports two key types:

{key_type_help}

The key specification is given via the following sub-arguments:

{sub_argument_help}

For example:

    --public-key \\
        path=/path/to/id_rsa.pem,type=rs256,expiration-time=2017-01-01T00:00-05

This flag may be provided multiple times to provide multiple keys (maximum 3).
""".format(key_type_help='\n'.join(key_type_help),
           sub_argument_help='\n'.join(sub_argument_help))).AddToParser(parser)
    else:
        for flag in flags:
            flag.AddToParser(parser)
예제 #15
0
def AddAndroidTestArgs(parser):
    """Register args which are specific to Android test commands.

  Args:
    parser: An argparse parser used to add arguments that follow a command in
        the CLI.
  """
    # Flags shared by every Android test type; flags specific to
    # instrumentation, Robo, and game-loop tests follow in the sections below.
    parser.add_argument(
        '--app',
        category=base.COMMONLY_USED_FLAGS,
        help='The path to the application binary file. The path may be in the '
        'local filesystem or in Google Cloud Storage using gs:// notation. '
        'Android App Bundles are specified as .aab, all other files are assumed '
        'to be APKs.')
    # Deprecated: passing this flag only triggers the warning below; the
    # package name is parsed from the APK manifest by default.
    parser.add_argument(
        '--app-package',
        action=actions.DeprecationAction(
            '--app-package',
            warn=(
                'The `--app-package` flag is deprecated and should no longer '
                'be used. By default, the correct application package name is '
                'parsed from the APK manifest.')),
        help=
        'The Java package of the application under test (default: extracted '
        'from the APK manifest).')
    # store_true with default=None yields a tri-state flag: None (unset),
    # True (--auto-google-login), or False (--no-auto-google-login).
    parser.add_argument(
        '--auto-google-login',
        action='store_true',
        default=None,
        help='Automatically log into the test device using a preconfigured '
        'Google account before beginning the test. Enabled by default, use '
        '--no-auto-google-login to disable.')
    parser.add_argument(
        '--directories-to-pull',
        type=arg_parsers.ArgList(),
        metavar='DIR_TO_PULL',
        help='A list of paths that will be copied from the device\'s storage to '
        'the designated results bucket after the test is complete. These must be '
        'absolute paths under `/sdcard` or `/data/local/tmp` (for example, '
        '`--directories-to-pull /sdcard/tempDir1,/data/local/tmp/tempDir2`). '
        'Path names are restricted to the characters ```a-zA-Z0-9_-./+```. '
        'The paths `/sdcard` and `/data` will be made available and treated as '
        'implicit path substitutions. E.g. if `/sdcard` on a particular device '
        'does not map to external storage, the system will replace it with the '
        'external storage path prefix for that device.')
    parser.add_argument('--environment-variables',
                        type=arg_parsers.ArgDict(),
                        metavar='KEY=VALUE',
                        help="""\
      A comma-separated, key=value map of environment variables and their
      desired values. This flag is repeatable. The environment variables are
      mirrored as extra options to the `am instrument -e KEY1 VALUE1 ...`
      command and passed to your test runner (typically AndroidJUnitRunner).
      Examples:

      Break test cases into four shards and run only the first shard:

      ```
      --environment-variables numShards=4,shardIndex=0
      ```

      Enable code coverage and provide a directory to store the coverage
      results when using Android Test Orchestrator (`--use-orchestrator`):

      ```
      --environment-variables clearPackageData=true,coverage=true,coverageFilePath=/sdcard/
      ```

      Enable code coverage and provide a file path to store the coverage
      results when *not* using Android Test Orchestrator
      (`--no-use-orchestrator`):

      ```
      --environment-variables coverage=true,coverageFile=/sdcard/coverage.ec
      ```

      Note: If you need to embed a comma into a `VALUE` string, please refer to
      `gcloud topic escaping` for ways to change the default list delimiter.
      """)
    parser.add_argument(
        '--obb-files',
        type=arg_parsers.ArgList(min_length=1, max_length=2),
        metavar='OBB_FILE',
        help='A list of one or two Android OBB file names which will be copied '
        'to each test device before the tests will run (default: None). Each '
        'OBB file name must conform to the format as specified by Android (e.g. '
        '[main|patch].0300110.com.example.android.obb) and will be installed '
        'into <shared-storage>/Android/obb/<package-name>/ on the test device.'
    )
    # Tri-state like --auto-google-login: default=None distinguishes "unset"
    # from an explicit --no-performance-metrics.
    parser.add_argument(
        '--performance-metrics',
        action='store_true',
        default=None,
        help=
        'Monitor and record performance metrics: CPU, memory, network usage,'
        ' and FPS (game-loop only). Enabled by default, use '
        '--no-performance-metrics to disable.')
    parser.add_argument(
        '--results-history-name',
        help=
        'The history name for your test results (an arbitrary string label; '
        'default: the application\'s label from the APK manifest). All tests '
        'which use the same history name will have their results grouped '
        'together in the Firebase console in a time-ordered test history list.'
    )
    parser.add_argument('--type',
                        category=base.COMMONLY_USED_FLAGS,
                        choices=['instrumentation', 'robo', 'game-loop'],
                        help='The type of test to run.')

    # The following args are specific to Android instrumentation tests.

    parser.add_argument(
        '--test',
        category=base.COMMONLY_USED_FLAGS,
        help='The path to the binary file containing instrumentation tests. The '
        'given path may be in the local filesystem or in Google Cloud Storage '
        'using a URL beginning with `gs://`.')
    # Deprecated: like --app-package, retained only to emit a warning; the
    # test package name is parsed from the APK manifest by default.
    parser.add_argument(
        '--test-package',
        action=actions.DeprecationAction(
            '--test-package',
            warn=(
                'The `--test-package` flag is deprecated and should no longer '
                'be used. By default, the correct test package name is '
                'parsed from the APK manifest.')),
        category=ANDROID_INSTRUMENTATION_TEST,
        help='The Java package name of the instrumentation test (default: '
        'extracted from the APK manifest).')
    parser.add_argument(
        '--test-runner-class',
        category=ANDROID_INSTRUMENTATION_TEST,
        help='The fully-qualified Java class name of the instrumentation test '
        'runner (default: the last name extracted from the APK manifest).')
    parser.add_argument('--test-targets',
                        category=ANDROID_INSTRUMENTATION_TEST,
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='TEST_TARGET',
                        help="""\
      A list of one or more test target filters to apply (default: run all test
      targets). Each target filter must be fully qualified with the package
      name, class name, or test annotation desired. Any test filter supported by
      `am instrument -e ...` is supported. See
       https://developer.android.com/reference/android/support/test/runner/AndroidJUnitRunner
       for more information. Examples:

         * `--test-targets "package com.my.package.name"`
         * `--test-targets "notPackage com.package.to.skip"`
         * `--test-targets "class com.foo.ClassName"`
         * `--test-targets "notClass com.foo.ClassName#testMethodToSkip"`
         * `--test-targets "annotation com.foo.AnnotationToRun"`
         * `--test-targets "size large notAnnotation com.foo.AnnotationToSkip"`
      """)
    parser.add_argument(
        '--use-orchestrator',
        category=ANDROID_INSTRUMENTATION_TEST,
        action='store_true',
        default=None,
        help='Whether each test runs in its own Instrumentation instance with '
        'the Android Test Orchestrator (default: Orchestrator is not used, same '
        'as specifying --no-use-orchestrator). Orchestrator is only compatible '
        'with AndroidJUnitRunner v1.0 or higher. See '
        'https://developer.android.com/training/testing/junit-runner.html'
        '#using-android-test-orchestrator for more information about Android '
        'Test Orchestrator.')

    # The following args are specific to Android Robo tests.

    parser.add_argument(
        '--robo-directives',
        metavar='TYPE:RESOURCE_NAME=INPUT',
        category=ANDROID_ROBO_TEST,
        type=arg_parsers.ArgDict(),
        help='A comma-separated (`<type>:<key>=<value>`) map of '
        '`robo_directives` that you can use to customize the behavior of Robo '
        'test. The `type` specifies the action type of the directive, which may '
        'take on values `click`, `text` or `ignore`. If no `type` is provided, '
        '`text` will be used by default. Each key should be the Android resource '
        'name of a target UI element and each value should be the text input for '
        'that element. Values are only permitted for `text` type elements, so no '
        'value should be specified for `click` and `ignore` type elements.'
        '\n\n'
        'To provide custom login credentials for your app, use'
        '\n\n'
        '    --robo-directives text:username_resource=username,'
        'text:password_resource=password'
        '\n\n'
        'To instruct Robo to click on the sign-in button, use'
        '\n\n'
        '    --robo-directives click:sign_in_button='
        '\n\n'
        'To instruct Robo to ignore any UI elements with resource names which '
        'equal or start with the user-defined value, use'
        '\n\n'
        '  --robo-directives ignore:ignored_ui_element_resource_name='
        '\n\n'
        'To learn more about Robo test and robo_directives, see '
        'https://firebase.google.com/docs/test-lab/android/command-line#custom_login_and_text_input_with_robo_test.'
        '\n\n'
        'Caution: You should only use credentials for test accounts that are not '
        'associated with real users.')

    # The following args are specific to Android game-loop tests.

    parser.add_argument(
        '--scenario-numbers',
        metavar='int',
        type=arg_parsers.ArgList(element_type=int,
                                 min_length=1,
                                 max_length=1024),
        category=ANDROID_GAME_LOOP_TEST,
        help='A list of game-loop scenario numbers which will be run as part of '
        'the test (default: all scenarios). A maximum of 1024 scenarios may be '
        'specified in one test matrix, but the maximum number may also be '
        'limited by the overall test *--timeout* setting.')

    parser.add_argument(
        '--scenario-labels',
        metavar='LABEL',
        type=arg_parsers.ArgList(min_length=1),
        category=ANDROID_GAME_LOOP_TEST,
        help='A list of game-loop scenario labels (default: None). '
        'Each game-loop scenario may be labeled in the APK manifest file with '
        'one or more arbitrary strings, creating logical groupings (e.g. '
        'GPU_COMPATIBILITY_TESTS). If *--scenario-numbers* and '
        '*--scenario-labels* are specified together, Firebase Test Lab will '
        'first execute each scenario from *--scenario-numbers*. It will then '
        'expand each given scenario label into a list of scenario numbers marked '
        'with that label, and execute those scenarios.')
# Example #16
def AddUploadModelFlags(parser):
    """Registers the flags used by the UploadModel command.

  Args:
    parser: An argparse parser to which the UploadModel flags are added.
  """
    AddRegionResourceArg(parser, 'to upload model')
    # Model identity and serving-container configuration.
    parser.add_argument(
        '--display-name',
        required=True,
        help='Display name of the model.')
    parser.add_argument(
        '--description',
        required=False,
        help='Description of the model.')
    parser.add_argument(
        '--container-image-uri',
        required=True,
        help="""\
URI of the Model serving container file in the Container Registry
(e.g. gcr.io/myproject/server:latest).
""")
    parser.add_argument(
        '--artifact-uri',
        help="""\
Path to the directory containing the Model artifact and any of its
supporting files.
""")
    parser.add_argument(
        '--container-env-vars',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(),
        action=arg_parsers.UpdateAction,
        help='List of key-value pairs to set as environment variables.')
    parser.add_argument(
        '--container-command',
        metavar='COMMAND',
        type=arg_parsers.ArgList(),
        action=arg_parsers.UpdateAction,
        help="""\
Entrypoint for the container image. If not specified, the container
image's default entrypoint is run.
""")
    parser.add_argument(
        '--container-args',
        metavar='ARG',
        type=arg_parsers.ArgList(),
        action=arg_parsers.UpdateAction,
        help="""\
Comma-separated arguments passed to the command run by the container
image. If not specified and no `--command` is provided, the container
image's default command is used.
""")
    parser.add_argument(
        '--container-ports',
        metavar='PORT',
        type=arg_parsers.ArgList(
            element_type=arg_parsers.BoundedInt(1, 65535)),
        action=arg_parsers.UpdateAction,
        help="""\
Container ports to receive requests at. Must be a number between 1 and 65535,
inclusive.
""")
    parser.add_argument(
        '--container-predict-route',
        help='HTTP path to send prediction requests to inside the container.')
    parser.add_argument(
        '--container-health-route',
        help='HTTP path to send health checks to inside the container.')
    # Explanation configuration.
    parser.add_argument(
        '--explanation-method',
        help='Method used for explanation. Accepted values are '
        '`integrated-gradients`, `xrai` and `sampled-shapley`.')
    parser.add_argument(
        '--explanation-metadata-file',
        help='Path to a local JSON file that contains the metadata describing '
        'the Model\'s input and output for explanation.')
    parser.add_argument(
        '--explanation-step-count',
        type=int,
        help='Number of steps to approximate the path integral for '
        'explanation.')
    parser.add_argument(
        '--explanation-path-count',
        type=int,
        help='Number of feature permutations to consider when approximating '
        'the Shapley values for explanation.')
    parser.add_argument(
        '--smooth-grad-noisy-sample-count',
        type=int,
        help='Number of gradient samples used for approximation at '
        'explanation. Only applicable to explanation method '
        '`integrated-gradients` or `xrai`.')
    parser.add_argument(
        '--smooth-grad-noise-sigma',
        type=float,
        help='Single float value used to add noise to all the features for '
        'explanation. Only applicable to explanation method '
        '`integrated-gradients` or `xrai`.')
    parser.add_argument(
        '--smooth-grad-noise-sigma-by-feature',
        metavar='KEY=VALUE',
        type=arg_parsers.ArgDict(),
        action=arg_parsers.UpdateAction,
        help='Noise sigma by features for explanation. Noise sigma represents '
        'the standard deviation of the gaussian kernel that will be used to '
        'add noise to interpolated inputs prior to computing gradients. Only '
        'applicable to explanation method `integrated-gradients` or `xrai`.')
# Example #17
def AddIosTestArgs(parser):
    """Register args which are specific to iOS test commands.

  Args:
    parser: An argparse parser used to add arguments that follow a command in
        the CLI.
  """
    # Hidden flag: 'xctest' is currently the only accepted value for --type.
    parser.add_argument('--type',
                        category=base.COMMONLY_USED_FLAGS,
                        hidden=True,
                        choices=['xctest'],
                        help='The type of iOS test to run.')
    parser.add_argument(
        '--test',
        category=base.COMMONLY_USED_FLAGS,
        metavar='XCTEST_ZIP',
        help='The path to the test package (a zip file containing the iOS app '
        'and XCTest files). The given path may be in the local filesystem or in '
        'Google Cloud Storage using a URL beginning with `gs://`. Note: any '
        '.xctestrun file in this zip file will be ignored if *--xctestrun-file* '
        'is specified.')
    # Takes precedence over any .xctestrun file found inside the --test zip.
    parser.add_argument(
        '--xctestrun-file',
        category=base.COMMONLY_USED_FLAGS,
        metavar='XCTESTRUN_FILE',
        help='The path to an .xctestrun file that will override any .xctestrun '
        'file contained in the *--test* package. Because the .xctestrun file '
        'contains environment variables along with test methods to run and/or '
        'ignore, this can be useful for customizing or sharding test suites. The '
        'given path may be in the local filesystem or in Google Cloud Storage '
        'using a URL beginning with `gs://`.')
    parser.add_argument('--xcode-version',
                        category=base.COMMONLY_USED_FLAGS,
                        help="""\
      The version of Xcode that should be used to run an XCTest. Defaults to the
      latest Xcode version supported in Firebase Test Lab. This Xcode version
      must be supported by all iOS versions selected in the test matrix. The
      list of Xcode versions supported by each version of iOS can be viewed by
      running `$ {parent_command} versions list`.""")
    # action='append' makes --device repeatable; each occurrence contributes
    # one DIMENSION=VALUE dict to the resulting list.
    parser.add_argument('--device',
                        category=base.COMMONLY_USED_FLAGS,
                        type=arg_parsers.ArgDict(min_length=1),
                        action='append',
                        metavar='DIMENSION=VALUE',
                        help="""\
      A list of ``DIMENSION=VALUE'' pairs which specify a target device to test
      against. This flag may be repeated to specify multiple devices. The device
      dimensions are: *model*, *version*, *locale*, and *orientation*. If any
      dimensions are omitted, they will use a default value. The default value,
      and all possible values, for each dimension can be found with the
      ``list'' command for that dimension, such as `$ {parent_command} models
      list`. Omitting this flag entirely will run tests against a single device
      using defaults for every dimension.

      Examples:\n
      ```
      --device model=iphone8plus
      --device version=11.2
      --device model=ipadmini4,version=11.2,locale=zh_CN,orientation=landscape
      ```
      """)
    parser.add_argument(
        '--results-history-name',
        help=
        'The history name for your test results (an arbitrary string label; '
        'default: the bundle ID for the iOS application). All tests '
        'which use the same history name will have their results grouped '
        'together in the Firebase console in a time-ordered test history list.'
    )
# Example #18
def BetaArgsForClusterRef(parser):
    """Register beta-only flags for creating a Dataproc cluster.

  Args:
    parser: The argparse parser to add the beta-track flags to.
  """
    flags.AddComponentFlag(parser)
    flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)

    parser.add_argument('--num-preemptible-worker-local-ssds',
                        type=int,
                        help="""\
      The number of local SSDs to attach to each preemptible worker in
      a cluster.
      """)

    parser.add_argument('--max-idle',
                        type=arg_parsers.Duration(),
                        help="""\
        The duration before cluster is auto-deleted after last job completes,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

    # --max-age (relative lifespan) and --expiration-time (absolute deadline)
    # both configure auto-deletion, so at most one may be specified.
    auto_delete_group = parser.add_mutually_exclusive_group()
    auto_delete_group.add_argument('--max-age',
                                   type=arg_parsers.Duration(),
                                   help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

    auto_delete_group.add_argument('--expiration-time',
                                   type=arg_parsers.Datetime.Parse,
                                   help="""\
        The time when cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
        information on time formats.
        """)

    # Build --master-accelerator and --worker-accelerator from a shared help
    # template; the worker variant gets an extra note about preemptible VMs.
    for instance_type in ('master', 'worker'):
        help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
        if instance_type == 'worker':
            help_msg += """
      Note:
      No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
        help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
        parser.add_argument('--{0}-accelerator'.format(instance_type),
                            type=arg_parsers.ArgDict(spec={
                                'type': str,
                                'count': int,
                            }),
                            metavar='type=TYPE,[count=COUNT]',
                            help=help_msg)

    AddAllocationAffinityGroup(parser)
# Example #19
def AddServiceProxyConfigArgs(parser, hide_arguments=False):
    """Adds service proxy configuration arguments for instance templates.

  Args:
    parser: The argparse parser to add the arguments to.
    hide_arguments: If True, the service proxy flags are hidden from help
        output.
  """
    # All service-proxy flags live in one group so they can be hidden together.
    service_proxy_group = parser.add_group(hidden=hide_arguments)
    service_proxy_group.add_argument(
        '--service-proxy',
        type=arg_parsers.ArgDict(spec={
            'enabled': None,
            'serving-ports': str,
            'proxy-port': int,
            'tracing': service_proxy_aux_data.TracingState,
            'access-log': str,
            'network': str
        },
                                 allow_key_only=True,
                                 required_keys=['enabled']),
        hidden=hide_arguments,
        help="""\
      Controls whether the Traffic Director service proxy (Envoy) and agent are installed and configured on the VM.
      "cloud-platform" scope is enabled automatically to allow connections to the Traffic Director API.
      Do not use the --no-scopes flag.

      *enabled*::: If specified, the service-proxy software will be installed when the instance is created.
      The instance is configured to work with Traffic Director.

      *serving-ports*::: Semi-colon-separated (;) list of the ports, specified inside quotation marks ("), on which the customer's application/workload
      is serving.

      For example:

            --serving-ports="80;8080"

      The service proxy will intercept inbound traffic, then forward it to the specified serving port(s) on localhost.
      If not provided, no incoming traffic is intercepted.

      *proxy-port*::: The port on which the service proxy listens.
      The VM intercepts traffic and redirects it to this port to be handled by the service proxy.
      If omitted, the default value is '15001'.

      *tracing*::: Enables the service proxy to generate distributed tracing information.
      If set to ON, the service proxy's control plane generates a configuration that enables request ID-based tracing.
      For more information, refer to the `generate_request_id` documentation
      for the Envoy proxy. Allowed values are `ON` and `OFF`.

      *access-log*::: The filepath for access logs sent to the service proxy by the control plane.
      All incoming and outgoing requests are recorded in this file.
      For more information, refer to the file access log documentation for the Envoy proxy.

      *network*::: The name of a valid VPC network. The Google Cloud Platform VPC network used by the service proxy's control plane
      to generate dynamic configuration for the service proxy.
      """)
    service_proxy_group.add_argument('--service-proxy-labels',
                                     metavar='KEY=VALUE, ...',
                                     type=arg_parsers.ArgDict(),
                                     hidden=hide_arguments,
                                     help="""\
      Labels that you can apply to your service proxy. These will be reflected in your Envoy proxy's bootstrap metadata.
      These can be any `key=value` pairs that you want to set as proxy metadata (for example, for use with config filtering).
      You might use these flags for application and version labels: `app=review` and/or `version=canary`.
      """)
    # Always hidden (hidden=True, regardless of hide_arguments); per the help
    # text it is mainly for testing and development.
    service_proxy_group.add_argument('--service-proxy-agent-location',
                                     metavar='LOCATION',
                                     hidden=True,
                                     help="""\
      GCS bucket location of service-proxy-agent. Mainly used for testing and development.
      """)
# Example #20
def ArgsForClusterRef(parser, beta=False, include_deprecated=True):  # pylint: disable=unused-argument
    """Register flags for creating a dataproc cluster.

  Args:
    parser: The argparse.ArgParser to configure with dataproc cluster arguments.
    beta: whether or not this is a beta command (may affect flag visibility)
    include_deprecated: whether deprecated flags should be included
  """
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddTagsArgs(parser)
    # 30m is backend timeout + 5m for safety buffer.
    flags.AddTimeoutFlag(parser, default='35m')
    flags.AddZoneFlag(parser, short_flags=include_deprecated)

    parser.add_argument(
        '--metadata',
        type=arg_parsers.ArgDict(min_length=1),
        action='append',
        default=None,
        help=('Metadata to be made available to the guest operating system '
              'running on the instances'),
        metavar='KEY=VALUE')

    # Either allow creating a single node cluster (--single-node), or specifying
    # the number of workers in the multi-node cluster (--num-workers and
    # --num-preemptible-workers)
    node_group = parser.add_argument_group(mutex=True)  # Mutually exclusive
    node_group.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes. If this flag is not
      specified, a cluster with separate workers is created.
      """)
    # Not mutually exclusive
    worker_group = node_group.add_argument_group(
        help='Multi-node cluster flags')
    worker_group.add_argument(
        '--num-workers',
        type=int,
        help='The number of worker nodes in the cluster. Defaults to '
        'server-specified.')
    worker_group.add_argument(
        '--num-preemptible-workers',
        type=int,
        help='The number of preemptible worker nodes in the cluster.')

    parser.add_argument(
        '--master-machine-type',
        help='The type of machine to use for the master. Defaults to '
        'server-specified.')
    parser.add_argument(
        '--worker-machine-type',
        help='The type of machine to use for workers. Defaults to '
        'server-specified.')
    # A cluster image is identified either by custom image URI/name or by a
    # Dataproc image version, never both.
    image_parser = parser.add_mutually_exclusive_group()
    # TODO(b/73291743): Add external doc link to --image
    image_parser.add_argument(
        '--image',
        metavar='IMAGE',
        help='The full custom image URI or the custom image name that '
        'will be used to create a cluster.')
    image_parser.add_argument(
        '--image-version',
        metavar='VERSION',
        help='The image version to use for the cluster. Defaults to the '
        'latest version.')
    parser.add_argument(
        '--bucket',
        help='The Google Cloud Storage bucket to use with the Google Cloud '
        'Storage connector. A bucket is auto created when this parameter is '
        'not specified.')

    netparser = parser.add_mutually_exclusive_group()
    netparser.add_argument('--network',
                           help="""\
      The Compute Engine network that the VM instances of the cluster will be
      part of. This is mutually exclusive with --subnet. If neither is
      specified, this defaults to the "default" network.
      """)
    netparser.add_argument('--subnet',
                           help="""\
      Specifies the subnet that the cluster will be part of. This is mutually
      exclusive with --network.
      """)
    parser.add_argument(
        '--num-worker-local-ssds',
        type=int,
        help='The number of local SSDs to attach to each worker in a cluster.')
    parser.add_argument(
        '--num-master-local-ssds',
        type=int,
        help='The number of local SSDs to attach to the master in a cluster.')
    parser.add_argument(
        '--initialization-actions',
        type=arg_parsers.ArgList(min_length=1),
        metavar='CLOUD_STORAGE_URI',
        help=('A list of Google Cloud Storage URIs of '
              'executables to run on each node in the cluster.'))
    parser.add_argument(
        '--initialization-action-timeout',
        type=arg_parsers.Duration(),
        metavar='TIMEOUT',
        default='10m',
        help=('The maximum duration of each initialization action. See '
              '$ gcloud topic datetimes for information on duration formats.'))
    # Only 1 (Standard) or 3 (High Availability) masters are valid; the custom
    # validator enforces this on top of the 1..3 bounds check.
    parser.add_argument(
        '--num-masters',
        type=arg_parsers.CustomFunctionValidator(
            lambda n: int(n) in [1, 3],
            'Number of masters must be 1 (Standard) or 3 (High Availability)',
            parser=arg_parsers.BoundedInt(1, 3)),
        help="""\
      The number of master nodes in the cluster.

      Number of Masters | Cluster Mode
      --- | ---
      1 | Standard
      3 | High Availability
      """)
    parser.add_argument('--properties',
                        type=arg_parsers.ArgDict(),
                        metavar='PREFIX:PROPERTY=VALUE',
                        default={},
                        help="""\
Specifies configuration properties for installed packages, such as Hadoop
and Spark.

Properties are mapped to configuration files by specifying a prefix, such as
"core:io.serializations". The following are supported prefixes and their
mappings:

Prefix | File | Purpose of file
--- | --- | ---
capacity-scheduler | capacity-scheduler.xml | Hadoop YARN Capacity Scheduler configuration
core | core-site.xml | Hadoop general configuration
distcp | distcp-default.xml | Hadoop Distributed Copy configuration
hadoop-env | hadoop-env.sh | Hadoop specific environment variables
hdfs | hdfs-site.xml | Hadoop HDFS configuration
hive | hive-site.xml | Hive configuration
mapred | mapred-site.xml | Hadoop MapReduce configuration
mapred-env | mapred-env.sh | Hadoop MapReduce specific environment variables
pig | pig.properties | Pig configuration
spark | spark-defaults.conf | Spark configuration
spark-env | spark-env.sh | Spark specific environment variables
yarn | yarn-site.xml | Hadoop YARN configuration
yarn-env | yarn-env.sh | Hadoop YARN specific environment variables

See https://cloud.google.com/dataproc/docs/concepts/configuring-clusters/cluster-properties
for more information.

""")
    parser.add_argument(
        '--service-account',
        help='The Google Cloud IAM service account to be authenticated as.')
    parser.add_argument('--scopes',
                        type=arg_parsers.ArgList(min_length=1),
                        metavar='SCOPE',
                        help="""\
Specifies scopes for the node instances. Multiple SCOPEs can be specified,
separated by commas.
Examples:

  $ {{command}} example-cluster --scopes https://www.googleapis.com/auth/bigtable.admin

  $ {{command}} example-cluster --scopes sqlservice,bigquery

The following *minimum scopes* are necessary for the cluster to function
properly and are always added, even if not explicitly specified:

  {minimum_scopes}

If the `--scopes` flag is not specified, the following *default scopes*
are also included:

  {additional_scopes}

If you want to enable all scopes use the 'cloud-platform' scope.

{scopes_help}
""".format(minimum_scopes='\n  '.join(constants.MINIMUM_SCOPE_URIS),
           additional_scopes='\n  '.join(
               constants.ADDITIONAL_DEFAULT_SCOPE_URIS),
           scopes_help=compute_helpers.SCOPES_HELP))

    if include_deprecated:
        _AddDiskArgsDeprecated(parser)
    else:
        _AddDiskArgs(parser)

    # --no-address is an exception to the no negative-flag style guideline to be
    # consistent with gcloud compute instances create --no-address
    parser.add_argument('--no-address',
                        action='store_true',
                        help="""\
      If provided, the instances in the cluster will not be assigned external
      IP addresses.

      If omitted the instances in the cluster will each be assigned an
      ephemeral external IP address.

      Note: Dataproc VMs need access to the Dataproc API. This can be achieved
      without external IP addresses using Private Google Access
      (https://cloud.google.com/compute/docs/private-google-access).
      """)

    # Shared help text for the three boot-disk-type flags below.
    boot_disk_type_detailed_help = """\
      The type of the boot disk. The value must be ``pd-standard'' or
      ``pd-ssd''.
      """
    parser.add_argument('--master-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
    parser.add_argument('--preemptible-worker-boot-disk-type',
                        help=boot_disk_type_detailed_help)
def AddIPAliasFlags(parser, hidden=False):
    """Registers the alias-IP (VPC-native) cluster flags on the parser.

    Args:
      parser: A given parser.
      hidden: Whether or not to hide the help text.
    """
    enable_ip_alias_help = """\
Enable use of alias IPs (https://cloud.google.com/compute/docs/alias-ip/)
for pod IPs. This will create two new subnetworks, one for the
instance and pod IPs, and another to reserve space for the services
range.
"""
    services_cidr_help = """\
Set the IP range for the services IPs.

Can be specified as a netmask size (e.g. '/20') or as in CIDR notion
(e.g. '10.100.0.0/20'). If given as a netmask size, the IP range will
be chosen automatically from the available space in the network.

If unspecified, the services CIDR range will use automatic defaults.

Can not be specified unless '--enable-ip-alias' is also specified.
"""
    create_subnetwork_help = """\
Create a new subnetwork for the cluster. The name and range of the
subnetwork can be customized via optional 'name' and 'range' key-value
pairs.

'name' specifies the name of the subnetwork to be created.

'range' specifies the IP range for the new subnetwork. This can either
be a netmask size (e.g. '/20') or a CIDR range (e.g. '10.0.0.0/20').
If a netmask size is specified, the IP is automatically taken from
the free space in the cluster's network.

Examples:

Create a new subnetwork with a default name and size.

      $ {command} --create-subnetwork ""

Create a new subnetwork named "my-subnet" with netmask of size 21.

      $ {command} --create-subnetwork name=my-subnet,range=/21

Create a new subnetwork with a default name with the primary range of
10.100.0.0/16.

      $ {command} --create-subnetwork range=10.100.0.0/16

Create a new subnetwork with the name "my-subnet" with a default range.

      $ {command} --create-subnetwork name=my-subnet

Can not be specified unless '--enable-ip-alias' is also specified. Can
not be used in conjunction with the '--subnetwork' option.
"""
    cluster_secondary_range_help = """\
Set the secondary range to be used as the source for pod IPs. Alias
ranges will be allocated from this secondary range.  NAME must be the
name of an existing secondary range in the cluster subnetwork.

Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
"""
    services_secondary_range_help = """\
Set the secondary range to be used for services
(e.g. ClusterIPs). NAME must be the name of an existing secondary
range in the cluster subnetwork.

Must be used in conjunction with '--enable-ip-alias'. Cannot be used
with --create-subnetwork.
"""
    # One (flag, extra-kwargs) spec per argument, registered in the same
    # order as before; every flag shares the caller-supplied `hidden` value.
    flag_specs = [
        ('--enable-ip-alias',
         dict(action='store_true', default=None, help=enable_ip_alias_help)),
        ('--services-ipv4-cidr',
         dict(metavar='CIDR', help=services_cidr_help)),
        ('--create-subnetwork',
         dict(metavar='KEY=VALUE',
              type=arg_parsers.ArgDict(),
              help=create_subnetwork_help)),
        ('--cluster-secondary-range-name',
         dict(metavar='NAME', help=cluster_secondary_range_help)),
        ('--services-secondary-range-name',
         dict(metavar='NAME', help=services_secondary_range_help)),
    ]
    for flag, kwargs in flag_specs:
        parser.add_argument(flag, hidden=hidden, **kwargs)
# Example #22 (score: 0)
# Source file: patch.py — project: bopopescu/subtle
    def Args(parser):
        """Registers flags for this Cloud SQL instance patch command.

        Called by calliope to gather arguments for this command. Please add
        arguments in alphabetical order except for no- or a clear- pair for
        that argument which can follow the argument itself.

        Args:
          parser: An argparse parser that you can use to add arguments that go
              on the command line after this command. Positional arguments are
              allowed.
        """
        parser.add_argument(
            '--activation-policy',
            required=False,
            choices=['ALWAYS', 'NEVER', 'ON_DEMAND'],
            help=
            'The activation policy for this instance. This specifies when the '
            'instance should be activated and is applicable only when the '
            'instance state is RUNNABLE.')
        parser.add_argument(
            '--assign-ip',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help='The instance must be assigned an IP address.')
        # Setting and clearing the authorized App Engine apps are mutually
        # exclusive operations on the same server-side setting.
        gae_apps_group = parser.add_mutually_exclusive_group()
        gae_apps_group.add_argument(
            '--authorized-gae-apps',
            type=arg_parsers.ArgList(min_length=1),
            metavar='APP',
            required=False,
            help='A list of App Engine app IDs that can access this instance.')
        gae_apps_group.add_argument(
            '--clear-gae-apps',
            required=False,
            action='store_true',
            help=
            ('Specified to clear the list of App Engine apps that can access '
             'this instance.'))
        # Likewise, set vs. clear of the authorized-networks list.
        networks_group = parser.add_mutually_exclusive_group()
        networks_group.add_argument(
            '--authorized-networks',
            type=arg_parsers.ArgList(min_length=1),
            metavar='NETWORK',
            required=False,
            help=
            'The list of external networks that are allowed to connect to the '
            'instance. Specified in CIDR notation, also known as \'slash\' '
            'notation (e.g. 192.168.100.0/24).')
        networks_group.add_argument(
            '--clear-authorized-networks',
            required=False,
            action='store_true',
            help=
            'Clear the list of external networks that are allowed to connect '
            'to the instance.')
        # Enabling a backup window and disabling backups are exclusive.
        backups_group = parser.add_mutually_exclusive_group()
        backups_group.add_argument(
            '--backup-start-time',
            required=False,
            help=
            'The start time of daily backups, specified in the 24 hour format '
            '- HH:MM, in the UTC timezone.')
        backups_group.add_argument(
            '--no-backup',
            required=False,
            action='store_true',
            help='Specified if daily backup should be disabled.')
        # Set vs. clear of the database flags.
        database_flags_group = parser.add_mutually_exclusive_group()
        database_flags_group.add_argument(
            '--database-flags',
            type=arg_parsers.ArgDict(min_length=1),
            metavar='FLAG=VALUE',
            required=False,
            help=
            'A comma-separated list of database flags to set on the instance. '
            'Use an equals sign to separate flag name and value. Flags without '
            'values, like skip_grant_tables, can be written out without a value '
            'after, e.g., `skip_grant_tables=`. Use on/off for '
            'booleans. View the Instance Resource API for allowed flags. '
            '(e.g., `--database-flags max_allowed_packet=55555,skip_grant_tables=,'
            'log_output=1`)')
        database_flags_group.add_argument(
            '--clear-database-flags',
            required=False,
            action='store_true',
            help='Clear the database flags set on the instance. '
            'WARNING: Instance will be restarted.')
        parser.add_argument(
            '--cpu',
            type=int,
            required=False,
            help='A whole number value indicating how many cores are desired in'
            'the machine. Both --cpu and --memory must be specified if a custom '
            'machine type is desired, and the --tier flag must be omitted.')
        parser.add_argument(
            '--enable-bin-log',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help=
            'Enable binary log. If backup configuration is disabled, binary '
            'log should be disabled as well.')
        parser.add_argument(
            '--follow-gae-app',
            required=False,
            help='The App Engine app this instance should follow. It must be in '
            'the same region as the instance. '
            'WARNING: Instance may be restarted.')
        parser.add_argument(
            '--gce-zone',
            required=False,
            help='The preferred Compute Engine zone (e.g. us-central1-a, '
            'us-central1-b, etc.). '
            'WARNING: Instance may be restarted.')
        # Positional argument: the instance to patch.
        parser.add_argument('instance',
                            completion_resource='sql.instances',
                            help='Cloud SQL instance ID.')
        parser.add_argument(
            '--memory',
            type=arg_parsers.BinarySize(),
            required=False,
            help='A whole number value indicating how much memory is desired in '
            'the machine. A size unit should be provided (eg. 3072MiB or 9GiB) - '
            'if no units are specified, GiB is assumed. Both --cpu and --memory '
            'must be specified if a custom machine type is desired, and the --tier '
            'flag must be omitted.')
        parser.add_argument('--pricing-plan',
                            '-p',
                            required=False,
                            choices=['PER_USE', 'PACKAGE'],
                            help='The pricing plan for this instance. ')
        parser.add_argument('--replication',
                            required=False,
                            choices=['SYNCHRONOUS', 'ASYNCHRONOUS'],
                            help='The type of replication this instance uses.')
        parser.add_argument(
            '--require-ssl',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help=
            'mysqld should default to \'REQUIRE X509\' for users connecting '
            'over IP.')
        parser.add_argument(
            '--tier',
            '-t',
            required=False,
            help='The tier of service for this instance, for example D0, D1. '
            'WARNING: Instance will be restarted.')
        parser.add_argument(
            '--enable-database-replication',
            action='store_true',
            default=None,  # Tri-valued: None => don't change the setting.
            help='Enable database replication. Applicable only '
            'for read replica instance(s). WARNING: Instance will be restarted.'
        )
        # NOTE(review): 'async' is a reserved word in Python 3.7+, so the
        # parsed value presumably must be read via getattr(args, 'async')
        # rather than attribute access — confirm against callers.
        parser.add_argument('--async',
                            action='store_true',
                            help='Do not wait for the operation to complete.')
        parser.add_argument(
            '--diff',
            action='store_true',
            help='Show what changed as a result of the update.')
# Example #23 (score: 0)
def GetTagsFlag():
    """Builds and returns the `--tags` Argument (TAG=VALUE pairs)."""
    tags_kwargs = {
        'required': False,
        'type': arg_parsers.ArgDict(),
        'metavar': 'TAG=VALUE',
        'help': 'Tags for the package.',
    }
    return base.Argument('--tags', **tags_kwargs)
def AddAutoscalerArgs(parser):
    """Adds autoscaler configuration flags to the given commandline parser.

    Registers the scaling-signal toggles, the target managed instance group,
    replica bounds, per-signal utilization targets, and the composite
    --custom-metric-utilization flag.
    """
    parser.add_argument(
        '--scale-based-on-load-balancing',
        action='store_true',
        help=('Sets autoscaling based on load balancing '
              'utilization.'),
    )
    parser.add_argument(
        '--scale-based-on-cpu',
        action='store_true',
        help='Sets autoscaling based on cpu utilization.',
    )
    parser.add_argument(
        '--target',
        help='The managed instance group to scale, '
        'either the fully-qualified URL or the managed instance '
        'group name.',
        required=True,
    )
    parser.add_argument(
        '--cool-down-period',
        type=arg_parsers.Duration(),
        help='The number of seconds to wait after a virtual '
        'machine has been started before the autoscaler starts '
        'collecting information from it. This accounts '
        'for the amount of time it may take for a virtual '
        'machine to initialize, during which the collected usage '
        'information is not reliable for autoscaling. It is '
        'recommended that you set this to at least the amount of '
        'time it takes for your virtual machine and applications '
        'to start.',
    )
    parser.add_argument(
        '--description',
        help='An optional description for this '
        'autoscaler.',
    )
    parser.add_argument(
        '--min-num-replicas',
        type=int,
        help='Sets the minimum number of instances the '
        'autoscaler will maintain. The autoscaler will never '
        'scale the number of instances below this number. If not '
        'provided, the default is 2.',
    )
    parser.add_argument(
        '--max-num-replicas',
        type=int,
        help='Sets the maximum number of instances the '
        'autoscaler will maintain for the managed instance '
        'group.',
        required=True,
    )
    parser.add_argument(
        '--target-cpu-utilization',
        type=float,
        help='The CPU utilization the autoscaler will aim to '
        'maintain. Must be a float between 0.0 to 1.0, '
        'exclusive',
    )
    parser.add_argument(
        '--custom-metric',
        type=str,
        help='Sets a Google Cloud '
        'Monitoring instance metric to scale based on (see '
        'https://developers.google.com/cloud-monitoring/metrics'
        ').',
    )
    parser.add_argument(
        '--target-custom-metric-utilization',
        type=float,
        help='The custom metric level the autoscaler will aim to '
        'maintain. This can be a float that is greater than '
        '0.0.',
    )
    parser.add_argument(
        '--custom-metric-utilization-target-type',
        type=str,
        help='The type of your custom metric. Choose from '
        'the following: {0}.'.format(
            ', '.join(ALLOWED_UTILIZATION_TARGET_TYPES)),
    )
    parser.add_argument(
        '--target-load-balancer-utilization',
        type=float,
        help='The HTTP load balancer utilization level the '
        'autoscaler will maintain. This must be a float greater '
        'than 0.0.',
    )
    # Repeatable composite flag: each occurrence is a metric/target/type dict.
    custom_metric_utilization = parser.add_argument(
        '--custom-metric-utilization',
        type=arg_parsers.ArgDict(spec={
            'metric': str,
            'utilization-target': float,
            'utilization-target-type': str
        }, ),
        # pylint:disable=protected-access
        action=arg_parsers.FloatingListValuesCatcher(argparse._AppendAction),
        help=(
            'Adds target value of a Google Cloud Monitoring metric Autoscaler '
            'will aim to maintain.'),
        metavar='PROPERTY=VALUE',
    )
    # Legacy pattern: rich help is attached as a 'detailed_help' attribute on
    # the argument object returned by add_argument (presumably consumed by the
    # calliope help generator — confirm).
    custom_metric_utilization.detailed_help = """
  Adds target value of a Google Cloud Monitoring metric Autoscaler will aim to
  maintain.

  *metric*::: Protocol-free URL of a Google Cloud Monitoring metric.

  *utilization-target*::: Value of the metric Autoscaler will aim to maintain
  on the average (greater than 0.0).

  *utilization-target-type*::: How target is expressed. You can choose from the
  following: {0}.
  """.format(', '.join(ALLOWED_UTILIZATION_TARGET_TYPES))
# Example #25 (score: 0)
def _CommonArgs(parser,
                release_track,
                support_source_instance,
                support_local_ssd_size=False,
                support_kms=False,
                support_resource_policy=False,
                support_min_node_cpu=False):
    """Adds the arguments shared by all tracks of instance-template creation.

    Args:
      parser: The argparse parser to register flags on.
      release_track: The command's release track; ALPHA selects the
          deprecated maintenance-policy flag variant.
      support_source_instance: bool; if True, also registers the
          source-instance resource arg and --configure-disk.
      support_local_ssd_size: bool; if True, local SSD flags accept a size.
      support_kms: bool; if True, disk flags accept KMS options.
      support_resource_policy: bool; if True, --create-disk accepts a
          resource policy.
      support_min_node_cpu: bool; if True, registers --min-node-cpu.
    """
    parser.display_info.AddFormat(instance_templates_flags.DEFAULT_LIST_FORMAT)
    metadata_utils.AddMetadataArgs(parser)
    instances_flags.AddDiskArgs(parser, enable_kms=support_kms)
    instances_flags.AddCreateDiskArgs(parser,
                                      enable_kms=support_kms,
                                      resource_policy=support_resource_policy)
    if support_local_ssd_size:
        instances_flags.AddLocalSsdArgsWithSize(parser)
    else:
        instances_flags.AddLocalSsdArgs(parser)
    instances_flags.AddCanIpForwardArgs(parser)
    instances_flags.AddAddressArgs(parser, instances=False)
    instances_flags.AddAcceleratorArgs(parser)
    instances_flags.AddMachineTypeArgs(parser)
    # Only the ALPHA track deprecates the old maintenance-policy spelling.
    deprecate_maintenance_policy = release_track in [base.ReleaseTrack.ALPHA]
    instances_flags.AddMaintenancePolicyArgs(parser,
                                             deprecate_maintenance_policy)
    instances_flags.AddNoRestartOnFailureArgs(parser)
    instances_flags.AddPreemptibleVmArgs(parser)
    instances_flags.AddServiceAccountAndScopeArgs(parser, False)
    instances_flags.AddTagsArgs(parser)
    instances_flags.AddCustomMachineTypeArgs(parser)
    instances_flags.AddImageArgs(parser)
    instances_flags.AddNetworkArgs(parser)
    instances_flags.AddShieldedInstanceConfigArgs(parser)
    labels_util.AddCreateLabelsFlags(parser)
    instances_flags.AddNetworkTierArgs(parser, instance=True)
    instances_flags.AddPrivateNetworkIpArgs(parser)

    sole_tenancy_flags.AddNodeAffinityFlagToParser(parser)

    if support_min_node_cpu:
        instances_flags.AddMinNodeCpuArg(parser)

    flags.AddRegionFlag(parser,
                        resource_type='subnetwork',
                        operation_type='attach')

    parser.add_argument(
        '--description',
        help='Specifies a textual description for the instance template.')

    # NOTE(review): stores the resource arg on the Create command class as a
    # side effect — presumably read back in Create.Run; confirm.
    Create.InstanceTemplateArg = (
        instance_templates_flags.MakeInstanceTemplateArg())
    Create.InstanceTemplateArg.AddArgument(parser, operation_type='create')
    if support_source_instance:
        instance_templates_flags.MakeSourceInstanceArg().AddArgument(parser)
        parser.add_argument(
            '--configure-disk',
            type=arg_parsers.ArgDict(spec={
                'auto-delete': arg_parsers.ArgBoolean(),
                'device-name': str,
                'instantiate-from': str,
                'custom-image': str,
            }, ),
            metavar='PROPERTY=VALUE',
            action='append',
            help="""\
        This option has effect only when used with `--source-instance`. It
        allows you to override how the source-instance's disks are defined in
        the template.

        *auto-delete*::: If `true`, this persistent disk will be automatically
        deleted when the instance is deleted. However, if the disk is later
        detached from the instance, this option won't apply. If not provided,
        the setting is copied from the source instance. Allowed values of the
        flag are: `false`, `no`, `true`, and `yes`.

        *device-name*::: Name of the device.

        *instantiate-from*::: Specifies whether to include the disk and which
        image to use. Valid values are: {}

        *custom-image*::: The custom image to use if custom-image is specified
        for instantiate-from.
        """.format(', '.join(_INSTANTIATE_FROM_VALUES)),
        )

    instances_flags.AddReservationAffinityGroup(parser,
                                                group_text="""\
Specifies the reservation for instances created from this template.
""",
                                                affinity_text="""\
The type of reservation for instances created from this template.
""")

    parser.display_info.AddCacheUpdater(completers.InstanceTemplatesCompleter)
# Example #26 (score: 0)
def FlattenedArgDict(value):
    """Parses a KEY=VALUE,... string into a list of key/value dicts.

    Args:
      value: str, a comma-separated KEY=VALUE string in the format accepted
          by arg_parsers.ArgDict.

    Returns:
      A list with one {'key': ..., 'value': ...} dict per parsed entry.
    """
    parsed = arg_parsers.ArgDict()(value)
    # Use fresh loop names: the original comprehension rebound `value`,
    # shadowing the function parameter.
    return [{'key': key, 'value': val} for key, val in parsed.items()]
# Example #27 (score: 0)
def AddCreateInstancesFlags(parser):
    """Adds stateful flags for creating and updating instance configs.

    Registers --instance (required), the repeatable --stateful-disk dict
    flag, and the --stateful-metadata dict flag.

    Args:
      parser: The argparse parser to register the flags on.
    """
    parser.add_argument('--instance',
                        required=True,
                        help="""Name of the new instance to create.""")
    stateful_disks_help = """
      Stateful disk for the managed instance group to preserve. Usually,
      a managed instance group deletes disks when deleting instances; however,
      stateful disks are detached from deleted instances and are reattached
      automatically to the instance on recreation, autohealing, updates, and any
      other lifecycle transitions of the instance.

      Stateful disks specified here form part of the per-instance config for
      the new instance.

      The same disk can be attached to many instances but only in read-only
      mode.

      Use this flag multiple times to attach more disks.

      *device-name*::: (Required) Device name under which the disk is or will be
      attached.

      *source*::: (Required) URI of an existing persistent disk to attach under
      the specified device-name.

      *mode*::: Specifies the attachment mode of the disk. Supported options are
      `ro` for read-only and `rw` for read-write. If omitted, defaults to `rw`.
      `mode` can only be specified if `source` is given.
      """ + AUTO_DELETE_ARG_HELP
    parser.add_argument(
        '--stateful-disk',
        type=arg_parsers.ArgDict(
            spec={
                'device-name': str,
                'source': str,
                'mode': str,
                'auto-delete':
                    AutoDeleteFlag.ValidatorWithFlagName('--stateful-disk'),
            }),
        action='append',
        help=stateful_disks_help,
    )
    stateful_metadata_argument_name = '--stateful-metadata'
    # The flag name is interpolated into the help text so the two cannot drift
    # apart if the flag is renamed. (Previously the .format() call had no
    # matching {argument_name} placeholder and was a no-op while the help text
    # hardcoded the flag name; the rendered text is unchanged.)
    stateful_metadata_help = """
      Additional metadata to be made available to the guest operating system
      on the instance along with the metadata defined in the instance template.

      Use stateful metadata to define key/value pairs specific to an instance to
      differentiate it from other instances in the managed instance group. The
      stateful metadata forms part of the per-instance config for the new
      instance.

      Stateful metadata key/value pairs are preserved on instance recreation,
      autohealing, updates, and any other lifecycle transitions of the
      instance.

      Only metadata keys provided in this flag are mutated. Stateful metadata
      values defined for the keys already existing in the instance template
      override the  values from the instance template. Other metadata entries
      from the instance  template will remain unaffected and available.

      Each metadata entry is a key/value pair separated by an equals sign.
      Metadata keys must be unique and less than 128 bytes in length.
      Multiple entries can be passed to this flag, e.g.,
      `{argument_name} key-1=value-1,key-2=value-2,key-3=value-3`.
  """.format(argument_name=stateful_metadata_argument_name)
    parser.add_argument(stateful_metadata_argument_name,
                        type=arg_parsers.ArgDict(min_length=1),
                        default={},
                        action=arg_parsers.StoreOnceAction,
                        metavar='KEY=VALUE',
                        help=stateful_metadata_help)
# Example #28 (score: 0)
def AddAndroidBetaArgs(parser):
    """Register args which are only available in the Android beta run command.

  Args:
    parser: An argparse parser used to add args that follow a command.
  """
    parser.add_argument(
        '--network-profile',
        metavar='PROFILE_ID',
        # TODO(b/36366322): use {grandparent_command} once available
        help='The name of the network traffic profile, for example '
        '--network-profile=LTE, which consists of a set of parameters to emulate '
        'network conditions when running the test (default: no network shaping; '
        'see available profiles listed by the `$ gcloud firebase test '
        'network-profiles list` command). This feature only works on physical '
        'devices.')
    # Surfaced under the Robo-test category in the generated help.
    parser.add_argument(
        '--robo-script',
        category=ANDROID_ROBO_TEST,
        help='The path to a Robo Script JSON file. The path may be in the local '
        'filesystem or in Google Cloud Storage using gs:// notation. You can '
        'guide the Robo test to perform specific actions by recording a Robo '
        'Script in Android Studio and then specifying this argument. Learn more '
        'at https://firebase.google.com/docs/test-lab/robo-ux-test#scripting.')
    parser.add_argument(
        '--additional-apks',
        type=arg_parsers.ArgList(min_length=1, max_length=100),
        metavar='APK',
        help='A list of up to 100 additional APKs to install, in addition to '
        'those being directly tested. The path may be in the local filesystem or '
        'in Google Cloud Storage using gs:// notation.')
    # Repeatable FILE=DEVICE_DIR dict flag; entries accumulate via append.
    parser.add_argument('--other-files',
                        type=arg_parsers.ArgDict(min_length=1),
                        action='append',
                        metavar='FILE=DEVICE_DIR',
                        help="""\
      A list of file=device-directory pairs that indicate paths of files to push
      to the device before starting tests, and the device directory to push them
      to.\n
      Source file paths may be in the local filesystem or in Google Cloud
      Storage (gs://...). Device directories must be absolute, whitelisted paths
      (${EXTERNAL_STORAGE}, or ${ANDROID_DATA}/local/tmp).\n
      Examples:\n
      ```
      --other-files local/file1=/sdcard/dir1/
      --other-files gs://bucket/file2=/sdcard/dir2
      ```\n
      This flag only copies files to the device. To install files, like OBB or
      APK files, see --obb-files and --additional-apks.
      """)
    # Mutually exclusive sharding options group.
    sharding_options = parser.add_group(mutex=True, help='Sharding options.')
    sharding_options.add_argument('--num-uniform-shards',
                                  metavar='int',
                                  type=arg_validate.POSITIVE_INT_PARSER,
                                  help="""\
      Specifies the number of shards into which you want to evenly distribute
      test cases. The shards are run in parallel on separate devices. For
      example, if your test execution contains 20 test cases and you specify
      four shards, each shard executes five test cases.

      The number of shards should be less than the total number of test
      cases. The number of shards specified must be >= 1 and <= 50.
      """)
    sharding_options.add_argument('--test-targets-for-shard',
                                  metavar='TEST_TARGETS_FOR_SHARD',
                                  action='append',
                                  help="""\
      Specifies a group of packages, classes, and/or test cases to run in
      each shard (a group of test cases). Shards are run in parallel on
      separate devices. You can repeat this flag up to 50 times to specify
      multiple shards.

      Note: If you include the flags --environment-variable or --test-targets
      when running --test-targets-for-shard, the flags are applied to all the
      shards you create.

      Examples:

      You can also specify multiple packages, classes, or test cases in the
      same shard by separating each item with a comma. For example:


      ```
      --test-targets-for-shard
      "package com.package1.for.shard1,com.package2.for.shard1"
      ```

      ```
      --test-targets-for-shard
      "class com.foo.ClassForShard2#testMethod1,com.foo.ClassForShard2#testMethod2"
      ```

      To specify both package and class in the same shard, separate package
      and class with semi-colons:

      ```
      --test-targets-for-shard
      "class com.foo.ClassForShard3;package com.package.for.shard3"
      ```
      """)
# Example #29 (score: 0)
# Source file: create.py — project: 42force/beta
def _AddArgs(cls, parser, include_alpha=False):
    """Add subnetwork create arguments to parser.

    Args:
      cls: The command class the resource arguments are attached to; this
        function sets cls.SUBNETWORK_ARG and cls.NETWORK_ARG.
      parser: The argparse parser to register the flags on.
      include_alpha: bool, if True also register the alpha-track-only flags
        (--purpose, --role, --aggregation-interval, --flow-sampling,
        --metadata).
    """
    cls.SUBNETWORK_ARG = flags.SubnetworkArgument()
    cls.NETWORK_ARG = network_flags.NetworkArgumentForOtherResource(
        'The network to which the subnetwork belongs.')
    cls.SUBNETWORK_ARG.AddArgument(parser, operation_type='create')
    cls.NETWORK_ARG.AddArgument(parser)

    parser.add_argument('--description',
                        help='An optional description of this subnetwork.')

    parser.add_argument(
        '--range',
        required=True,
        help='The IP space allocated to this subnetwork in CIDR format.')

    parser.add_argument(
        '--enable-private-ip-google-access',
        action='store_true',
        default=False,
        help=(
            'Enable/disable access to Google Cloud APIs from this subnet for '
            'instances without a public ip address.'))

    parser.add_argument('--secondary-range',
                        type=arg_parsers.ArgDict(min_length=1),
                        action='append',
                        metavar='PROPERTY=VALUE',
                        help="""\
      Adds a secondary IP range to the subnetwork for use in IP aliasing.

      For example, `--secondary-range range1=192.168.64.0/24` adds
      a secondary range 192.168.64.0/24 with name range1.

      * `RANGE_NAME` - Name of the secondary range.
      * `RANGE` - `IP range in CIDR format.`
      """)

    # default=None (not False) so "flag not given" can be distinguished from
    # an explicit value when the request is built.
    parser.add_argument(
        '--enable-flow-logs',
        action='store_true',
        default=None,
        help=(
            'Enable/disable VPC flow logging for this subnet. More information '
            'for VPC flow logs can be found at '
            'https://cloud.google.com/vpc/docs/using-flow-logs.'))

    if include_alpha:
        parser.add_argument(
            '--purpose',
            choices={
                'PRIVATE':
                'Regular user created or automatically created subnet.',
                'INTERNAL_HTTPS_LOAD_BALANCER':
                'Reserved for Internal HTTP(S) Load Balancing.'
            },
            # Normalize CLI-style input (e.g. "internal-https-load-balancer")
            # to the enum spelling before choices validation.
            type=lambda x: x.replace('-', '_').upper(),
            help='The purpose of this subnetwork.')

        parser.add_argument(
            '--role',
            choices={
                'ACTIVE': 'The ACTIVE subnet that is currently used.',
                'BACKUP': 'The BACKUP subnet that could be promoted to ACTIVE.'
            },
            type=lambda x: x.replace('-', '_').upper(),
            help=
            # BUG FIX: the original implicit concatenation joined
            # '...only used when' + 'purpose=...' into "whenpurpose=";
            # a trailing space restores the intended help text.
            ('The role of subnetwork. This field is only used when '
             'purpose=INTERNAL_HTTPS_LOAD_BALANCER. The value can be set to '
             'ACTIVE or BACKUP. An ACTIVE subnetwork is one that is currently '
             'being used for Internal HTTP(S) Load Balancing. A BACKUP '
             'subnetwork is one that is ready to be promoted to ACTIVE or is '
             'currently draining.'))

        aggregation_interval_argument = base.ChoiceArgument(
            '--aggregation-interval',
            choices=[
                'interval-5-sec', 'interval-30-sec', 'interval-1-min',
                'interval-5-min', 'interval-10-min', 'interval-15-min'
            ],
            help_str="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. Toggles the aggregation interval for collecting flow logs.
        Increasing the interval time will reduce the amount of generated flow
        logs for long lasting connections. Default is an interval of 5 seconds
        per connection.
        """)
        aggregation_interval_argument.AddToParser(parser)

        parser.add_argument('--flow-sampling',
                            type=arg_parsers.BoundedFloat(lower_bound=0.0,
                                                          upper_bound=1.0),
                            help="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. The value of the field must be in [0, 1]. Set the sampling rate
        of VPC flow logs within the subnetwork where 1.0 means all collected
        logs are reported and 0.0 means no logs are reported. Default is 0.5
        which means half of all collected logs are reported.
        """)

        metadata_argument = base.ChoiceArgument(
            '--metadata',
            choices=['include-all-metadata', 'exclude-all-metadata'],
            help_str="""\
        Can only be specified if VPC flow logging for this subnetwork is
        enabled. Configures whether metadata fields should be added to the
        reported VPC flow logs. Default is to include all metadata.
        """)
        metadata_argument.AddToParser(parser)
Example #30
0
File: flags.py  Project: PinTrees/novelhub
    'Not a valid IPV4 CIDR block value for the Cloud SQL instance')

# Description text for the argument group that bundles the Airflow
# configuration flags defined below (presumably shown in --help output;
# confirm against the command that consumes it).
AIRFLOW_CONFIGS_FLAG_GROUP_DESCRIPTION = (
    'Group of arguments for modifying the Airflow configuration.')

# Boolean flag (store_true) that requests removal of every Airflow config
# override from the environment, as opposed to the per-key update/remove
# flags defined alongside it.
CLEAR_AIRFLOW_CONFIGS_FLAG = base.Argument(
    '--clear-airflow-configs',
    action='store_true',
    help="""\
    Removes all Airflow config overrides from the environment.
    """)

# Repeatable KEY=VALUE flag for setting/overwriting Airflow config overrides.
# ArgDict parses each occurrence into a str->str dict and UpdateAction merges
# repeated occurrences instead of replacing the previous value.
UPDATE_AIRFLOW_CONFIGS_FLAG = base.Argument(
    '--update-airflow-configs',
    metavar='KEY=VALUE',
    type=arg_parsers.ArgDict(key_type=str, value_type=str),
    action=arg_parsers.UpdateAction,
    help="""\
    A list of Airflow config override KEY=VALUE pairs to set. If a config
    override exists, its value is updated; otherwise, a new config override
    is created.

    KEYs should specify the configuration section and property name,
    separated by a hyphen, for example `core-print_stats_interval`. The
    section may not contain a closing square brace or period. The property
    name must be non-empty and may not contain an equals sign, semicolon,
    or period. By convention, property names are spelled with
    `snake_case`. VALUEs may contain any character.
    """)

REMOVE_AIRFLOW_CONFIGS_FLAG = base.Argument(