def Args(cls, parser):
  """Register flags for the cluster create command on *parser*."""
  dataproc = dp.Dataproc(cls.ReleaseTrack())
  base.ASYNC_FLAG.AddToParser(parser)
  flags.AddClusterResourceArg(parser, 'create', dataproc.api_version)
  clusters.ArgsForClusterRef(
      parser,
      dataproc,
      cls.BETA,
      include_ttl_config=True,
      include_gke_platform_args=cls.BETA)
  if not cls.BETA:
    # Failure action for primary workers is only registered outside the
    # beta track.
    primary_worker_failure_choices = {
        'NO_ACTION': 'take no action',
        'DELETE': 'delete the failed primary workers',
        'FAILURE_ACTION_UNSPECIFIED': 'failure action is not specified'
    }
    parser.add_argument(
        '--action-on-failed-primary-workers',
        type=arg_utils.ChoiceToEnumName,
        choices=primary_worker_failure_choices,
        help="""
        Failure action to take when primary workers fail during cluster creation
        """)
  # Expose the generic KMS resource flags under their gce-pd-* spellings.
  gce_pd_kms_overrides = {'kms-key': '--gce-pd-kms-key'}
  for suffix in ('keyring', 'location', 'project'):
    gce_pd_kms_overrides['kms-' + suffix] = '--gce-pd-kms-key-' + suffix
  kms_resource_args.AddKmsKeyResourceArg(
      parser, 'cluster', flag_overrides=gce_pd_kms_overrides)
# Example #2
# 0
def _CommonArgs(parser, beta=False):
  """Register flags common to all tracks on *parser*."""
  base.ASYNC_FLAG.AddToParser(parser)
  parser.add_argument('name', help='The name of this cluster.')
  clusters.ArgsForClusterRef(parser, beta)
  # Expose the generic KMS resource flags under their gce-pd-* names.
  overrides = {'kms-key': '--gce-pd-kms-key'}
  for part in ('keyring', 'location', 'project'):
    overrides['kms-' + part] = '--gce-pd-kms-key-' + part
  kms_resource_args.AddKmsKeyResourceArg(
      parser, 'cluster', flag_overrides=overrides)
# Example #3
# 0
 def Args(cls, parser):
     """Register flags for setting a workflow template's managed cluster."""
     dataproc = dp.Dataproc(cls.ReleaseTrack())
     # The exact help text bytes are user-visible; kept verbatim.
     cluster_name_help = """\
       The name of the managed dataproc cluster.
       If unspecified, the workflow template ID will be used."""
     parser.add_argument('--cluster-name', help=cluster_name_help)
     clusters.ArgsForClusterRef(
         parser, cls.Beta(), include_deprecated=cls.Beta())
     flags.AddTemplateResourceArg(
         parser, 'set managed cluster', dataproc.api_version)
     if cls.Beta():
         clusters.BetaArgsForClusterRef(parser)
# Example #4
# 0
 def Args(cls, parser):
     """Register flags for the cluster create command on *parser*."""
     dataproc = dp.Dataproc(cls.ReleaseTrack())
     base.ASYNC_FLAG.AddToParser(parser)
     flags.AddClusterResourceArg(parser, 'create', dataproc.api_version)
     clusters.ArgsForClusterRef(parser, cls.BETA, include_ttl_config=True)
     # Expose the generic KMS resource flags under their gce-pd-* spellings.
     kms_resource_args.AddKmsKeyResourceArg(
         parser,
         'cluster',
         flag_overrides={'kms-key': '--gce-pd-kms-key',
                         'kms-keyring': '--gce-pd-kms-key-keyring',
                         'kms-location': '--gce-pd-kms-key-location',
                         'kms-project': '--gce-pd-kms-key-project'})
# Example #5
# 0
def _CommonArgs(parser, beta=False):
    """Register flags common to all tracks."""
    base.ASYNC_FLAG.AddToParser(parser)
    parser.add_argument('name', help='The name of this cluster.')
    clusters.ArgsForClusterRef(parser, beta)
    # NOTE(review): the nested Args below is defined inside _CommonArgs but
    # never invoked in this body — this looks like two separate snippets
    # merged into one definition; confirm against the original source.
    def Args(parser):
        """Register managed-cluster flags for a workflow template."""
        flags.AddTemplateResourceArg(parser, 'set managed cluster')
        parser.add_argument('--cluster-name',
                            help="""\
        The name of the managed dataproc cluster.
        If unspecified, the workflow template ID will be used.""")
        clusters.ArgsForClusterRef(parser, beta=True)
        flags.AddMinCpuPlatformArgs(parser, base.ReleaseTrack.BETA)

        # TODO(b/70164645): Consolidate these arguments with the other beta args
        # All arguments for these arguments are duplicated from the cluster creation
        # beta track. There should be a ArgsForClusterRefBeta method in clusters.py
        # that is invoked here so that we don't have to duplicate the arguments.
        parser.add_argument('--max-idle',
                            type=arg_parsers.Duration(),
                            help="""\
        The duration before cluster is auto-deleted after last job completes,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

        # --max-age and --expiration-time express the same auto-delete
        # deadline two different ways, so only one may be supplied.
        auto_delete_group = parser.add_mutually_exclusive_group()
        auto_delete_group.add_argument('--max-age',
                                       type=arg_parsers.Duration(),
                                       help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)

        auto_delete_group.add_argument('--expiration-time',
                                       type=arg_parsers.Datetime.Parse,
                                       help="""\
        The time when cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z." See $ gcloud topic datetimes for
        information on time formats.
        """)

        # One --<role>-accelerator flag per instance role; the worker variant
        # carries an extra note because preemptible VMs take no accelerators.
        for instance_type in ('master', 'worker'):
            help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
            if instance_type == 'worker':
                help_msg += """
      Note:
      No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
            help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
            parser.add_argument('--{0}-accelerator'.format(instance_type),
                                type=arg_parsers.ArgDict(spec={
                                    'type': str,
                                    'count': int,
                                }),
                                metavar='type=TYPE,[count=COUNT]',
                                help=help_msg)
# Example #7 (the "Example #6" header is absent in this scrape; the two
# preceding snippets appear merged into one definition above)
# 0
    def Args(parser):
        """Register managed-cluster flags for a workflow template."""
        flags.AddTemplateFlag(parser, 'set managed cluster')
        parser.add_argument('--cluster-name',
                            help='The name of the managed dataproc cluster.')
        clusters.ArgsForClusterRef(parser)
        flags.AddZoneFlag(parser)

        # Per the help table: 1 master = standard mode, 3 = high availability.
        parser.add_argument('--num-masters',
                            type=int,
                            help="""\
      The number of master nodes in the cluster.

      [format="csv",options="header"]
      |========
      Number of Masters,Cluster Mode
      1,Standard
      3,High Availability
      |========
      """)

        parser.add_argument('--single-node',
                            action='store_true',
                            help="""\
      Create a single node cluster.

      A single node cluster has all master and worker components.
      It cannot have any separate worker nodes.
      """)

        parser.add_argument('--max-idle',
                            type=arg_parsers.Duration(),
                            help="""\
        The duration before cluster is auto-deleted after last job completes,
        such as "30m", "2h" or "1d".
        """)

        # --max-age and --expiration-time express the same auto-delete
        # deadline two different ways, so only one may be supplied.
        auto_delete_group = parser.add_mutually_exclusive_group()
        auto_delete_group.add_argument('--max-age',
                                       type=arg_parsers.Duration(),
                                       help="""\
        The lifespan of the cluster before it is auto-deleted, such as "30m",
        "2h" or "1d".
        """)

        auto_delete_group.add_argument('--expiration-time',
                                       type=arg_parsers.Datetime.Parse,
                                       help="""\
        The time when cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z"
        """)

        # One --<role>-accelerator flag per instance role; the worker variant
        # carries an extra note because preemptible VMs take no accelerators.
        for instance_type in ('master', 'worker'):
            help_msg = """\
      Attaches accelerators (e.g. GPUs) to the {instance_type}
      instance(s).
      """.format(instance_type=instance_type)
            if instance_type == 'worker':
                help_msg += """
      Note:
      No accelerators will be attached to preemptible workers, because
      preemptible VMs do not support accelerators.
      """
            help_msg += """
      *type*::: The specific type (e.g. nvidia-tesla-k80 for nVidia Tesla
      K80) of accelerator to attach to the instances. Use 'gcloud compute
      accelerator-types list' to learn about all available accelerator
      types.

      *count*::: The number of pieces of the accelerator to attach to each
      of the instances. The default value is 1.
      """
            parser.add_argument('--{0}-accelerator'.format(instance_type),
                                type=arg_parsers.ArgDict(spec={
                                    'type': str,
                                    'count': int,
                                }),
                                metavar='type=TYPE,[count=COUNT]',
                                help=help_msg)
        parser.add_argument('--no-address',
                            action='store_true',
                            help="""\
        If provided, the instances in the cluster will not be assigned external
        IP addresses.

        Note: Dataproc VMs need access to the Dataproc API. This can be achieved
        without external IP addresses using Private Google Access
        (https://cloud.google.com/compute/docs/private-google-access).
        """)
# Example #8
# 0
def _CommonArgs(parser, beta, include_deprecated):
    """Register flags shared by all release tracks on *parser*."""
    # The exact help text bytes are user-visible; kept verbatim.
    cluster_name_help = """\
        The name of the managed dataproc cluster.
        If unspecified, the workflow template ID will be used."""
    parser.add_argument('--cluster-name', help=cluster_name_help)
    clusters.ArgsForClusterRef(parser, beta, include_deprecated)