コード例 #1
0
ファイル: import.py プロジェクト: Guliux10/bchacks_deepbreath
 def Args(cls, parser):
     """Registers flags for the cluster import command."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'import', client.api_version)
     export_util.AddImportFlags(parser, cls.GetSchemaPath(for_help=True))
     base.ASYNC_FLAG.AddToParser(parser)
     # Backend timeout is 30m; allow 5m extra as a safety buffer.
     flags.AddTimeoutFlag(parser, default='35m')
コード例 #2
0
 def Args(cls, parser):
   """Registers flags for the cluster create command."""
   client = dp.Dataproc(cls.ReleaseTrack())
   base.ASYNC_FLAG.AddToParser(parser)
   flags.AddClusterResourceArg(parser, 'create', client.api_version)
   clusters.ArgsForClusterRef(
       parser,
       client,
       cls.BETA,
       include_ttl_config=True,
       include_gke_platform_args=cls.BETA)
   # The primary-worker failure-action flag is only registered off-BETA.
   if not cls.BETA:
     parser.add_argument(
         '--action-on-failed-primary-workers',
         choices={
             'NO_ACTION': 'take no action',
             'DELETE': 'delete the failed primary workers',
             'FAILURE_ACTION_UNSPECIFIED': 'failure action is not specified'
         },
         type=arg_utils.ChoiceToEnumName,
         help="""
        Failure action to take when primary workers fail during cluster creation
        """)
   # Map the generic KMS resource-arg flag names onto gce-pd-specific ones.
   overrides = {
       'kms-key': '--gce-pd-kms-key',
       'kms-keyring': '--gce-pd-kms-key-keyring',
       'kms-location': '--gce-pd-kms-key-location',
       'kms-project': '--gce-pd-kms-key-project'
   }
   kms_resource_args.AddKmsKeyResourceArg(
       parser, 'cluster', flag_overrides=overrides)
コード例 #3
0
 def Args(cls, parser):
     """Registers flags for setting the policy on a cluster."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(
         parser, 'set the policy on', client.api_version)
     parser.add_argument(
         'policy_file',
         metavar='POLICY_FILE',
         help="""\
     Path to a local JSON or YAML formatted file containing a valid policy.
     """)
コード例 #4
0
 def Args(cls, parser):
     """Registers flags for the cluster diagnose command."""
     flags.AddTimeoutFlag(parser)
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'diagnose', client.api_version)
     # Hidden flag controlling access privileges on the diagnose tarball.
     parser.add_argument(
         '--tarball-access',
         type=arg_utils.ChoiceToEnumName,
         choices=Diagnose._GetValidTarballAccessChoices(client),
         hidden=True,
         help='Target access privileges for diagnose tarball.')
コード例 #5
0
    def Args(cls, parser):
        """Registers flags for enabling a personal auth session on a cluster.

        Args:
          parser: An argparse parser used to register flags.
        """
        client = dp.Dataproc(cls.ReleaseTrack())
        flags.AddClusterResourceArg(
            parser, 'enable a personal auth session on', client.api_version)
        flags.AddPersonalAuthSessionArgs(parser)
コード例 #6
0
ファイル: create.py プロジェクト: bopopescu/GoogleAPI
 def Args(cls, parser):
     """Registers flags for the cluster create command."""
     client = dp.Dataproc(cls.ReleaseTrack())
     base.ASYNC_FLAG.AddToParser(parser)
     flags.AddClusterResourceArg(parser, 'create', client.api_version)
     clusters.ArgsForClusterRef(parser, cls.BETA, include_ttl_config=True)
     # Map the generic KMS resource-arg flag names onto gce-pd-specific ones.
     overrides = {
         'kms-key': '--gce-pd-kms-key',
         'kms-keyring': '--gce-pd-kms-key-keyring',
         'kms-location': '--gce-pd-kms-key-location',
         'kms-project': '--gce-pd-kms-key-project'
     }
     kms_resource_args.AddKmsKeyResourceArg(
         parser, 'cluster', flag_overrides=overrides)
コード例 #7
0
    def Args(cls, parser):
        """Registers flags for creating a Dataproc cluster on GKE."""
        client = dp.Dataproc(cls.ReleaseTrack())
        base.ASYNC_FLAG.AddToParser(parser)
        flags.AddClusterResourceArg(parser, 'create', client.api_version)

        # Backend timeout is 30m; allow 5m extra as a safety buffer.
        flags.AddTimeoutFlag(parser, default='35m')

        parser.add_argument(
            '--spark-engine-version',
            required=True,
            help="""\
        The version of the Spark engine to run on this cluster.
        """)

        parser.add_argument(
            '--staging-bucket',
            help="""\
        The Cloud Storage bucket to use to stage job dependencies, miscellaneous
        config files, and job driver console output when using this cluster.
        """)

        parser.add_argument(
            '--properties',
            type=arg_parsers.ArgDict(),
            action=arg_parsers.UpdateAction,
            default={},
            metavar='PREFIX:PROPERTY=VALUE',
            help="""\
        Specifies configuration properties for installed packages, such as
        Spark. Properties are mapped to configuration files by specifying a
        prefix, such as "core:io.serializations".
        """)

        # Target GKE cluster plus the namespace to deploy components into.
        flags.AddGkeClusterResourceArg(parser)
        parser.add_argument(
            '--namespace',
            help="""\
            The name of the Kubernetes namespace to deploy Dataproc system
            components in. This namespace does not need to exist.
            """)

        gke_clusters.AddPoolsArg(parser)
        parser.add_argument(
            '--setup-workload-identity',
            action='store_true',
            help="""\
            Sets up the GKE Workload Identity for your Dataproc on GKE cluster.
            Note that running this requires elevated permissions as it will
            manipulate IAM policies on the Google Service Accounts that will be
            used by your Dataproc on GKE cluster.
            """)
        flags.AddMetastoreServiceResourceArg(parser)
        flags.AddHistoryServerClusterResourceArg(parser)
コード例 #8
0
 def Args(cls, parser):
     """Registers flags for the cluster describe command."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'describe', client.api_version)
コード例 #9
0
    def Args(cls, parser):
        """Registers flags for the cluster update command."""
        client = dp.Dataproc(cls.ReleaseTrack())
        base.ASYNC_FLAG.AddToParser(parser)
        # Allow adding new labels as well as updating/removing existing ones.
        labels_util.AddUpdateLabelsFlags(parser)
        # Updates can take hours if a lot of data needs to be moved on HDFS.
        flags.AddTimeoutFlag(parser, default='3h')
        flags.AddClusterResourceArg(parser, 'update', client.api_version)

        parser.add_argument(
            '--num-workers',
            type=int,
            help='The new number of worker nodes in the cluster.')
        parser.add_argument(
            '--num-preemptible-workers',
            type=int,
            help='The new number of preemptible worker nodes in the cluster.')

        parser.add_argument(
            '--graceful-decommission-timeout',
            type=arg_parsers.Duration(lower_bound='0s', upper_bound='1d'),
            help="""
              The graceful decommission timeout for decommissioning Node Managers
              in the cluster, used when removing nodes. Graceful decommissioning
              allows removing nodes from the cluster without interrupting jobs in
              progress. Timeout specifies how long to wait for jobs in progress to
              finish before forcefully removing nodes (and potentially
              interrupting jobs). Timeout defaults to 0 if not set (for forceful
              decommission), and the maximum allowed timeout is 1 day.
              See $ gcloud topic datetimes for information on duration formats.
              """)

        # --max-idle and --no-max-idle are mutually exclusive.
        idle_group = parser.add_mutually_exclusive_group()
        idle_group.add_argument(
            '--max-idle',
            type=arg_parsers.Duration(),
            help="""\
        The duration before cluster is auto-deleted after last job finished,
        such as "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)
        idle_group.add_argument(
            '--no-max-idle',
            action='store_true',
            help="""\
        Cancels the cluster auto-deletion by cluster idle duration (configured
         by --max-idle flag)
        """)

        # --max-age / --expiration-time / --no-max-age are mutually exclusive.
        age_group = parser.add_mutually_exclusive_group()
        age_group.add_argument(
            '--max-age',
            type=arg_parsers.Duration(),
            help="""\
        The lifespan of the cluster before it is auto-deleted, such as
        "2h" or "1d".
        See $ gcloud topic datetimes for information on duration formats.
        """)
        age_group.add_argument(
            '--expiration-time',
            type=arg_parsers.Datetime.Parse,
            help="""\
        The time when cluster will be auto-deleted, such as
        "2017-08-29T18:52:51.142Z". See $ gcloud topic datetimes for
        information on time formats.
        """)
        age_group.add_argument(
            '--no-max-age',
            action='store_true',
            help="""\
        Cancels the cluster auto-deletion by maximum cluster age (configured by
         --max-age or --expiration-time flags)
        """)

        # Only one of --autoscaling-policy or --disable-autoscaling may be set.
        autoscaling_group = parser.add_mutually_exclusive_group()
        flags.AddAutoscalingPolicyResourceArgForCluster(
            autoscaling_group, api_version='v1')
        autoscaling_group.add_argument(
            '--disable-autoscaling',
            action='store_true',
            help="""\
        Disable autoscaling, if it is enabled. This is an alias for passing the
        empty string to --autoscaling-policy'.
        """)
コード例 #10
0
 def Args(cls, parser):
     """Registers flags for the cluster stop command."""
     base.ASYNC_FLAG.AddToParser(parser)
     flags.AddTimeoutFlag(parser)
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'stop', client.api_version)
コード例 #11
0
 def Args(cls, parser):
     """Registers flags for the cluster export command."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'export', client.api_version)
     export_util.AddExportFlags(parser, cls.GetSchemaPath(for_help=True))
コード例 #12
0
 def Args(cls, parser):
     """Registers flags for the cluster export command."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'export', client.api_version)
     export_util.AddExportFlags(parser)
コード例 #13
0
 def Args(cls, parser):
     """Registers flags for the cluster diagnose command."""
     flags.AddTimeoutFlag(parser)
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(parser, 'diagnose', client.api_version)
コード例 #14
0
 def Args(cls, parser):
     """Registers flags for retrieving a cluster's policy."""
     client = dp.Dataproc(cls.ReleaseTrack())
     flags.AddClusterResourceArg(
         parser, 'retrieve the policy for', client.api_version)
     # Remove the inherited --uri flag; it does not apply to this command.
     base.URI_FLAG.RemoveFromParser(parser)