Example #1
from __future__ import unicode_literals

from googlecloudsdk.api_lib.ml_engine import jobs
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.command_lib.util.apis import arg_utils
from tests.lib import cli_test_base
from tests.lib import completer_test_base
from tests.lib import parameterized
from tests.lib.calliope import util
from tests.lib.surface.iam import unit_test_base
from tests.lib.surface.ml_engine import base

_ACCELERATOR_TYPE_MAPPER = arg_utils.ChoiceEnumMapper(
    'generic-accelerator',
    jobs.GetMessagesModule(
    ).GoogleCloudMlV1AcceleratorConfig.TypeValueValuesEnum,
    help_str='The available types of accelerators.',
    include_filter=lambda x: x != 'ACCELERATOR_TYPE_UNSPECIFIED',
    required=False)

_INVALID_ACCELERATOR_MESSAGE = ('Invalid accelerator: bad-type. Valid '
                                'choices are: [{}]'.format(', '.join(
                                    _ACCELERATOR_TYPE_MAPPER.choices)))

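# A small illustrative check (added; not part of the original test file):
# the include_filter above keeps ACCELERATOR_TYPE_UNSPECIFIED out of the
# mapper, so its kebab-case CLI choices omit the sentinel, and each choice
# maps back to an enum value via the standard ChoiceEnumMapper accessors:
#
#   choices = _ACCELERATOR_TYPE_MAPPER.choices
#   assert 'accelerator-type-unspecified' not in choices
#   for choice in choices:
#       assert _ACCELERATOR_TYPE_MAPPER.GetEnumForChoice(choice) is not None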

class CompletionTest(unit_test_base.BaseTest,
                     completer_test_base.CompleterBase):
    def SetUp(self):
        self.returned_roles = [
            self.msgs.Role(
                description='Read access to all resources.',
Example #2
             '&project={project}')
JOB_FORMAT = 'yaml(jobId,state,startTime.date(tz=LOCAL),endTime.date(tz=LOCAL))'
# Check every 10 seconds if the job is complete (if we didn't fetch any logs the
# last time)
_CONTINUE_INTERVAL = 10

_TF_RECORD_URL = ('https://www.tensorflow.org/versions/r0.12/how_tos/'
                  'reading_data/index.html#file-formats')

_PREDICTION_DATA_FORMAT_MAPPER = arg_utils.ChoiceEnumMapper(
    '--data-format',
    jobs.GetMessagesModule(
    ).GoogleCloudMlV1PredictionInput.DataFormatValueValuesEnum,
    custom_mappings={
        'TEXT': ('text', ('Text files with instances separated '
                          'by the new-line character.')),
        'TF_RECORD':
        ('tf-record', 'TFRecord files; see {}'.format(_TF_RECORD_URL)),
        'TF_RECORD_GZIP': ('tf-record-gzip', 'GZIP-compressed TFRecord files.')
    },
    help_str='Data format of the input files.',
    required=True)
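
# Illustrative round trip (added; not from the original source), assuming
# the standard ChoiceEnumMapper accessors used elsewhere in this collection:
#
#   enum = _PREDICTION_DATA_FORMAT_MAPPER.GetEnumForChoice('tf-record')
#   # enum == DataFormatValueValuesEnum.TF_RECORD
#   _PREDICTION_DATA_FORMAT_MAPPER.GetChoiceForEnum(enum)  # -> 'tf-record'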

_SCALE_TIER_CHOICES = {
    'BASIC':
    ('basic', ('A single worker instance. This tier is suitable for '
               'learning how to use Cloud ML Engine, and for '
               'experimenting with new models using small datasets.')),
    'STANDARD_1': ('standard-1', 'Many workers and a few parameter servers.'),
    'PREMIUM_1':
    ('premium-1', 'A large number of workers with many parameter servers.'),
Example #3
def GetDoeFlagMapper(messages):
    return arg_utils.ChoiceEnumMapper(
        '--denial-of-existence',
        messages.ManagedZoneDnsSecConfig.NonExistenceValueValuesEnum,
        help_str='Requires DNSSEC enabled.')
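
# Hypothetical helpers (added for illustration; not in the original file),
# mirroring how other snippets in this collection use choice_arg.AddToParser
# and GetEnumForChoice. The args dest 'denial_of_existence' is assumed to be
# derived from the '--denial-of-existence' flag name.
def AddDoeFlag(parser, messages):
    GetDoeFlagMapper(messages).choice_arg.AddToParser(parser)


def ParseDoeFlag(args, messages):
    return GetDoeFlagMapper(messages).GetEnumForChoice(
        args.denial_of_existence)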
Example #4
    messages = apis.GetMessagesModule('cloudiot', 'v1')
    return messages.GatewayConfig.GatewayAuthMethodValueValuesEnum


GATEWAY_AUTH_METHOD_ENUM_MAPPER = arg_utils.ChoiceEnumMapper(
    '--auth-method',
    _GetAuthMethodEnum(),
    custom_mappings={
        'ASSOCIATION_ONLY':
        ('association-only', ('The device is authenticated through the '
                              'gateway association only. Device credentials '
                              'are ignored if provided.')),
        'DEVICE_AUTH_TOKEN_ONLY':
        ('device-auth-token-only', ('The device is authenticated through its '
                                    'own credentials. Gateway association '
                                    'is not checked.')),
        'ASSOCIATION_AND_DEVICE_AUTH_TOKEN':
        ('association-and-device-auth-token',
         ('The device is authenticated through both device '
          'credentials and gateway association.'))
    },
    required=False,
    help_str=(
        'The authorization/authentication method used by devices in '
        'relation to the gateway. This property is set only on gateways. '
        'If left unspecified, devices will not be able to access '
        'the gateway.'))
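
# Sketch of typical wiring (added for illustration; not in the original
# module): a command registers the flag and converts the parsed choice,
# where the args dest 'auth_method' is assumed to come from '--auth-method'.
#
#   GATEWAY_AUTH_METHOD_ENUM_MAPPER.choice_arg.AddToParser(parser)
#   ...
#   if args.IsSpecified('auth_method'):
#       auth = GATEWAY_AUTH_METHOD_ENUM_MAPPER.GetEnumForChoice(
#           args.auth_method)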

CREATE_GATEWAY_ENUM_MAPPER = arg_utils.ChoiceEnumMapper(
    '--device-type',
    _GetGatewayEnum(parent='create_request'),
Example #5
class Submit(base.CreateCommand):
    """Submit a build using Google Cloud Build.

  Submit a build using Google Cloud Build.

  ## NOTES

  You can also run a build locally using the
  separate component: `gcloud components install cloud-build-local`.
  """

    _machine_type_flag_map = arg_utils.ChoiceEnumMapper(
        '--machine-type',
        (cloudbuild_util.GetMessagesModule()
         ).BuildOptions.MachineTypeValueValuesEnum,
        # TODO(b/69962368): remove this custom mapping when we can exclude
        # UNSPECIFIED from the proto.
        custom_mappings={
            'N1_HIGHCPU_32': 'n1-highcpu-32',
            'N1_HIGHCPU_8': 'n1-highcpu-8'
        },
        help_str='Machine type used to run the build.')

    @staticmethod
    def Args(parser):
        """Register flags for this command.

    Args:
      parser: An argparse.ArgumentParser-like object. It is mocked out in order
        to capture some information, but behaves like an ArgumentParser.
    """
        source = parser.add_mutually_exclusive_group()
        source.add_argument(
            'source',
            nargs='?',
            default='.',  # By default, the current directory is used.
            help='The location of the source to build. The location can be a '
            'directory on a local disk or a gzipped archive file (.tar.gz) in '
            'Google Cloud Storage. If the source is a local directory, this '
            'command skips the files specified in the `.gcloudignore` file. If a '
            '`.gitignore` file is present in the local source directory, gcloud '
            'will use a Git-compatible `.gcloudignore` file that respects your '
            '.gitignored files. The global `.gitignore` is not respected. For more '
            'information on `.gcloudignore`, see `gcloud topic gcloudignore`.',
        )
        source.add_argument(
            '--no-source',
            action='store_true',
            help='Specify that no source should be uploaded with this build.')

        parser.add_argument(
            '--gcs-source-staging-dir',
            help=
            'A directory in Google Cloud Storage to copy the source used for '
            'staging the build. If the specified bucket does not exist, Cloud '
            'Build will create one. If you don\'t set this field, '
            '```gs://[PROJECT_ID]_cloudbuild/source``` is used.',
        )
        parser.add_argument(
            '--gcs-log-dir',
            help=
            'A directory in Google Cloud Storage to hold build logs. If this '
            'field is not set, '
            '```gs://[PROJECT_NUMBER].cloudbuild-logs.googleusercontent.com/``` '
            'will be created and used.',
        )
        parser.add_argument(
            '--timeout',
            help=
            'Maximum time a build is run before it is failed as `TIMEOUT`. It '
            'is specified as a duration; for example, "2h15m5s" is two hours, '
            'fifteen minutes, and five seconds. If you don\'t specify a unit, '
            'seconds is assumed. For example, "10" is 10 seconds.',
            action=actions.StoreProperty(properties.VALUES.builds.timeout),
        )

        Submit._machine_type_flag_map.choice_arg.AddToParser(parser)

        parser.add_argument(
            '--disk-size',
            type=arg_parsers.BinarySize(lower_bound='100GB',
                                        upper_bound='1TB'),
            help='Machine disk size (GB) to run the build.',
        )
        parser.add_argument('--substitutions',
                            metavar='KEY=VALUE',
                            type=arg_parsers.ArgDict(),
                            help="""\
Parameters to be substituted in the build specification.

For example (using some nonsensical substitution keys; all keys must begin with
an underscore):

    $ gcloud builds submit . --config config.yaml \\
        --substitutions _FAVORITE_COLOR=blue,_NUM_CANDIES=10

This will result in a build where every occurrence of ```${_FAVORITE_COLOR}```
in certain fields is replaced by "blue", and similarly for ```${_NUM_CANDIES}```
and "10".

Only the following built-in variables can be specified with the
`--substitutions` flag: REPO_NAME, BRANCH_NAME, TAG_NAME, REVISION_ID,
COMMIT_SHA, SHORT_SHA.

For more details, see:
https://cloud.google.com/cloud-build/docs/api/build-requests#substitutions
""")

        build_config = parser.add_mutually_exclusive_group()
        build_config.add_argument(
            '--tag',
            '-t',
            help='The tag to use with a "docker build" image creation. '
            'Cloud Build will run a remote "docker build -t '
            '$TAG .", where $TAG is the tag provided by this flag. The tag '
            'must be in the gcr.io/* or *.gcr.io/* namespaces. Specify a tag '
            'if you want Cloud Build to build using a Dockerfile '
            'instead of a build config file. If you specify a tag in this '
            'command, your source must include a Dockerfile. For instructions '
            'on building using a Dockerfile see '
            'https://cloud.google.com/cloud-build/docs/quickstart-docker.',
        )
        build_config.add_argument(
            '--config',
            default=
            'cloudbuild.yaml',  # By default, find this in the current dir
            help=
            'The YAML or JSON file to use as the build configuration file.',
        )
        base.ASYNC_FLAG.AddToParser(parser)
        parser.display_info.AddFormat("""
          table(
            id,
            createTime.date('%Y-%m-%dT%H:%M:%S%Oz', undefined='-'),
            duration(start=startTime,end=finishTime,precision=0,calendar=false,undefined="  -").slice(2:).join(""):label=DURATION,
            build_source(undefined="-"):label=SOURCE,
            build_images(undefined="-"):label=IMAGES,
            status
          )
        """)
        # Do not try to create a URI to update the cache.
        parser.display_info.AddCacheUpdater(None)

    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

        project = properties.VALUES.core.project.Get(required=True)
        safe_project = project.replace(':', '_')
        safe_project = safe_project.replace('.', '_')
        # The string 'google' is not allowed in bucket names.
        safe_project = safe_project.replace('google', 'elgoog')

        default_bucket_name = '{}_cloudbuild'.format(safe_project)

        default_gcs_source = False
        if args.gcs_source_staging_dir is None:
            default_gcs_source = True
            args.gcs_source_staging_dir = 'gs://{}/source'.format(
                default_bucket_name)

        client = cloudbuild_util.GetClientInstance()
        messages = cloudbuild_util.GetMessagesModule()

        gcs_client = storage_api.StorageClient()

        # First, create the build request.
        build_timeout = properties.VALUES.builds.timeout.Get()

        if build_timeout is not None:
            try:
                # A bare number is interpreted as seconds.
                build_timeout_secs = int(build_timeout)
            except ValueError:
                build_timeout_duration = times.ParseDuration(build_timeout)
                build_timeout_secs = int(build_timeout_duration.total_seconds)
            timeout_str = str(build_timeout_secs) + 's'
        else:
            timeout_str = None

        if args.tag is not None:
            if (properties.VALUES.builds.check_tag.GetBool()
                    and 'gcr.io/' not in args.tag):
                raise c_exceptions.InvalidArgumentException(
                    '--tag',
                    'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.'
                )
            if properties.VALUES.builds.use_kaniko.GetBool():
                build_config = messages.Build(
                    steps=[
                        messages.BuildStep(
                            name='gcr.io/kaniko-project/executor:latest',
                            args=['--destination', args.tag],
                        ),
                    ],
                    timeout=timeout_str,
                    substitutions=cloudbuild_util.EncodeSubstitutions(
                        args.substitutions, messages))
            else:
                build_config = messages.Build(
                    images=[args.tag],
                    steps=[
                        messages.BuildStep(
                            name='gcr.io/cloud-builders/docker',
                            args=['build', '--no-cache', '-t', args.tag, '.'],
                        ),
                    ],
                    timeout=timeout_str,
                    substitutions=cloudbuild_util.EncodeSubstitutions(
                        args.substitutions, messages))
        elif args.config is not None:
            if not args.config:
                raise c_exceptions.InvalidArgumentException(
                    '--config', 'Config file path must not be empty.')
            build_config = config.LoadCloudbuildConfigFromPath(
                args.config, messages, params=args.substitutions)
        else:
            raise c_exceptions.OneOfArgumentsRequiredException(
                ['--tag', '--config'],
                'Requires either a docker tag or a config file.')

        # If timeout was set by flag, overwrite the config file.
        if timeout_str:
            build_config.timeout = timeout_str

        # --no-source overrides the default --source.
        if not args.IsSpecified('source') and args.no_source:
            args.source = None

        gcs_source_staging = None
        if args.source:
            suffix = '.tgz'
            if args.source.startswith('gs://') or os.path.isfile(args.source):
                _, suffix = os.path.splitext(args.source)

            # Next, stage the source to Cloud Storage.
            staged_object = '{stamp}-{uuid}{suffix}'.format(
                stamp=times.GetTimeStampFromDateTime(times.Now()),
                uuid=uuid.uuid4().hex,
                suffix=suffix,
            )
            gcs_source_staging_dir = resources.REGISTRY.Parse(
                args.gcs_source_staging_dir, collection='storage.objects')

            # We create the bucket (if it does not exist) first. If we do an existence
            # check and then create the bucket ourselves, it would be possible for an
            # attacker to get lucky and beat us to creating the bucket. Block on this
            # creation to avoid this race condition.
            gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

            # If no bucket is specified (for the source `default_gcs_source`), check
            # that the default bucket is also owned by the project (b/33046325).
            if default_gcs_source:
                # This request returns only the buckets owned by the project.
                bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
                    project=project, prefix=default_bucket_name)
                bucket_list = gcs_client.client.buckets.List(bucket_list_req)
                found_bucket = False
                for bucket in bucket_list.items:
                    if bucket.id == default_bucket_name:
                        found_bucket = True
                        break
                if not found_bucket:
                    raise c_exceptions.RequiredArgumentException(
                        'gcs-source-staging-dir',
                        'A bucket with name {} already exists and is owned by '
                        'another project. Specify a bucket using '
                        '--gcs-source-staging-dir.'.format(
                            default_bucket_name))

            if gcs_source_staging_dir.object:
                staged_object = gcs_source_staging_dir.object + '/' + staged_object
            gcs_source_staging = resources.REGISTRY.Create(
                collection='storage.objects',
                bucket=gcs_source_staging_dir.bucket,
                object=staged_object)

            if args.source.startswith('gs://'):
                gcs_source = resources.REGISTRY.Parse(
                    args.source, collection='storage.objects')
                staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                       gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            else:
                if not os.path.exists(args.source):
                    raise c_exceptions.BadFileException(
                        'could not find source [{src}]'.format(
                            src=args.source))
                if os.path.isdir(args.source):
                    source_snapshot = snapshot.Snapshot(args.source)
                    size_str = resource_transform.TransformSize(
                        source_snapshot.uncompressed_size)
                    log.status.Print(
                        'Creating temporary tarball archive of {num_files} file(s)'
                        ' totalling {size} before compression.'.format(
                            num_files=len(source_snapshot.files),
                            size=size_str))
                    staged_source_obj = source_snapshot.CopyTarballToGCS(
                        gcs_client, gcs_source_staging)
                    build_config.source = messages.Source(
                        storageSource=messages.StorageSource(
                            bucket=staged_source_obj.bucket,
                            object=staged_source_obj.name,
                            generation=staged_source_obj.generation,
                        ))
                elif os.path.isfile(args.source):
                    unused_root, ext = os.path.splitext(args.source)
                    if ext not in _ALLOWED_SOURCE_EXT:
                        raise c_exceptions.BadFileException(
                            'Local file [{src}] is none of {exts}'.format(
                                src=args.source,
                                exts=', '.join(_ALLOWED_SOURCE_EXT)))
                    log.status.Print('Uploading local file [{src}] to '
                                     '[gs://{bucket}/{object}].'.format(
                                         src=args.source,
                                         bucket=gcs_source_staging.bucket,
                                         object=gcs_source_staging.object,
                                     ))
                    staged_source_obj = gcs_client.CopyFileToGCS(
                        storage_util.BucketReference.FromBucketUrl(
                            gcs_source_staging.bucket), args.source,
                        gcs_source_staging.object)
                    build_config.source = messages.Source(
                        storageSource=messages.StorageSource(
                            bucket=staged_source_obj.bucket,
                            object=staged_source_obj.name,
                            generation=staged_source_obj.generation,
                        ))
        else:
            # No source
            if not args.no_source:
                raise c_exceptions.InvalidArgumentException(
                    '--no-source', 'To omit source, use the --no-source flag.')

        if args.gcs_log_dir:
            gcs_log_dir = resources.REGISTRY.Parse(
                args.gcs_log_dir, collection='storage.objects')

            build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                       gcs_log_dir.object)

        # Machine type.
        if args.machine_type is not None:
            machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
                args.machine_type)
            if not build_config.options:
                build_config.options = messages.BuildOptions()
            build_config.options.machineType = machine_type

        # Disk size.
        if args.disk_size is not None:
            disk_size = compute_utils.BytesToGb(args.disk_size)
            if not build_config.options:
                build_config.options = messages.BuildOptions()
            build_config.options.diskSizeGb = int(disk_size)

        log.debug('submitting build: ' + repr(build_config))

        # Start the build.
        op = client.projects_builds.Create(
            messages.CloudbuildProjectsBuildsCreateRequest(
                build=build_config,
                projectId=properties.VALUES.core.project.Get()))
        json = encoding.MessageToJson(op.metadata)
        build = encoding.JsonToMessage(messages.BuildOperationMetadata,
                                       json).build

        build_ref = resources.REGISTRY.Create(
            collection='cloudbuild.projects.builds',
            projectId=build.projectId,
            id=build.id)

        log.CreatedResource(build_ref)
        if build.logUrl:
            log.status.Print('Logs are available at [{log_url}].'.format(
                log_url=build.logUrl))
        else:
            log.status.Print('Logs are available in the Cloud Console.')

        # If the command is run --async, we just print out a reference to the build.
        if args.async:
            return build

        mash_handler = execution.MashHandler(
            execution.GetCancelBuildHandler(client, messages, build_ref))

        # Otherwise, logs are streamed from GCS.
        with execution_utils.CtrlCSection(mash_handler):
            build = cb_logs.CloudBuildClient(client,
                                             messages).Stream(build_ref)

        if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
            log.status.Print(
                'Your build timed out. Use the [--timeout=DURATION] flag to change '
                'the timeout threshold.')

        if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
            raise FailedBuildException(build)

        return build
Example #6
def GetAutoscalingModeEnumMapper(messages):
    return arg_utils.ChoiceEnumMapper(
        'mode',
        messages.NodeGroupAutoscalingPolicy.ModeValueValuesEnum,
        custom_mappings=_AUTOSCALING_MODE_MAPPINGS,
    )
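
# Hypothetical usage (added for illustration): 'mode' has no leading dashes,
# so the mapper's choice_arg is a positional argument; a caller might do:
#
#   mapper = GetAutoscalingModeEnumMapper(messages)
#   mapper.choice_arg.AddToParser(parser)
#   mode_enum = mapper.GetEnumForChoice(args.mode)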
Example #7
_REVOCATION_MAPPING = {
    'REVOCATION_REASON_UNSPECIFIED': 'unspecified',
    'KEY_COMPROMISE': 'key-compromise',
    'CERTIFICATE_AUTHORITY_COMPROMISE': 'certificate-authority-compromise',
    'AFFILIATION_CHANGED': 'affiliation-changed',
    'SUPERSEDED': 'superseded',
    'CESSATION_OF_OPERATION': 'cessation-of-operation',
    'CERTIFICATE_HOLD': 'certificate-hold',
    'PRIVILEGE_WITHDRAWN': 'privilege-withdrawn',
    'ATTRIBUTE_AUTHORITY_COMPROMISE': 'attribute-authority-compromise'
}

_REVOCATION_REASON_MAPPER = arg_utils.ChoiceEnumMapper(
    arg_name='--reason',
    default='unspecified',
    help_str='Revocation reason to include in the CRL.',
    message_enum=privateca_base.GetMessagesModule(
    ).RevokeCertificateRequest.ReasonValueValuesEnum,
    custom_mappings=_REVOCATION_MAPPING)


def AddRevocationReasonFlag(parser):
    """Add a revocation reason enum flag to the parser.

  Args:
    parser: The argparse parser to add the flag to.
  """
    _REVOCATION_REASON_MAPPER.choice_arg.AddToParser(parser)


def ParseRevocationChoiceToEnum(choice):
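    # Assumed body (the snippet is truncated here): convert the CLI choice
    # back to the RevokeCertificateRequest reason enum via the module's mapper.
    return _REVOCATION_REASON_MAPPER.GetEnumForChoice(choice)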
Example #8
class Update(base.UpdateCommand, dm_base.DmCommand):
  """Update a deployment based on a provided config file.

  This command will update a deployment with the new config file provided.
  Different create, update, and delete policies can be specified.
  """

  detailed_help = {
      'EXAMPLES': """\
          To update an existing deployment with a new config yaml file, run:

            $ {command} my-deployment --config new_config.yaml

          To update an existing deployment with a new config template file, run:

            $ {command} my-deployment --template new_config.{jinja|py}

          To update an existing deployment with a composite type as a new config, run:

            $ {command} my-deployment --composite-type <project-id>/composite:<new-config>


          To preview an update to an existing deployment without actually modifying the resources, run:

            $ {command} my-deployment --config new_config.yaml --preview

          To apply an update that has been previewed, provide the name of the previewed deployment, and no config file:

            $ {command} my-deployment

          To specify different create, update, or delete policies, include any subset of the following flags:

            $ {command} my-deployment --config new_config.yaml --create-policy acquire --delete-policy abandon

          To perform an update without waiting for the operation to complete, run:

            $ {command} my-deployment --config new_config.yaml --async

          To update an existing deployment with a new config file and a fingerprint, run:

            $ {command} my-deployment --config new_config.yaml --fingerprint deployment-fingerprint

          Either the --config, --template, or --composite-type flag is required unless launching an already-previewed update to a deployment.

          More information is available at https://cloud.google.com/deployment-manager/docs/configuration/.
          """,
  }

  _delete_policy_flag_map = flags.GetDeleteFlagEnumMap(
      (apis.GetMessagesModule('deploymentmanager', 'v2')
       .DeploymentmanagerDeploymentsUpdateRequest.DeletePolicyValueValuesEnum))

  _create_policy_flag_map = arg_utils.ChoiceEnumMapper(
      '--create-policy',
      (apis.GetMessagesModule('deploymentmanager', 'v2')
       .DeploymentmanagerDeploymentsUpdateRequest.CreatePolicyValueValuesEnum),
      help_str='Create policy for resources that have changed in the update.',
      default='create-or-acquire')

  @staticmethod
  def Args(parser, version=base.ReleaseTrack.GA):
    """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
      version: The version this tool is running as. base.ReleaseTrack.GA
          is the default.
    """
    flags.AddDeploymentNameFlag(parser)
    flags.AddPropertiesFlag(parser)
    flags.AddAsyncFlag(parser)

    parser.add_argument(
        '--description',
        help='The new description of the deployment.',
        dest='description'
    )

    group = parser.add_mutually_exclusive_group()
    flags.AddConfigFlags(group)

    if version in [base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA]:
      group.add_argument(
          '--manifest-id',
          help='Manifest Id of a previous deployment. '
          'This flag cannot be used with --config.',
          dest='manifest_id')

    labels_util.AddUpdateLabelsFlags(parser)

    parser.add_argument(
        '--preview',
        help='Preview the requested update without making any changes to the '
        'underlying resources. (default=False)',
        dest='preview',
        default=False,
        action='store_true')

    Update._create_policy_flag_map.choice_arg.AddToParser(parser)
    Update._delete_policy_flag_map.choice_arg.AddToParser(parser)
    flags.AddFingerprintFlag(parser)

    parser.display_info.AddFormat(flags.RESOURCES_AND_OUTPUTS_FORMAT)

  def Epilog(self, resources_were_displayed):
    """Called after resources are displayed if the default format was used.

    Args:
      resources_were_displayed: True if resources were displayed.
    """
    if not resources_were_displayed:
      log.status.Print('No resources or outputs found in your deployment.')

  def Run(self, args):
    """Run 'deployments update'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      If --async=true, returns Operation to poll.
      Else, returns a struct containing the list of resources and list of
        outputs in the deployment.

    Raises:
      HttpException: An http error response was received while executing api
          request.
    """
    deployment_ref = self.resources.Parse(
        args.deployment_name,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='deploymentmanager.deployments')
    if not args.IsSpecified('format') and args.async:
      args.format = flags.OPERATION_FORMAT

    patch_request = False
    deployment = self.messages.Deployment(
        name=deployment_ref.deployment,
    )

    if not (args.config is None and args.template is None
            and args.composite_type is None):
      deployment.target = importer.BuildTargetConfig(
          self.messages,
          config=args.config,
          template=args.template,
          composite_type=args.composite_type,
          properties=args.properties)
    elif (self.ReleaseTrack() in [base.ReleaseTrack.ALPHA,
                                  base.ReleaseTrack.BETA]
          and args.manifest_id):
      deployment.target = importer.BuildTargetConfigFromManifest(
          self.client, self.messages,
          dm_base.GetProject(),
          deployment_ref.deployment, args.manifest_id, args.properties)
    # Get the fingerprint from the deployment to update.
    try:
      current_deployment = self.client.deployments.Get(
          self.messages.DeploymentmanagerDeploymentsGetRequest(
              project=dm_base.GetProject(),
              deployment=deployment_ref.deployment
          )
      )

      if args.fingerprint:
        deployment.fingerprint = dm_util.DecodeFingerprint(args.fingerprint)
      else:
        # If no fingerprint is present, default to an empty fingerprint.
        # TODO(b/34966984): Remove the empty default after cleaning up all
        # deployments that have no fingerprint
        deployment.fingerprint = current_deployment.fingerprint or ''

      # Get the credential from the deployment to update.
      if self.ReleaseTrack() in [base.ReleaseTrack.ALPHA] and args.credential:
        deployment.credential = dm_util.CredentialFrom(self.messages,
                                                       args.credential)

      # Update the labels of the deployment

      deployment.labels = self._GetUpdatedDeploymentLabels(
          args, current_deployment)
      # If no config or manifest_id is specified but labels are being updated,
      # only add the patch_request header when directly updating a
      # non-previewed deployment.

      no_manifest = (self.ReleaseTrack() is
                     base.ReleaseTrack.GA) or not args.manifest_id
      patch_request = not args.config and no_manifest and (
          bool(args.update_labels) or bool(args.remove_labels))
      if args.description is None:
        deployment.description = current_deployment.description
      elif not args.description or args.description.isspace():
        deployment.description = None
      else:
        deployment.description = args.description
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, dm_api_util.HTTP_ERROR_FORMAT)

    if patch_request:
      args.format = flags.DEPLOYMENT_FORMAT
    try:
      # Necessary to handle API Version abstraction below
      parsed_delete_flag = Update._delete_policy_flag_map.GetEnumForChoice(
          args.delete_policy).name
      parsed_create_flag = Update._create_policy_flag_map.GetEnumForChoice(
          args.create_policy).name
      request = self.messages.DeploymentmanagerDeploymentsUpdateRequest(
          deploymentResource=deployment,
          project=dm_base.GetProject(),
          deployment=deployment_ref.deployment,
          preview=args.preview,
          createPolicy=(self.messages.DeploymentmanagerDeploymentsUpdateRequest.
                        CreatePolicyValueValuesEnum(parsed_create_flag)),
          deletePolicy=(self.messages.DeploymentmanagerDeploymentsUpdateRequest.
                        DeletePolicyValueValuesEnum(parsed_delete_flag)))
      client = self.client
      client.additional_http_headers['X-Cloud-DM-Patch'] = patch_request
      operation = client.deployments.Update(request)

      # Fetch and print the latest fingerprint of the deployment.
      updated_deployment = dm_api_util.FetchDeployment(
          self.client, self.messages, dm_base.GetProject(),
          deployment_ref.deployment)
      if patch_request:
        if args.async:
          log.warn('Updating Deployment metadata is synchronous, --async flag '
                   'is ignored.')
        log.status.Print('Update deployment metadata completed successfully.')
        return updated_deployment
      dm_util.PrintFingerprint(updated_deployment.fingerprint)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, dm_api_util.HTTP_ERROR_FORMAT)
    if args.async:
      return operation
    else:
      op_name = operation.name
      try:
        dm_write.WaitForOperation(self.client,
                                  self.messages,
                                  op_name,
                                  'update',
                                  dm_base.GetProject(),
                                  timeout=OPERATION_TIMEOUT)
        log.status.Print('Update operation ' + op_name
                         + ' completed successfully.')
      except apitools_exceptions.HttpError as error:
        raise exceptions.HttpException(error, dm_api_util.HTTP_ERROR_FORMAT)

      return dm_api_util.FetchResourcesAndOutputs(self.client,
                                                  self.messages,
                                                  dm_base.GetProject(),
                                                  deployment_ref.deployment)

  def _GetUpdatedDeploymentLabels(self, args, deployment):
    update_labels = labels_util.GetUpdateLabelsDictFromArgs(args)
    remove_labels = labels_util.GetRemoveLabelsListFromArgs(args)
    return dm_labels.UpdateLabels(deployment.labels,
                                  self.messages.DeploymentLabelEntry,
                                  update_labels, remove_labels)
Example #9
def GetTierArg(messages, api_version):
    """Creates the --tier flag choice arg for the given messages module.

  Args:
    messages: The messages module.
    api_version: filestore_client api version.

  Returns:
    the choice arg.
  """
    if ((api_version == filestore_client.ALPHA_API_VERSION)
            or (api_version == filestore_client.BETA_API_VERSION)):
        tier_arg = (arg_utils.ChoiceEnumMapper(
            '--tier',
            messages.Instance.TierValueValuesEnum,
            help_str="""The service tier for the Cloud Filestore instance.
         For more details, see:
         https://cloud.google.com/filestore/docs/instance-tiers """,
            custom_mappings={
                'STANDARD':
                ('standard',
                 """Standard Filestore instance, An alias for BASIC_HDD.
                     Use BASIC_HDD instead whenever possible."""),
                'PREMIUM':
                ('premium',
                 """Premium Filestore instance, An alias for BASIC_SSD.
                            Use BASIC_SSD instead whenever possible."""),
                'BASIC_HDD': ('basic-hdd',
                              'Performant NFS storage system using HDD.'),
                'BASIC_SSD': ('basic-ssd',
                              'Performant NFS storage system using SSD.'),
                'ENTERPRISE':
                ('enterprise', """ENTERPRISE instances offer the features\
                    and availability needed for mission-critical workloads."""
                 ),
                'HIGH_SCALE_SSD':
                ('high-scale-ssd',
                 """NFS storage system with expanded capacity and performance\
                    scaling capabilities.""")
            },
            default='BASIC_HDD'))
    else:
        tier_arg = (arg_utils.ChoiceEnumMapper(
            '--tier',
            messages.Instance.TierValueValuesEnum,
            help_str='The service tier for the Cloud Filestore instance.',
            custom_mappings={
                'STANDARD':
                ('standard',
                 """Standard Filestore instance, an alias for BASIC_HDD.
                     Use BASIC_HDD instead whenever possible."""),
                'PREMIUM':
                ('premium',
                 """Premium Filestore instance, an alias for BASIC_SSD.
                            Use BASIC_SSD instead whenever possible."""),
                'BASIC_HDD':
                ('basic-hdd', 'Performant NFS storage system using HDD.'),
                'BASIC_SSD':
                ('basic-ssd', 'Performant NFS storage system using SSD.'),
                'ENTERPRISE': ('enterprise',
                               """ENTERPRISE instances offer the features\
                    and availability needed for mission-critical workloads.""")
            },
            default='BASIC_HDD'))
    return tier_arg
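
# Illustrative attachment (added; not from the original source): like the
# other mappers in this collection, the returned choice arg is registered
# on a parser with:
#
#   GetTierArg(messages, api_version).choice_arg.AddToParser(parser)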
Example #10
    messages = apis.GetMessagesModule('domains', 'v1alpha2')
    return messages.ContactSettings.PrivacyValueValuesEnum


CONTACT_PRIVACY_ENUM_MAPPER = arg_utils.ChoiceEnumMapper(
    '--contact-privacy',
    _GetContactPrivacyEnum(),
    custom_mappings={
        'PRIVATE_CONTACT_DATA':
        ('private-contact-data',
         ('Your contact info won\'t be available to the public. To help '
          'protect your info and prevent spam, a third party provides '
          'alternate (proxy) contact info for your domain in the public '
          'directory at no extra cost. They will forward received messages '
          'to you.')),
        'REDACTED_CONTACT_DATA':
        ('redacted-contact-data',
         ('Limited personal info will be available to the public. The actual '
          'information redacted depends on the domain. For more information '
          'see https://support.google.com/domains/answer/3251242?hl=en.')),
        'PUBLIC_CONTACT_DATA':
        ('public-contact-data',
         ('All the data from contact config is publicly available.')),
    },
    required=False,
    help_str=('The contact privacy mode to use. Supported privacy modes '
              'depend on the domain.'))


def PrivacyChoiceStrength(privacy):
    """Returns privacy strength (stronger privacy means higher returned value)."""
Example #11
# limitations under the License.
"""Maps that match gcloud enum values to api enum ones."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.command_lib.util.apis import arg_utils

MESSAGES = cloudkms_base.GetMessagesModule()

DIGESTS = {'sha256', 'sha384', 'sha512'}

ALGORITHM_ENUM = MESSAGES.CryptoKeyVersionTemplate.AlgorithmValueValuesEnum
ALGORITHM_MAPPER = arg_utils.ChoiceEnumMapper('algorithm_enum', ALGORITHM_ENUM)

ALGORITHM_ENUM_FOR_IMPORT = MESSAGES.ImportCryptoKeyVersionRequest.AlgorithmValueValuesEnum
ALGORITHM_MAPPER_FOR_IMPORT = arg_utils.ChoiceEnumMapper(
    'algorithm_enum_for_import', ALGORITHM_ENUM_FOR_IMPORT)

IMPORT_METHOD_ENUM = MESSAGES.ImportJob.ImportMethodValueValuesEnum
IMPORT_METHOD_MAPPER = arg_utils.ChoiceEnumMapper('import_method_enum',
                                                  IMPORT_METHOD_ENUM)

PURPOSE_ENUM = MESSAGES.CryptoKey.PurposeValueValuesEnum
PURPOSE_MAP = {
    'encryption': PURPOSE_ENUM.ENCRYPT_DECRYPT,
    'asymmetric-signing': PURPOSE_ENUM.ASYMMETRIC_SIGN,
    'asymmetric-encryption': PURPOSE_ENUM.ASYMMETRIC_DECRYPT,
}
Example #12
    action='store_true',
    help='Output multiline log messages as single records.')
TASK_NAME = base.Argument(
    '--task-name',
    required=False,
    default=None,
    help='If set, display only the logs for this particular task.')

_FRAMEWORK_CHOICES = {
    'TENSORFLOW': 'tensorflow',
    'SCIKIT_LEARN': 'scikit-learn',
    'XGBOOST': 'xgboost'
}
FRAMEWORK_MAPPER = arg_utils.ChoiceEnumMapper(
    '--framework', (versions_api.GetMessagesModule().GoogleCloudMlV1Version.
                    FrameworkValueValuesEnum),
    custom_mappings=_FRAMEWORK_CHOICES,
    help_str=('The ML framework used to train this version of the model. '
              'If not specified, defaults to `tensorflow`.'))
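
# Note (added for illustration): custom_mappings values may be plain choice
# strings, as in _FRAMEWORK_CHOICES above, or (choice, help_text) tuples as
# other snippets in this collection use, e.g.:
#
#   custom_mappings={'TENSORFLOW': ('tensorflow', 'The TensorFlow framework.')}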


def AddPythonVersionFlag(parser, context):
    help_str = (
        'Version of Python used {context}. If not set, the default '
        'version is 2.7. Python 3.5 is available when `runtime_version` is '
        'set to 1.4 and above. Python 2.7 works with all supported runtime '
        'versions.').format(context=context)
    version = base.Argument('--python-version', help=help_str)
    version.AddToParser(parser)


def GetModelName(positional=True, required=False):
Example #13
def _Run(args,
         track=None,
         enable_runtime=True,
         enable_build_worker_pool=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))
    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    had_vpc_connector = bool(
        function.vpcConnector) if not is_new_function else False
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event
        # type, raise an error.
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if (args.IsSpecified('max_instances')
            or args.IsSpecified('clear_max_instances')):
        max_instances = 0 if args.clear_max_instances else args.max_instances
        function.maxInstances = max_instances
        updated_fields.append('maxInstances')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
            if args.runtime in ['nodejs6']:
                log.warning(
                    'The Node.js 6 runtime is deprecated on Cloud Functions. '
                    'Please migrate to Node.js 8 (--runtime=nodejs8) or Node.js 10 '
                    '(--runtime=nodejs10). '
                    'See https://cloud.google.com/functions/docs/migrating/nodejs-runtimes'
                )
        elif is_new_function:
            raise exceptions.RequiredArgumentException(
                'runtime', 'Flag `--runtime` is required for new functions.')
    if args.vpc_connector or args.clear_vpc_connector:
        function.vpcConnector = ('' if args.clear_vpc_connector else
                                 args.vpc_connector)
        updated_fields.append('vpcConnector')
    if args.IsSpecified('egress_settings'):
        will_have_vpc_connector = ((had_vpc_connector
                                    and not args.clear_vpc_connector)
                                   or args.vpc_connector)
        if not will_have_vpc_connector:
            raise exceptions.RequiredArgumentException(
                'vpc-connector', 'Flag `--vpc-connector` is '
                'required for setting `egress-settings`.')
        egress_settings_enum = arg_utils.ChoiceEnumMapper(
            arg_name='egress_settings',
            message_enum=function.VpcConnectorEgressSettingsValueValuesEnum,
            custom_mappings=flags.EGRESS_SETTINGS_MAPPING).GetEnumForChoice(
                args.egress_settings)
        function.vpcConnectorEgressSettings = egress_settings_enum
        updated_fields.append('vpcConnectorEgressSettings')
    if args.IsSpecified('ingress_settings'):
        ingress_settings_enum = arg_utils.ChoiceEnumMapper(
            arg_name='ingress_settings',
            message_enum=function.IngressSettingsValueValuesEnum,
            custom_mappings=flags.INGRESS_SETTINGS_MAPPING).GetEnumForChoice(
                args.ingress_settings)
        function.ingressSettings = ingress_settings_enum
        updated_fields.append('ingressSettings')
    if enable_build_worker_pool:
        if args.build_worker_pool or args.clear_build_worker_pool:
            function.buildWorkerPool = ('' if args.clear_build_worker_pool else
                                        args.build_worker_pool)
            updated_fields.append('buildWorkerPool')
    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only add source to function if it's explicitly provided, a new function,
    # using a stage bucket, or a deploy of an existing function that
    # previously used local source.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function, function_ref,
                                               args.source, args.stage_bucket,
                                               args.ignore_file))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    ensure_all_users_invoke = flags.ShouldEnsureAllUsersInvoke(args)
    deny_all_users_invoke = flags.ShouldDenyAllUsersInvoke(args)

    if is_new_function:
        if (not ensure_all_users_invoke and not deny_all_users_invoke and
                api_util.CanAddFunctionIamPolicyBinding(_GetProject(args))):
            ensure_all_users_invoke = console_io.PromptContinue(
                prompt_string=('Allow unauthenticated invocations '
                               'of new function [{}]?'.format(args.NAME)),
                default=False)

        op = api_util.CreateFunction(function,
                                     function_ref.Parent().RelativeName())
        if (not ensure_all_users_invoke and not deny_all_users_invoke):
            template = ('Function created with limited-access IAM policy. '
                        'To enable unauthorized access consider "%s"')
            log.warning(template %
                        _CreateBindPolicyCommand(args.NAME, args.region))
            deny_all_users_invoke = True

    elif updated_fields:
        op = api_util.PatchFunction(function, updated_fields)

    else:
        op = None  # Nothing to wait for
        if not ensure_all_users_invoke and not deny_all_users_invoke:
            log.status.Print('Nothing to update.')
            return

    stop_trying_perm_set = [False]

    # The server asynchronously sets allUsers invoker permissions some time
    # after we create the function. That means, to remove it, we need to do
    # so after the server adds it. We can remove this mess after the default
    # changes.
    # TODO(b/139026575): Remove the "remove" path, only bother adding. Remove the
    # logic from the polling loop. Remove the ability to add logic like this to
    # the polling loop.
    def TryToSetInvokerPermission():
        """Try to make the invoker permission be what we said it should.

    This is for executing in the polling loop, and will stop trying as soon as
    it succeeds at making a change.
    """
        if stop_trying_perm_set[0]:
            return
        try:
            if ensure_all_users_invoke:
                api_util.AddFunctionIamPolicyBinding(function.name)
                stop_trying_perm_set[0] = True
            elif deny_all_users_invoke:
                stop_trying_perm_set[0] = (
                    api_util.RemoveFunctionIamPolicyBindingIfFound(
                        function.name))
        except exceptions.HttpException:
            stop_trying_perm_set[0] = True
            log.warning('Setting IAM policy failed, try "%s"' %
                        _CreateBindPolicyCommand(args.NAME, args.region))

    log_stackdriver_url = [True]

    def TryToLogStackdriverURL(op):
        """Logs stackdriver URL.

    This is for executing in the polling loop, and will stop trying as soon as
    it succeeds at making a change.

    Args:
      op: the operation
    """
        if log_stackdriver_url[0] and op.metadata:
            metadata = encoding.PyValueToMessage(
                messages.OperationMetadataV1,
                encoding.MessageToPyValue(op.metadata))
            if metadata.buildId:
                sd_info_template = '\nFor Cloud Build Stackdriver Logs, visit: %s'
                log.status.Print(sd_info_template %
                                 _CreateStackdriverURLforBuildLogs(
                                     metadata.buildId, _GetProject(args)))
                log_stackdriver_url[0] = False

    if op:
        api_util.WaitForFunctionUpdateOperation(
            op,
            try_set_invoker=TryToSetInvokerPermission,
            on_every_poll=[TryToLogStackdriverURL])
    return api_util.GetFunction(function.name)
Example #14
# limitations under the License.
"""Maps that match gcloud enum values to api enum ones."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.command_lib.util.apis import arg_utils

MESSAGES = cloudkms_base.GetMessagesModule()

DIGESTS = {'sha256', 'sha384', 'sha512'}

ALGORITHM_ENUM = MESSAGES.CryptoKeyVersionTemplate.AlgorithmValueValuesEnum
ALGORITHM_MAPPER = arg_utils.ChoiceEnumMapper('algorithm_enum', ALGORITHM_ENUM)

PURPOSE_ENUM = MESSAGES.CryptoKey.PurposeValueValuesEnum
PURPOSE_MAP = {
    'encryption': PURPOSE_ENUM.ENCRYPT_DECRYPT,
    'asymmetric-signing': PURPOSE_ENUM.ASYMMETRIC_SIGN,
    'asymmetric-encryption': PURPOSE_ENUM.ASYMMETRIC_DECRYPT,
}

PROTECTION_LEVEL_ENUM = (
    MESSAGES.CryptoKeyVersionTemplate.ProtectionLevelValueValuesEnum)
PROTECTION_LEVEL_MAPPER = arg_utils.ChoiceEnumMapper('protection_level_enum',
                                                     PROTECTION_LEVEL_ENUM)

# Add new algorithms according to their purposes here.
VALID_ALGORITHMS_MAP = {
Example #15
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.cloudbuild import cloudbuild_util
from googlecloudsdk.calliope import actions
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util import completers
from googlecloudsdk.command_lib.util.apis import arg_utils
from googlecloudsdk.core import properties
import six

_machine_type_flag_map = arg_utils.ChoiceEnumMapper(
    '--machine-type', (cloudbuild_util.GetMessagesModule()
                       ).BuildOptions.MachineTypeValueValuesEnum,
    include_filter=lambda s: six.text_type(s) != 'UNSPECIFIED',
    help_str='Machine type used to run the build.')


class BuildsCompleter(completers.ListCommandCompleter):
    def __init__(self, **kwargs):
        super(BuildsCompleter,
              self).__init__(collection='cloudbuild.projects.builds',
                             list_command='builds list --uri',
                             **kwargs)


def AddBuildArg(parser, intro=None):
    """Adds a 'build' arg to the given parser.
Example #16
  def testEnumProperty(self):
    mapper = arg_utils.ChoiceEnumMapper(
        '--test_arg', self.test_enum, help_str='Auxilio aliis.')
    self.assertEqual(self.test_enum, mapper.enum)
Example #17
def _Run(args,
         track=None,
         enable_runtime=True,
         enable_max_instances=False,
         enable_vpc_connector=False,
         enable_traffic_control=False,
         enable_allow_unauthenticated=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))
    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    had_vpc_connector = bool(
        function.vpcConnector) if not is_new_function else False
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event
        # type, raise an error.
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
            if args.runtime in ['nodejs',
                                'nodejs6']:  # nodejs is nodejs6 alias
                log.warning(
                    'The Node.js 6 runtime is deprecated on Cloud Functions. '
                    'Please migrate to Node.js 8 (--runtime=nodejs8) or Node.js 10 '
                    '(--runtime=nodejs10). '
                    'See https://cloud.google.com/functions/docs/migrating/nodejs-runtimes'
                )
        elif is_new_function:
            raise exceptions.RequiredArgumentException(
                'runtime', 'Flag `--runtime` is required for new functions.')
    if enable_max_instances:
        if (args.IsSpecified('max_instances')
                or args.IsSpecified('clear_max_instances')):
            max_instances = 0 if args.clear_max_instances else args.max_instances
            function.maxInstances = max_instances
            updated_fields.append('maxInstances')
    if enable_vpc_connector:
        if args.IsSpecified('vpc_connector'):
            function.vpcConnector = args.vpc_connector
            updated_fields.append('vpcConnector')
    if enable_traffic_control:
        if args.IsSpecified('egress_settings'):
            if not (had_vpc_connector or args.IsSpecified('vpc_connector')):
                raise exceptions.RequiredArgumentException(
                    'vpc-connector', 'Flag `--vpc-connector` is '
                    'required for setting `egress-settings`.')
            egress_settings_enum = arg_utils.ChoiceEnumMapper(
                arg_name='egress_settings',
                message_enum=function.VpcConnectorEgressSettingsValueValuesEnum,
                custom_mappings=flags.EGRESS_SETTINGS_MAPPING
            ).GetEnumForChoice(args.egress_settings)
            function.vpcConnectorEgressSettings = egress_settings_enum
            updated_fields.append('vpcConnectorEgressSettings')
        if args.IsSpecified('ingress_settings'):
            ingress_settings_enum = arg_utils.ChoiceEnumMapper(
                arg_name='ingress_settings',
                message_enum=function.IngressSettingsValueValuesEnum,
                custom_mappings=flags.INGRESS_SETTINGS_MAPPING
            ).GetEnumForChoice(args.ingress_settings)
            function.ingressSettings = ingress_settings_enum
            updated_fields.append('ingressSettings')
    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only Add source to function if its explicitly provided, a new function,
    # using a stage bucket or deploy of an existing function that previously
    # used local source.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function, function_ref,
                                               args.source, args.stage_bucket,
                                               args.ignore_file))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    ensure_all_users_invoke = (enable_allow_unauthenticated
                               and flags.ShouldEnsureAllUsersInvoke(args))
    deny_all_users_invoke = (enable_allow_unauthenticated
                             and flags.ShouldDenyAllUsersInvoke(args))

    if is_new_function:
        if (enable_allow_unauthenticated and not ensure_all_users_invoke
                and not deny_all_users_invoke and
                api_util.CanAddFunctionIamPolicyBinding(_GetProject(args))):
            ensure_all_users_invoke = console_io.PromptContinue(
                prompt_string=('Allow unauthenticated invocations '
                               'of new function [{}]?'.format(args.NAME)),
                default=False)

        op = api_util.CreateFunction(function,
                                     function_ref.Parent().RelativeName())
        if (enable_allow_unauthenticated and not ensure_all_users_invoke
                and not deny_all_users_invoke):
            template = ('Function created with default IAM policy. '
                        'To enable unauthorized access consider "%s"')
            log.warning(template %
                        _CreateBindPolicyCommand(args.NAME, args.region))

    elif updated_fields:
        op = api_util.PatchFunction(function, updated_fields)

    else:
        op = None  # Nothing to wait for
        if not ensure_all_users_invoke and not deny_all_users_invoke:
            log.status.Print('Nothing to update.')
            return

    try:
        if ensure_all_users_invoke:
            api_util.AddFunctionIamPolicyBinding(function.name)
        elif deny_all_users_invoke:
            api_util.RemoveFunctionIamPolicyBindingIfFound(function.name)
    except exceptions.HttpException:
        log.warning('Setting IAM policy failed, try "%s"' %
                    _CreateBindPolicyCommand(args.NAME, args.region))

    if op:
        api_util.WaitForFunctionUpdateOperation(op)
    return api_util.GetFunction(function.name)
Example #18
 def testEnumFromChoiceString(self):
   mapper = arg_utils.ChoiceEnumMapper(
       '--test_arg', self.test_enum, help_str='Auxilio aliis.')
   for enum in self.test_enum:
     self.assertEqual(enum,
                      mapper.GetEnumForChoice(self.string_mapping[enum.name]))
Example #19
_ACCELERATOR_MAP = arg_utils.ChoiceEnumMapper(
    '--accelerator-type',
    jobs.GetMessagesModule(
    ).GoogleCloudMlV1AcceleratorConfig.TypeValueValuesEnum,
    custom_mappings={
        'NVIDIA_TESLA_K80': ('nvidia-tesla-k80', 'NVIDIA Tesla K80 GPU'),
        'NVIDIA_TESLA_P100': ('nvidia-tesla-p100', 'NVIDIA Tesla P100 GPU.')
    },
Example #20
 def testChoiceStringFromEnumValue(self):
   mapper = arg_utils.ChoiceEnumMapper(
       '--test_arg', self.test_enum, help_str='Auxilio aliis.')
   for enum_string, choice in six.iteritems(self.string_mapping):
     self.assertEqual(choice, mapper.GetChoiceForEnum(
         self.test_enum(enum_string)))
Example #21
def GetDeleteFlagEnumMap(policy_enum):
    return arg_utils.ChoiceEnumMapper(_DELETE_FLAG_KWARGS['name'],
                                      policy_enum,
                                      help_str=_DELETE_FLAG_KWARGS['help_str'],
                                      default=_DELETE_FLAG_KWARGS['default'])
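
The _DELETE_FLAG_KWARGS dict this helper reads from was cut off in the excerpt. A hypothetical shape that would satisfy it, with assumed names and defaults (the real values live in the module this snippet came from):

_DELETE_FLAG_KWARGS = {  # hypothetical values, for illustration only
    'name': '--delete-policy',
    'help_str': 'Delete policy for resources removed by the update.',
    'default': 'delete',
}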
Example #22
_REVOCATION_MAPPING = {
    'REVOCATION_REASON_UNSPECIFIED': 'unspecified',
    'KEY_COMPROMISE': 'key-compromise',
    'CERTIFICATE_AUTHORITY_COMPROMISE': 'certificate-authority-compromise',
    'AFFILIATION_CHANGED': 'affiliation-changed',
    'SUPERSEDED': 'superseded',
    'CESSATION_OF_OPERATION': 'cessation-of-operation',
    'CERTIFICATE_HOLD': 'certificate-hold',
    'PRIVILEGE_WITHDRAWN': 'privilege-withdrawn',
    'ATTRIBUTE_AUTHORITY_COMPROMISE': 'attribute-authority-compromise'
}

_REVOCATION_REASON_MAPPER = arg_utils.ChoiceEnumMapper(
    arg_name='--reason',
    default='unspecified',
    help_str='Revocation reason to include in the CRL.',
    message_enum=privateca_base.GetMessagesModule().RevokeCertificateRequest
    .ReasonValueValuesEnum,
    custom_mappings=_REVOCATION_MAPPING)
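
A hedged usage sketch for the mapper above: gcloud commands typically register the generated argument through the mapper's choice_arg and convert the parsed value back inside Run(). The helper name below is an assumption, not part of the excerpted module:

def AddRevocationReasonFlag(parser):  # assumed helper name
  _REVOCATION_REASON_MAPPER.choice_arg.AddToParser(parser)

# Later, inside the command's Run(args):
#   reason = _REVOCATION_REASON_MAPPER.GetEnumForChoice(args.reason)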

_TIER_MAPPING = {
    'ENTERPRISE': 'enterprise',
    'DEVOPS': 'devops',
}

_TIER_MAPPER = arg_utils.ChoiceEnumMapper(
    arg_name='--tier',
    default='enterprise',
    help_str='The tier for the Certificate Authority.',
    message_enum=privateca_base.GetMessagesModule().CertificateAuthority
    .TierValueValuesEnum,
Example #23
def GetMaintenancePolicyEnumMapper(messages):
  return arg_utils.ChoiceEnumMapper(
      '--maintenance-policy',
      messages.NodeGroup.MaintenancePolicyValueValuesEnum,
      custom_mappings=_MAINTENANCE_POLICY_MAPPINGS,
  )

def _ToCamelCase(name):
  """Converts hyphen-case name to CamelCase."""
  parts = name.split('-')
  return ''.join(x.title() for x in parts)


def _InvalidValueError(value, flag, detail):
  return arg_parsers.ArgumentTypeError(
      'Invalid value [{}] for argument {}. {}'.format(value, flag, detail))


_TAINT_EFFECT_ENUM_MAPPER = arg_utils.ChoiceEnumMapper(
    '--node-taints',
    api_util.GetMessagesModule().GoogleCloudGkemulticloudV1NodeTaint
    .EffectValueValuesEnum,
    include_filter=lambda effect: 'UNSPECIFIED' not in effect)

_TAINT_FORMAT_HELP = 'Node taint is of format key=value:effect.'

_TAINT_EFFECT_HELP = 'Effect must be one of: {}.'.format(', '.join(
    [_ToCamelCase(e) for e in _TAINT_EFFECT_ENUM_MAPPER.choices]))
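
A minimal sketch of how these pieces could compose to validate a single taint, assuming the effect arrives in the mapper's hyphenated choice form (the helper name is an assumption; the real parsing lives elsewhere in the excerpted module):

def _ParseTaint(taint):  # assumed helper, for illustration only
  try:
    key_value, effect = taint.split(':')
    key, value = key_value.split('=')
  except ValueError:
    raise _InvalidValueError(taint, '--node-taints', _TAINT_FORMAT_HELP)
  effect_enum = _TAINT_EFFECT_ENUM_MAPPER.GetEnumForChoice(effect)
  if effect_enum is None:
    raise _InvalidValueError(effect, '--node-taints', _TAINT_EFFECT_HELP)
  return key, value, effect_enum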

_REPLICAPLACEMENT_FORMAT_HELP = (
    'Replica placement is of format subnetid:zone, for example subnetid12345:1')

_LOGGING_CHOICES = [constants.SYSTEM, constants.WORKLOAD]


def AddRegion(parser):
Example #25
TASK_NAME = base.Argument(
    '--task-name',
    required=False,
    default=None,
    help='If set, display only the logs for this particular task.')


_FRAMEWORK_CHOICES = {
    'TENSORFLOW': 'tensorflow',
    'SCIKIT_LEARN': 'scikit-learn',
    'XGBOOST': 'xgboost'
}
FRAMEWORK_MAPPER = arg_utils.ChoiceEnumMapper(
    '--framework',
    (versions_api.GetMessagesModule().
     GoogleCloudMlV1Version.FrameworkValueValuesEnum),
    custom_mappings=_FRAMEWORK_CHOICES,
    help_str=('The ML framework used to train this version of the model. '
              'If not specified, defaults to \'tensorflow\''))


def AddKmsKeyFlag(parser, resource):
  permission_info = '{} must hold permission {}'.format(
      "The 'AI Platform Service Agent' service account",
      "'Cloud KMS CryptoKey Encrypter/Decrypter'")
  kms_resource_args.AddKmsKeyResourceArg(
      parser, resource, permission_info=permission_info)


def AddPythonVersionFlag(parser, context):
  help_str = """\
Example #26
class Create(base.CreateCommand, dm_base.DmCommand):
    """Create a deployment.

  This command inserts (creates) a new deployment based on a provided config
  file.
  """

    detailed_help = {
        'EXAMPLES':
        """\
          To create a new deployment from a top-level yaml file, run:

            $ {command} my-deployment --config config.yaml --description "My deployment"

          To create a new deployment from a top-level template file, run:

            $ gcloud deployment-manager deployments create my-deployment \
            --template template.{jinja|py} \
            --properties "string-key:'string-value',integer-key:12345"

          To create a new deployment directly from a composite type, run:

            $ gcloud deployment-manager deployments create my-deployment \
            --composite-type <project-id>/composite:<type-name> \
            --properties "string-key:'string-value',integer-key:12345"

          To preview a deployment without actually creating resources, run:

            $ {command} my-new-deployment --config config.yaml --preview

          To instantiate a deployment that has been previewed, issue an update command for that deployment without specifying a config file.

          More information is available at https://cloud.google.com/deployment-manager/docs/configuration/.
          """,
    }

    _create_policy_flag_map = arg_utils.ChoiceEnumMapper(
        '--create-policy', (apis.GetMessagesModule(
            'deploymentmanager',
            'v2beta').DeploymentmanagerDeploymentsUpdateRequest.
                            CreatePolicyValueValuesEnum),
        help_str='Create policy for resources that have changed in the update',
        default='create-or-acquire')

    @staticmethod
    def Args(parser, version=base.ReleaseTrack.GA):
        """Args is called by calliope to gather arguments for this command.

    Args:
      parser: An argparse parser that you can use to add arguments that go
          on the command line after this command. Positional arguments are
          allowed.
      version: The version this tool is running as. base.ReleaseTrack.GA
          is the default.
    """
        group = parser.add_mutually_exclusive_group()

        config_group = parser.add_mutually_exclusive_group(required=True)

        flags.AddConfigFlags(config_group)
        flags.AddAsyncFlag(group)
        flags.AddDeploymentNameFlag(parser)
        flags.AddPropertiesFlag(parser)
        labels_util.AddCreateLabelsFlags(parser)

        group.add_argument(
            '--automatic-rollback-on-error',
            help='If the create request results in a deployment with resource '
            'errors, delete that deployment immediately after creation. '
            '(default=False)',
            dest='automatic_rollback',
            default=False,
            action='store_true')

        parser.add_argument(
            '--description',
            help='Optional description of the deployment to insert.',
            dest='description')

        parser.add_argument(
            '--preview',
            help=
            'Preview the requested create without actually instantiating the '
            'underlying resources. (default=False)',
            dest='preview',
            default=False,
            action='store_true')

        parser.display_info.AddFormat(flags.RESOURCES_AND_OUTPUTS_FORMAT)

    def Epilog(self, resources_were_displayed):
        """Called after resources are displayed if the default format was used.

    Args:
      resources_were_displayed: True if resources were displayed.
    """
        if not resources_were_displayed:
            log.status.Print(
                'No resources or outputs found in your deployment.')

    def Run(self, args):
        """Run 'deployments create'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      If --async=true, returns Operation to poll.
      Else, returns a struct containing the list of resources and list of
        outputs in the deployment.

    Raises:
      HttpException: An http error response was received while executing api
          request.
      ConfigError: Config file could not be read or parsed, or the
          deployment creation operation encountered an error.
    """
        deployment_ref = self.resources.Parse(
            args.deployment_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='deploymentmanager.deployments')
        if (not args.IsSpecified('format')) and args.async_:
            args.format = flags.OPERATION_FORMAT

        deployment = self.messages.Deployment(
            name=deployment_ref.deployment,
            target=importer.BuildTargetConfig(
                self.messages,
                config=args.config,
                template=args.template,
                composite_type=args.composite_type,
                properties=args.properties))

        self._SetMetadata(args, deployment)

        try:
            operation = self.client.deployments.Insert(
                self._BuildRequest(args=args,
                                   project=dm_base.GetProject(),
                                   deployment=deployment))

            # Fetch and print the latest fingerprint of the deployment.
            fingerprint = dm_api_util.FetchDeploymentFingerprint(
                self.client, self.messages, dm_base.GetProject(),
                deployment_ref.deployment)
            dm_util.PrintFingerprint(fingerprint)

        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error,
                                           dm_api_util.HTTP_ERROR_FORMAT)
        if args.async_:
            return operation
        else:
            op_name = operation.name
            try:
                operation = dm_write.WaitForOperation(
                    self.client,
                    self.messages,
                    op_name,
                    operation_description='create',
                    project=dm_base.GetProject(),
                    timeout=OPERATION_TIMEOUT)
                dm_util.LogOperationStatus(operation, 'Create')
            except apitools_exceptions.HttpError as error:
                # TODO(b/37911296): Use gcloud default error handling.
                raise exceptions.HttpException(error,
                                               dm_api_util.HTTP_ERROR_FORMAT)
            except dm_exceptions.OperationError as error:
                response = self._HandleOperationError(error, args, operation,
                                                      dm_base.GetProject(),
                                                      deployment_ref)
                if getattr(args, 'automatic_rollback', False):
                    args.format = flags.OPERATION_FORMAT
                return response

            return dm_api_util.FetchResourcesAndOutputs(
                self.client, self.messages, dm_base.GetProject(),
                deployment_ref.deployment,
                self.ReleaseTrack() is base.ReleaseTrack.ALPHA)

    def _BuildRequest(self,
                      args,
                      project,
                      deployment,
                      supports_create_policy=False):
        request = self.messages.DeploymentmanagerDeploymentsInsertRequest(
            project=project, deployment=deployment, preview=args.preview)
        if supports_create_policy and args.create_policy:
            parsed_create_flag = Create._create_policy_flag_map.GetEnumForChoice(
                args.create_policy).name
            request.createPolicy = (
                self.messages.DeploymentmanagerDeploymentsInsertRequest.
                CreatePolicyValueValuesEnum(parsed_create_flag))
        return request

    def _HandleOperationError(self, error, args, operation, project,
                              deployment_ref):
        if args.automatic_rollback:
            delete_operation = self._PerformRollback(deployment_ref.deployment,
                                                     str(error))
            create_operation = dm_api_util.GetOperation(
                self.client, self.messages, operation, project)

            return [create_operation, delete_operation]

        raise error

    def _SetMetadata(self, args, deployment):
        if args.description:
            deployment.description = args.description
        label_dict = labels_util.GetUpdateLabelsDictFromArgs(args)
        if label_dict:
            deployment.labels = [
                self.messages.DeploymentLabelEntry(key=k, value=v)
                for k, v in sorted(six.iteritems(label_dict))
            ]

    def _PerformRollback(self, deployment_name, error_message):
        # Print information about the failure.
        log.warning('There was an error deploying ' + deployment_name + ':\n' +
                    error_message)

        log.status.Print('`--automatic-rollback-on-error` flag was supplied; '
                         'deleting failed deployment...')

        # Delete the deployment.
        try:
            delete_operation = self.client.deployments.Delete(
                self.messages.DeploymentmanagerDeploymentsDeleteRequest(
                    project=dm_base.GetProject(),
                    deployment=deployment_name,
                ))
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error,
                                           dm_api_util.HTTP_ERROR_FORMAT)

        # TODO(b/37481635): Use gcloud default operation polling.
        dm_write.WaitForOperation(self.client,
                                  self.messages,
                                  delete_operation.name,
                                  'delete',
                                  dm_base.GetProject(),
                                  timeout=OPERATION_TIMEOUT)

        completed_operation = dm_api_util.GetOperation(self.client,
                                                       self.messages,
                                                       delete_operation,
                                                       dm_base.GetProject())
        return completed_operation
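
The _create_policy_flag_map defined on the class goes unused in the GA path above, since _BuildRequest defaults supports_create_policy to False. A hedged sketch of how a beta variant of the command might wire it up; the subclass and its overrides are assumptions, not part of the excerpt:

class CreateBeta(Create):  # hypothetical subclass, for illustration only
    @staticmethod
    def Args(parser, version=base.ReleaseTrack.BETA):
        Create.Args(parser, version=version)
        Create._create_policy_flag_map.choice_arg.AddToParser(parser)

    def _BuildRequest(self, args, project, deployment):
        return super(CreateBeta, self)._BuildRequest(
            args, project, deployment, supports_create_policy=True)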
Example #27
from googlecloudsdk.api_lib.genomics import genomics_util
from googlecloudsdk.calliope import arg_parsers
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.util.apis import arg_utils


def _GetFormatEnum():
  """Get Enum for Variant File Formats."""
  genomics_messages = genomics_util.GetGenomicsMessages()
  return genomics_messages.ImportVariantsRequest.FormatValueValuesEnum


_FILE_FORMAT_MAPPER = arg_utils.ChoiceEnumMapper(
    '--file-format',
    _GetFormatEnum(),
    custom_mappings={
        'FORMAT_COMPLETE_GENOMICS': 'complete-genomics', 'FORMAT_VCF': 'vcf'},
    default='vcf',
    help_str='Set the file format of the `--source-uris`.')
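
A hedged usage sketch (not from the excerpted command): because the mapper declares default='vcf', the parsed args.file_format is always set, so no None check is needed before converting it back to the enum.

# In Args(parser):
#   _FILE_FORMAT_MAPPER.choice_arg.AddToParser(parser)
# In Run(args):
#   file_format = _FILE_FORMAT_MAPPER.GetEnumForChoice(args.file_format)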


class Import(base.Command):
  """Imports variants into Google Genomics.

  Import variants from VCF or MasterVar files that are in Google Cloud Storage.
  """

  @staticmethod
  def Args(parser):
    """Register flags for this command."""
    parser.add_argument('--variantset-id',
Example #28
def _NotificationCategoryEnumMapper(notification_category_enum_message):
    return arg_utils.ChoiceEnumMapper('--notification-categories',
                                      notification_category_enum_message)
Example #29
def GetKeyAlgorithmFlag(key_type, messages):
    return arg_utils.ChoiceEnumMapper(
        '--{}-algorithm'.format(key_type),
        messages.DnsKeySpec.AlgorithmValueValuesEnum,
        help_str='String mnemonic specifying the DNSSEC algorithm of the '
        'key-signing key. Requires DNSSEC enabled.')
Example #30
"""Maps that match gcloud enum values to api enum ones."""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

from googlecloudsdk.api_lib.cloudkms import base as cloudkms_base
from googlecloudsdk.command_lib.util.apis import arg_utils

MESSAGES = cloudkms_base.GetMessagesModule()

DIGESTS = {'sha256', 'sha384', 'sha512'}

ALGORITHM_ENUM = MESSAGES.CryptoKeyVersionTemplate.AlgorithmValueValuesEnum
ALGORITHM_MAPPER = arg_utils.ChoiceEnumMapper('algorithm_enum', ALGORITHM_ENUM)

ALGORITHM_ENUM_FOR_IMPORT = MESSAGES.ImportCryptoKeyVersionRequest.AlgorithmValueValuesEnum
ALGORITHM_MAPPER_FOR_IMPORT = arg_utils.ChoiceEnumMapper(
    'algorithm_enum_for_import', ALGORITHM_ENUM_FOR_IMPORT)

IMPORT_METHOD_ENUM = MESSAGES.ImportJob.ImportMethodValueValuesEnum
IMPORT_METHOD_MAPPER = arg_utils.ChoiceEnumMapper('import_method_enum',
                                                  IMPORT_METHOD_ENUM)

PURPOSE_ENUM = MESSAGES.CryptoKey.PurposeValueValuesEnum
PURPOSE_MAP = {
    'encryption': PURPOSE_ENUM.ENCRYPT_DECRYPT,
    'asymmetric-signing': PURPOSE_ENUM.ASYMMETRIC_SIGN,
    'asymmetric-encryption': PURPOSE_ENUM.ASYMMETRIC_DECRYPT,
}
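
A short, hedged sketch of consuming these maps; GOOGLE_SYMMETRIC_ENCRYPTION is one algorithm present in the KMS enum, and its hyphenated choice string follows the mapper's default mangling:

algorithm = ALGORITHM_MAPPER.GetEnumForChoice('google-symmetric-encryption')
purpose = PURPOSE_MAP['encryption']  # PURPOSE_ENUM.ENCRYPT_DECRYPT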