Example #1
    def _ConstructCreateSettingsFromArgs(cls,
                                         sql_messages,
                                         args,
                                         instance=None,
                                         release_track=DEFAULT_RELEASE_TRACK):
        """Constructs create settings object from base settings and args."""
        original_settings = instance.settings if instance else None
        settings = cls._ConstructBaseSettingsFromArgs(sql_messages, args,
                                                      instance, release_track)

        backup_configuration = (reducers.BackupConfiguration(
            sql_messages,
            instance,
            backup=args.backup,
            backup_start_time=args.backup_start_time,
            enable_bin_log=args.enable_bin_log))
        if backup_configuration:
            cls.AddBackupConfigToSettings(settings, backup_configuration)

        settings.databaseFlags = (reducers.DatabaseFlags(
            sql_messages,
            original_settings,
            database_flags=args.database_flags))

        settings.maintenanceWindow = (reducers.MaintenanceWindow(
            sql_messages,
            instance,
            maintenance_release_channel=args.maintenance_release_channel,
            maintenance_window_day=args.maintenance_window_day,
            maintenance_window_hour=args.maintenance_window_hour))

        if args.storage_type:
            settings.dataDiskType = STORAGE_TYPE_PREFIX + args.storage_type

        # BETA args.
        if _IsBetaOrNewer(release_track):
            settings.userLabels = labels_util.ParseCreateArgs(
                args, sql_messages.Settings.UserLabelsValue)

        return settings
Example #2
  def Run(self, args):
    project = properties.VALUES.core.project.GetOrFail()
    region_ref = args.CONCEPTS.region.Parse()
    region = region_ref.AsDict()['locationsId']
    validation.ValidateRegion(region)

    with endpoint_util.AiplatformEndpointOverrides(
        version=self._version, region=region):
      api_client = client.CustomJobsClient(version=self._version)
      job_spec = self._PrepareJobSpec(args, api_client, project)
      labels = labels_util.ParseCreateArgs(
          args,
          api_client.CustomJobMessage().LabelsValue)

      response = api_client.Create(
          parent=region_ref.RelativeName(),
          display_name=args.display_name,
          job_spec=job_spec,
          kms_key_name=common_validation.GetAndValidateKmsKey(args),
          labels=labels)
      self._DisplayResult(response)
      return response
Example #3
    def Run(self, args):
        client = certificates.CertificateClient()
        cert_ref = args.CONCEPTS.certificate.Parse()
        location_ref = cert_ref.Parent()
        labels = labels_util.ParseCreateArgs(
            args, client.messages.Certificate.LabelsValue)

        response = client.Create(
            location_ref,
            cert_ref.certificatesId,
            self_managed_cert_data=client.messages.SelfManagedCertData(
                certificatePem=args.certificate_file.encode('utf-8'),
                privateKeyPem=args.private_key_file.encode('utf-8'),
            ),
            description=args.description,
            labels=labels)
        operation_response = util.WaitForOperation(response,
                                                   is_async=args.async_)
        log.CreatedResource(cert_ref.Name(),
                            'certificate',
                            is_async=args.async_)
        return operation_response
Example #4
  def Run(self, args):
    dataproc = dp.Dataproc(self.ReleaseTrack())
    messages = dataproc.messages

    template_ref = args.CONCEPTS.template.Parse()
    # TODO(b/109837200) make the dataproc discovery doc parameters consistent
    # Parent() fails for the collection because of projectId/projectsId and
    # regionId/regionsId inconsistencies.
    # parent = template_ref.Parent().RelativePath()
    parent = '/'.join(template_ref.RelativeName().split('/')[0:4])

    workflow_template = messages.WorkflowTemplate(
        id=template_ref.Name(), name=template_ref.RelativeName(),
        labels=labels_util.ParseCreateArgs(
            args, messages.WorkflowTemplate.LabelsValue))

    request = messages.DataprocProjectsRegionsWorkflowTemplatesCreateRequest(
        parent=parent, workflowTemplate=workflow_template)

    template = dataproc.client.projects_regions_workflowTemplates.Create(
        request)
    return template
Example #5
    def Run(self, args):
        dataproc = dp.Dataproc(self.ReleaseTrack())
        messages = dataproc.messages

        template_ref = args.CONCEPTS.template.Parse()
        parent = '/'.join(template_ref.RelativeName().split('/')[0:4])

        workflow_template = messages.WorkflowTemplate(
            id=template_ref.Name(),
            name=template_ref.RelativeName(),
            labels=labels_util.ParseCreateArgs(
                args, messages.WorkflowTemplate.LabelsValue))

        if args.dag_timeout:
            workflow_template.dagTimeout = six.text_type(
                args.dag_timeout) + 's'

        request = messages.DataprocProjectsRegionsWorkflowTemplatesCreateRequest(
            parent=parent, workflowTemplate=workflow_template)

        template = dataproc.client.projects_regions_workflowTemplates.Create(
            request)
        return template
Example #6
    def Run(self, args):
        dataproc = dp.Dataproc(self.ReleaseTrack())

        template_ref = args.CONCEPTS.template.Parse()

        workflow_template = dataproc.GetRegionsWorkflowTemplate(
            template_ref, args.version)

        if args.cluster_name:
            cluster_name = args.cluster_name
        else:
            cluster_name = template_ref.workflowTemplatesId

        compute_resources = compute_helpers.GetComputeResources(
            self.GetComputeReleaseTrack(), cluster_name,
            template_ref.regionsId)

        cluster_config = clusters.GetClusterConfig(
            args,
            dataproc,
            template_ref.projectsId,
            compute_resources,
            self.Beta(),
            include_deprecated=self.Beta())

        labels = labels_util.ParseCreateArgs(
            args, dataproc.messages.ManagedCluster.LabelsValue)

        managed_cluster = dataproc.messages.ManagedCluster(
            clusterName=cluster_name, config=cluster_config, labels=labels)

        workflow_template.placement = dataproc.messages.WorkflowTemplatePlacement(
            managedCluster=managed_cluster)

        response = dataproc.client.projects_regions_workflowTemplates.Update(
            workflow_template)
        return response
Example #7
 def Run(self, args):
     """Create a Cloud Filestore instance in the current project."""
     instance_ref = args.CONCEPTS.instance.Parse()
     client = filestore_client.FilestoreClient(self._API_VERSION)
     tier = instances_flags.GetTierArg(client.messages).GetEnumForChoice(
         args.tier)
     labels = labels_util.ParseCreateArgs(
         args, client.messages.Instance.LabelsValue)
     instance = client.ParseFilestoreConfig(tier=tier,
                                            description=args.description,
                                            file_share=args.file_share,
                                            network=args.network,
                                            labels=labels)
     try:
         client.ValidateFileShares(instance)
     except filestore_client.InvalidCapacityError as e:
         raise exceptions.InvalidArgumentException('--file-share',
                                                   six.text_type(e))
     result = client.CreateInstance(instance_ref, args.async_, instance)
     if args.async_:
         log.status.Print(
             '\nCheck the status of the new instance by listing all instances:\n  '
             '$ gcloud alpha filestore instances list')
     return result
Example #8
  def Run(self, args):
    messages = secrets_api.GetMessages()
    secret_ref = args.CONCEPTS.secret.Parse()
    data = secrets_util.ReadFileOrStdin(args.data_file)
    labels = labels_util.ParseCreateArgs(args, messages.Secret.LabelsValue)

    # Differentiate between the flag being provided with an empty value and the
    # flag being omitted. See b/138796299 for info.
    if args.data_file == '':  # pylint: disable=g-explicit-bool-comparison
      raise exceptions.ToolException(self.EMPTY_DATA_FILE_MESSAGE)

    # Create the secret
    response = secrets_api.Secrets().Create(
        secret_ref, labels=labels, locations=args.locations)

    # Create the version if data was given
    if data:
      version = secrets_api.Secrets().SetData(secret_ref, data)
      version_ref = secrets_args.ParseVersionRef(version.name)
      secrets_log.Versions().Created(version_ref)
    else:
      secrets_log.Secrets().Created(secret_ref)

    return response
Example #9
  def Run(self, args):
    client = registrations.RegistrationsClient()

    registration_ref = args.CONCEPTS.registration.Parse()
    location_ref = registration_ref.Parent()

    labels = labels_util.ParseCreateArgs(
        args, client.messages.Registration.LabelsValue)

    name_servers = util.ParseNameServers(args.name_servers, args.cloud_dns_zone,
                                         registration_ref.registrationsId)
    registrant_contact = util.ParseWhoisContact(
        args.registrant_contact_from_file)
    if registrant_contact is None:
      registrant_contact = util.PromptForWhoisContact()
    if registrant_contact is None:
      raise exceptions.Error(
          'Registrant contact is required. It can be provided interactively or '
          'through --registrant-contact-from-file flag.')

    availability = client.CheckAvailability(
        location_ref, registration_ref.registrationsId).availability

    if availability.available != client.availability_enum.AVAILABLE:
      raise exceptions.Error(
          'Domain [{}] is not available for registration: [{}]'.format(
              registration_ref.registrationsId, availability.available))

    whois_privacy = util.ParseWhoisPrivacy(args.whois_privacy)
    if whois_privacy is None:
      whois_privacy = util.PromptForWhoisPrivacy(
          availability.supportedWhoisPrivacy)

    hsts_notice_accepted = False
    if client.notices_enum.HSTS_PRELOADED in availability.notices:
      console_io.PromptContinue(
          ('{} is a secure namespace. You may purchase {} now but it will '
           'require an SSL certificate for website connection.').format(
               util.DomainNamespace(availability.domainName),
               availability.domainName),
          throw_if_unattended=True,
          cancel_on_no=True)
      hsts_notice_accepted = True

    console_io.PromptContinue(
        'Yearly price: {}\n'.format(
            util.TransformMoneyType(availability.yearlyPrice)),
        throw_if_unattended=True,
        cancel_on_no=True)

    response = client.Create(
        location_ref,
        registration_ref.registrationsId,
        name_servers=name_servers,
        registrant_contact=registrant_contact,
        whois_privacy=whois_privacy,
        yearly_price=availability.yearlyPrice,
        hsts_notice_accepted=hsts_notice_accepted,
        labels=labels,
        validate_only=args.validate_only)

    if args.validate_only:
      # TODO(b/110077203): Log something sensible.
      return

    if args.async_:
      # TODO(b/110077203): Log something sensible.
      return response

    operations_client = operations.Client.FromApiVersion('v1alpha1')
    operation_ref = util.ParseOperation(response.name)
    response = operations_client.WaitForOperation(
        operation_ref,
        'Waiting for [{}] to complete'.format(operation_ref.Name()))

    log.CreatedResource(registration_ref.Name(), 'registration')
    return response
Example #10
def ParseCreateLabels(models_client, args):
    return labels_util.ParseCreateArgs(
        args, models_client.messages.GoogleCloudMlV1Model.LabelsValue)
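The one-line helper above is the pattern every example on this page repeats: pass the parsed arguments and the message's LabelsValue class to ParseCreateArgs and get back a labels message (or None). Below is a minimal, self-contained sketch of that conversion; _LabelsValue and _ParseCreateLabels are hypothetical stand-ins that mimic, rather than call, labels_util.ParseCreateArgs and the apitools-generated messages.

# Illustrative sketch only: these stand-ins are not part of the Cloud SDK.
import argparse


class _LabelsValue(object):
    """Stand-in for an apitools LabelsValue message."""

    class AdditionalProperty(object):

        def __init__(self, key, value):
            self.key = key
            self.value = value

    def __init__(self, additionalProperties):
        self.additionalProperties = additionalProperties


def _ParseCreateLabels(args, labels_cls):
    """Turns a --labels KEY=VALUE[,...] flag into a labels message, or None."""
    if not args.labels:
        return None
    pairs = dict(item.split('=', 1) for item in args.labels.split(','))
    return labels_cls(additionalProperties=[
        labels_cls.AdditionalProperty(key=key, value=value)
        for key, value in sorted(pairs.items())
    ])


parser = argparse.ArgumentParser()
parser.add_argument('--labels')
labels = _ParseCreateLabels(
    parser.parse_args(['--labels', 'env=prod,team=ml']), _LabelsValue)
for prop in labels.additionalProperties:
    print(prop.key, prop.value)  # env prod / team ml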
Example #11
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: argparse.Namespace, All the arguments that were provided to this
        command invocation.

    Raises:
      files.Error: A file argument could not be read.
      GenomicsError: User input was invalid.
      HttpException: An http error response was received while executing api
          request.
    Returns:
      Operation representing the running pipeline.
    """
        v2 = False
        pipeline = None
        apitools_client = genomics_util.GetGenomicsClient('v1alpha2')
        genomics_messages = genomics_util.GetGenomicsMessages('v1alpha2')
        if args.pipeline_file:
            if args.command_line:
                # TODO(b/79982664): Use a mutex argument group instead.
                raise exceptions.GenomicsError(
                    '--command-line cannot be used with --pipeline-file.')

            pipeline = genomics_util.GetFileAsMessage(
                args.pipeline_file, genomics_messages.Pipeline,
                self.context[lib.STORAGE_V1_CLIENT_KEY])
            pipeline.projectId = genomics_util.GetProjectId()

            if not pipeline.docker:
                v2 = True
                apitools_client = genomics_util.GetGenomicsClient('v2alpha1')
                genomics_messages = genomics_util.GetGenomicsMessages(
                    'v2alpha1')
                pipeline = genomics_util.GetFileAsMessage(
                    args.pipeline_file, genomics_messages.Pipeline,
                    self.context[lib.STORAGE_V1_CLIENT_KEY])
        elif args.command_line:
            v2 = True
            apitools_client = genomics_util.GetGenomicsClient('v2alpha1')
            genomics_messages = genomics_util.GetGenomicsMessages('v2alpha1')
            pipeline = genomics_messages.Pipeline(actions=[
                genomics_messages.Action(imageUri=args.docker_image,
                                         commands=['-c', args.command_line],
                                         entrypoint='bash')
            ])
        else:
            raise exceptions.GenomicsError(
                'Either --pipeline-file or --command-line is required.')

        arg_inputs, is_local_file = _ValidateAndMergeArgInputs(args)

        request = None
        if v2:
            # Create messages up front to avoid checking for None everywhere.
            if not pipeline.resources:
                pipeline.resources = genomics_messages.Resources()
            resources = pipeline.resources

            if not resources.virtualMachine:
                resources.virtualMachine = genomics_messages.VirtualMachine(
                    machineType='n1-standard-1')
            virtual_machine = resources.virtualMachine

            if not virtual_machine.serviceAccount:
                virtual_machine.serviceAccount = genomics_messages.ServiceAccount(
                )

            # Always set the project id.
            resources.projectId = genomics_util.GetProjectId()

            # Update the pipeline based on arguments.
            if args.memory or args.cpus:
                # Default to n1-standard1 sizes.
                virtual_machine.machineType = 'custom-%d-%d' % (
                    args.cpus or 1, (args.memory or 3.84) * 1000)

            if args.preemptible:
                virtual_machine.preemptible = args.preemptible

            if args.zones:
                resources.zones = args.zones
            elif not resources.zones and properties.VALUES.compute.zone.Get():
                resources.zones = [properties.VALUES.compute.zone.Get()]

            if args.regions:
                resources.regions = args.regions
            elif not resources.regions and properties.VALUES.compute.region.Get(
            ):
                resources.regions = [properties.VALUES.compute.region.Get()]

            if args.service_account_email != 'default':
                virtual_machine.serviceAccount.email = args.service_account_email

            if args.service_account_scopes:
                virtual_machine.serviceAccount.scopes = args.service_account_scopes

            # Always add a scope for GCS in case any arguments need it.
            virtual_machine.serviceAccount.scopes.append(
                'https://www.googleapis.com/auth/devstorage.read_write')

            # Generate paths for inputs and outputs in a shared location and put them
            # into the environment for actions based on their name.
            env = {}
            if arg_inputs:
                input_generator = _SharedPathGenerator('input')
                for name, value in arg_inputs.items():
                    if genomics_util.IsGcsPath(value):
                        env[name] = input_generator.Generate()
                        pipeline.actions.insert(
                            0,
                            genomics_messages.Action(
                                imageUri=CLOUD_SDK_IMAGE,
                                commands=[
                                    '/bin/sh', '-c',
                                    'gsutil -q cp %s ${%s}' % (value, name)
                                ]))
                    elif name in is_local_file:
                        env[name] = input_generator.Generate()
                        pipeline.actions.insert(
                            0,
                            genomics_messages.Action(
                                imageUri=CLOUD_SDK_IMAGE,
                                commands=[
                                    '/bin/sh', '-c',
                                    'echo "%s" | base64 -d > ${%s}' %
                                    (base64.b64encode(value), name)
                                ]))
                    else:
                        env[name] = value

            if args.outputs:
                output_generator = _SharedPathGenerator('output')
                for name, value in args.outputs.items():
                    env[name] = output_generator.Generate()
                    pipeline.actions.append(
                        genomics_messages.Action(imageUri=CLOUD_SDK_IMAGE,
                                                 commands=[
                                                     '/bin/sh', '-c',
                                                     'gsutil -q cp ${%s} %s' %
                                                     (name, value)
                                                 ]))

            # Merge any existing pipeline arguments into the generated environment and
            # update the pipeline.
            if pipeline.environment:
                for val in pipeline.environment.additionalProperties:
                    if val.key not in env:
                        env[val.key] = val.value

            pipeline.environment = genomics_messages.Pipeline.EnvironmentValue(
                additionalProperties=genomics_util.
                ArgDictToAdditionalPropertiesList(
                    env, genomics_messages.Pipeline.EnvironmentValue.
                    AdditionalProperty))

            if arg_inputs or args.outputs:
                virtual_machine.disks.append(
                    genomics_messages.Disk(name=SHARED_DISK))

                for action in pipeline.actions:
                    action.mounts.append(
                        genomics_messages.Mount(disk=SHARED_DISK,
                                                path='/' + SHARED_DISK))

            if args.logging:
                pipeline.actions.append(
                    genomics_messages.Action(
                        imageUri=CLOUD_SDK_IMAGE,
                        commands=[
                            '/bin/sh', '-c',
                            'gsutil -q cp /google/logs/output ' + args.logging
                        ],
                        flags=[(genomics_messages.Action.
                                FlagsValueListEntryValuesEnum.ALWAYS_RUN)]))

            # Update disk sizes if specified, potentially including the shared disk.
            if args.disk_size:
                disk_sizes = {}
                for disk_encoding in args.disk_size.split(','):
                    parts = disk_encoding.split(':', 1)
                    try:
                        disk_sizes[parts[0]] = int(parts[1])
                    except (IndexError, ValueError):
                        raise exceptions.GenomicsError('Invalid --disk-size.')

                for disk in virtual_machine.disks:
                    size = disk_sizes.get(disk.name)
                    if size:
                        disk.sizeGb = size

            request = genomics_messages.RunPipelineRequest(
                pipeline=pipeline,
                labels=labels_util.ParseCreateArgs(
                    args, genomics_messages.RunPipelineRequest.LabelsValue))
        else:
            inputs = genomics_util.ArgDictToAdditionalPropertiesList(
                arg_inputs, genomics_messages.RunPipelineArgs.InputsValue.
                AdditionalProperty)
            outputs = genomics_util.ArgDictToAdditionalPropertiesList(
                args.outputs, genomics_messages.RunPipelineArgs.OutputsValue.
                AdditionalProperty)

            # Set "overrides" on the resources. If the user did not pass anything on
            # the command line, do not set anything in the resource: preserve the
            # user-intent "did not set" vs. "set an empty value/list"

            resources = genomics_messages.PipelineResources(
                preemptible=args.preemptible)
            if args.memory:
                resources.minimumRamGb = args.memory
            if args.cpus:
                resources.minimumCpuCores = args.cpus
            if args.disk_size:
                resources.disks = []
                for disk_encoding in args.disk_size.split(','):
                    disk_args = disk_encoding.split(':', 1)
                    resources.disks.append(
                        genomics_messages.Disk(name=disk_args[0],
                                               sizeGb=int(disk_args[1])))

            # Progression for picking the right zones...
            #   If specified on the command line, use them.
            #   If specified in the Pipeline definition, use them.
            #   If there is a GCE default zone in the local configuration, use it.
            #   Else let the API select a zone
            if args.zones:
                resources.zones = args.zones
            elif pipeline.resources and pipeline.resources.zones:
                pass
            elif properties.VALUES.compute.zone.Get():
                resources.zones = [properties.VALUES.compute.zone.Get()]

            request = genomics_messages.RunPipelineRequest(
                ephemeralPipeline=pipeline,
                pipelineArgs=genomics_messages.RunPipelineArgs(
                    inputs=genomics_messages.RunPipelineArgs.InputsValue(
                        additionalProperties=inputs),
                    outputs=genomics_messages.RunPipelineArgs.OutputsValue(
                        additionalProperties=outputs),
                    clientId=args.run_id,
                    logging=genomics_messages.LoggingOptions(
                        gcsPath=args.logging),
                    labels=labels_util.ParseCreateArgs(
                        args, genomics_messages.RunPipelineArgs.LabelsValue),
                    projectId=genomics_util.GetProjectId(),
                    serviceAccount=genomics_messages.ServiceAccount(
                        email=args.service_account_email,
                        scopes=args.service_account_scopes),
                    resources=resources))

        result = apitools_client.pipelines.Run(request)
        log.status.Print('Running [{0}].'.format(result.name))
        return result
Example #12
  def Run(self, args):
    """Issues an InstanceTemplates.Insert request.

    Args:
      args: the argparse arguments that this command was invoked with.

    Returns:
      an InstanceTemplate message object
    """
    self._ValidateArgs(args)
    instances_flags.ValidateNetworkTierArgs(args)

    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    container_mount_disk = instances_flags.GetValidatedContainerMountDisk(
        holder, args.container_mount_disk, args.disk, args.create_disk)

    client = holder.client
    instance_template_ref = self._GetInstanceTemplateRef(args, holder)
    image_uri = self._GetImageUri(args, client, holder, instance_template_ref)
    labels = containers_utils.GetLabelsMessageWithCosVersion(
        None, image_uri, holder.resources, client.messages.InstanceProperties)
    argument_labels = labels_util.ParseCreateArgs(
        args, client.messages.InstanceProperties.LabelsValue)
    if argument_labels:
      labels.additionalProperties.extend(argument_labels.additionalProperties)

    metadata = self._GetUserMetadata(
        args,
        client,
        instance_template_ref,
        container_mount_disk_enabled=True,
        container_mount_disk=container_mount_disk)
    network_interfaces = self._GetNetworkInterfaces(args, client, holder)
    scheduling = self._GetScheduling(args, client)
    service_accounts = self._GetServiceAccounts(args, client)
    machine_type = self._GetMachineType(args)
    disks = self._GetDisks(
        args,
        client,
        holder,
        instance_template_ref,
        image_uri,
        match_container_mount_disks=True)
    guest_accelerators = (
        instance_template_utils.CreateAcceleratorConfigMessages(
            client.messages, getattr(args, 'accelerator', None)))

    properties = client.messages.InstanceProperties(
        machineType=machine_type,
        disks=disks,
        canIpForward=args.can_ip_forward,
        labels=labels,
        metadata=metadata,
        minCpuPlatform=args.min_cpu_platform,
        networkInterfaces=network_interfaces,
        serviceAccounts=service_accounts,
        scheduling=scheduling,
        tags=containers_utils.CreateTagsMessage(client.messages, args.tags),
        guestAccelerators=guest_accelerators)

    if args.private_ipv6_google_access_type is not None:
      properties.privateIpv6GoogleAccess = (
          instances_flags.GetPrivateIpv6GoogleAccessTypeFlagMapperForTemplate(
              client.messages).GetEnumForChoice(
                  args.private_ipv6_google_access_type))

    request = client.messages.ComputeInstanceTemplatesInsertRequest(
        instanceTemplate=client.messages.InstanceTemplate(
            properties=properties,
            description=args.description,
            name=instance_template_ref.Name(),
        ),
        project=instance_template_ref.project)

    return client.MakeRequests([(client.apitools_client.instanceTemplates,
                                 'Insert', request)])
Example #13
    def Run(self, args):
        messages = secrets_api.GetMessages()
        secret_ref = args.CONCEPTS.secret.Parse()
        data = secrets_util.ReadFileOrStdin(args.data_file)
        replication_policy_contents = secrets_util.ReadFileOrStdin(
            args.replication_policy_file, is_binary=False)
        labels = labels_util.ParseCreateArgs(args, messages.Secret.LabelsValue)
        replication_policy = args.replication_policy
        locations = args.locations
        kms_keys = []

        if args.replication_policy_file and args.replication_policy:
            raise exceptions.ConflictingArgumentsException(
                self.POLICY_AND_POLICY_FILE_MESSAGE)
        if args.replication_policy_file and args.locations:
            raise exceptions.ConflictingArgumentsException(
                self.LOCATIONS_AND_POLICY_FILE_MESSAGE)
        if args.replication_policy_file and args.kms_key_name:
            raise exceptions.ConflictingArgumentsException(
                self.KMS_KEY_AND_POLICY_FILE_MESSAGE)

        if args.kms_key_name:
            kms_keys.append(args.kms_key_name)
        if args.replication_policy_file:
            if not replication_policy_contents:
                raise exceptions.InvalidArgumentException(
                    'replication-policy',
                    self.REPLICATION_POLICY_FILE_EMPTY_MESSAGE)
            replication_policy, locations, kms_keys = secrets_util.ParseReplicationFileContents(
                replication_policy_contents)

        else:

            if not replication_policy:
                replication_policy = properties.VALUES.secrets.replication_policy.Get(
                )
            default_to_automatic = replication_policy is None
            if default_to_automatic:
                replication_policy = 'automatic'

            if replication_policy not in {'user-managed', 'automatic'}:
                if args.replication_policy:
                    raise exceptions.InvalidArgumentException(
                        'replication-policy', self.INVALID_POLICY_MESSAGE)
                raise exceptions.InvalidArgumentException(
                    'replication-policy', self.INVALID_POLICY_PROP_MESSAGE)
            if replication_policy == 'user-managed' and kms_keys:
                raise exceptions.InvalidArgumentException(
                    'kms-key-name', self.KMS_KEY_AND_USER_MANAGED_MESSAGE)

            if not locations:
                # if locations weren't given, try to get them from properties
                locations = properties.VALUES.secrets.locations.Get()
                if locations:
                    locations = locations.split(',')
            if replication_policy == 'user-managed' and not locations:
                raise exceptions.RequiredArgumentException(
                    'locations', self.MANAGED_BUT_NO_LOCATIONS_MESSAGE)
            if replication_policy == 'automatic':
                if args.locations:
                    # check args.locations separately from locations because we have
                    # different error messages depending on whether the user used the
                    # --locations flag or the secrets/locations property
                    if args.replication_policy:
                        raise exceptions.InvalidArgumentException(
                            'locations', self.AUTOMATIC_AND_LOCATIONS_MESSAGE)
                    if default_to_automatic:
                        raise exceptions.InvalidArgumentException(
                            'locations', self.NO_POLICY_AND_LOCATIONS_MESSAGE)
                    raise exceptions.InvalidArgumentException(
                        'locations', self.AUTOMATIC_PROP_AND_LOCATIONS_MESSAGE)
                if locations:
                    raise exceptions.InvalidArgumentException(
                        'replication-policy',
                        self.AUTOMATIC_AND_LOCATIONS_PROP_MESSAGE)
                locations = []

        # Differentiate between the flag being provided with an empty value and the
        # flag being omitted. See b/138796299 for info.
        if args.data_file == '':  # pylint: disable=g-explicit-bool-comparison
            raise exceptions.BadFileException(self.EMPTY_DATA_FILE_MESSAGE)

        if args.expire_time:
            msg = self.CONFIRM_EXPIRE_TIME_MESSAGE.format(
                expire_time=args.expire_time)
            console_io.PromptContinue(msg,
                                      throw_if_unattended=True,
                                      cancel_on_no=True)

        if args.ttl:
            msg = self.CONFIRM_TTL_MESSAGE.format(ttl=args.ttl)
            console_io.PromptContinue(msg,
                                      throw_if_unattended=True,
                                      cancel_on_no=True)

        # Create the secret
        response = secrets_api.Secrets().Create(
            secret_ref,
            labels=labels,
            locations=locations,
            policy=replication_policy,
            expire_time=args.expire_time,
            ttl=args.ttl,
            keys=kms_keys,
            next_rotation_time=args.next_rotation_time,
            rotation_period=args.rotation_period,
            topics=args.topics)

        if data:
            data_crc32c = crc32c.get_crc32c(data)
            version = secrets_api.Secrets().AddVersion(
                secret_ref, data, crc32c.get_checksum(data_crc32c))
            version_ref = secrets_args.ParseVersionRef(version.name)
            secrets_log.Versions().Created(version_ref)
        else:
            secrets_log.Secrets().Created(secret_ref)

        return response
Example #14
def _Run(args, legacy_output=False):
    """Creates one or more topics."""
    client = topics.TopicsClient()

    labels = labels_util.ParseCreateArgs(args,
                                         client.messages.Topic.LabelsValue)

    kms_key = None
    kms_ref = args.CONCEPTS.kms_key.Parse()
    if kms_ref:
        kms_key = kms_ref.RelativeName()
    else:
        # Did user supply any topic-encryption-key flags?
        for keyword in [
                'topic-encryption-key', 'topic-encryption-key-project',
                'topic-encryption-key-location', 'topic-encryption-key-keyring'
        ]:
            if args.IsSpecified(keyword.replace('-', '_')):
                raise core_exceptions.Error(
                    '--topic-encryption-key was not fully specified.')

    retention_duration = getattr(args, 'message_retention_duration', None)
    if retention_duration:
        retention_duration = util.FormatDuration(retention_duration)

    message_storage_policy_allowed_regions = args.message_storage_policy_allowed_regions

    schema = getattr(args, 'schema', None)
    if schema:
        schema = args.CONCEPTS.schema.Parse().RelativeName()
    message_encoding_list = getattr(args, 'message_encoding', None)
    message_encoding = None
    if message_encoding_list:
        message_encoding = message_encoding_list[0]

    failed = []
    for topic_ref in args.CONCEPTS.topic.Parse():
        try:
            result = client.Create(
                topic_ref,
                labels=labels,
                kms_key=kms_key,
                message_retention_duration=retention_duration,
                message_storage_policy_allowed_regions=
                message_storage_policy_allowed_regions,
                schema=schema,
                message_encoding=message_encoding)
        except api_ex.HttpError as error:
            exc = exceptions.HttpException(error)
            log.CreatedResource(topic_ref.RelativeName(),
                                kind='topic',
                                failed=exc.payload.status_message)
            failed.append(topic_ref.topicsId)
            continue

        if legacy_output:
            result = util.TopicDisplayDict(result)
        log.CreatedResource(topic_ref.RelativeName(), kind='topic')
        yield result

    if failed:
        raise util.RequestsFailedError(failed, 'create')
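Example #14 is the only multi-resource command here: it attempts every topic, yields each success as it is created, and defers a single aggregate error until all creates have been tried. A rough, self-contained sketch of that loop shape follows; the names are illustrative only, not the real gcloud helpers.

# Hypothetical sketch of the yield-successes, collect-failures loop above.
def create_all(names, create_fn):
    failed = []
    for name in names:
        try:
            result = create_fn(name)
        except RuntimeError as error:  # stand-in for api_ex.HttpError
            print('Failed to create [%s]: %s' % (name, error))
            failed.append(name)
            continue
        print('Created [%s].' % name)
        yield result
    if failed:
        raise RuntimeError('Could not create: %s' % ', '.join(failed))

# Usage: the generator must be consumed for the aggregate error to surface.
# topics = list(create_all(['topic-a', 'topic-b'], make_topic))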
Example #15
 def ConfigureCluster(messages, args, cluster):
     """Performs any additional configuration of the cluster."""
     cluster.labels = labels_util.ParseCreateArgs(
         args, messages.Cluster.LabelsValue)
Example #16
    def Run(self, args):
        """Returns a list of requests necessary for snapshotting disks."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())

        disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))
        if args.snapshot_names:
            if len(disk_refs) != len(args.snapshot_names):
                raise exceptions.ToolException(
                    '[--snapshot-names] must have the same number of values as disks '
                    'being snapshotted.')
            snapshot_names = args.snapshot_names
        else:
            # Generates names like "d52jsqy3db4q".
            snapshot_names = [
                name_generator.GenerateRandomName() for _ in disk_refs
            ]

        snapshot_refs = [
            holder.resources.Parse(
                snapshot_name,
                params={
                    'project': properties.VALUES.core.project.GetOrFail,
                },
                collection='compute.snapshots')
            for snapshot_name in snapshot_names
        ]

        client = holder.client.apitools_client
        messages = holder.client.messages

        requests = []

        for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
            # This feature is only exposed in alpha/beta
            allow_rsa_encrypted = self.ReleaseTrack() in [
                base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
            ]
            csek_keys = csek_utils.CsekKeyStore.FromArgs(
                args, allow_rsa_encrypted)
            disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                csek_keys, disk_ref, client)

            snapshot_message = messages.Snapshot(
                name=snapshot_ref.Name(),
                description=args.description,
                sourceDiskEncryptionKey=disk_key_or_none)
            if (hasattr(args, 'storage_location')
                    and args.IsSpecified('storage_location')):
                snapshot_message.storageLocations = [args.storage_location]
            if (hasattr(args, 'labels') and args.IsSpecified('labels')):
                snapshot_message.labels = labels_util.ParseCreateArgs(
                    args, messages.Snapshot.LabelsValue)

            if disk_ref.Collection() == 'compute.disks':
                request = messages.ComputeDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    zone=disk_ref.zone,
                    guestFlush=args.guest_flush)
                requests.append((client.disks, 'CreateSnapshot', request))
            elif disk_ref.Collection() == 'compute.regionDisks':
                request = messages.ComputeRegionDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    region=disk_ref.region)
                if hasattr(request,
                           'guestFlush'):  # only available in alpha API
                    guest_flush = getattr(args, 'guest_flush', None)
                    if guest_flush is not None:
                        request.guestFlush = guest_flush
                requests.append(
                    (client.regionDisks, 'CreateSnapshot', request))

        errors_to_collect = []
        responses = holder.client.BatchRequests(requests, errors_to_collect)
        for r in responses:
            err = getattr(r, 'error', None)
            if err:
                errors_to_collect.append(poller.OperationErrors(err.errors))
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        if args.async_:
            for operation_ref in operation_refs:
                log.status.Print('Disk snapshot in progress for [{}].'.format(
                    operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command '
                'to check the status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(holder.client, client.snapshots,
                                              snapshot_refs)
        return waiter.WaitFor(operation_poller,
                              poller.OperationBatch(operation_refs),
                              'Creating snapshot(s) {0}'.format(', '.join(
                                  s.Name() for s in snapshot_refs)),
                              max_wait_ms=None)
Example #17
def _RunCreate(compute_api,
               args,
               support_source_instance,
               support_network_tier=False,
               support_shielded_vms=False,
               support_node_affinity=False):
    """Common routine for creating instance template.

  This is shared between various release tracks.

  Args:
      compute_api: The compute api.
      args: argparse.Namespace, An object that contains the values for the
          arguments specified in the .Args() method.
      support_source_instance: indicates whether source instance is supported.
      support_network_tier: Indicates whether network tier is supported or not.
      support_shielded_vms: Indicate whether a shielded vm config is supported
      or not.
      support_node_affinity: Indicate whether node affinity is supported or not.

  Returns:
      A resource object dispatched by display.Displayer().
  """
    _ValidateInstancesFlags(args)
    if support_network_tier:
        instances_flags.ValidateNetworkTierArgs(args)

    client = compute_api.client

    boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
    utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

    instance_template_ref = (Create.InstanceTemplateArg.ResolveAsResource(
        args, compute_api.resources))

    metadata = metadata_utils.ConstructMetadataMessage(
        client.messages,
        metadata=args.metadata,
        metadata_from_file=args.metadata_from_file)

    if hasattr(args, 'network_interface') and args.network_interface:
        network_interfaces = (
            instance_template_utils.CreateNetworkInterfaceMessages)(
                resources=compute_api.resources,
                scope_lister=flags.GetDefaultScopeLister(client),
                messages=client.messages,
                network_interface_arg=args.network_interface,
                region=args.region,
                support_network_tier=support_network_tier)
    else:
        network_tier = getattr(args, 'network_tier', None)
        network_interfaces = [
            instance_template_utils.CreateNetworkInterfaceMessage(
                resources=compute_api.resources,
                scope_lister=flags.GetDefaultScopeLister(client),
                messages=client.messages,
                network=args.network,
                region=args.region,
                subnet=args.subnet,
                address=(instance_template_utils.EPHEMERAL_ADDRESS
                         if not args.no_address and not args.address else
                         args.address),
                network_tier=network_tier)
        ]

    # Compute the shieldedVmConfig message.
    if support_shielded_vms:
        shieldedvm_config_message = BuildShieldedVMConfigMessage(
            messages=client.messages, args=args)

    node_affinities = None
    if support_node_affinity:
        node_affinities = sole_tenancy_util.GetSchedulingNodeAffinityListFromArgs(
            args, client.messages)

    scheduling = instance_utils.CreateSchedulingMessage(
        messages=client.messages,
        maintenance_policy=args.maintenance_policy,
        preemptible=args.preemptible,
        restart_on_failure=args.restart_on_failure,
        node_affinities=node_affinities)

    if args.no_service_account:
        service_account = None
    else:
        service_account = args.service_account
    service_accounts = instance_utils.CreateServiceAccountMessages(
        messages=client.messages,
        scopes=[] if args.no_scopes else args.scopes,
        service_account=service_account)

    create_boot_disk = not instance_utils.UseExistingBootDisk(args.disk or [])
    if create_boot_disk:
        image_expander = image_utils.ImageExpander(client,
                                                   compute_api.resources)
        try:
            image_uri, _ = image_expander.ExpandImageFlag(
                user_project=instance_template_ref.project,
                image=args.image,
                image_family=args.image_family,
                image_project=args.image_project,
                return_image_resource=True)
        except utils.ImageNotFoundError as e:
            if args.IsSpecified('image_project'):
                raise e
            image_uri, _ = image_expander.ExpandImageFlag(
                user_project=instance_template_ref.project,
                image=args.image,
                image_family=args.image_family,
                image_project=args.image_project,
                return_image_resource=False)
            raise utils.ImageNotFoundError(
                'The resource [{}] was not found. Is the image located in another '
                'project? Use the --image-project flag to specify the '
                'project where the image is located.'.format(image_uri))
    else:
        image_uri = None

    if args.tags:
        tags = client.messages.Tags(items=args.tags)
    else:
        tags = None

    persistent_disks = (
        instance_template_utils.CreatePersistentAttachedDiskMessages(
            client.messages, args.disk or []))

    persistent_create_disks = (
        instance_template_utils.CreatePersistentCreateDiskMessages(
            client, compute_api.resources, instance_template_ref.project,
            getattr(args, 'create_disk', [])))

    if create_boot_disk:
        boot_disk_list = [
            instance_template_utils.CreateDefaultBootAttachedDiskMessage(
                messages=client.messages,
                disk_type=args.boot_disk_type,
                disk_device_name=args.boot_disk_device_name,
                disk_auto_delete=args.boot_disk_auto_delete,
                disk_size_gb=boot_disk_size_gb,
                image_uri=image_uri)
        ]
    else:
        boot_disk_list = []

    local_ssds = []
    for x in args.local_ssd or []:
        local_ssd = instance_utils.CreateLocalSsdMessage(
            compute_api.resources, client.messages, x.get('device-name'),
            x.get('interface'), x.get('size'))
        local_ssds.append(local_ssd)

    disks = (boot_disk_list + persistent_disks + persistent_create_disks +
             local_ssds)

    machine_type = instance_utils.InterpretMachineType(
        machine_type=args.machine_type,
        custom_cpu=args.custom_cpu,
        custom_memory=args.custom_memory,
        ext=getattr(args, 'custom_extensions', None))

    guest_accelerators = (
        instance_template_utils.CreateAcceleratorConfigMessages(
            client.messages, getattr(args, 'accelerator', None)))

    instance_template = client.messages.InstanceTemplate(
        properties=client.messages.InstanceProperties(
            machineType=machine_type,
            disks=disks,
            canIpForward=args.can_ip_forward,
            metadata=metadata,
            minCpuPlatform=args.min_cpu_platform,
            networkInterfaces=network_interfaces,
            serviceAccounts=service_accounts,
            scheduling=scheduling,
            tags=tags,
            guestAccelerators=guest_accelerators,
        ),
        description=args.description,
        name=instance_template_ref.Name(),
    )

    if support_shielded_vms:
        instance_template.properties.shieldedVmConfig = shieldedvm_config_message

    request = client.messages.ComputeInstanceTemplatesInsertRequest(
        instanceTemplate=instance_template,
        project=instance_template_ref.project)

    request.instanceTemplate.properties.labels = labels_util.ParseCreateArgs(
        args, client.messages.InstanceProperties.LabelsValue)

    _AddSourceInstanceToTemplate(compute_api, args, instance_template,
                                 support_source_instance)

    return client.MakeRequests([(client.apitools_client.instanceTemplates,
                                 'Insert', request)])
Example #18
    def Run(self, args):
        self.client = privateca_base.GetClientInstance(api_version='v1')
        self.messages = privateca_base.GetMessagesModule(api_version='v1')

        self._ValidateArgs(args)

        cert_ref = args.CONCEPTS.certificate.Parse()
        labels = labels_util.ParseCreateArgs(
            args, self.messages.Certificate.LabelsValue)

        request = self.messages.PrivatecaProjectsLocationsCaPoolsCertificatesCreateRequest(
        )
        request.certificate = self.messages.Certificate()
        request.certificateId = cert_ref.Name()
        request.certificate.lifetime = flags_v1.ParseValidityFlag(args)
        request.certificate.labels = labels
        request.parent = cert_ref.Parent().RelativeName()
        request.requestId = request_utils.GenerateRequestId()
        request.validateOnly = args.validate_only
        if args.IsSpecified('ca'):
            request.issuingCertificateAuthorityId = args.ca

        template_ref = args.CONCEPTS.template.Parse()
        if template_ref:
            if template_ref.locationsId != cert_ref.locationsId:
                raise exceptions.InvalidArgumentException(
                    '--template',
                    'The certificate template must be in the same location as the '
                    'issuing CA Pool.')
            request.certificate.certificateTemplate = template_ref.RelativeName(
            )

        if args.csr:
            request.certificate.pemCsr = _ReadCsr(args.csr)
        else:
            request.certificate.config = self._GenerateCertificateConfig(
                request, args)

        certificate = self.client.projects_locations_caPools_certificates.Create(
            request)

        # Validate-only certs don't have a resource name or pem certificate.
        if args.validate_only:
            return certificate

        status_message = 'Created Certificate'

        if certificate.name:
            status_message += ' [{}]'.format(certificate.name)
        else:
            Create._PrintWarningsForUnpersistedCert(args)

        if certificate.pemCertificate:
            status_message += ' and saved it to [{}]'.format(
                args.cert_output_file)
            _WritePemChain(certificate.pemCertificate,
                           certificate.pemCertificateChain,
                           args.cert_output_file)

        status_message += '.'
        log.status.Print(status_message)
Example #19
    def Run(self, args):
        kms_key_version_ref, ca_ref, issuer_ref = _ParseResourceArgs(args)
        kms_key_ref = kms_key_version_ref.Parent()
        project_ref = ca_ref.Parent().Parent()

        subject_config = flags.ParseSubjectFlags(args, is_ca=True)
        issuing_options = flags.ParseIssuingOptions(args)
        issuance_policy = flags.ParseIssuancePolicy(args)
        reusable_config_wrapper = flags.ParseReusableConfig(args,
                                                            ca_ref.locationsId,
                                                            is_ca=True)
        lifetime = flags.ParseValidityFlag(args)
        labels = labels_util.ParseCreateArgs(
            args, self.messages.CertificateAuthority.LabelsValue)

        iam.CheckCreateCertificateAuthorityPermissions(project_ref,
                                                       kms_key_ref)
        if issuer_ref:
            iam.CheckCreateCertificatePermissions(issuer_ref)

        p4sa_email = p4sa.GetOrCreate(project_ref)
        bucket_ref = storage.CreateBucketForCertificateAuthority(ca_ref)

        p4sa.AddResourceRoleBindings(p4sa_email, kms_key_ref, bucket_ref)

        new_ca = self.messages.CertificateAuthority(
            type=self.messages.CertificateAuthority.TypeValueValuesEnum.
            SUBORDINATE,
            lifetime=lifetime,
            config=self.messages.CertificateConfig(
                reusableConfig=reusable_config_wrapper,
                subjectConfig=subject_config),
            cloudKmsKeyVersion=kms_key_version_ref.RelativeName(),
            certificatePolicy=issuance_policy,
            issuingOptions=issuing_options,
            gcsBucket=bucket_ref.bucket,
            labels=labels)

        operations.Await(
            self.client.projects_locations_certificateAuthorities.Create(
                self.messages.
                PrivatecaProjectsLocationsCertificateAuthoritiesCreateRequest(
                    certificateAuthority=new_ca,
                    certificateAuthorityId=ca_ref.Name(),
                    parent=ca_ref.Parent().RelativeName(),
                    requestId=request_utils.GenerateRequestId())),
            'Creating Certificate Authority.')

        csr_response = self.client.projects_locations_certificateAuthorities.GetCsr(
            self.messages.
            PrivatecaProjectsLocationsCertificateAuthoritiesGetCsrRequest(
                name=ca_ref.RelativeName()))
        csr = csr_response.pemCsr

        if args.create_csr:
            files.WriteFileContents(args.csr_output_file, csr)
            log.status.Print(
                "Created Certificate Authority [{}] and saved CSR to '{}'.".
                format(ca_ref.RelativeName(), args.csr_output_file))
            return

        if issuer_ref:
            ca_certificate = self._SignCsr(issuer_ref, csr, lifetime)
            self._ActivateCertificateAuthority(ca_ref, ca_certificate)
            log.status.Print('Created Certificate Authority [{}].'.format(
                ca_ref.RelativeName()))
            return

        # This should not happen because of the required arg group, but it protects
        # us in case of future additions.
        raise exceptions.OneOfArgumentsRequiredException([
            '--issuer', '--create-csr'
        ], ('To create a subordinate CA, please provide either an issuer or the '
            '--create-csr flag to output a CSR to be signed by another issuer.'
            ))
Example #20
    def _ConstructCreateSettingsFromArgs(cls,
                                         sql_messages,
                                         args,
                                         instance=None,
                                         release_track=DEFAULT_RELEASE_TRACK):
        """Constructs create settings object from base settings and args."""
        original_settings = instance.settings if instance else None
        settings = cls._ConstructBaseSettingsFromArgs(sql_messages, args,
                                                      instance, release_track)

        backup_configuration = (reducers.BackupConfiguration(
            sql_messages,
            instance,
            backup_enabled=args.backup,
            backup_location=args.backup_location,
            backup_start_time=args.backup_start_time,
            enable_bin_log=args.enable_bin_log,
            enable_point_in_time_recovery=args.enable_point_in_time_recovery))
        if backup_configuration:
            cls.AddBackupConfigToSettings(settings, backup_configuration)

        settings.databaseFlags = (reducers.DatabaseFlags(
            sql_messages,
            original_settings,
            database_flags=args.database_flags))

        settings.maintenanceWindow = (reducers.MaintenanceWindow(
            sql_messages,
            instance,
            maintenance_release_channel=args.maintenance_release_channel,
            maintenance_window_day=args.maintenance_window_day,
            maintenance_window_hour=args.maintenance_window_hour))

        if args.deny_maintenance_period_start_date and args.deny_maintenance_period_end_date:
            settings.denyMaintenancePeriods = []
            settings.denyMaintenancePeriods.append(
                reducers.DenyMaintenancePeriod(
                    sql_messages,
                    instance,
                    deny_maintenance_period_start_date=args.
                    deny_maintenance_period_start_date,
                    deny_maintenance_period_end_date=args.
                    deny_maintenance_period_end_date,
                    deny_maintenance_period_time=args.
                    deny_maintenance_period_time))

        if args.storage_type:
            settings.dataDiskType = _ParseStorageType(
                sql_messages, STORAGE_TYPE_PREFIX + args.storage_type)

        # BETA args.
        if _IsBetaOrNewer(release_track):
            settings.userLabels = labels_util.ParseCreateArgs(
                args, sql_messages.Settings.UserLabelsValue)

        # ALPHA args.
        if _IsAlpha(release_track):
            if args.active_directory_domain is not None:
                settings.activeDirectoryConfig = (
                    reducers.ActiveDirectoryConfig(
                        sql_messages, args.active_directory_domain))

        return settings
Example #21
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: argparse.Namespace, All the arguments that were provided to this
        command invocation.

    Raises:
      files.Error: A file argument could not be read.
      GenomicsError: User input was invalid.
      HttpException: An http error response was received while executing api
          request.
    Returns:
      Operation representing the running pipeline.
    """
        pipeline = None
        apitools_client = genomics_util.GetGenomicsClient('v2alpha1')
        genomics_messages = genomics_util.GetGenomicsMessages('v2alpha1')
        if args.pipeline_file:
            if args.command_line:
                # TODO(b/79982664): Use a mutex argument group instead.
                raise exceptions.GenomicsError(
                    '--command-line cannot be used with --pipeline-file.')

            pipeline = genomics_util.GetFileAsMessage(
                args.pipeline_file, genomics_messages.Pipeline,
                self.context[lib.STORAGE_V1_CLIENT_KEY])
        elif args.command_line:
            pipeline = genomics_messages.Pipeline(actions=[
                genomics_messages.Action(imageUri=args.docker_image,
                                         commands=['-c', args.command_line],
                                         entrypoint='bash')
            ])
        else:
            raise exceptions.GenomicsError(
                'Either --pipeline-file or --command-line is required.')

        arg_inputs, is_local_file = _ValidateAndMergeArgInputs(args)

        request = None
        # Create messages up front to avoid checking for None everywhere.
        if not pipeline.resources:
            pipeline.resources = genomics_messages.Resources()
        resources = pipeline.resources

        if not resources.virtualMachine:
            resources.virtualMachine = genomics_messages.VirtualMachine(
                machineType='n1-standard-1')
        virtual_machine = resources.virtualMachine

        if not virtual_machine.serviceAccount:
            virtual_machine.serviceAccount = genomics_messages.ServiceAccount()

        # Always set the project id.
        resources.projectId = genomics_util.GetProjectId()

        # Update the pipeline based on arguments.
        if args.memory or args.cpus:
            # Default to n1-standard-1 sizes.
            virtual_machine.machineType = 'custom-%d-%d' % (
                args.cpus or 1, (args.memory or 3.75) * 1024)

        if args.preemptible:
            virtual_machine.preemptible = args.preemptible

        if args.zones:
            resources.zones = args.zones
        elif not resources.zones and properties.VALUES.compute.zone.Get():
            resources.zones = [properties.VALUES.compute.zone.Get()]

        if args.regions:
            resources.regions = args.regions
        elif not resources.regions and properties.VALUES.compute.region.Get():
            resources.regions = [properties.VALUES.compute.region.Get()]

        if args.service_account_email != 'default':
            virtual_machine.serviceAccount.email = args.service_account_email

        if args.service_account_scopes:
            virtual_machine.serviceAccount.scopes = args.service_account_scopes

        # Always add a scope for GCS in case any arguments need it.
        virtual_machine.serviceAccount.scopes.append(
            'https://www.googleapis.com/auth/devstorage.read_write')

        # Attach custom network/subnetwork (if set).
        if args.network or args.subnetwork:
            if not virtual_machine.network:
                virtual_machine.network = genomics_messages.Network()
            if args.network:
                virtual_machine.network.name = args.network
            if args.subnetwork:
                virtual_machine.network.subnetwork = args.subnetwork

        if args.boot_disk_size is not None:
            if args.boot_disk_size <= 0:
                raise exceptions.GenomicsError(
                    'Boot disk size must be greater than zero.')
            virtual_machine.bootDiskSizeGb = args.boot_disk_size

        # Generate paths for inputs and outputs in a shared location and put them
        # into the environment for actions based on their name.
        env = {}
        if arg_inputs:
            input_generator = _SharedPathGenerator('input')
            for name, value in arg_inputs.items():
                if genomics_util.IsGcsPath(value):
                    env[name] = input_generator.Generate()
                    pipeline.actions.insert(
                        0,
                        genomics_messages.Action(
                            imageUri=CLOUD_SDK_IMAGE,
                            commands=[
                                '/bin/sh', '-c',
                                'gsutil -m -q cp %s ${%s}' % (value, name)
                            ]))
                elif name in is_local_file:
                    # TODO(b/183206325): Get test coverage to 100%.
                    env[name] = input_generator.Generate()
                    pipeline.actions.insert(
                        0,
                        genomics_messages.Action(
                            imageUri=CLOUD_SDK_IMAGE,
                            commands=[
                                '/bin/sh', '-c',
                                'echo "%s" | base64 -d > ${%s}' %
                                (base64.b64encode(
                                    value.encode()).decode(), name)
                            ]))
                else:
                    env[name] = value

        if args.outputs:
            output_generator = _SharedPathGenerator('output')
            for name, value in args.outputs.items():
                env[name] = output_generator.Generate()
                pipeline.actions.append(
                    genomics_messages.Action(imageUri=CLOUD_SDK_IMAGE,
                                             commands=[
                                                 '/bin/sh', '-c',
                                                 'gsutil -m -q cp ${%s} %s' %
                                                 (name, value)
                                             ]))
        if args.env_vars:
            for name, value in args.env_vars.items():
                env[name] = value

        # Merge any existing pipeline arguments into the generated environment and
        # update the pipeline.
        if pipeline.environment:
            for val in pipeline.environment.additionalProperties:
                if val.key not in env:
                    env[val.key] = val.value

        pipeline.environment = genomics_messages.Pipeline.EnvironmentValue(
            additionalProperties=genomics_util.ArgDictToAdditionalPropertiesList(
                env,
                genomics_messages.Pipeline.EnvironmentValue.AdditionalProperty))

        if arg_inputs or args.outputs:
            virtual_machine.disks.append(
                genomics_messages.Disk(name=SHARED_DISK))

            for action in pipeline.actions:
                action.mounts.append(
                    genomics_messages.Mount(disk=SHARED_DISK,
                                            path='/' + SHARED_DISK))

        if args.logging:
            pipeline.actions.append(
                genomics_messages.Action(
                    imageUri=CLOUD_SDK_IMAGE,
                    commands=[
                        '/bin/sh', '-c',
                        'gsutil -m -q cp /google/logs/output ' + args.logging
                    ],
                    flags=[(genomics_messages.Action.
                            FlagsValueListEntryValuesEnum.ALWAYS_RUN)]))

        # Update disk sizes if specified, potentially including the shared disk.
        if args.disk_size:
            disk_sizes = {}
            for disk_encoding in args.disk_size.split(','):
                parts = disk_encoding.split(':', 1)
                try:
                    disk_sizes[parts[0]] = int(parts[1])
                except (IndexError, ValueError):
                    raise exceptions.GenomicsError('Invalid --disk-size.')

            for disk in virtual_machine.disks:
                if disk.name in disk_sizes:
                    disk.sizeGb = disk_sizes[disk.name]

        request = genomics_messages.RunPipelineRequest(
            pipeline=pipeline,
            labels=labels_util.ParseCreateArgs(
                args, genomics_messages.RunPipelineRequest.LabelsValue))

        result = apitools_client.pipelines.Run(request)
        log.status.Print('Running [{0}].'.format(result.name))
        return result
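
The --disk-size block near the end of this example parses a comma-separated list of name:size pairs before applying the sizes to the VM's disks. A standalone sketch of just that parsing step (the helper name is illustrative; the accepted format matches the code):

def parse_disk_sizes(flag_value):
    """Parses 'name:GB[,name:GB...]' into a dict, e.g. 'datadisk:50'."""
    sizes = {}
    for encoding in flag_value.split(','):
        parts = encoding.split(':', 1)
        try:
            sizes[parts[0]] = int(parts[1])
        except (IndexError, ValueError):
            raise ValueError('Invalid --disk-size entry: %r' % encoding)
    return sizes

assert parse_disk_sizes('datadisk:50') == {'datadisk': 50}
assert parse_disk_sizes('a:10,b:20') == {'a': 10, 'b': 20}
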
Example #22
0
    def Run(self, args):
        # We explicitly want to allow --networks='' as a valid option and we need
        # to differentiate between that option and not passing --networks at all.
        if args.visibility == 'public' and args.IsSpecified('networks'):
            raise exceptions.InvalidArgumentException(
                '--networks',
                'If --visibility is set to public (default), setting networks is '
                'not allowed.')
        if args.visibility == 'private' and args.networks is None:
            raise exceptions.RequiredArgumentException('--networks', ("""\
            If --visibility is set to private, a list of networks must be
            provided.
            NOTE: You can provide an empty value ("") for private zones that
            have NO network binding.
            """))

        dns = apis.GetClientInstance('dns', 'v1')
        messages = apis.GetMessagesModule('dns', 'v1')

        registry = util.GetRegistry('v1')

        zone_ref = registry.Parse(args.dns_zone,
                                  params={
                                      'project':
                                      properties.VALUES.core.project.GetOrFail,
                                  },
                                  collection='dns.managedZones')

        visibility = messages.ManagedZone.VisibilityValueValuesEnum(
            args.visibility)
        visibility_config = None
        if visibility == messages.ManagedZone.VisibilityValueValuesEnum.private:
            # Handle explicitly empty networks case (--networks='')
            networks = args.networks if args.networks != [''] else []

            def GetNetworkSelfLink(network):
                return registry.Parse(network,
                                      collection='compute.networks',
                                      params={
                                          'project': zone_ref.project
                                      }).SelfLink()

            network_urls = [GetNetworkSelfLink(n) for n in networks]
            network_configs = [
                messages.ManagedZonePrivateVisibilityConfigNetwork(
                    networkUrl=nurl) for nurl in network_urls
            ]
            visibility_config = messages.ManagedZonePrivateVisibilityConfig(
                networks=network_configs)

        dnssec_config = _MakeDnssecConfig(args, messages)

        labels = labels_util.ParseCreateArgs(args,
                                             messages.ManagedZone.LabelsValue)

        zone = messages.ManagedZone(name=zone_ref.managedZone,
                                    dnsName=util.AppendTrailingDot(
                                        args.dns_name),
                                    description=args.description,
                                    dnssecConfig=dnssec_config,
                                    labels=labels,
                                    visibility=visibility,
                                    privateVisibilityConfig=visibility_config)

        result = dns.managedZones.Create(
            messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                                  project=zone_ref.project))
        log.CreatedResource(zone_ref)
        return [result]
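
The --networks handling above distinguishes three states: the flag absent (None), the flag passed as an explicit empty value (which, per the comment, arrives as ['']), and the flag passed with network names. A plain-Python sketch of that normalization, under the assumption that --networks='' parses to ['']:

def normalize_networks(networks):
    """Maps the parsed --networks value to the list used for the zone."""
    if networks is None:      # flag not passed at all
        return None
    if networks == ['']:      # --networks='' -> private zone with no bindings
        return []
    return networks

assert normalize_networks(None) is None
assert normalize_networks(['']) == []
assert normalize_networks(['default']) == ['default']
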
Example #23
0
    def Run(self, args):
        # We explicitly want to allow --networks='' as a valid option and we need
        # to differentiate between that option and not passing --networks at all.
        if args.visibility == 'public':
            if args.IsSpecified('networks'):
                raise exceptions.InvalidArgumentException(
                    '--networks',
                    'If --visibility is set to public (default), setting networks is '
                    'not allowed.')
        if (args.visibility == 'private' and args.networks is None and
                args.gkeclusters is None):
            raise exceptions.RequiredArgumentException(
                '--networks, --gkeclusters',
                ("""If --visibility is set to private, a list of networks or a
                list of GKE clusters must be provided.
                NOTE: You can provide an empty value ("") for private zones
                that have NO network or GKE cluster bindings.
                """))

        # We explicitly want to allow --gkeclusters='' as an optional flag.
        if args.visibility == 'public' and args.IsSpecified('gkeclusters'):
            raise exceptions.InvalidArgumentException(
                '--gkeclusters',
                'If --visibility is set to public (default), setting gkeclusters is '
                'not allowed.')

        api_version = util.GetApiFromTrackAndArgs(self.ReleaseTrack(), args)
        dns = util.GetApiClient(api_version)
        messages = apis.GetMessagesModule('dns', api_version)
        registry = util.GetRegistry(api_version)

        zone_ref = registry.Parse(args.dns_zone,
                                  util.GetParamsForRegistry(api_version, args),
                                  collection='dns.managedZones')

        visibility_flag = args.visibility
        private_enum = None
        if api_version == 'v2':
            # v2 doesn't set lower_camel_enums, so enums are in upper case
            private_enum = messages.ManagedZone.VisibilityValueValuesEnum.PRIVATE
            visibility_flag = args.visibility.upper()
        else:
            private_enum = messages.ManagedZone.VisibilityValueValuesEnum.private
        visibility = messages.ManagedZone.VisibilityValueValuesEnum(
            visibility_flag)
        visibility_config = None
        if visibility == private_enum:
            # Handle explicitly empty networks case (--networks='')
            networks = (args.networks
                        if args.networks and args.networks != [''] else [])

            def GetNetworkSelfLink(network):
                return registry.Parse(network,
                                      collection='compute.networks',
                                      params={
                                          'project': zone_ref.project
                                      }).SelfLink()

            network_urls = [GetNetworkSelfLink(n) for n in networks]
            network_configs = [
                messages.ManagedZonePrivateVisibilityConfigNetwork(
                    networkUrl=nurl) for nurl in network_urls
            ]

            # Handle the case when '--gkeclusters' is not specified.
            gkeclusters = args.gkeclusters or []

            gkecluster_configs = [
                messages.ManagedZonePrivateVisibilityConfigGKECluster(
                    gkeClusterName=name) for name in gkeclusters
            ]
            visibility_config = messages.ManagedZonePrivateVisibilityConfig(
                networks=network_configs, gkeClusters=gkecluster_configs)

        if args.forwarding_targets or args.private_forwarding_targets:
            forwarding_config = command_util.BetaParseManagedZoneForwardingConfigWithForwardingPath(
                messages=messages,
                server_list=args.forwarding_targets,
                private_server_list=args.private_forwarding_targets)
        else:
            forwarding_config = None

        dnssec_config = _MakeDnssecConfig(args, messages, api_version)
        labels = labels_util.ParseCreateArgs(args,
                                             messages.ManagedZone.LabelsValue)

        peering_config = None
        if args.target_project and args.target_network:
            peering_network = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'.format(
                args.target_project, args.target_network)
            peering_config = messages.ManagedZonePeeringConfig()
            peering_config.targetNetwork = messages.ManagedZonePeeringConfigTargetNetwork(
                networkUrl=peering_network)

        reverse_lookup_config = None
        if args.IsSpecified(
                'managed_reverse_lookup') and args.managed_reverse_lookup:
            reverse_lookup_config = messages.ManagedZoneReverseLookupConfig()

        service_directory_config = None
        if (args.IsSpecified('service_directory_namespace') and
                args.service_directory_namespace):
            service_directory_config = messages.ManagedZoneServiceDirectoryConfig(
                namespace=messages.ManagedZoneServiceDirectoryConfigNamespace(
                    namespaceUrl=args.service_directory_namespace))

        cloud_logging_config = None
        if args.IsSpecified('log_dns_queries'):
            cloud_logging_config = messages.ManagedZoneCloudLoggingConfig()
            cloud_logging_config.enableLogging = args.log_dns_queries

        zone = messages.ManagedZone(
            name=zone_ref.managedZone,
            dnsName=util.AppendTrailingDot(args.dns_name),
            description=args.description,
            dnssecConfig=dnssec_config,
            labels=labels,
            visibility=visibility,
            forwardingConfig=forwarding_config,
            privateVisibilityConfig=visibility_config,
            peeringConfig=peering_config,
            reverseLookupConfig=reverse_lookup_config,
            serviceDirectoryConfig=service_directory_config,
            cloudLoggingConfig=cloud_logging_config)

        request = messages.DnsManagedZonesCreateRequest(
            managedZone=zone, project=zone_ref.project)

        if api_version == 'v2':
            # For a request with location, use v2 api.
            request.location = args.location

        result = dns.managedZones.Create(request)
        log.CreatedResource(zone_ref)
        return [result]
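
Example #23 branches on enum casing because the v2 generated messages expose upper-case visibility names while v1 exposes lower-case ones. The stand-in enums below are illustrative, not the generated classes, but show why only the v2 path upper-cases the flag value:

import enum

class VisibilityV1(enum.Enum):      # stand-in for the v1 generated enum
    public = 'public'
    private = 'private'

class VisibilityV2(enum.Enum):      # stand-in for the v2 generated enum
    PUBLIC = 'PUBLIC'
    PRIVATE = 'PRIVATE'

def to_visibility(flag_value, api_version):
    if api_version == 'v2':
        return VisibilityV2[flag_value.upper()]
    return VisibilityV1[flag_value]

assert to_visibility('private', 'v1') is VisibilityV1.private
assert to_visibility('private', 'v2') is VisibilityV2.PRIVATE
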
Example #24
0
def CreateCAFromArgs(args, is_subordinate):
    """Creates a GA CA object from CA create flags.

  Args:
    args: The parsed argument namespace containing the flag values.
    is_subordinate: If True, a subordinate CA is returned; otherwise a root CA.

  Returns:
    A tuple of (CA object, CA resource ref, issuer ref) describing the CA to
    create.
  """

    client = privateca_base.GetClientInstance(api_version='v1')
    messages = privateca_base.GetMessagesModule(api_version='v1')

    ca_ref, source_ca_ref, issuer_ref = _ParseCAResourceArgs(args)
    pool_ref = ca_ref.Parent()
    source_ca = None

    if source_ca_ref:
        source_ca = client.projects_locations_caPools_certificateAuthorities.Get(
            messages.PrivatecaProjectsLocationsCaPoolsCertificateAuthoritiesGetRequest(
                name=source_ca_ref.RelativeName()))
        if not source_ca:
            raise exceptions.InvalidArgumentException(
                '--from-ca', 'The provided source CA could not be retrieved.')

    ca_pool = client.projects_locations_caPools.Get(
        messages.PrivatecaProjectsLocationsCaPoolsGetRequest(
            name=pool_ref.RelativeName()))

    keyspec = flags_v1.ParseKeySpec(args)
    if ca_pool.tier == messages.CaPool.TierValueValuesEnum.DEVOPS and keyspec.cloudKmsKeyVersion:
        raise exceptions.InvalidArgumentException(
            '--kms-key-version',
            'The DevOps tier does not support user-specified KMS keys.')

    subject_config = messages.SubjectConfig(
        subject=messages.Subject(), subjectAltName=messages.SubjectAltNames())
    if args.IsSpecified('subject'):
        subject_config.subject = flags_v1.ParseSubject(args)
    elif source_ca:
        subject_config.subject = source_ca.config.subjectConfig.subject

    if flags_v1.SanFlagsAreSpecified(args):
        subject_config.subjectAltName = flags_v1.ParseSanFlags(args)
    elif source_ca:
        subject_config.subjectAltName = source_ca.config.subjectConfig.subjectAltName
    flags_v1.ValidateSubjectConfig(subject_config, is_ca=True)

    # Populate x509 params to default.
    x509_parameters = flags_v1.ParseX509Parameters(args, is_ca_command=True)
    if source_ca and not flags_v1.X509ConfigFlagsAreSpecified(args):
        x509_parameters = source_ca.config.x509Config

    # Args.validity will be populated to default if not specified.
    lifetime = flags_v1.ParseValidityFlag(args)
    if source_ca and not args.IsSpecified('validity'):
        lifetime = source_ca.lifetime

    labels = labels_util.ParseCreateArgs(
        args, messages.CertificateAuthority.LabelsValue)

    new_ca = messages.CertificateAuthority(
        type=messages.CertificateAuthority.TypeValueValuesEnum.SUBORDINATE
        if is_subordinate else
        messages.CertificateAuthority.TypeValueValuesEnum.SELF_SIGNED,
        lifetime=lifetime,
        config=messages.CertificateConfig(subjectConfig=subject_config,
                                          x509Config=x509_parameters),
        keySpec=keyspec,
        gcsBucket=None,
        labels=labels)

    return (new_ca, ca_ref, issuer_ref)
Example #25
0
def ConfigureJob(messages, job, args):
    """Add type-specific job configuration to job message."""
    # Parse labels (if present).
    job.labels = labels_util.ParseCreateArgs(args, messages.Job.LabelsValue)
Example #26
0
    def Run(self, args):
        # We explicitly want to allow --networks='' as a valid option and we need
        # to differentiate between that option and not passing --networks at all.
        if args.visibility == 'public' and args.IsSpecified('networks'):
            raise exceptions.InvalidArgumentException(
                '--networks',
                'If --visibility is set to public (default), setting networks is '
                'not allowed.')
        if args.visibility == 'private' and args.networks is None:
            raise exceptions.RequiredArgumentException('--networks', ("""\
            If --visibility is set to private, a list of networks must be
            provided.
            NOTE: You can provide an empty value ("") for private zones that
            have NO network binding.
            """))

        dns = util.GetApiClient('v1')
        messages = apis.GetMessagesModule('dns', 'v1')

        registry = util.GetRegistry('v1')

        zone_ref = registry.Parse(args.dns_zone,
                                  params={
                                      'project':
                                      properties.VALUES.core.project.GetOrFail,
                                  },
                                  collection='dns.managedZones')

        visibility = messages.ManagedZone.VisibilityValueValuesEnum(
            args.visibility)
        visibility_config = None
        if visibility == messages.ManagedZone.VisibilityValueValuesEnum.private:
            # Handle explicitly empty networks case (--networks='')
            networks = args.networks if args.networks != [''] else []

            def GetNetworkSelfLink(network):
                return registry.Parse(network,
                                      collection='compute.networks',
                                      params={
                                          'project': zone_ref.project
                                      }).SelfLink()

            network_urls = [GetNetworkSelfLink(n) for n in networks]
            network_configs = [
                messages.ManagedZonePrivateVisibilityConfigNetwork(
                    networkUrl=nurl) for nurl in network_urls
            ]
            visibility_config = messages.ManagedZonePrivateVisibilityConfig(
                networks=network_configs)

        if args.IsSpecified('forwarding_targets') or args.IsSpecified(
                'private_forwarding_targets'):
            forwarding_config = command_util.ParseManagedZoneForwardingConfigWithForwardingPath(
                messages=messages,
                server_list=args.forwarding_targets,
                private_server_list=args.private_forwarding_targets)
        else:
            forwarding_config = None

        dnssec_config = _MakeDnssecConfig(args, messages)

        labels = labels_util.ParseCreateArgs(args,
                                             messages.ManagedZone.LabelsValue)

        peering_config = None
        if args.target_project and args.target_network:
            peering_network = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'.format(
                args.target_project, args.target_network)
            peering_config = messages.ManagedZonePeeringConfig()
            peering_config.targetNetwork = messages.ManagedZonePeeringConfigTargetNetwork(
                networkUrl=peering_network)

        reverse_lookup_config = None
        if args.IsSpecified(
                'managed_reverse_lookup') and args.managed_reverse_lookup:
            reverse_lookup_config = messages.ManagedZoneReverseLookupConfig()

        service_directory_config = None
        if (args.IsSpecified('service_directory_namespace') and
                args.service_directory_namespace):
            service_directory_config = messages.ManagedZoneServiceDirectoryConfig(
                namespace=messages.ManagedZoneServiceDirectoryConfigNamespace(
                    namespaceUrl=args.service_directory_namespace))

        cloud_logging_config = None
        if args.IsSpecified('log_dns_queries'):
            cloud_logging_config = messages.ManagedZoneCloudLoggingConfig()
            cloud_logging_config.enableLogging = args.log_dns_queries

        zone = messages.ManagedZone(
            name=zone_ref.managedZone,
            dnsName=util.AppendTrailingDot(args.dns_name),
            description=args.description,
            dnssecConfig=dnssec_config,
            labels=labels,
            visibility=visibility,
            forwardingConfig=forwarding_config,
            privateVisibilityConfig=visibility_config,
            peeringConfig=peering_config,
            reverseLookupConfig=reverse_lookup_config,
            serviceDirectoryConfig=service_directory_config,
            cloudLoggingConfig=cloud_logging_config)

        result = dns.managedZones.Create(
            messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                                  project=zone_ref.project))
        log.CreatedResource(zone_ref)
        return [result]
Example #27
0
def ParseCreateLabels(client, args):
  return labels_util.ParseCreateArgs(args, client.version_class.LabelsValue)
Example #28
0
def AddLabels(instance_ref, args, create_request):
    messages = util.GetMessagesForResource(instance_ref)
    create_request.instance.labels = labels_util.ParseCreateArgs(
        args, messages.Instance.LabelsValue)
    return create_request
Example #29
0
def ParseCreateLabels(jobs_client, args):
    return labels_util.ParseCreateArgs(args, jobs_client.job_class.LabelsValue)
Example #30
0
def _ParseLabelsIntoCreateMessage(message, args, api_field):
  labels_cls = _GetLabelsClass(message, api_field)
  labels_field = labels_util.ParseCreateArgs(args, labels_cls)
  arg_utils.SetFieldInMessage(message, api_field, labels_field)
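
For readers without a gcloud SDK checkout, the effect of these ParseCreateArgs calls can be approximated in plain Python: given an args namespace whose labels attribute is a dict (or None when --labels was not passed), build the message's LabelsValue with one sorted AdditionalProperty per key. The classes below are simple stand-ins for the generated apitools messages, and the helper approximates the behavior rather than reproducing the SDK's implementation:

import argparse
from dataclasses import dataclass, field
from typing import Dict, List, Optional


@dataclass
class AdditionalProperty:          # stand-in for LabelsValue.AdditionalProperty
    key: str
    value: str


@dataclass
class LabelsValue:                 # stand-in for a message's LabelsValue class
    additionalProperties: List[AdditionalProperty] = field(default_factory=list)


def parse_create_labels(args, labels_cls) -> Optional[LabelsValue]:
    labels: Optional[Dict[str, str]] = getattr(args, 'labels', None)
    if labels is None:             # --labels not passed: leave the field unset
        return None
    return labels_cls(additionalProperties=[
        AdditionalProperty(key=k, value=labels[k]) for k in sorted(labels)])


args = argparse.Namespace(labels={'env': 'prod', 'team': 'dns'})
labels_value = parse_create_labels(args, LabelsValue)
assert [p.key for p in labels_value.additionalProperties] == ['env', 'team']
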