Example #1
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

        project = properties.VALUES.core.project.Get()
        safe_project = project.replace(':', '_')
        safe_project = safe_project.replace('.', '_')
        # The string 'google' is not allowed in bucket names.
        safe_project = safe_project.replace('google', 'elgoog')

        default_bucket_name = '{}_cloudbuild'.format(safe_project)

        default_gcs_source = False
        if args.gcs_source_staging_dir is None:
            default_gcs_source = True
            args.gcs_source_staging_dir = 'gs://{}/source'.format(
                default_bucket_name)

        default_gcs_log_dir = False
        if args.gcs_log_dir is None:
            default_gcs_log_dir = True
            args.gcs_log_dir = 'gs://{}/logs'.format(default_bucket_name)

        client = cloudbuild_util.GetClientInstance()
        messages = cloudbuild_util.GetMessagesModule()
        registry = self.context['registry']

        gcs_client = storage_api.StorageClient()

        # First, create the build request.
        build_timeout = properties.VALUES.container.build_timeout.Get()

        if build_timeout is not None:
            try:
                # A bare number is interpreted as seconds.
                build_timeout_secs = int(build_timeout)
            except ValueError:
                build_timeout_duration = times.ParseDuration(build_timeout)
                build_timeout_secs = int(build_timeout_duration.total_seconds)
            timeout_str = str(build_timeout_secs) + 's'
        else:
            timeout_str = None

        if args.tag:
            if 'gcr.io/' not in args.tag:
                raise c_exceptions.InvalidArgumentException(
                    '--tag',
                    'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.'
                )
            build_config = messages.Build(
                images=[args.tag],
                steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        args=['build', '--no-cache', '-t', args.tag, '.'],
                    ),
                ],
                timeout=timeout_str,
            )
        elif args.config:
            build_config = config.LoadCloudbuildConfig(args.config, messages)

        if build_config.timeout is None:
            build_config.timeout = timeout_str

        suffix = '.tgz'
        if args.source.startswith('gs://') or os.path.isfile(args.source):
            _, suffix = os.path.splitext(args.source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}_{tag_ish}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            tag_ish='_'.join(build_config.images
                             or ['null']).replace('/', '_'),
            suffix=suffix,
        )
        gcs_source_staging_dir = registry.Parse(args.gcs_source_staging_dir,
                                                collection='storage.objects')

        # We first try to create the bucket, before doing all the checks, in order
        # to avoid a race condition. If we do the check first, an attacker could
        # be lucky enough to create the bucket after the check and before this
        # bucket creation.
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

        # If no bucket is specified (for the source `default_gcs_source` or for the
        # logs `default_gcs_log_dir`), check that the default bucket is also owned
        # by the project (b/33046325).
        if default_gcs_source or default_gcs_log_dir:
            # This request returns only the buckets owned by the project.
            bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
                project=project, prefix=default_bucket_name)
            bucket_list = gcs_client.client.buckets.List(bucket_list_req)
            found_bucket = False
            for bucket in bucket_list.items:
                if bucket.id == default_bucket_name:
                    found_bucket = True
                    break
            if not found_bucket:
                if default_gcs_source:
                    raise c_exceptions.RequiredArgumentException(
                        'gcs-source-staging-dir',
                        'A bucket with name {} already exists and is owned by '
                        'another project. Specify a bucket using '
                        '--gcs-source-staging-dir.'.format(
                            default_bucket_name))
                elif default_gcs_log_dir:
                    raise c_exceptions.RequiredArgumentException(
                        'gcs-log-dir',
                        'A bucket with name {} already exists and is owned by '
                        'another project. Specify a bucket to hold build logs '
                        'using --gcs-log-dir.'.format(default_bucket_name))

        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object

        gcs_source_staging = registry.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if args.source.startswith('gs://'):
            gcs_source = registry.Parse(args.source,
                                        collection='storage.objects')
            staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                   gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(args.source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=args.source))
            if os.path.isdir(args.source):
                source_snapshot = snapshot.Snapshot(args.source)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.Print(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(args.source):
                unused_root, ext = os.path.splitext(args.source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {allowed}'.format(
                            src=args.source,
                            allowed=', '.join(_ALLOWED_SOURCE_EXT)))
                log.status.Print('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}].'.format(
                                     src=args.source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    storage_util.BucketReference.FromBucketUrl(
                        gcs_source_staging.bucket), args.source,
                    gcs_source_staging.object)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))

        gcs_log_dir = registry.Parse(args.gcs_log_dir,
                                     collection='storage.objects')

        if gcs_log_dir.bucket != gcs_source_staging.bucket:
            # Create the logs bucket if it does not yet exist.
            gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
        build_config.logsBucket = 'gs://' + gcs_log_dir.bucket + '/' + gcs_log_dir.object

        log.debug('submitting build: ' + repr(build_config))

        # Start the build.
        op = client.projects_builds.Create(
            messages.CloudbuildProjectsBuildsCreateRequest(
                build=build_config,
                projectId=properties.VALUES.core.project.Get()))
        json = encoding.MessageToJson(op.metadata)
        build = encoding.JsonToMessage(messages.BuildOperationMetadata,
                                       json).build

        build_ref = registry.Create(collection='cloudbuild.projects.builds',
                                    projectId=build.projectId,
                                    id=build.id)

        log.CreatedResource(build_ref)
        if build.logUrl:
            log.status.Print(
                'Logs are permanently available at [{log_url}].'.format(
                    log_url=build.logUrl))
        else:
            log.status.Print('Logs are available in the Cloud Console.')

        # If the command is run --async, we just print out a reference to the build.
        if args.async_:  # `async` is a keyword in Python 3; gcloud spells it async_
            return build

        def _CancelBuildHandler(unused_signal_number, unused_stack_frame):
            log.status.Print('Cancelling...')
            client.projects_builds.Cancel(
                messages.CloudbuildProjectsBuildsCancelRequest(
                    projectId=build_ref.projectId, id=build_ref.id))
            log.status.Print('Cancelled [{r}].'.format(r=str(build_ref)))

        # Otherwise, logs are streamed from GCS.
        with execution_utils.CtrlCSection(_CancelBuildHandler):
            build = cb_logs.CloudBuildClient(client,
                                             messages).Stream(build_ref)

        if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
            log.status.Print(
                'Your build timed out. Use the [--timeout=DURATION] flag to change '
                'the timeout threshold.')

        if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
            raise FailedBuildException(build.status)

        return build
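
A note on the timeout handling near the top of this example: a bare number is
interpreted as seconds, and anything else is handed to the duration parser.
The following is a minimal, self-contained sketch of that normalization; the
regex-based parser is a simplified stand-in for times.ParseDuration, which
accepts a richer duration syntax.

import re

def normalize_timeout(value):
    """Normalize a user-supplied timeout to the 'Ns' form the API expects."""
    if value is None:
        return None
    try:
        # A bare number is interpreted as seconds.
        secs = int(value)
    except ValueError:
        # Simplified stand-in for times.ParseDuration: accepts e.g.
        # '2h', '10m', or '1h30m15s'.
        match = re.fullmatch(r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?', value)
        if not match or not any(match.groups()):
            raise ValueError('unrecognized duration: {!r}'.format(value))
        hours, minutes, seconds = (int(g) if g else 0 for g in match.groups())
        secs = hours * 3600 + minutes * 60 + seconds
    return '{}s'.format(secs)

# normalize_timeout('600') == normalize_timeout('10m') == '600s'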
Example #2
class Create(base.CreateCommand):
    """Create a cluster for running containers."""
    @staticmethod
    def Args(parser):
        _Args(parser)
        _AddAdditionalZonesFlag(parser, deprecated=True)
        flags.AddNodeLocationsFlag(parser)
        flags.AddAddonsFlags(parser)
        flags.AddClusterAutoscalingFlags(parser)
        flags.AddEnableAutoRepairFlag(parser, for_create=True)
        flags.AddEnableKubernetesAlphaFlag(parser)
        flags.AddEnableLegacyAuthorizationFlag(parser)
        flags.AddIPAliasFlags(parser)
        flags.AddLabelsFlag(parser)
        flags.AddLocalSSDFlag(parser)
        flags.AddMaintenanceWindowFlag(parser)
        flags.AddMasterAuthorizedNetworksFlags(parser)
        flags.AddMinCpuPlatformFlag(parser)
        flags.AddNetworkPolicyFlags(parser)
        flags.AddNodeTaintsFlag(parser)
        flags.AddPreemptibleFlag(parser)
        flags.AddDeprecatedClusterNodeIdentityFlags(parser)
        flags.AddPrivateClusterFlags(parser, with_deprecated=False)

    def ParseCreateOptions(self, args):
        return ParseCreateOptionsBase(args)

    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
        if args.async_ and not args.IsSpecified('format'):
            args.format = util.OPERATIONS_FORMAT

        util.CheckKubectlInstalled()

        adapter = self.context['api_adapter']
        location_get = self.context['location_get']
        location = location_get(args)

        cluster_ref = adapter.ParseCluster(args.name, location)
        options = self.ParseCreateOptions(args)

        if options.private_cluster and not (
                options.enable_master_authorized_networks
                or options.master_authorized_networks):
            log.warning(
                '`--private-cluster` makes the master inaccessible from '
                'cluster-external IP addresses, by design. To allow limited '
                'access to the master, see the `--master-authorized-networks` flags '
                'and our documentation on setting up private clusters: '
                'https://cloud.google.com'
                '/kubernetes-engine/docs/how-to/private-clusters')

        if not (options.metadata
                and 'disable-legacy-endpoints' in options.metadata):
            log.warning(
                'Starting in 1.12, default node pools in new clusters '
                'will have their legacy Compute Engine instance metadata '
                'endpoints disabled by default. To create a cluster with '
                'legacy instance metadata endpoints disabled in the default '
                'node pool, run `clusters create` with the flag '
                '`--metadata disable-legacy-endpoints=true`.')

        if options.enable_kubernetes_alpha:
            console_io.PromptContinue(
                message=constants.KUBERNETES_ALPHA_PROMPT,
                throw_if_unattended=True,
                cancel_on_no=True)

        if options.enable_autorepair is not None:
            log.status.Print(
                messages.AutoUpdateUpgradeRepairMessage(
                    options.enable_autorepair, 'autorepair'))

        if options.enable_autoupgrade is not None:
            log.status.Print(
                messages.AutoUpdateUpgradeRepairMessage(
                    options.enable_autoupgrade, 'autoupgrade'))

        if options.accelerators is not None:
            log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

        operation = None
        try:
            operation_ref = adapter.CreateCluster(cluster_ref, options)
            if args.async_:
                return adapter.GetCluster(cluster_ref)

            operation = adapter.WaitForOperation(
                operation_ref,
                'Creating cluster {0} in {1}'.format(cluster_ref.clusterId,
                                                     cluster_ref.zone),
                timeout_s=args.timeout)
            cluster = adapter.GetCluster(cluster_ref)
        except apitools_exceptions.HttpError as error:
            raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

        log.CreatedResource(cluster_ref)
        cluster_url = util.GenerateClusterUrl(cluster_ref)
        log.status.Print('To inspect the contents of your cluster, go to: ' +
                         cluster_url)
        if operation.detail:
            # Non-empty detail on a DONE create operation should be surfaced as
            # a warning to end user.
            log.warning(operation.detail)

        try:
            util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
        except kconfig.MissingEnvVarError as error:
            log.warning(error)

        return [cluster]
Example #3
def Push(image, dest_name, creds, http_obj, src_name,
         session_push_type):
    with session_push_type(dest_name, creds, http_obj) as push:
        push.upload(image)
        log.CreatedResource(dest_name)
    log.UpdatedResource(src_name)
Example #4
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:

    """
    apitools_client = self.context[commands.APITOOLS_CLIENT_KEY]
    bigquery_messages = self.context[commands.BIGQUERY_MESSAGES_MODULE_KEY]
    resource_parser = self.context[commands.BIGQUERY_REGISTRY_KEY]
    project_id = properties.VALUES.core.project.Get(required=True)
    table_resource = resource_parser.Parse(
        args.destination_table, collection='bigquery.tables')
    # TODO(user): Define constants for collection names in one place
    table_reference = message_conversions.TableResourceToReference(
        bigquery_messages, table_resource)

    sources = _ProcessSources(args.source)

    if args.schema:
      table_schema = bigquery_schemas.ReadSchema(args.schema, bigquery_messages)
    elif args.schema_file:
      table_schema = bigquery_schemas.ReadSchemaFile(
          args.schema_file, bigquery_messages)
    else:
      table_schema = None

    normalized_source_format = bigquery_client_helper.NormalizeTextualFormat(
        args.source_format)

    if (not normalized_source_format) or normalized_source_format == 'CSV':
      normalized_quote = (
          args.quote
          and bigquery_client_helper.NormalizeFieldDelimiter(args.quote))
      normalized_skip_leading_rows = args.skip_leading_rows
    else:
      # Server accepts non-None quote and skipLeadingRows only for CSV source
      # format:
      normalized_quote = None
      normalized_skip_leading_rows = None

    load_config = bigquery_messages.JobConfigurationLoad(
        allowJaggedRows=args.allow_jagged_rows,
        allowQuotedNewlines=args.allow_quoted_newlines,
        destinationTable=table_reference,
        encoding=args.encoding and args.encoding.upper(),
        fieldDelimiter=(
            args.field_delimiter
            and bigquery_client_helper.NormalizeFieldDelimiter(
                args.field_delimiter)),
        ignoreUnknownValues=args.ignore_unknown_values,
        maxBadRecords=args.max_bad_records,
        quote=normalized_quote,
        schema=table_schema,
        skipLeadingRows=normalized_skip_leading_rows,
        sourceFormat=normalized_source_format,
        sourceUris=sources if sources[0].startswith('gs://') else [],
        writeDisposition='WRITE_TRUNCATE' if args.replace else None,
    )
    job = job_control.ExecuteJob(
        apitools_client,
        bigquery_messages,
        args,
        configuration=bigquery_messages.JobConfiguration(load=load_config),
        async_=args.async_,  # `async` is reserved in Python 3; rename assumed
        project_id=project_id,
        upload_file=None if sources[0].startswith('gs://') else sources[0],
        job_id=job_ids.JobIdProvider().GetJobId(
            args.job_id, args.fingerprint_job_id))
    if args.async_:
      job_resource = resource_parser.Create(
          'bigquery.jobs',
          projectId=job.jobReference.projectId,
          jobId=job.jobReference.jobId)
      log.CreatedResource(job_resource)
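
The source handling in this load command comes down to one dispatch rule:
gs:// URIs travel in the job configuration as sourceUris, while a local path
is uploaded alongside the request and sourceUris stays empty. A hedged,
self-contained sketch of that split (the helper name is illustrative, not
part of the bigquery surface):

def split_load_sources(sources):
    """Return (source_uris, upload_file) for a BigQuery load job.

    Cloud Storage URIs go into the job config; a local file is instead
    uploaded with the request. Mixing the two is not handled here.
    """
    if sources and sources[0].startswith('gs://'):
        return sources, None
    return [], sources[0] if sources else None

# split_load_sources(['gs://bucket/a.csv']) -> (['gs://bucket/a.csv'], None)
# split_load_sources(['./data.csv'])        -> ([], './data.csv')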
Example #5
def RunBaseCreateCommand(args):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked
        with.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: An HTTP error response was received while executing an API
        request.
    ToolException: An error other than an HTTP error occurred while executing
        the command.
  """
    client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')
    instance_resource = instances.InstancesV1Beta4.ConstructInstanceFromArgs(
        sql_messages, args, instance_ref=instance_ref)

    if args.pricing_plan == 'PACKAGE':
        if not console_io.PromptContinue(
                'Charges will begin accruing immediately. Really create Cloud '
                'SQL instance?'):
            raise exceptions.ToolException('canceled by the user.')

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client, operation_ref, 'Creating Cloud SQL instance')

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', str(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
Example #6
    def Run(self, args):
        client = registrations.RegistrationsClient()

        registration_ref = args.CONCEPTS.registration.Parse()
        location_ref = registration_ref.Parent()

        labels = labels_util.ParseCreateArgs(
            args, client.messages.Registration.LabelsValue)

        name_servers = util.ParseNameServers(args.name_servers,
                                             args.cloud_dns_zone,
                                             registration_ref.registrationsId)
        registrant_contact = util.ParseWhoisContact(
            args.registrant_contact_from_file)
        if registrant_contact is None:
            registrant_contact = util.PromptForWhoisContact()
        if registrant_contact is None:
            raise exceptions.Error(
                'Registrant contact is required. It can be provided '
                'interactively or through the --registrant-contact-from-file '
                'flag.')

        availability = client.CheckAvailability(
            location_ref, registration_ref.registrationsId).availability

        if availability.available != client.availability_enum.AVAILABLE:
            raise exceptions.Error(
                'Domain [{}] is not available for registration: [{}]'.format(
                    registration_ref.registrationsId, availability.available))

        whois_privacy = util.ParseWhoisPrivacy(args.whois_privacy)
        if whois_privacy is None:
            whois_privacy = util.PromptForWhoisPrivacy(
                availability.supportedWhoisPrivacy)

        hsts_notice_accepted = False
        if client.notices_enum.HSTS_PRELOADED in availability.notices:
            console_io.PromptContinue((
                '{} is a secure namespace. You may purchase {} now but it will '
                'require an SSL certificate for website connection.').format(
                    util.DomainNamespace(availability.domainName),
                    availability.domainName),
                                      throw_if_unattended=True,
                                      cancel_on_no=True)
            hsts_notice_accepted = True

        console_io.PromptContinue(
            'Yearly registration price: {}\n'
            'Yearly renewal price: {}\n'.format(
                util.TransformMoneyType(availability.registrationPrice),
                util.TransformMoneyType(availability.renewalPrice)),
            throw_if_unattended=True,
            cancel_on_no=True)

        response = client.Create(
            location_ref,
            registration_ref.registrationsId,
            name_servers=name_servers,
            registrant_contact=registrant_contact,
            whois_privacy=whois_privacy,
            registration_price=availability.registrationPrice,
            renewal_price=availability.renewalPrice,
            hsts_notice_accepted=hsts_notice_accepted,
            labels=labels,
            validate_only=args.validate_only)

        if args.validate_only:
            # TODO(b/110077203): Log something sensible.
            return

        if args.async_:
            # TODO(b/110077203): Log something sensible.
            return response

        operations_client = operations.Client.FromApiVersion('v1alpha1')
        operation_ref = util.ParseOperation(response.name)
        response = operations_client.WaitForOperation(
            operation_ref,
            'Waiting for [{}] to complete'.format(operation_ref.Name()))

        log.CreatedResource(registration_ref.Name(), 'registration')
        return response
Example #7
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """

    client = cloudbuild_util.GetClientInstanceAlpha()
    messages = cloudbuild_util.GetMessagesModuleAlpha()

    parent = properties.VALUES.core.project.Get(required=True)

    # Get the workerpool proto from either the flags or the specified file.
    wp = messages.WorkerPool()
    if args.config_from_file is not None:
      wp = workerpool_config.LoadWorkerpoolConfigFromPath(
          args.config_from_file, messages)
    else:
      wp.name = args.WORKER_POOL
      if args.worker_count is not None:
        try:
          wp.workerCount = int(args.worker_count)
        except ValueError as e:
          raise c_exceptions.InvalidArgumentException('--worker-count', e)
      if args.regions is not None:
        for region_str in args.regions:
          region = Create._region_choice_to_enum[region_str]
          wp.regions.append(region)
      worker_config = messages.WorkerConfig()
      if args.worker_machine_type is not None:
        worker_config.machineType = args.worker_machine_type
      if args.worker_disk_size is not None:
        worker_config.diskSizeGb = compute_utils.BytesToGb(
            args.worker_disk_size)
      if any([
          args.worker_network_project is not None,
          args.worker_network_name is not None,
          args.worker_network_subnet is not None
      ]):
        if not all([
            args.worker_network_project is not None,
            args.worker_network_name is not None,
            args.worker_network_subnet is not None
        ]):
          raise c_exceptions.RequiredArgumentException(
              '--worker_network_*',
              'The flags --worker_network_project, --worker_network_name, and '
              '--worker_network_subnet must all be set if any of them are set.')
        # At this point all network flags are set, but possibly empty string.
        # The API handles default values.
        network = messages.Network()
        network.projectId = args.worker_network_project
        network.network = args.worker_network_name
        network.subnetwork = args.worker_network_subnet
        worker_config.network = network
      if args.worker_tag is not None:
        worker_config.tag = args.worker_tag
      wp.workerConfig = worker_config

    # Get the parent project ref
    parent_resource = resources.REGISTRY.Create(
        collection='cloudbuild.projects', projectId=parent)

    # Send the Create request
    created_wp = client.projects_workerPools.Create(
        messages.CloudbuildProjectsWorkerPoolsCreateRequest(
            workerPool=wp, parent=parent_resource.RelativeName()))

    # Get the workerpool ref
    wp_resource = resources.REGISTRY.Parse(
        None,
        collection='cloudbuild.projects.workerPools',
        api_version='v1alpha1',
        params={
            'projectsId': parent,
            'workerPoolsId': wp.name,
        })
    log.CreatedResource(wp_resource)

    return created_wp
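
The network-flag validation in this example is an all-or-none flag group: if
any flag in the group is set, every flag in the group must be. A minimal
sketch of the same check, using a plain dict in place of the argparse
namespace (the helper is illustrative, not gcloud API):

def validate_all_or_none(flags):
    """flags: dict mapping flag name to value, where None means unset."""
    provided = [name for name, value in flags.items() if value is not None]
    if provided and len(provided) != len(flags):
        missing = sorted(set(flags) - set(provided))
        raise ValueError('flags {} must be set together; missing {}'.format(
            sorted(flags), missing))

# Raises, because only one of the three related flags is set:
# validate_all_or_none({'--worker_network_project': 'my-project',
#                       '--worker_network_name': None,
#                       '--worker_network_subnet': None})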
Example #8
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
    if args.async_ and not args.IsSpecified('format'):
      args.format = util.OPERATIONS_FORMAT

    util.CheckKubectlInstalled()

    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)

    cluster_ref = adapter.ParseCluster(args.name, location)
    options = self.ParseCreateOptions(args)

    if options.private_cluster and not (
        options.enable_master_authorized_networks or
        options.master_authorized_networks):
      log.warning(
          '`--private-cluster` makes the master inaccessible from '
          'cluster-external IP addresses, by design. To allow limited '
          'access to the master, see the `--master-authorized-networks` flags '
          'and our documentation on setting up private clusters: '
          'https://cloud.google.com'
          '/kubernetes-engine/docs/how-to/private-clusters')

    if not (options.metadata and
            'disable-legacy-endpoints' in options.metadata):
      log.warning('Starting in 1.12, default node pools in new clusters '
                  'will have their legacy Compute Engine instance metadata '
                  'endpoints disabled by default. To create a cluster with '
                  'legacy instance metadata endpoints disabled in the default '
                  'node pool, run `clusters create` with the flag '
                  '`--metadata disable-legacy-endpoints=true`.')

    if options.enable_ip_alias:
      log.warning(
          'The Pod address range limits the maximum size of the cluster. '
          'Please refer to https://cloud.google.com/kubernetes-engine/docs/how-to/flexible-pod-cidr to learn how to optimize IP address allocation.'
      )
    else:
      max_node_number = util.CalculateMaxNodeNumberByPodRange(
          options.cluster_ipv4_cidr)
      if max_node_number > 0:
        log.warning(
            'Your Pod address range (`--cluster-ipv4-cidr`) can accommodate at most %d node(s). '
            % max_node_number)

    if options.enable_kubernetes_alpha:
      console_io.PromptContinue(
          message=constants.KUBERNETES_ALPHA_PROMPT,
          throw_if_unattended=True,
          cancel_on_no=True)

    if options.enable_autorepair is not None:
      log.status.Print(
          messages.AutoUpdateUpgradeRepairMessage(options.enable_autorepair,
                                                  'autorepair'))

    if options.accelerators is not None:
      log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

    operation = None
    try:
      operation_ref = adapter.CreateCluster(cluster_ref, options)
      if args.async_:
        return adapter.GetCluster(cluster_ref)

      operation = adapter.WaitForOperation(
          operation_ref,
          'Creating cluster {0} in {1}'.format(cluster_ref.clusterId,
                                               cluster_ref.zone),
          timeout_s=args.timeout)
      cluster = adapter.GetCluster(cluster_ref)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

    log.CreatedResource(cluster_ref)
    cluster_url = util.GenerateClusterUrl(cluster_ref)
    log.status.Print('To inspect the contents of your cluster, go to: ' +
                     cluster_url)
    if operation.detail:
      # Non-empty detail on a DONE create operation should be surfaced as
      # a warning to end user.
      log.warning(operation.detail)

    try:
      util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
    except kconfig.MissingEnvVarError as error:
      log.warning(error)

    return [cluster]
Example #9
    def Run(self, args):
        """Creates an SSL certificate for a Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the create
      operation if the create was successful.
    Raises:
      HttpException: An HTTP error response was received while executing an
          API request.
      ToolException: An error other than an HTTP error occurred while
          executing the command.
    """

        if os.path.exists(args.cert_file):
            raise exceptions.ToolException(
                'file [{path}] already exists'.format(path=args.cert_file))

        # First check if args.cert_file is writable. If not, abort and don't
        # create the useless cert.
        try:
            with files.OpenForWritingPrivate(args.cert_file) as cf:
                cf.write('placeholder\n')
        except (files.Error, OSError) as e:
            raise exceptions.ToolException(
                'unable to write [{path}]: {error}'.format(path=args.cert_file,
                                                           error=str(e)))

        sql_client = self.context['sql_client']
        sql_messages = self.context['sql_messages']
        resources = self.context['registry']

        validate.ValidateInstanceName(args.instance)
        instance_ref = resources.Parse(args.instance,
                                       collection='sql.instances')

        # TODO(user): figure out how to rectify the common_name and the
        # sha1fingerprint, so that things can work with the resource parser.

        result = sql_client.sslCerts.Insert(
            sql_messages.SqlSslCertsInsertRequest(
                project=instance_ref.project,
                instance=instance_ref.instance,
                sslCertsInsertRequest=sql_messages.SslCertsInsertRequest(
                    commonName=args.common_name)))

        private_key = result.clientCert.certPrivateKey

        with files.OpenForWritingPrivate(args.cert_file) as cf:
            cf.write(private_key)
            cf.write('\n')

        cert_ref = resources.Create(
            collection='sql.sslCerts',
            project=instance_ref.project,
            instance=instance_ref.instance,
            sha1Fingerprint=result.clientCert.certInfo.sha1Fingerprint)

        log.CreatedResource(cert_ref)
        return result.clientCert.certInfo
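
The write-a-placeholder step at the top of this example is worth isolating:
by proving the output path is writable before the server-side insert, the
command fails fast instead of leaving behind a certificate whose private key
can never be retrieved again. A sketch of that pre-check with plain file I/O
(open() here is a stand-in for files.OpenForWritingPrivate, which also
restricts permissions):

import os

def ensure_writable(path):
    # Fail before doing any remote work if the output path is unusable.
    if os.path.exists(path):
        raise OSError('file [{}] already exists'.format(path))
    with open(path, 'w') as f:
        f.write('placeholder\n')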
Example #10
def LogResource(request, async_):
    # `async` is a reserved word in Python 3; the parameter and keyword are
    # spelled async_ here (rename assumed, matching the args.async_ style
    # used in Example #8).
    log.CreatedResource(request.typeProvider.name,
                        kind='type_provider',
                        async_=async_)
Example #11
    def Run(self, args):
        api_version = util.GetApiFromTrackAndArgs(self.ReleaseTrack(), args)

        if not os.path.exists(args.records_file):
            raise import_util.RecordsFileNotFound(
                'Specified record file [{0}] not found.'.format(
                    args.records_file))
        if os.path.isdir(args.records_file):
            raise import_util.RecordsFileIsADirectory(
                'Specified record file [{0}] is a directory'.format(
                    args.records_file))

        dns = util.GetApiClient(api_version)

        # Get the managed-zone.
        zone_ref = util.GetRegistry(api_version).Parse(
            args.zone,
            params=util.GetParamsForRegistry(api_version, args),
            collection='dns.managedZones')

        try:
            get_request = dns.MESSAGES_MODULE.DnsManagedZonesGetRequest(
                project=zone_ref.project, managedZone=zone_ref.managedZone)

            if api_version == 'v2' and self._IsBetaOrAlpha():
                get_request.location = args.location

            zone = dns.managedZones.Get(get_request)
        except apitools_exceptions.HttpError as error:
            raise calliope_exceptions.HttpException(error)

        # Get the current record-sets.
        current = {}
        list_request = dns.MESSAGES_MODULE.DnsResourceRecordSetsListRequest(
            project=zone_ref.project, managedZone=zone_ref.Name())

        if api_version == 'v2':
            list_request.location = args.location

        for record in list_pager.YieldFromList(dns.resourceRecordSets,
                                               list_request,
                                               field='rrsets'):
            current[(record.name, record.type)] = record

        # Get the imported record-sets.
        try:
            with files.FileReader(args.records_file) as import_file:
                if args.zone_file_format:
                    imported = import_util.RecordSetsFromZoneFile(
                        import_file, zone.dnsName, api_version=api_version)
                else:
                    imported = import_util.RecordSetsFromYamlFile(
                        import_file,
                        include_extended_records=self._IsAlpha(),
                        api_version=api_version)
        except Exception as exp:
            msg = (
                'Unable to read record-sets from specified records-file [{0}] '
                'because [{1}]')
            msg = msg.format(args.records_file, exp)  # .message is gone in Python 3
            raise import_util.UnableToReadRecordsFile(msg)

        # Get the change resulting from the imported record-sets.
        change = import_util.ComputeChange(current,
                                           imported,
                                           args.delete_all_existing,
                                           zone.dnsName,
                                           args.replace_origin_ns,
                                           api_version=api_version)
        if not change:
            msg = 'Nothing to do, all the records in [{0}] already exist.'.format(
                args.records_file)
            log.status.Print(msg)
            return None

        # Send the change to the service.
        create_request = dns.MESSAGES_MODULE.DnsChangesCreateRequest(
            change=change, managedZone=zone.name, project=zone_ref.project)

        if api_version == 'v2' and self._IsBetaOrAlpha():
            create_request.location = args.location

        result = dns.changes.Create(create_request)
        param = util.GetParamsForRegistry(api_version,
                                          args,
                                          parent='managedZones')
        param['changeId'] = result.id
        change_ref = util.GetRegistry(api_version).Parse(
            line=None, collection='dns.changes', params=param)
        msg = 'Imported record-sets from [{0}] into managed-zone [{1}].'.format(
            args.records_file, zone_ref.Name())
        log.status.Print(msg)
        log.CreatedResource(change_ref)
        return result
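
The import flow above reduces to a diff: list the record-sets that exist,
parse the ones being imported, and only send a change when the two differ. A
toy sketch of that diff, keyed on (name, type) like the current dict in the
example; the real import_util.ComputeChange additionally handles
delete_all_existing and origin NS replacement.

def compute_change(current, imported):
    """current, imported: dicts of (name, rtype) -> rrdatas tuple.

    Returns (additions, deletions); both empty means nothing to do.
    """
    additions = {k: v for k, v in imported.items() if current.get(k) != v}
    # Replacing an existing record-set means deleting the old one too.
    deletions = {k: current[k] for k in additions if k in current}
    return additions, deletions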
Example #12
def RunBaseCreateCommand(args, release_track):
  """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: An HTTP error response was received while executing an API
        request.
    ArgumentError: An argument supplied by the user was incorrect, such as
      specifying an invalid CMEK configuration or attempting to create a V1
      instance.
    RequiredArgumentException: A required argument was not supplied by the user,
      such as omitting --root-password on a SQL Server instance.
  """
  client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
  sql_client = client.sql_client
  sql_messages = client.sql_messages

  validate.ValidateInstanceName(args.instance)
  validate.ValidateInstanceLocation(args)
  instance_ref = client.resource_parser.Parse(
      args.instance,
      params={'project': properties.VALUES.core.project.GetOrFail},
      collection='sql.instances')

  # Get the region, tier, and database version from the master if these fields
  # are not specified.
  # TODO(b/64266672): Remove once API does not require these fields.
  if args.IsSpecified('master_instance_name'):
    master_instance_ref = client.resource_parser.Parse(
        args.master_instance_name,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')
    try:
      master_instance_resource = sql_client.instances.Get(
          sql_messages.SqlInstancesGetRequest(
              project=instance_ref.project,
              instance=master_instance_ref.instance))
    except apitools_exceptions.HttpError as error:
      # TODO(b/64292220): Remove once API gives helpful error message.
      log.debug('operation : %s', six.text_type(master_instance_ref))
      exc = exceptions.HttpException(error)
      if resource_property.Get(exc.payload.content,
                               resource_lex.ParseKey('error.errors[0].reason'),
                               None) == 'notAuthorized':
        msg = ('You are either not authorized to access the master instance or '
               'it does not exist.')
        raise exceptions.HttpException(msg)
      raise
    if not args.IsSpecified('region'):
      args.region = master_instance_resource.region
    if not args.IsSpecified('database_version'):
      args.database_version = master_instance_resource.databaseVersion.name
    if not args.IsSpecified('tier') and not (
        args.IsSpecified('cpu') or
        args.IsSpecified('memory')) and master_instance_resource.settings:
      args.tier = master_instance_resource.settings.tier

    # Validate master/replica CMEK configurations.
    if master_instance_resource.diskEncryptionConfiguration:
      if args.region == master_instance_resource.region:
        # Warn user that same-region replicas inherit their master's CMEK
        # configuration.
        command_util.ShowCmekWarning('replica', 'the master instance')
      elif not args.IsSpecified('disk_encryption_key'):
        # Raise error that cross-region replicas require their own CMEK key if
        # the master is CMEK.
        raise exceptions.RequiredArgumentException(
            '--disk-encryption-key',
            '`--disk-encryption-key` is required when creating a cross-region '
            'replica of an instance with customer-managed encryption.')
      else:
        command_util.ShowCmekWarning('replica')
    elif args.IsSpecified('disk_encryption_key'):
      # Raise error that cross-region replicas cannot be CMEK encrypted if their
      # master is not.
      raise sql_exceptions.ArgumentError(
          '`--disk-encryption-key` cannot be specified when creating a replica '
          'of an instance without customer-managed encryption.')

  # --root-password is required when creating SQL Server instances
  if args.IsSpecified('database_version') and args.database_version.startswith(
      'SQLSERVER') and not args.IsSpecified('root_password'):
    raise exceptions.RequiredArgumentException(
        '--root-password',
        '`--root-password` is required when creating SQL Server instances.')

  if not args.backup:
    if args.IsSpecified('enable_bin_log'):
      raise sql_exceptions.ArgumentError(
          '`--enable-bin-log` cannot be specified when --no-backup is '
          'specified')
    elif args.IsSpecified('enable_point_in_time_recovery'):
      raise sql_exceptions.ArgumentError(
          '`--enable-point-in-time-recovery` cannot be specified when '
          '--no-backup is specified')
  if release_track == base.ReleaseTrack.ALPHA:
    if args.IsSpecified('workload_tier'):
      if not (args.IsSpecified('cpu') and args.IsSpecified('memory')):
        raise sql_exceptions.ArgumentError(
            '`--workload-tier` requires `--cpu` and `--memory`')

  instance_resource = (
      command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
          sql_messages,
          args,
          instance_ref=instance_ref,
          release_track=release_track))

  # TODO(b/122660263): Remove when V1 instances are no longer supported.
  # V1 instances are deprecated.
  # Note that the exception type is intentionally vague because the user may not
  # have directly supplied the offending argument.  For example, creating a read
  # replica defaults its tier to that of its master.
  if api_util.IsInstanceV1(sql_messages, instance_resource):
    raise sql_exceptions.ArgumentError(
        'First Generation instances can no longer be created.')

  operation_ref = None
  try:
    result_operation = sql_client.instances.Insert(
        sql_messages.SqlInstancesInsertRequest(
            databaseInstance=instance_resource,
            project=instance_ref.project))

    operation_ref = client.resource_parser.Create(
        'sql.operations',
        operation=result_operation.name,
        project=instance_ref.project)

    if args.async_:
      if not args.IsSpecified('format'):
        args.format = 'default'
      return sql_client.operations.Get(
          sql_messages.SqlOperationsGetRequest(
              project=operation_ref.project, operation=operation_ref.operation))

    operations.OperationsV1Beta4.WaitForOperation(
        sql_client,
        operation_ref,
        'Creating Cloud SQL instance',
        # TODO(b/138403566): Remove the override once we improve creation times.
        max_wait_seconds=680)

    log.CreatedResource(instance_ref)

    new_resource = sql_client.instances.Get(
        sql_messages.SqlInstancesGetRequest(
            project=instance_ref.project, instance=instance_ref.instance))
    return new_resource
  except apitools_exceptions.HttpError as error:
    log.debug('operation : %s', six.text_type(operation_ref))
    exc = exceptions.HttpException(error)
    if resource_property.Get(exc.payload.content,
                             resource_lex.ParseKey('error.errors[0].reason'),
                             None) == 'errorMaxInstancePerLabel':
      msg = resource_property.Get(exc.payload.content,
                                  resource_lex.ParseKey('error.message'), None)
      raise exceptions.HttpException(msg)
    raise
Example #13
def RunBaseCreateCommand(args, release_track):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked
        with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: An HTTP error response was received while executing an API
        request.
  """
    client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')

    # Get the region, tier, and database version from the master if these fields
    # are not specified.
    # TODO(b/64266672): Remove once API does not require these fields.
    if args.IsSpecified('master_instance_name'):
        master_instance_ref = client.resource_parser.Parse(
            args.master_instance_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        try:
            master_instance_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=master_instance_ref.instance))
        except apitools_exceptions.HttpError as error:
            # TODO(b/64292220): Remove once API gives helpful error message.
            log.debug('operation : %s', str(master_instance_ref))
            exc = exceptions.HttpException(error)
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'notAuthorized':
                msg = (
                    'You are either not authorized to access the master instance or '
                    'it does not exist.')
                raise exceptions.HttpException(msg)
            raise
        if not args.IsSpecified('region'):
            args.region = master_instance_resource.region
        if not args.IsSpecified('database_version'):
            args.database_version = master_instance_resource.databaseVersion
        if not args.IsSpecified('tier') and master_instance_resource.settings:
            args.tier = master_instance_resource.settings.tier

    instance_resource = (
        command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
            sql_messages,
            args,
            instance_ref=instance_ref,
            release_track=release_track))

    if args.pricing_plan == 'PACKAGE':
        console_io.PromptContinue(
            'Charges will begin accruing immediately. Really create Cloud '
            'SQL instance?',
            cancel_on_no=True)

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client, operation_ref, 'Creating Cloud SQL instance')

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', str(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
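
Both Cloud SQL create commands end with the same recovery pattern: dig
error.errors[0].reason out of the HTTP error payload and re-raise a
friendlier message for reasons they recognize. A hedged sketch of that lookup
over a plain dict payload (the real code goes through resource_property and
resource_lex instead of direct indexing):

def friendlier_message(payload):
    """payload: decoded JSON error body; returns a better message or None."""
    errors = payload.get('error', {}).get('errors', [])
    reason = errors[0].get('reason') if errors else None
    if reason == 'errorMaxInstancePerLabel':
        return payload.get('error', {}).get('message')
    return None  # caller should re-raise the original error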
Example #14
    def Run(self, args):
        project_id = command_lib.GetProjectResourceName()

        namespace = policy_api.Create(project_id, args.kubernetes_name)
        log.CreatedResource(namespace.name)
        return namespace
Example #15
    def Run(self, args):
        self.ValidateArgs(args)

        client = self.context['dataproc_client']
        messages = self.context['dataproc_messages']

        cluster_ref = util.ParseCluster(args.name, self.context)

        compute_resources = compute_helpers.GetComputeResources(
            self.ReleaseTrack(), args.name)

        master_accelerator_type = None
        worker_accelerator_type = None
        master_accelerator_count = None
        worker_accelerator_count = None
        if self.ReleaseTrack() == base.ReleaseTrack.BETA:
            if args.master_accelerator:
                master_accelerator_type = args.master_accelerator['type']
                master_accelerator_count = args.master_accelerator.get(
                    'count', 1)
            if args.worker_accelerator:
                worker_accelerator_type = args.worker_accelerator['type']
                worker_accelerator_count = args.worker_accelerator.get(
                    'count', 1)

        # Resolve GCE resources
        zone_ref = compute_resources.Create(
            'compute.zones',
            project=cluster_ref.projectId,
            zone=properties.VALUES.compute.zone.GetOrFail())
        image_ref = args.image and compute_resources.Parse(
            args.image,
            params={'project': cluster_ref.projectId},
            collection='compute.images')
        master_machine_type_ref = (args.master_machine_type
                                   and compute_resources.Parse(
                                       args.master_machine_type,
                                       params={
                                           'project': cluster_ref.projectId,
                                           'zone': zone_ref.Name(),
                                       },
                                       collection='compute.machineTypes'))
        worker_machine_type_ref = (args.worker_machine_type
                                   and compute_resources.Parse(
                                       args.worker_machine_type,
                                       params={
                                           'project': cluster_ref.projectId,
                                           'zone': zone_ref.Name(),
                                       },
                                       collection='compute.machineTypes'))
        network_ref = args.network and compute_resources.Parse(
            args.network,
            params={'project': cluster_ref.projectId},
            collection='compute.networks')
        subnetwork_ref = args.subnet and compute_resources.Parse(
            args.subnet,
            params={
                'project': cluster_ref.projectId,
                'region': properties.VALUES.compute.region.GetOrFail,
            },
            collection='compute.subnetworks')
        master_accelerator_type_ref = (
            master_accelerator_type
            and compute_resources.Parse(master_accelerator_type,
                                        params={
                                            'project': cluster_ref.projectId,
                                            'zone': zone_ref.Name(),
                                        },
                                        collection='compute.acceleratorTypes'))
        worker_accelerator_type_ref = (
            worker_accelerator_type
            and compute_resources.Parse(worker_accelerator_type,
                                        params={
                                            'project': cluster_ref.projectId,
                                            'zone': zone_ref.Name(),
                                        },
                                        collection='compute.acceleratorTypes'))

        init_actions = []
        timeout_str = str(args.initialization_action_timeout) + 's'
        if args.initialization_actions:
            init_actions = [
                messages.NodeInitializationAction(executableFile=exe,
                                                  executionTimeout=timeout_str)
                for exe in args.initialization_actions
            ]
        expanded_scopes = compute_helpers.ExpandScopeAliases(args.scopes)

        software_config = messages.SoftwareConfig(
            imageVersion=args.image_version)

        master_boot_disk_size_gb = args.master_boot_disk_size_gb
        if args.master_boot_disk_size:
            master_boot_disk_size_gb = (api_utils.BytesToGb(
                args.master_boot_disk_size))

        worker_boot_disk_size_gb = args.worker_boot_disk_size_gb
        if args.worker_boot_disk_size:
            worker_boot_disk_size_gb = (api_utils.BytesToGb(
                args.worker_boot_disk_size))

        preemptible_worker_boot_disk_size_gb = (api_utils.BytesToGb(
            args.preemptible_worker_boot_disk_size))

        if args.single_node:
            args.properties[constants.ALLOW_ZERO_WORKERS_PROPERTY] = 'true'

        if args.properties:
            software_config.properties = encoding.DictToMessage(
                args.properties, messages.SoftwareConfig.PropertiesValue)

        gce_cluster_config = messages.GceClusterConfig(
            networkUri=network_ref and network_ref.SelfLink(),
            subnetworkUri=subnetwork_ref and subnetwork_ref.SelfLink(),
            serviceAccount=args.service_account,
            serviceAccountScopes=expanded_scopes,
            zoneUri=zone_ref and zone_ref.SelfLink())

        if args.tags:
            gce_cluster_config.tags = args.tags

        if args.metadata:
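            # --metadata may be repeated, so args.metadata arrives as a list
            # of dicts; merge them into one dict before encoding.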
            flat_metadata = dict(
                (k, v) for d in args.metadata for k, v in d.items())
            gce_cluster_config.metadata = encoding.DictToMessage(
                flat_metadata, messages.GceClusterConfig.MetadataValue)

        master_accelerators = []
        if master_accelerator_type:
            master_accelerators.append(
                messages.AcceleratorConfig(
                    acceleratorTypeUri=master_accelerator_type_ref
                    and master_accelerator_type_ref.SelfLink(),
                    acceleratorCount=master_accelerator_count))
        worker_accelerators = []
        if worker_accelerator_type:
            worker_accelerators.append(
                messages.AcceleratorConfig(
                    acceleratorTypeUri=worker_accelerator_type_ref
                    and worker_accelerator_type_ref.SelfLink(),
                    acceleratorCount=worker_accelerator_count))

        cluster_config = messages.ClusterConfig(
            configBucket=args.bucket,
            gceClusterConfig=gce_cluster_config,
            masterConfig=messages.InstanceGroupConfig(
                numInstances=args.num_masters,
                imageUri=image_ref and image_ref.SelfLink(),
                machineTypeUri=master_machine_type_ref
                and master_machine_type_ref.SelfLink(),
                accelerators=master_accelerators,
                diskConfig=messages.DiskConfig(
                    bootDiskSizeGb=master_boot_disk_size_gb,
                    numLocalSsds=args.num_master_local_ssds,
                ),
            ),
            workerConfig=messages.InstanceGroupConfig(
                numInstances=args.num_workers,
                imageUri=image_ref and image_ref.SelfLink(),
                machineTypeUri=worker_machine_type_ref
                and worker_machine_type_ref.SelfLink(),
                accelerators=worker_accelerators,
                diskConfig=messages.DiskConfig(
                    bootDiskSizeGb=worker_boot_disk_size_gb,
                    numLocalSsds=args.num_worker_local_ssds,
                ),
            ),
            initializationActions=init_actions,
            softwareConfig=software_config,
        )

        # Secondary worker group is optional. However, users may still set
        # the boot disk size for future preemptible (pVM) workers at
        # creation time.
        if (args.num_preemptible_workers is not None
                or preemptible_worker_boot_disk_size_gb is not None):
            cluster_config.secondaryWorkerConfig = (
                messages.InstanceGroupConfig(
                    numInstances=args.num_preemptible_workers,
                    diskConfig=messages.DiskConfig(
                        bootDiskSizeGb=preemptible_worker_boot_disk_size_gb, ))
            )

        cluster = messages.Cluster(config=cluster_config,
                                   clusterName=cluster_ref.clusterName,
                                   projectId=cluster_ref.projectId)

        self.ConfigureCluster(messages, args, cluster)

        operation = client.projects_regions_clusters.Create(
            messages.DataprocProjectsRegionsClustersCreateRequest(
                projectId=cluster_ref.projectId,
                region=cluster_ref.region,
                cluster=cluster))

        if args.async_:  # 'async' is reserved in Python 3; the flag's dest is async_.
            log.status.write('Creating [{0}] with operation [{1}].'.format(
                cluster_ref, operation.name))
            return

        operation = util.WaitForOperation(
            operation, self.context, 'Waiting for cluster creation operation')

        get_request = messages.DataprocProjectsRegionsClustersGetRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            clusterName=cluster_ref.clusterName)
        cluster = client.projects_regions_clusters.Get(get_request)
        if cluster.status.state == (
                messages.ClusterStatus.StateValueValuesEnum.RUNNING):
            log.CreatedResource(cluster_ref)
        else:
            log.error('Create cluster failed!')
            if operation.details:
                log.error('Details:\n' + operation.details)
        return cluster
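The create flow above is the submit-then-poll shape that recurs throughout these examples. A minimal standalone sketch of that shape, using hypothetical client, request, and wait_fn objects rather than the real Dataproc client:

def create_and_report(client, request, wait_fn, is_async):
    # Submit the create call; the API returns a long-running operation.
    operation = client.Create(request)
    if is_async:
        # Async mode: report the operation name and return immediately.
        print('Started operation [{0}]'.format(operation.name))
        return operation
    # Otherwise block until the operation completes, then re-fetch the
    # resource so the caller sees its final server-side state.
    wait_fn(operation)
    return client.Get(request)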
Example #16
def LogResource(request, is_async):
    # 'async' is a reserved word in Python 3, so take is_async instead.
    log.CreatedResource(request.compositeType.name,
                        kind='composite_type',
                        is_async=is_async)
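A hypothetical call site for this helper, assuming request carries the parsed composite-type message and the command's async flag is exposed as args.async_:

LogResource(request, is_async=args.async_)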
Example #17
def _Run(args, enable_labels=False, legacy_output=False):
  """Creates one or more subscriptions."""
  flags.ValidateDeadLetterPolicy(args)

  client = subscriptions.SubscriptionsClient()

  topic_ref = args.CONCEPTS.topic.Parse()
  push_config = util.ParsePushConfig(args)
  enable_message_ordering = getattr(args, 'enable_message_ordering', None)
  filter_string = getattr(args, 'message_filter', None)
  dead_letter_topic = getattr(args, 'dead_letter_topic', None)
  max_delivery_attempts = getattr(args, 'max_delivery_attempts', None)
  retain_acked_messages = getattr(args, 'retain_acked_messages', None)
  retention_duration = getattr(args, 'message_retention_duration', None)
  if retention_duration:
    retention_duration = util.FormatDuration(retention_duration)
  min_retry_delay = getattr(args, 'min_retry_delay', None)
  if min_retry_delay:
    min_retry_delay = util.FormatDuration(min_retry_delay)
  max_retry_delay = getattr(args, 'max_retry_delay', None)
  if max_retry_delay:
    max_retry_delay = util.FormatDuration(max_retry_delay)

  no_expiration = False
  expiration_period = getattr(args, 'expiration_period', None)
  if expiration_period:
    if expiration_period == subscriptions.NEVER_EXPIRATION_PERIOD_VALUE:
      no_expiration = True
      expiration_period = None

  if dead_letter_topic:
    dead_letter_topic = args.CONCEPTS.dead_letter_topic.Parse().RelativeName()

  labels = None
  if enable_labels:
    labels = labels_util.ParseCreateArgs(
        args, client.messages.Subscription.LabelsValue)

  failed = []
  for subscription_ref in args.CONCEPTS.subscription.Parse():

    try:
      result = client.Create(
          subscription_ref,
          topic_ref,
          args.ack_deadline,
          push_config,
          retain_acked_messages,
          retention_duration,
          labels=labels,
          no_expiration=no_expiration,
          expiration_period=expiration_period,
          enable_message_ordering=enable_message_ordering,
          filter_string=filter_string,
          dead_letter_topic=dead_letter_topic,
          max_delivery_attempts=max_delivery_attempts,
          min_retry_delay=min_retry_delay,
          max_retry_delay=max_retry_delay)
    except api_ex.HttpError as error:
      exc = exceptions.HttpException(error)
      log.CreatedResource(subscription_ref.RelativeName(),
                          kind='subscription',
                          failed=exc.payload.status_message)
      failed.append(subscription_ref.subscriptionsId)
      continue

    if legacy_output:
      result = util.SubscriptionDisplayDict(result)

    log.CreatedResource(subscription_ref.RelativeName(), kind='subscription')
    yield result

  if failed:
    raise util.RequestsFailedError(failed, 'create')
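The getattr(args, 'flag', None) calls above let one _Run serve several release tracks: flags that are not registered on a given track simply come back as None. A standalone sketch with a plain argparse namespace and a hypothetical flag name:

import argparse

args = argparse.Namespace(ack_deadline=600)
# Returns None instead of raising AttributeError when the flag is absent.
ordering = getattr(args, 'enable_message_ordering', None)
assert ordering is None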
Example #18
  def Run(self, args):
    """Clones a Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the
      clone operation if the clone was successful.
    Raises:
      ArgumentError: The arguments are invalid for some reason.
    """

    client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    source_instance_ref, destination_instance_ref = (
        self._GetInstanceRefsFromArgs(args, client))

    request = sql_messages.SqlInstancesCloneRequest(
        project=source_instance_ref.project,
        instance=source_instance_ref.instance,
        instancesCloneRequest=sql_messages.InstancesCloneRequest(
            cloneContext=sql_messages.CloneContext(
                destinationInstanceName=destination_instance_ref.instance)))

    self._UpdateRequestFromArgs(request, args, sql_messages)

    # Check if source has customer-managed key; show warning if so.
    try:
      source_instance_resource = sql_client.instances.Get(
          sql_messages.SqlInstancesGetRequest(
              project=source_instance_ref.project,
              instance=source_instance_ref.instance))
      if source_instance_resource.diskEncryptionConfiguration:
        command_util.ShowCmekWarning('clone', 'the source instance')
    except apitools_exceptions.HttpError:
      # This is for informational purposes, so don't throw an error if failure.
      pass

    result = sql_client.instances.Clone(request)

    operation_ref = client.resource_parser.Create(
        'sql.operations',
        operation=result.name,
        project=destination_instance_ref.project)

    if args.async_:
      if not args.IsSpecified('format'):
        args.format = 'default'
      return sql_client.operations.Get(
          sql_messages.SqlOperationsGetRequest(
              project=operation_ref.project, operation=operation_ref.operation))
    operations.OperationsV1Beta4.WaitForOperation(sql_client, operation_ref,
                                                  'Cloning Cloud SQL instance')
    log.CreatedResource(destination_instance_ref)
    rsource = sql_client.instances.Get(
        sql_messages.SqlInstancesGetRequest(
            project=destination_instance_ref.project,
            instance=destination_instance_ref.instance))
    return rsource
Example #19
    def _Run(self, args, support_keepalive_interval=False):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)
        network_ref = self.NETWORK_ARG.ResolveAsResource(
            args, holder.resources)

        router_resource = messages.Router(name=router_ref.Name(),
                                          description=args.description,
                                          network=network_ref.SelfLink())

        if support_keepalive_interval:
            # Add bgp field with the assigned asn and/or keepalive_interval
            if args.asn is not None or args.keepalive_interval is not None:
                router_resource.bgp = (messages.RouterBgp(
                    asn=args.asn, keepaliveInterval=args.keepalive_interval))
        else:
            # Add bgp field with the assigned asn.
            if args.asn is not None:
                router_resource.bgp = messages.RouterBgp(asn=args.asn)

        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, ranges = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgp,
                args=args)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedIpRanges': ranges,
            }
            # Create an empty bgp field if not generated yet.
            if args.asn is None:
                router_resource.bgp = messages.RouterBgp()
            for attr, value in six.iteritems(attrs):
                if value is not None:
                    setattr(router_resource.bgp, attr, value)

        result = service.Insert(
            messages.ComputeRoutersInsertRequest(router=router_resource,
                                                 region=router_ref.region,
                                                 project=router_ref.project))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            # Override the networks list format with the default operations format
            if not args.IsSpecified('format'):
                args.format = 'none'
            log.CreatedResource(
                operation_ref,
                kind='router [{0}]'.format(router_ref.Name()),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Creating router [{0}]'.format(router_ref.Name()))
Example #20
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """

        client = cloudbuild_util.GetClientInstance()
        messages = cloudbuild_util.GetMessagesModule()

        trigger = messages.BuildTrigger()
        if args.trigger_config:
            trigger = cloudbuild_util.LoadMessageFromPath(
                path=args.trigger_config,
                msg_type=messages.BuildTrigger,
                msg_friendly_name='build trigger config',
                skip_camel_case=['substitutions'])
        else:
            repo_ref = args.CONCEPTS.repo.Parse()
            repo = repo_ref.reposId
            trigger = messages.BuildTrigger(
                description=args.description,
                triggerTemplate=messages.RepoSource(
                    repoName=repo,
                    branchName=args.branch_pattern,
                    tagName=args.tag_pattern,
                ),
            )

            # Build Config
            if args.build_config:
                trigger.filename = args.build_config
                trigger.substitutions = cloudbuild_util.EncodeTriggerSubstitutions(
                    args.substitutions, messages)
            if args.dockerfile:
                project = properties.VALUES.core.project.Get(required=True)
                image = (args.dockerfile_image if args.dockerfile_image else
                         'gcr.io/%s/%s:$COMMIT_SHA' % (project, repo))
                trigger.build = messages.Build(steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        dir=args.dockerfile_dir,
                        args=[
                            'build', '-t', image, '-f', args.dockerfile, '.'
                        ],
                    )
                ])
            # Include/Exclude files
            if args.included_files:
                trigger.includedFiles = args.included_files
            if args.ignored_files:
                trigger.ignoredFiles = args.ignored_files

        # Send the Create request
        project = properties.VALUES.core.project.Get(required=True)
        created_trigger = client.projects_triggers.Create(
            messages.CloudbuildProjectsTriggersCreateRequest(
                buildTrigger=trigger, projectId=project))

        trigger_resource = resources.REGISTRY.Parse(
            None,
            collection='cloudbuild.projects.triggers',
            api_version='v1',
            params={
                'projectId': project,
                'triggerId': created_trigger.id,
            })
        log.CreatedResource(trigger_resource)

        return created_trigger
Example #21
  def Run(self, args):
    """Creates a new Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the create
      operation if the create was successful.
    Raises:
      HttpException: A http error response was received while executing api
          request.
      ToolException: An error other than http error occured while executing the
          command.
    """

    sql_client = self.context['sql_client']
    sql_messages = self.context['sql_messages']
    resources = self.context['registry']

    validate.ValidateInstanceName(args.instance)
    instance_ref = resources.Parse(args.instance, collection='sql.instances')
    instance_resource = instances.InstancesV1Beta4.ConstructInstanceFromArgs(
        sql_messages, args, instance_ref=instance_ref)

    if args.pricing_plan == 'PACKAGE':
      if not console_io.PromptContinue(
          'Charges will begin accruing immediately. Really create Cloud '
          'SQL instance?'):
        raise exceptions.ToolException('canceled by the user.')

    operation_ref = None
    try:
      result_operation = sql_client.instances.Insert(instance_resource)

      operation_ref = resources.Create(
          'sql.operations',
          operation=result_operation.name,
          project=instance_ref.project)

      if args.async_:
        return sql_client.operations.Get(
            sql_messages.SqlOperationsGetRequest(
                project=operation_ref.project,
                operation=operation_ref.operation))

      operations.OperationsV1Beta4.WaitForOperation(
          sql_client, operation_ref, 'Creating Cloud SQL instance')

      log.CreatedResource(instance_ref)

      new_resource = sql_client.instances.Get(
          sql_messages.SqlInstancesGetRequest(
              project=instance_ref.project,
              instance=instance_ref.instance))
      cache = remote_completion.RemoteCompletion()
      cache.AddToCache(instance_ref.SelfLink())
      return new_resource
    except apitools_exceptions.HttpError:
      log.debug('operation : %s', str(operation_ref))
      raise
Example #22
    def Run(self, args):
        """Run 'runtime-configs waiters create'.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      The associated waiter operation.

    Raises:
      HttpException: An http error response was received while executing api
          request.
    """
        waiter_client = util.WaiterClient()
        messages = util.Messages()

        waiter_resource = util.ParseWaiterName(args.name, args)
        project = waiter_resource.projectsId
        config = waiter_resource.configsId
        name = waiter_resource.Name()

        success = messages.EndCondition(cardinality=messages.Cardinality(
            path=args.success_cardinality_path,
            number=args.success_cardinality_number,
        ))

        if args.failure_cardinality_path:
            failure = messages.EndCondition(cardinality=messages.Cardinality(
                path=args.failure_cardinality_path,
                number=args.failure_cardinality_number,
            ))
        else:
            failure = None

        result = waiter_client.Create(
            messages.RuntimeconfigProjectsConfigsWaitersCreateRequest(
                projectsId=project,
                configsId=config,
                waiter=messages.Waiter(
                    name=util.WaiterPath(project, config, name),
                    timeout='{0}s'.format(args.timeout),
                    success=success,
                    failure=failure,
                )))

        log.CreatedResource(waiter_resource)

        if args.async_:
            # In async mode, we return the current waiter representation.
            # The waiter resource exists immediately after creation; the
            # operation resource returned from CreateWaiter only tracks the
            # waiting process.
            self._async_resource = waiter_resource
            result = waiter_client.Get(waiter_resource.Request())
        else:
            self._async_resource = None
            result = util.WaitForWaiter(waiter_resource)
            if util.IsFailedWaiter(result):
                self.exit_code = 2  # exit with code 2 if the result waiter failed.

        return util.FormatWaiter(result)
Example #23
def RunBaseCreateCommand(args, release_track):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: A http error response was received while executing api
        request.
  """
    client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')

    # Get the region, tier, and database version from the master if these fields
    # are not specified.
    # TODO(b/64266672): Remove once API does not require these fields.
    if args.IsSpecified('master_instance_name'):
        master_instance_ref = client.resource_parser.Parse(
            args.master_instance_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        try:
            master_instance_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=master_instance_ref.instance))
        except apitools_exceptions.HttpError as error:
            # TODO(b/64292220): Remove once API gives helpful error message.
            log.debug('operation : %s', six.text_type(master_instance_ref))
            exc = exceptions.HttpException(error)
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'notAuthorized':
                msg = (
                    'You are either not authorized to access the master instance or '
                    'it does not exist.')
                raise exceptions.HttpException(msg)
            raise
        if not args.IsSpecified('region'):
            args.region = master_instance_resource.region
        if not args.IsSpecified('database_version'):
            args.database_version = master_instance_resource.databaseVersion
        if not args.IsSpecified('tier') and master_instance_resource.settings:
            args.tier = master_instance_resource.settings.tier
        # Check for CMEK usage; warn the user about replica inheriting the setting.
        if master_instance_resource.diskEncryptionConfiguration:
            command_util.ShowCmekWarning('replica', 'the master instance')

    # --root-password is required when creating SQL Server instances
    if args.IsSpecified(
            'database_version') and args.database_version.startswith(
                'SQLSERVER') and not args.IsSpecified('root_password'):
        raise exceptions.RequiredArgumentException(
            '--root-password',
            '`--root-password` is required when creating SQL Server instances.'
        )

    instance_resource = (
        command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
            sql_messages,
            args,
            instance_ref=instance_ref,
            release_track=release_track))

    # TODO(b/122660263): Remove when V1 instances are no longer supported.
    # V1 instances are deprecated. Prompt to continue if one is being created.
    if api_util.IsInstanceV1(instance_resource):
        log.warning(
            'First Generation instances will be automatically upgraded '
            'to Second Generation starting March 4th, 2020, and First Generation '
            'will be fully decommissioned on March 25, 2020. We recommend you '
            'create a Second Generation instance.')
        console_io.PromptContinue(cancel_on_no=True)

    if args.pricing_plan == 'PACKAGE':
        console_io.PromptContinue(
            'Charges will begin accruing immediately. Really create Cloud '
            'SQL instance?',
            cancel_on_no=True)

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client,
            operation_ref,
            'Creating Cloud SQL instance',
            # TODO(b/138403566): Remove the override once we improve creation times.
            max_wait_seconds=680)

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', six.text_type(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
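The except branch above pulls the machine-readable reason out of the error payload before deciding what to surface. A sketch of the same lookup over a plain dict, with a hypothetical payload shape standing in for exc.payload.content:

payload = {'error': {'errors': [{'reason': 'errorMaxInstancePerLabel'}],
                     'message': 'Label instance limit exceeded.'}}
errors = payload.get('error', {}).get('errors', [])
reason = errors[0].get('reason') if errors else None
if reason == 'errorMaxInstancePerLabel':
    # Surface the human-readable message instead of the raw HTTP error.
    print(payload['error']['message'])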
Example #24
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace, All the arguments that were provided to this
        command invocation.

    Raises:
      bigquery.DuplicateError: if table already exists.
    Returns:
      None
    """
        apitools_client = self.context[commands.APITOOLS_CLIENT_KEY]
        bigquery_messages = self.context[commands.BIGQUERY_MESSAGES_MODULE_KEY]
        resource_parser = self.context[commands.BIGQUERY_REGISTRY_KEY]
        resource = resource_parser.Parse(args.table,
                                         collection='bigquery.tables')
        reference = message_conversions.TableResourceToReference(
            bigquery_messages, resource)

        table_or_view = 'View' if args.view else 'Table'
        if bigquery_client_helper.TableExists(apitools_client,
                                              bigquery_messages, reference):
            if args.if_exists == 'skip':
                log.status.Print(
                    'Skipping this operation because a table or view named '
                    '[{0}] already exists.'.format(reference))
                return
            else:
                message = (
                    '{0} [{1}] could not be created; a table with this name '
                    'already exists.'.format(table_or_view, reference))
                raise bigquery.DuplicateError(message, None, [])
        if args.schema:
            schema = bigquery_schemas.ReadSchema(args.schema,
                                                 bigquery_messages)
        elif args.schema_file:
            schema = bigquery_schemas.ReadSchemaFile(args.schema_file,
                                                     bigquery_messages)
        else:
            schema = None

        if args.expiration:
            expiration_instant_seconds = time.time() + args.expiration
            expiration_instant_millis = int(1000 * expiration_instant_seconds)
        else:
            expiration_instant_millis = None

        if args.view:
            view_definition = bigquery_messages.ViewDefinition(query=args.view)
        else:
            view_definition = None

        request = bigquery_messages.BigqueryTablesInsertRequest(
            projectId=reference.projectId,
            datasetId=reference.datasetId,
            table=bigquery_messages.Table(
                tableReference=bigquery_messages.TableReference(
                    projectId=reference.projectId,
                    datasetId=reference.datasetId,
                    tableId=reference.tableId),
                description=args.description,
                expirationTime=expiration_instant_millis,
                schema=schema,
                view=view_definition))

        try:
            apitools_client.tables.Insert(request)
        except exceptions.HttpError as server_error:
            raise bigquery.Error.ForHttpError(server_error)

        log.CreatedResource(resource)
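The expiration handling above turns a relative --expiration value in seconds into the absolute millisecond timestamp the BigQuery API expects. A worked example for a one-hour expiration:

import time

expiration_seconds = 3600  # e.g. --expiration 3600
expiration_instant_millis = int(1000 * (time.time() + expiration_seconds))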
Example #25
    def Run(self, args):
        # We explicitly want to allow --networks='' as a valid option and we need
        # to differentiate between that option and not passing --networks at all.
        if args.visibility == 'public' and args.IsSpecified('networks'):
            raise exceptions.InvalidArgumentException(
                '--networks',
                'If --visibility is set to public (default), setting networks is '
                'not allowed.')
        if args.visibility == 'private' and args.networks is None:
            raise exceptions.RequiredArgumentException('--networks', ("""\
            If --visibility is set to private, a list of networks must be
            provided.
            NOTE: You can provide an empty value ("") for private zones that
            have NO network binding.
            """))

        dns = apis.GetClientInstance('dns', 'v1')
        messages = apis.GetMessagesModule('dns', 'v1')

        registry = util.GetRegistry('v1')

        zone_ref = registry.Parse(args.dns_zone,
                                  params={
                                      'project':
                                      properties.VALUES.core.project.GetOrFail,
                                  },
                                  collection='dns.managedZones')

        visibility = messages.ManagedZone.VisibilityValueValuesEnum(
            args.visibility)
        visibility_config = None
        if visibility == messages.ManagedZone.VisibilityValueValuesEnum.private:
            # Handle explicitly empty networks case (--networks='')
            networks = args.networks if args.networks != [''] else []

            def GetNetworkSelfLink(network):
                return registry.Parse(network,
                                      collection='compute.networks',
                                      params={
                                          'project': zone_ref.project
                                      }).SelfLink()

            network_urls = [GetNetworkSelfLink(n) for n in networks]
            network_configs = [
                messages.ManagedZonePrivateVisibilityConfigNetwork(
                    networkUrl=nurl) for nurl in network_urls
            ]
            visibility_config = messages.ManagedZonePrivateVisibilityConfig(
                networks=network_configs)

        if args.forwarding_targets:
            forward_config = command_util.ParseManagedZoneForwardingConfig(
                args.forwarding_targets, messages)
        else:
            forward_config = None

        dnssec_config = _MakeDnssecConfig(args, messages)

        labels = labels_util.ParseCreateArgs(args,
                                             messages.ManagedZone.LabelsValue)

        zone = messages.ManagedZone(name=zone_ref.managedZone,
                                    dnsName=util.AppendTrailingDot(
                                        args.dns_name),
                                    description=args.description,
                                    dnssecConfig=dnssec_config,
                                    labels=labels,
                                    visibility=visibility,
                                    forwardingConfig=forward_config,
                                    privateVisibilityConfig=visibility_config)

        result = dns.managedZones.Create(
            messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                                  project=zone_ref.project))
        log.CreatedResource(zone_ref)
        return [result]
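The networks handling above must distinguish --networks='' (a private zone with no network binding) from an omitted flag. A sketch of the normalization, assuming argparse yields [''] for the empty-string case:

networks = ['']                                 # what --networks='' parses to
networks = networks if networks != [''] else []
assert networks == []                           # explicit empty binding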
Example #26
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get(required=True)
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.container.build_timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = str(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag:
      if (properties.VALUES.container.build_check_tag.GetBool() and
          'gcr.io/' not in args.tag):
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      build_config = messages.Build(
          images=[args.tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=['build', '--no-cache', '-t', args.tag, '.'],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(args.substitutions,
                                                            messages)
      )
    elif args.config:
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    # --no-source overrides the default --source.
    if not args.IsSpecified('source') and args.no_source:
      args.source = None

    gcs_source_staging = None
    if args.source:
      suffix = '.tgz'
      if args.source.startswith('gs://') or os.path.isfile(args.source):
        _, suffix = os.path.splitext(args.source)

      # Next, stage the source to Cloud Storage.
      staged_object = '{stamp}-{uuid}{suffix}'.format(
          stamp=times.GetTimeStampFromDateTime(times.Now()),
          uuid=uuid.uuid4().hex,
          suffix=suffix,
      )
      gcs_source_staging_dir = resources.REGISTRY.Parse(
          args.gcs_source_staging_dir, collection='storage.objects')

      # We create the bucket (if it does not exist) first. If we do an existence
      # check and then create the bucket ourselves, it would be possible for an
      # attacker to get lucky and beat us to creating the bucket. Block on this
      # creation to avoid this race condition.
      gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

      # If the default staging bucket is in use (no staging dir was
      # specified), check that it is owned by this project (b/33046325).
      if default_gcs_source:
        # This request returns only the buckets owned by the project.
        bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
            project=project,
            prefix=default_bucket_name)
        bucket_list = gcs_client.client.buckets.List(bucket_list_req)
        found_bucket = False
        for bucket in bucket_list.items:
          if bucket.id == default_bucket_name:
            found_bucket = True
            break
        # We are already in the default_gcs_source branch, so a missing
        # bucket here means the name is taken by another project.
        if not found_bucket:
          raise c_exceptions.RequiredArgumentException(
              'gcs_source_staging_dir',
              'A bucket with name {} already exists and is owned by '
              'another project. Specify a bucket using '
              '--gcs_source_staging_dir.'.format(default_bucket_name))

      if gcs_source_staging_dir.object:
        staged_object = gcs_source_staging_dir.object + '/' + staged_object
      gcs_source_staging = resources.REGISTRY.Create(
          collection='storage.objects',
          bucket=gcs_source_staging_dir.bucket,
          object=staged_object)

      if args.source.startswith('gs://'):
        gcs_source = resources.REGISTRY.Parse(
            args.source, collection='storage.objects')
        staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      else:
        if not os.path.exists(args.source):
          raise c_exceptions.BadFileException(
              'could not find source [{src}]'.format(src=args.source))
        if os.path.isdir(args.source):
          source_snapshot = snapshot.Snapshot(args.source)
          size_str = resource_transform.TransformSize(
              source_snapshot.uncompressed_size)
          log.status.Print(
              'Creating temporary tarball archive of {num_files} file(s)'
              ' totalling {size} before compression.'.format(
                  num_files=len(source_snapshot.files),
                  size=size_str))
          staged_source_obj = source_snapshot.CopyTarballToGCS(
              gcs_client, gcs_source_staging)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
        elif os.path.isfile(args.source):
          unused_root, ext = os.path.splitext(args.source)
          if ext not in _ALLOWED_SOURCE_EXT:
            raise c_exceptions.BadFileException(
                'Local file [{src}] is none of {exts}.'.format(
                    src=args.source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
          log.status.Print(
              'Uploading local file [{src}] to '
              '[gs://{bucket}/{object}].'.format(
                  src=args.source,
                  bucket=gcs_source_staging.bucket,
                  object=gcs_source_staging.object,
              ))
          staged_source_obj = gcs_client.CopyFileToGCS(
              storage_util.BucketReference.FromBucketUrl(
                  gcs_source_staging.bucket),
              args.source, gcs_source_staging.object)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
    else:
      # No source
      if not args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source',
            'To omit source, use the --no-source flag.')

    if args.gcs_log_dir:
      gcs_log_dir = resources.REGISTRY.Parse(
          args.gcs_log_dir, collection='storage.objects')

      build_config.logsBucket = (
          'gs://'+gcs_log_dir.bucket+'/'+gcs_log_dir.object)

    # Machine type.
    if args.machine_type is not None:
      machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
          args.machine_type)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.machineType = machine_type

    # Disk size.
    if args.disk_size is not None:
      disk_size = compute_utils.BytesToGb(args.disk_size)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.diskSizeGb = int(disk_size)

    log.debug('submitting build: '+repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config,
            projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print('Logs are available at [{log_url}].'.format(
          log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the build.
    if args.async_:
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
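The timeout handling near the top of this command accepts either a bare number of seconds or a duration string. A sketch of that branch, with parse_duration as an assumed stand-in for times.ParseDuration:

def timeout_to_str(value):
    try:
        # A bare number is interpreted as seconds: '300' -> '300s'.
        secs = int(value)
    except ValueError:
        # Otherwise treat it as a duration, e.g. '5m' -> 300 seconds
        # (parse_duration is an assumed helper, not the real SDK call).
        secs = int(parse_duration(value).total_seconds())
    return str(secs) + 's'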
Example #27
    def Run(self, args):
        """Creates an SSL certificate for a Cloud SQL instance.

    Args:
      args: argparse.Namespace, The arguments that this command was invoked
          with.

    Returns:
      A dict object representing the operations resource describing the create
      operation if the create was successful.
    Raises:
      ArgumentError: If the file path provided cannot be written to.
    """

        if os.path.exists(args.cert_file):
            raise exceptions.ArgumentError(
                'file [{path}] already exists'.format(path=args.cert_file))

        # First check if args.out_file is writeable. If not, abort and don't create
        # the useless cert.
        try:
            files.WriteFileContents(args.cert_file,
                                    'placeholder\n',
                                    private=True)
        except (files.Error, OSError) as e:
            raise exceptions.ArgumentError(
                'unable to write [{path}]: {error}'.format(path=args.cert_file,
                                                           error=str(e)))

        client = api_util.SqlClient(api_util.API_VERSION_DEFAULT)
        sql_client = client.sql_client
        sql_messages = client.sql_messages

        validate.ValidateInstanceName(args.instance)
        instance_ref = client.resource_parser.Parse(
            args.instance,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')

        # TODO(b/36049399): figure out how to rectify the common_name and the
        # sha1fingerprint, so that things can work with the resource parser.

        result = sql_client.sslCerts.Insert(
            sql_messages.SqlSslCertsInsertRequest(
                project=instance_ref.project,
                instance=instance_ref.instance,
                sslCertsInsertRequest=sql_messages.SslCertsInsertRequest(
                    commonName=args.common_name)))

        private_key = result.clientCert.certPrivateKey
        files.WriteFileContents(args.cert_file,
                                private_key + '\n',
                                private=True)

        cert_ref = client.resource_parser.Create(
            collection='sql.sslCerts',
            project=instance_ref.project,
            instance=instance_ref.instance,
            sha1Fingerprint=result.clientCert.certInfo.sha1Fingerprint)

        log.CreatedResource(cert_ref)
        return result.clientCert.certInfo
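The placeholder write above makes the command fail before the server-side certificate is created, rather than after, when the destination is unwritable. The same pre-check in plain Python with a hypothetical output path:

import os

path = 'client-cert.pem'  # hypothetical --cert-file value
if os.path.exists(path):
    raise SystemExit('file [{0}] already exists'.format(path))
with open(path, 'w') as f:  # raises OSError early if the path is unwritable
    f.write('placeholder\n')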
Example #28
  def Run(self, args):
    client = self.context['dataproc_client']
    messages = self.context['dataproc_messages']

    cluster_ref = util.ParseCluster(args.name, self.context)

    config_helper = compute_helpers.ConfigurationHelper.FromContext(
        self.context)
    compute_uris = config_helper.ResolveGceUris(
        args.name,
        args.image,
        args.master_machine_type,
        args.worker_machine_type,
        args.network,
        args.subnet)

    init_actions = []
    timeout_str = str(args.initialization_action_timeout) + 's'
    if args.initialization_actions:
      init_actions = [messages.NodeInitializationAction(
          executableFile=exe, executionTimeout=timeout_str)
                      for exe in args.initialization_actions]
    expanded_scopes = compute_helpers.ExpandScopeAliases(args.scopes)

    software_config = messages.SoftwareConfig(
        imageVersion=args.image_version)

    if args.properties:
      software_config.properties = encoding.DictToMessage(
          args.properties, messages.SoftwareConfig.PropertiesValue)

    gce_cluster_config = messages.GceClusterConfig(
        networkUri=compute_uris['network'],
        subnetworkUri=compute_uris['subnetwork'],
        serviceAccountScopes=expanded_scopes,
        zoneUri=compute_uris['zone'])

    if args.tags:
      gce_cluster_config.tags = args.tags

    if args.metadata:
      flat_metadata = dict((k, v) for d in args.metadata for k, v in d.items())
      gce_cluster_config.metadata = encoding.DictToMessage(
          flat_metadata, messages.GceClusterConfig.MetadataValue)

    cluster_config = messages.ClusterConfig(
        configBucket=args.bucket,
        gceClusterConfig=gce_cluster_config,
        masterConfig=messages.InstanceGroupConfig(
            imageUri=compute_uris['image'],
            machineTypeUri=compute_uris['master_machine_type'],
            diskConfig=messages.DiskConfig(
                bootDiskSizeGb=args.master_boot_disk_size_gb,
                numLocalSsds=args.num_master_local_ssds,
            ),
        ),
        workerConfig=messages.InstanceGroupConfig(
            numInstances=args.num_workers,
            imageUri=compute_uris['image'],
            machineTypeUri=compute_uris['worker_machine_type'],
            diskConfig=messages.DiskConfig(
                bootDiskSizeGb=args.worker_boot_disk_size_gb,
                numLocalSsds=args.num_worker_local_ssds,
            ),
        ),
        initializationActions=init_actions,
        softwareConfig=software_config,
    )

    # Secondary worker group is optional.
    if args.num_preemptible_workers is not None:
      cluster_config.secondaryWorkerConfig = (
          messages.InstanceGroupConfig(
              numInstances=args.num_preemptible_workers))

    cluster = messages.Cluster(
        config=cluster_config,
        clusterName=cluster_ref.clusterName,
        projectId=cluster_ref.projectId)

    operation = client.projects_regions_clusters.Create(
        messages.DataprocProjectsRegionsClustersCreateRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            cluster=cluster))

    if args.async_:
      log.status.write(
          'Creating [{0}] with operation [{1}].'.format(
              cluster_ref, operation.name))
      return

    operation = util.WaitForOperation(
        operation, self.context, 'Waiting for cluster creation operation')

    cluster = client.projects_regions_clusters.Get(cluster_ref.Request())
    if cluster.status.state == (
        messages.ClusterStatus.StateValueValuesEnum.RUNNING):
      log.CreatedResource(cluster_ref)
    else:
      log.error('Create cluster failed!')
      if operation.details:
        log.error('Details:\n' + operation.details)
    return cluster
Example #29
class Create(base.CreateCommand):
  """Create a cluster for running containers."""

  @staticmethod
  def Args(parser):
    _Args(parser)
    _AddAdditionalZonesFlag(parser, deprecated=True)
    flags.AddNodeLocationsFlag(parser)
    flags.AddAddonsFlags(parser)
    flags.AddClusterAutoscalingFlags(parser)
    flags.AddDiskTypeFlag(parser, suppressed=True)
    flags.AddEnableAutoRepairFlag(parser)
    flags.AddEnableKubernetesAlphaFlag(parser)
    flags.AddEnableLegacyAuthorizationFlag(parser)
    flags.AddIPAliasFlags(parser)
    flags.AddLabelsFlag(parser)
    flags.AddLocalSSDFlag(parser)
    flags.AddMaintenanceWindowFlag(parser)
    flags.AddMasterAuthorizedNetworksFlags(parser)
    flags.AddMinCpuPlatformFlag(parser)
    flags.AddNetworkPolicyFlags(parser)
    flags.AddNodeTaintsFlag(parser)
    flags.AddPreemptibleFlag(parser)
    flags.AddDeprecatedClusterNodeIdentityFlags(parser)

  def ParseCreateOptions(self, args):
    return ParseCreateOptionsBase(args)

  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Cluster message for the successfully created cluster.

    Raises:
      util.Error, if creation failed.
    """
    if args.async_ and not args.IsSpecified('format'):
      args.format = util.OPERATIONS_FORMAT

    util.CheckKubectlInstalled()

    adapter = self.context['api_adapter']
    location_get = self.context['location_get']
    location = location_get(args)

    cluster_ref = adapter.ParseCluster(args.name, location)
    options = self.ParseCreateOptions(args)

    if options.enable_kubernetes_alpha:
      console_io.PromptContinue(message=constants.KUBERNETES_ALPHA_PROMPT,
                                throw_if_unattended=True,
                                cancel_on_no=True)

    if getattr(args, 'region', None):
      # TODO(b/68496825): Remove this completely after regional clusters beta
      # launch.
      if self._release_track == base.ReleaseTrack.ALPHA:
        console_io.PromptContinue(
            message=constants.KUBERNETES_REGIONAL_CHARGES_PROMPT,
            throw_if_unattended=True,
            cancel_on_no=True)

    if options.enable_autorepair is not None:
      log.status.Print(messages.AutoUpdateUpgradeRepairMessage(
          options.enable_autorepair, 'autorepair'))

    if options.enable_autoupgrade is not None:
      log.status.Print(messages.AutoUpdateUpgradeRepairMessage(
          options.enable_autoupgrade, 'autoupgrade'))

    if options.accelerators is not None:
      log.status.Print(constants.KUBERNETES_GPU_LIMITATION_MSG)

    operation = None
    try:
      operation_ref = adapter.CreateCluster(cluster_ref, options)
      if args.async_:
        return adapter.GetCluster(cluster_ref)

      operation = adapter.WaitForOperation(
          operation_ref,
          'Creating cluster {0}'.format(cluster_ref.clusterId),
          timeout_s=args.timeout)
      cluster = adapter.GetCluster(cluster_ref)
    except apitools_exceptions.HttpError as error:
      raise exceptions.HttpException(error, util.HTTP_ERROR_FORMAT)

    log.CreatedResource(cluster_ref)
    cluster_url = util.GenerateClusterUrl(cluster_ref)
    log.status.Print(
        'To inspect the contents of your cluster, go to: ' + cluster_url)
    if operation.detail:
      # Non-empty detail on a DONE create operation should be surfaced as
      # a warning to end user.
      log.warning(operation.detail)

    try:
      util.ClusterConfig.Persist(cluster, cluster_ref.projectId)
    except kconfig.MissingEnvVarError as error:
      log.warning(error)

    return [cluster]
Example #30
  def Run(self, args):
    self.ValidateArgs(args)

    dataproc = dp.Dataproc()

    cluster_ref = dataproc.ParseCluster(args.name)

    compute_resources = compute_helpers.GetComputeResources(
        self.ReleaseTrack(), args.name)

    master_accelerator_type = None
    worker_accelerator_type = None
    master_accelerator_count = None
    worker_accelerator_count = None
    if self.ReleaseTrack() == base.ReleaseTrack.BETA:
      if args.master_accelerator:
        master_accelerator_type = args.master_accelerator['type']
        master_accelerator_count = args.master_accelerator.get('count', 1)
      if args.worker_accelerator:
        worker_accelerator_type = args.worker_accelerator['type']
        worker_accelerator_count = args.worker_accelerator.get('count', 1)

    # Resolve non-zonal GCE resources
    # We will let the server resolve short names of zonal resources because
    # if auto zone is requested, we will not know the zone before sending the
    # request
    image_ref = args.image and compute_resources.Parse(
        args.image,
        params={'project': cluster_ref.projectId},
        collection='compute.images')
    network_ref = args.network and compute_resources.Parse(
        args.network,
        params={'project': cluster_ref.projectId},
        collection='compute.networks')
    subnetwork_ref = args.subnet and compute_resources.Parse(
        args.subnet,
        params={
            'project': cluster_ref.projectId,
            'region': properties.VALUES.compute.region.GetOrFail,
        },
        collection='compute.subnetworks')
    timeout_str = str(args.initialization_action_timeout) + 's'
    init_actions = [
        dataproc.messages.NodeInitializationAction(
            executableFile=exe, executionTimeout=timeout_str)
        for exe in (args.initialization_actions or [])]
    # Increase the client timeout for each initialization action.
    args.timeout += args.initialization_action_timeout * len(init_actions)

    expanded_scopes = compute_helpers.ExpandScopeAliases(args.scopes)

    software_config = dataproc.messages.SoftwareConfig(
        imageVersion=args.image_version)

    master_boot_disk_size_gb = args.master_boot_disk_size_gb
    if args.master_boot_disk_size:
      master_boot_disk_size_gb = (
          api_utils.BytesToGb(args.master_boot_disk_size))

    worker_boot_disk_size_gb = args.worker_boot_disk_size_gb
    if args.worker_boot_disk_size:
      worker_boot_disk_size_gb = (
          api_utils.BytesToGb(args.worker_boot_disk_size))

    preemptible_worker_boot_disk_size_gb = (
        api_utils.BytesToGb(args.preemptible_worker_boot_disk_size))

    if args.single_node:
      args.properties[constants.ALLOW_ZERO_WORKERS_PROPERTY] = 'true'

    if args.properties:
      software_config.properties = encoding.DictToMessage(
          args.properties, dataproc.messages.SoftwareConfig.PropertiesValue)

    gce_cluster_config = dataproc.messages.GceClusterConfig(
        networkUri=network_ref and network_ref.SelfLink(),
        subnetworkUri=subnetwork_ref and subnetwork_ref.SelfLink(),
        internalIpOnly=args.no_address,
        serviceAccount=args.service_account,
        serviceAccountScopes=expanded_scopes,
        zoneUri=properties.VALUES.compute.zone.GetOrFail())

    if args.tags:
      gce_cluster_config.tags = args.tags

    if args.metadata:
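      # args.metadata is a repeated dict flag; merge all occurrences, with
      # later keys overwriting earlier ones (see the sketch after this
      # example).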
      flat_metadata = dict((k, v) for d in args.metadata for k, v in d.items())
      gce_cluster_config.metadata = encoding.DictToMessage(
          flat_metadata, dataproc.messages.GceClusterConfig.MetadataValue)

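    # Wrap any requested accelerators in AcceleratorConfig messages; empty
    # lists leave the instance groups without accelerators.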
    master_accelerators = []
    if master_accelerator_type:
      master_accelerators.append(
          dataproc.messages.AcceleratorConfig(
              acceleratorTypeUri=master_accelerator_type,
              acceleratorCount=master_accelerator_count))
    worker_accelerators = []
    if worker_accelerator_type:
      worker_accelerators.append(
          dataproc.messages.AcceleratorConfig(
              acceleratorTypeUri=worker_accelerator_type,
              acceleratorCount=worker_accelerator_count))

    cluster_config = dataproc.messages.ClusterConfig(
        configBucket=args.bucket,
        gceClusterConfig=gce_cluster_config,
        masterConfig=dataproc.messages.InstanceGroupConfig(
            numInstances=args.num_masters,
            imageUri=image_ref and image_ref.SelfLink(),
            machineTypeUri=args.master_machine_type,
            accelerators=master_accelerators,
            diskConfig=dataproc.messages.DiskConfig(
                bootDiskSizeGb=master_boot_disk_size_gb,
                numLocalSsds=args.num_master_local_ssds,),),
        workerConfig=dataproc.messages.InstanceGroupConfig(
            numInstances=args.num_workers,
            imageUri=image_ref and image_ref.SelfLink(),
            machineTypeUri=args.worker_machine_type,
            accelerators=worker_accelerators,
            diskConfig=dataproc.messages.DiskConfig(
                bootDiskSizeGb=worker_boot_disk_size_gb,
                numLocalSsds=args.num_worker_local_ssds,),),
        initializationActions=init_actions,
        softwareConfig=software_config,)

    # The secondary worker group is optional, but users may still specify the
    # boot disk size for future preemptible VMs at creation time.
    if (args.num_preemptible_workers is not None or
        preemptible_worker_boot_disk_size_gb is not None):
      cluster_config.secondaryWorkerConfig = (
          dataproc.messages.InstanceGroupConfig(
              numInstances=args.num_preemptible_workers,
              diskConfig=dataproc.messages.DiskConfig(
                  bootDiskSizeGb=preemptible_worker_boot_disk_size_gb,
              )))

    cluster = dataproc.messages.Cluster(
        config=cluster_config,
        clusterName=cluster_ref.clusterName,
        projectId=cluster_ref.projectId)

    self.ConfigureCluster(dataproc.messages, args, cluster)

    operation = dataproc.client.projects_regions_clusters.Create(
        dataproc.messages.DataprocProjectsRegionsClustersCreateRequest(
            projectId=cluster_ref.projectId,
            region=cluster_ref.region,
            cluster=cluster))

    # NOTE: 'async' is a reserved word in Python 3; newer SDK releases expose
    # this flag as args.async_.
    if args.async_:
      log.status.write(
          'Creating [{0}] with operation [{1}].'.format(
              cluster_ref, operation.name))
      return

    operation = dataproc.WaitForOperation(
        operation,
        message='Waiting for cluster creation operation',
        timeout_s=args.timeout)

    get_request = dataproc.messages.DataprocProjectsRegionsClustersGetRequest(
        projectId=cluster_ref.projectId,
        region=cluster_ref.region,
        clusterName=cluster_ref.clusterName)
    cluster = dataproc.client.projects_regions_clusters.Get(get_request)
    if cluster.status.state == (
        dataproc.messages.ClusterStatus.StateValueValuesEnum.RUNNING):
      zone_uri = cluster.config.gceClusterConfig.zoneUri
      zone_short_name = zone_uri.split('/')[-1]

      # Log the created cluster resource
      log.CreatedResource(
          cluster_ref,
          # Also indicate which zone the cluster was placed in. This is helpful
          # if the server picked a zone (auto zone)
          details='Cluster placed in zone [{0}]'.format(zone_short_name))
    else:
      log.error('Cluster creation failed!')
      if operation.details:
        log.error('Details:\n' + operation.details)
    return cluster
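A minimal standalone sketch of the metadata merge performed in Example #30 above. flatten_metadata is a hypothetical helper, not part of the SDK; it assumes the same repeated-dict flag shape as args.metadata:

def flatten_metadata(dict_list):
  """Merges a list of dicts; later occurrences of a key win."""
  # Equivalent to dict((k, v) for d in dict_list for k, v in d.items()).
  flat = {}
  for d in dict_list or []:
    flat.update(d)
  return flat

assert flatten_metadata([{'a': '1'}, {'a': '2', 'b': '3'}]) == {
    'a': '2', 'b': '3'}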