Example #1
  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    address_ref = self.ADDRESS_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))

    labels_diff = labels_util.Diff.FromUpdateArgs(args)
    if not labels_diff.MayHaveUpdates():
      raise calliope_exceptions.RequiredArgumentException(
          'LABELS', 'At least one of --update-labels or '
          '--remove-labels must be specified.')

    if address_ref.Collection() == 'compute.globalAddresses':
      address = client.globalAddresses.Get(
          messages.ComputeGlobalAddressesGetRequest(
              **address_ref.AsDict()))
      labels_value = messages.GlobalSetLabelsRequest.LabelsValue
    else:
      address = client.addresses.Get(
          messages.ComputeAddressesGetRequest(
              **address_ref.AsDict()))
      labels_value = messages.RegionSetLabelsRequest.LabelsValue

    labels_update = labels_diff.Apply(labels_value, address.labels)

    if not labels_update.needs_update:
      return address

    if address_ref.Collection() == 'compute.globalAddresses':
      request = messages.ComputeGlobalAddressesSetLabelsRequest(
          project=address_ref.project,
          resource=address_ref.Name(),
          globalSetLabelsRequest=messages.GlobalSetLabelsRequest(
              labelFingerprint=address.labelFingerprint,
              labels=labels_update.labels))

      operation = client.globalAddresses.SetLabels(request)
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.globalOperations')

      operation_poller = poller.Poller(client.globalAddresses)
    else:
      request = messages.ComputeAddressesSetLabelsRequest(
          project=address_ref.project,
          resource=address_ref.Name(),
          region=address_ref.region,
          regionSetLabelsRequest=messages.RegionSetLabelsRequest(
              labelFingerprint=address.labelFingerprint,
              labels=labels_update.labels))

      operation = client.addresses.SetLabels(request)
      operation_ref = holder.resources.Parse(
          operation.selfLink, collection='compute.regionOperations')

      operation_poller = poller.Poller(client.addresses)

    return waiter.WaitFor(operation_poller, operation_ref,
                          'Updating labels of address [{0}]'.format(
                              address_ref.Name()))
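
The flow above is a common label-update skeleton: build a diff from the update/remove flags, fail fast when no label flag was given, skip the RPC when the diff is a no-op, then poll the returned operation. A minimal, SDK-free sketch of the diff-and-early-return part (names here are illustrative, not the SDK's):

def apply_label_diff(current, updates=None, removals=None):
    """Return (new_labels, needs_update); mirrors the shape of Diff.Apply."""
    updates = updates or {}
    removals = removals or []
    if not updates and not removals:
        raise ValueError('At least one of --update-labels or '
                         '--remove-labels must be specified.')
    new_labels = {k: v for k, v in current.items() if k not in removals}
    new_labels.update(updates)
    return new_labels, new_labels != current

labels, changed = apply_label_diff({'env': 'prod'}, updates={'team': 'net'})
assert changed and labels == {'env': 'prod', 'team': 'net'}
# A no-op update skips the SetLabels request entirely:
_, changed = apply_label_diff({'env': 'prod'}, updates={'env': 'prod'})
assert not changed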
Example #2
    def Run(self, args):
        messages = secrets_api.GetMessages(
            version=secrets_util.GetVersionFromReleasePath(
                self.ReleaseTrack()))
        secret_ref = args.CONCEPTS.secret.Parse()
        data = secrets_util.ReadFileOrStdin(args.data_file)
        labels = labels_util.ParseCreateArgs(args, messages.Secret.LabelsValue)
        replication_policy = args.replication_policy
        if not replication_policy:
            replication_policy = properties.VALUES.secrets.replication_policy.Get()

        if not replication_policy:
            raise exceptions.RequiredArgumentException(
                'replication-policy', self.MISSING_POLICY_MESSAGE)
        if replication_policy not in {'user-managed', 'automatic'}:
            if args.replication_policy:
                raise exceptions.InvalidArgumentException(
                    'replication-policy', self.INVALID_POLICY_MESSAGE)
            raise exceptions.InvalidArgumentException(
                'replication-policy', self.INVALID_POLICY_PROP_MESSAGE)

        locations = args.locations
        if not locations:
            # if locations weren't given, try to get them from properties
            locations = properties.VALUES.secrets.locations.Get()
            if locations:
                locations = locations.split(',')
        if replication_policy == 'user-managed' and not locations:
            raise exceptions.RequiredArgumentException(
                'locations', self.MANAGED_BUT_NO_LOCATIONS_MESSAGE)
        if replication_policy == 'automatic':
            if args.locations:
                # check args.locations separately from locations because we have
                # different error messages depending on whether the user used the
                # --locations flag or the secrets/locations property
                if args.replication_policy:
                    raise exceptions.InvalidArgumentException(
                        'locations', self.AUTOMATIC_AND_LOCATIONS_MESSAGE)
                raise exceptions.InvalidArgumentException(
                    'locations', self.AUTOMATIC_PROP_AND_LOCATIONS_MESSAGE)
            if locations:
                raise exceptions.InvalidArgumentException(
                    'replication-policy',
                    self.AUTOMATIC_AND_LOCATIONS_PROP_MESSAGE)
            locations = []

        # Differentiate between the flag being provided with an empty value and the
        # flag being omitted. See b/138796299 for info.
        if args.data_file == '':  # pylint: disable=g-explicit-bool-comparison
            raise exceptions.BadFileException(self.EMPTY_DATA_FILE_MESSAGE)
        # Create the secret
        response = secrets_api.Secrets(
            version=secrets_util.GetVersionFromReleasePath(
                self.ReleaseTrack())).Create(secret_ref,
                                             labels=labels,
                                             locations=locations,
                                             policy=replication_policy)

        if data:
            version = secrets_api.Secrets(
                version=secrets_util.GetVersionFromReleasePath(
                    self.ReleaseTrack())).AddVersion(secret_ref, data)
            version_ref = secrets_args.ParseVersionRef(version.name)
            secrets_log.Versions().Created(version_ref)
        else:
            secrets_log.Secrets().Created(secret_ref)

        return response
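
The policy handling above resolves the flag first and the secrets/replication_policy property second, then picks the error message based on which source supplied the bad value. A stripped-down sketch of that resolution order, with the property store mocked as a dict:

PROPERTIES = {'secrets/replication_policy': 'automatic'}  # mock property store

def resolve_policy(flag_value):
    policy = flag_value or PROPERTIES.get('secrets/replication_policy')
    if not policy:
        raise ValueError('replication-policy must be set.')
    if policy not in {'user-managed', 'automatic'}:
        # Blame the flag if it was given, otherwise blame the property.
        source = ('--replication-policy' if flag_value
                  else 'secrets/replication_policy')
        raise ValueError('invalid value from {}: {}'.format(source, policy))
    return policy

assert resolve_policy(None) == 'automatic'  # falls back to the property
assert resolve_policy('user-managed') == 'user-managed'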
Example #3
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    project = properties.VALUES.core.project.Get()
    safe_project = project.replace(':', '_')
    safe_project = safe_project.replace('.', '_')
    # The string 'google' is not allowed in bucket names.
    safe_project = safe_project.replace('google', 'elgoog')

    default_bucket_name = '{}_cloudbuild'.format(safe_project)

    default_gcs_source = False
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    default_gcs_log_dir = False
    if args.gcs_log_dir is None:
      default_gcs_log_dir = True
      args.gcs_log_dir = 'gs://{}/logs'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.container.build_timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = str(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag:
      if 'gcr.io/' not in args.tag:
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      build_config = messages.Build(
          images=[args.tag],
          steps=[
              messages.BuildStep(
                  name='gcr.io/cloud-builders/docker',
                  args=['build', '--no-cache', '-t', args.tag, '.'],
              ),
          ],
          timeout=timeout_str,
          substitutions=cloudbuild_util.EncodeSubstitutions(args.substitutions,
                                                            messages)
      )
    elif args.config:
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    gcs_source_staging = None
    if args.source:
      if args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source',
            'Cannot provide both source [{src}] and [--no-source].'.format(
                src=args.source,
            ))

      suffix = '.tgz'
      if args.source.startswith('gs://') or os.path.isfile(args.source):
        _, suffix = os.path.splitext(args.source)

      # Next, stage the source to Cloud Storage.
      staged_object = '{stamp}{suffix}'.format(
          stamp=times.GetTimeStampFromDateTime(times.Now()),
          suffix=suffix,
      )
      gcs_source_staging_dir = resources.REGISTRY.Parse(
          args.gcs_source_staging_dir, collection='storage.objects')

      # We first try to create the bucket, before doing all the checks, in order
      # to avoid a race condition. If we do the check first, an attacker could
      # be lucky enough to create the bucket after the check and before this
      # bucket creation.
      gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

      # If no bucket is specified (for the source `default_gcs_source` or for
      # the logs `default_gcs_log_dir`), check that the default bucket is also
      # owned by the project (b/33046325).
      if default_gcs_source or default_gcs_log_dir:
        # This request returns only the buckets owned by the project.
        bucket_list_req = gcs_client.messages.StorageBucketsListRequest(
            project=project,
            prefix=default_bucket_name)
        bucket_list = gcs_client.client.buckets.List(bucket_list_req)
        found_bucket = False
        for bucket in bucket_list.items:
          if bucket.id == default_bucket_name:
            found_bucket = True
            break
        if not found_bucket:
          if default_gcs_source:
            raise c_exceptions.RequiredArgumentException(
                'gcs_source_staging_dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs_source_staging_dir.'.format(default_bucket_name))
          elif default_gcs_log_dir:
            raise c_exceptions.RequiredArgumentException(
                'gcs-log-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket to hold build logs '
                'using --gcs-log-dir.'.format(default_bucket_name))

      if gcs_source_staging_dir.object:
        staged_object = gcs_source_staging_dir.object + '/' + staged_object
      gcs_source_staging = resources.REGISTRY.Create(
          collection='storage.objects',
          bucket=gcs_source_staging_dir.bucket,
          object=staged_object)

      if args.source.startswith('gs://'):
        gcs_source = resources.REGISTRY.Parse(
            args.source, collection='storage.objects')
        staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      else:
        if not os.path.exists(args.source):
          raise c_exceptions.BadFileException(
              'could not find source [{src}]'.format(src=args.source))
        if os.path.isdir(args.source):
          source_snapshot = snapshot.Snapshot(args.source)
          size_str = resource_transform.TransformSize(
              source_snapshot.uncompressed_size)
          log.status.Print(
              'Creating temporary tarball archive of {num_files} file(s)'
              ' totalling {size} before compression.'.format(
                  num_files=len(source_snapshot.files),
                  size=size_str))
          staged_source_obj = source_snapshot.CopyTarballToGCS(
              gcs_client, gcs_source_staging)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
        elif os.path.isfile(args.source):
          unused_root, ext = os.path.splitext(args.source)
          if ext not in _ALLOWED_SOURCE_EXT:
            raise c_exceptions.BadFileException(
                'Local file [{src}] is none of {exts}'.format(
                    src=args.source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
          log.status.Print(
              'Uploading local file [{src}] to '
              '[gs://{bucket}/{object}].'.format(
                  src=args.source,
                  bucket=gcs_source_staging.bucket,
                  object=gcs_source_staging.object,
              ))
          staged_source_obj = gcs_client.CopyFileToGCS(
              storage_util.BucketReference.FromBucketUrl(
                  gcs_source_staging.bucket),
              args.source, gcs_source_staging.object)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
    else:
      # No source
      if not args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source',
            'To omit source, use the --no-source flag.')

    gcs_log_dir = resources.REGISTRY.Parse(
        args.gcs_log_dir, collection='storage.objects')

    if gcs_source_staging and gcs_log_dir.bucket != gcs_source_staging.bucket:
      # Create the logs bucket if it does not yet exist.
      gcs_client.CreateBucketIfNotExists(gcs_log_dir.bucket)
    build_config.logsBucket = 'gs://'+gcs_log_dir.bucket+'/'+gcs_log_dir.object

    log.debug('submitting build: '+repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config,
            projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print('Logs are available at [{log_url}].'.format(
          log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the
    # build. ('async' is a reserved word in Python 3, so the flag's parsed
    # value lands on the 'async_' attribute.)
    if args.async_:
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
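
The timeout handling above first tries the raw value as an integer (a bare number means seconds) and only falls back to duration parsing on ValueError. The same two-step parse in isolation, with a toy duration grammar standing in for times.ParseDuration:

def parse_timeout(value):
    """Return an API-style 'Ns' string, or None if no timeout is configured."""
    if value is None:
        return None
    try:
        secs = int(value)  # A bare number is interpreted as seconds.
    except ValueError:
        units = {'s': 1, 'm': 60, 'h': 3600}  # toy grammar: '90s', '2m', '1h'
        secs = int(value[:-1]) * units[value[-1]]
    return '{}s'.format(secs)

assert parse_timeout('90') == '90s'
assert parse_timeout('2m') == '120s'
assert parse_timeout(None) is None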
Example #4
def ConfigName(args, required=True):
  if required and not getattr(args, 'config_name', None):
    raise sdk_exceptions.RequiredArgumentException(
        'config', '--config-name parameter is required.')

  return getattr(args, 'config_name', None)
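
A tiny harness (plain argparse plus a stand-in exception class, not the calliope one) showing how this getattr-based check treats a missing, empty, or set attribute:

import argparse

class RequiredArgumentException(Exception):
    """Stand-in for sdk_exceptions.RequiredArgumentException."""

def config_name(args, required=True):
    if required and not getattr(args, 'config_name', None):
        raise RequiredArgumentException('--config-name parameter is required.')
    return getattr(args, 'config_name', None)

assert config_name(argparse.Namespace(config_name='prod')) == 'prod'
assert config_name(argparse.Namespace(), required=False) is None  # attr absent
try:
    config_name(argparse.Namespace(config_name=''))  # empty counts as missing
except RequiredArgumentException:
    pass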
Example #5
    def GetAddress(self, messages, args, address, address_ref,
                   resource_parser):
        network_tier = self.ConstructNetworkTier(messages, args)

        if args.ip_version or (address is None and address_ref.Collection()
                               == 'compute.globalAddresses'):
            ip_version = messages.Address.IpVersionValueValuesEnum(
                args.ip_version or 'IPV4')
        else:
            # IP version is only specified in global requests if an address is not
            # specified to determine whether an ipv4 or ipv6 address should be
            # allocated.
            ip_version = None

        if args.subnet and args.network:
            raise exceptions.ConflictingArgumentsException(
                '--network', '--subnet')

        purpose = None
        if args.purpose and not args.network and not args.subnet:
            raise exceptions.MinimumArgumentException(
                ['--network', '--subnet'], ' if --purpose is specified')

        # TODO(b/36862747): get rid of args.subnet check
        if args.subnet:
            if address_ref.Collection() == 'compute.globalAddresses':
                raise exceptions.ToolException(
                    '[--subnet] may not be specified for global addresses.')
            if not args.subnet_region:
                args.subnet_region = address_ref.region
            subnetwork_url = flags.SubnetworkArgument().ResolveAsResource(
                args, resource_parser).SelfLink()
            purpose = messages.Address.PurposeValueValuesEnum(
                args.purpose or 'GCE_ENDPOINT')
            if purpose != messages.Address.PurposeValueValuesEnum.GCE_ENDPOINT:
                raise exceptions.InvalidArgumentException(
                    '--purpose',
                    'must be GCE_ENDPOINT for regional internal addresses.')
        else:
            subnetwork_url = None

        network_url = None
        if args.network:
            if address_ref.Collection() == 'compute.addresses':
                raise exceptions.InvalidArgumentException(
                    '--network',
                    'network may not be specified for regional addresses.')
            network_url = flags.NetworkArgument().ResolveAsResource(
                args, resource_parser).SelfLink()
            purpose = messages.Address.PurposeValueValuesEnum(args.purpose
                                                              or 'VPC_PEERING')
            if purpose != messages.Address.PurposeValueValuesEnum.VPC_PEERING:
                raise exceptions.InvalidArgumentException(
                    '--purpose',
                    'must be VPC_PEERING for global internal addresses.')
            if not args.prefix_length:
                raise exceptions.RequiredArgumentException(
                    '--prefix-length',
                    'prefix length is needed for reserving IP ranges.')

        if args.prefix_length:
            if purpose != messages.Address.PurposeValueValuesEnum.VPC_PEERING:
                raise exceptions.InvalidArgumentException(
                    '--prefix-length',
                    'can only be used with [--purpose VPC_PEERING].')

        return messages.Address(
            address=address,
            prefixLength=args.prefix_length,
            description=args.description,
            networkTier=network_tier,
            ipVersion=ip_version,
            name=address_ref.Name(),
            addressType=(messages.Address.AddressTypeValueValuesEnum.INTERNAL
                         if subnetwork_url or network_url else None),
            purpose=purpose,
            subnetwork=subnetwork_url,
            network=network_url)
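
This example layers two flag rules on top of the purpose defaults: --network and --subnet are mutually exclusive, and --purpose needs at least one of them. Reduced to plain predicates, the checks look like this (hypothetical free function, not SDK code):

def validate_address_flags(network=None, subnet=None, purpose=None):
    if network and subnet:
        raise ValueError('--network and --subnet are mutually exclusive.')
    if purpose and not (network or subnet):
        raise ValueError('At least one of --network or --subnet is required '
                         'if --purpose is specified.')

validate_address_flags(network='default', purpose='VPC_PEERING')  # OK
try:
    validate_address_flags(purpose='GCE_ENDPOINT')
except ValueError as e:
    print(e)  # At least one of --network or --subnet is required ...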
Example #6
def _ValidateUtilizationTargetHasType(args):
  if (args.IsSpecified('stackdriver_metric_utilization_target') and
      not args.IsSpecified('stackdriver_metric_utilization_target_type')):
    raise calliope_exceptions.RequiredArgumentException(
        '--stackdriver-metric-utilization-target-type',
        'Required with [--stackdriver-metric-utilization-target].')
Example #7
def ValidateInstanceLocation(args, enable_secondary_zone):
    if enable_secondary_zone:
        if args.IsSpecified('secondary_zone') and not args.IsSpecified('zone'):
            raise exceptions.RequiredArgumentException(
                '--zone', '`--zone` is required if --secondary-zone is used '
                'while creating an instance.')
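
Examples #6 and #7 share the same dependent-flag shape: if flag A was specified, flag B becomes required. Factored into a generic helper (illustrative only; calliope has no utility by this name):

import argparse

def require_with(args, dependent, required):
    """Raise if `dependent` was given but `required` was not."""
    if (getattr(args, dependent, None) is not None
            and getattr(args, required, None) is None):
        raise ValueError('--{} is required with --{}.'.format(
            required.replace('_', '-'), dependent.replace('_', '-')))

require_with(argparse.Namespace(secondary_zone='us-east1-b', zone='us-east1-a'),
             'secondary_zone', 'zone')  # OK
try:
    require_with(argparse.Namespace(secondary_zone='us-east1-b', zone=None),
                 'secondary_zone', 'zone')
except ValueError as e:
    print(e)  # --zone is required with --secondary-zone.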
Example #8
def VerifyParentForAnalyzeIamPolicy(organization, attribute='root cloud asset'):
  """Verify the parent name."""
  if organization is None:
    raise gcloud_exceptions.RequiredArgumentException(
        '--organization',
        'Should specify the organization for {0}.'.format(attribute))
Example #9
def _Run(args,
         track=None,
         enable_runtime=True,
         enable_max_instances=False,
         enable_connected_vpc=False,
         enable_vpc_connector=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))

    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event type
        # raise error
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
            if args.runtime in ['nodejs',
                                'nodejs6']:  # nodejs is nodejs6 alias
                log.warning(
                    'The Node.js 6 runtime is deprecated on Cloud Functions. '
                    'Please migrate to Node.js 8 (--runtime=nodejs8) or Node.js 10 '
                    '(--runtime=nodejs10). '
                    'See https://cloud.google.com/functions/docs/migrating/nodejs-runtimes'
                )
        elif is_new_function:
            raise exceptions.RequiredArgumentException(
                'runtime', 'Flag `--runtime` is required for new functions.')
    if enable_max_instances:
        if (args.IsSpecified('max_instances')
                or args.IsSpecified('clear_max_instances')):
            max_instances = 0 if args.clear_max_instances else args.max_instances
            function.maxInstances = max_instances
            updated_fields.append('maxInstances')
    if enable_connected_vpc:
        if args.connected_vpc:
            function.network = args.connected_vpc
            updated_fields.append('network')
        if args.IsSpecified('vpc_connector'):
            function.vpcConnector = args.vpc_connector
            updated_fields.append('vpcConnector')
    if enable_vpc_connector:
        if args.IsSpecified('vpc_connector'):
            function.vpcConnector = args.vpc_connector
            updated_fields.append('vpcConnector')

    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only Add source to function if its explicitly provided, a new function,
    # using a stage bucket or deploy of an existing function that previously
    # used local source.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function, function_ref,
                                               args.source, args.stage_bucket))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    if is_new_function:
        return api_util.CreateFunction(function,
                                       function_ref.Parent().RelativeName())
    if updated_fields:
        return api_util.PatchFunction(function, updated_fields)
    log.status.Print('Nothing to update.')
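
The deploy flow above threads an updated_fields list through every mutation so that, for an existing function, only the touched fields end up in the patch mask. A minimal sketch of that accumulate-then-patch idea over plain dicts:

def build_patch(flags, function):
    """Apply set flags to `function` and record the field mask (sketch)."""
    updated_fields = []
    if flags.get('timeout') is not None:
        function['timeout'] = '{}s'.format(flags['timeout'])
        updated_fields.append('timeout')
    if flags.get('memory') is not None:
        function['availableMemoryMb'] = flags['memory']
        updated_fields.append('availableMemoryMb')
    return function, updated_fields

fn, mask = build_patch({'timeout': 60}, {'name': 'projects/p/functions/f'})
assert mask == ['timeout']  # only touched fields go into the patch request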
Example #10
    def _CreateGlobalRequests(self, holder, args, backend_services_ref):
        """Returns a global backend service create request."""

        if args.load_balancing_scheme == 'INTERNAL':
            raise exceptions.RequiredArgumentException(
                '--region',
                'Must specify --region for internal load balancer.')
        if (self._support_failover
                and (args.IsSpecified('connection_drain_on_failover')
                     or args.IsSpecified('drop_traffic_if_unhealthy')
                     or args.IsSpecified('failover_ratio'))):
            raise exceptions.InvalidArgumentException(
                '--global',
                'cannot specify failover policies for global backend services.'
            )
        backend_service = self._CreateBackendService(holder, args,
                                                     backend_services_ref)

        client = holder.client
        if args.connection_draining_timeout is not None:
            backend_service.connectionDraining = (
                client.messages.ConnectionDraining(
                    drainingTimeoutSec=args.connection_draining_timeout))

        if args.enable_cdn is not None:
            backend_service.enableCDN = args.enable_cdn

        backend_services_utils.ApplyCdnPolicyArgs(
            client,
            args,
            backend_service,
            is_update=False,
            apply_signed_url_cache_max_age=True,
            support_flexible_cache_step_one=self._support_flexible_cache_step_one,
            support_flexible_cache_step_two=self._support_flexible_cache_step_two,
            support_negative_cache=self._support_negative_cache,
            support_request_coalescing=self._support_request_coalescing)

        if args.session_affinity is not None:
            backend_service.sessionAffinity = (
                client.messages.BackendService.SessionAffinityValueValuesEnum(
                    args.session_affinity))
        if args.affinity_cookie_ttl is not None:
            backend_service.affinityCookieTtlSec = args.affinity_cookie_ttl
        if args.custom_request_header is not None:
            backend_service.customRequestHeaders = args.custom_request_header
        if self._support_flexible_cache_step_one:
            if args.custom_response_header is not None:
                backend_service.customResponseHeaders = args.custom_response_header
            if (backend_service.cdnPolicy is not None
                    and backend_service.cdnPolicy.cacheMode
                    and args.enable_cdn is not False):  # pylint: disable=g-bool-id-comparison
                backend_service.enableCDN = True

        self._ApplyIapArgs(client.messages, args.iap, backend_service)

        if args.load_balancing_scheme != 'EXTERNAL':
            backend_service.loadBalancingScheme = (
                client.messages.BackendService.
                LoadBalancingSchemeValueValuesEnum(args.load_balancing_scheme))

        backend_services_utils.ApplyLogConfigArgs(
            client.messages,
            args,
            backend_service,
            support_logging=self._support_logging)

        request = client.messages.ComputeBackendServicesInsertRequest(
            backendService=backend_service,
            project=backend_services_ref.project)

        return [(client.apitools_client.backendServices, 'Insert', request)]
Example #11
  def Run(self, args):
    """Run the update command."""
    identifiers = args.CONCEPTS.product.Parse().AsDict()

    product = apigee.ProductsClient.Describe(identifiers)

    ## Quota related.
    if args.quota is not None:
      product["quota"] = "%d" % args.quota
    if args.quota_interval is not None:
      product["quotaInterval"] = "%d" % args.quota_interval
    if args.quota_unit:
      product["quotaTimeUnit"] = args.quota_unit
    # Check that AFTER these updates, all three quota settings are present.
    quota_field_names = ["quota", "quotaInterval", "quotaTimeUnit"]
    quota_fields_exist = [field in product for field in quota_field_names]
    if any(quota_fields_exist) and not all(quota_fields_exist):
      if not args.quota_interval:
        missing_arg = "--quota-interval"
      elif not args.quota_unit:
        missing_arg = "--quota-unit"
      else:
        missing_arg = "--quota"
      raise exceptions.RequiredArgumentException(
          missing_arg,
          "Products with quotas must specify all three quota settings.")
    if args.clear_quota:
      del product["quota"]
      del product["quotaInterval"]
      del product["quotaTimeUnit"]
      args.clear_quota = None

    ## Attribute list related
    attribute_list = product["attributes"] if "attributes" in product else []
    attribute_list = [(item["name"], item["value"]) for item in attribute_list]
    attributes = collections.OrderedDict(attribute_list)

    if args.add_attribute is not None:
      add_attributes = args.add_attribute
      if ("access" in add_attributes and
          add_attributes["access"] not in ["public", "private", "internal"]):
        raise exceptions.BadArgumentException(
            "--add-attribute",
            "The `access` attribute must be set to one of \"public\", "
            "\"private\", or \"internal\".")
      attributes.update(add_attributes)
      args.add_attribute = None

    if args.remove_attribute is not None:
      for sublist in args.remove_attribute:
        if "access" in sublist and not args.access:
          raise exceptions.BadArgumentException(
              "--remove-attribute", "The `access` attribute is required.")
        for item in sublist:
          if item in attributes:
            del attributes[item]
      args.remove_attribute = None

    if args.clear_attributes:
      # It doesn't make sense that the server would return an API product
      # without access rules, but the API physically allows it, and an
      # unexpected response mustn't cause gcloud to crash.
      access = attributes["access"] if "access" in attributes else None
      attributes = {"access": access} if access else {}
      args.clear_attributes = None

    if args.access:
      attributes["access"] = args.access

    attribute_dict = lambda item: {"name": item[0], "value": item[1]}
    attributes_dicts = [attribute_dict(item) for item in attributes.items()]
    product["attributes"] = attributes_dicts

    # Python lint rules don't allow direct comparison with the empty string;
    # detect it by process of elimination (not truthy, not None) instead.
    if not args.set_displayName and args.set_displayName is not None:
      raise exceptions.BadArgumentException(
          "--display-name", "An API product's display name cannot be blank.")

    # The rest of the fields can be filled in directly from arguments.
    emptied_lists = set()
    arg_dict = vars(args)
    for key, value in arg_dict.items():
      if value is None or "_" not in key:
        continue
      label, field = key.split("_", 1)
      if label == "add":
        if field not in product:
          product[field] = []
        for sublist in value:
          product[field] += sublist
      elif label == "remove" and field in product:
        for sublist in value:
          for item in sublist:
            if item in product[field]:
              product[field].remove(item)
              if not product[field]:
                # This removed the last item from `field`. None it out so it's
                # not sent to the server in the update call.
                product[field] = None
                emptied_lists.add(field)
      elif label == "set":
        product[field] = value
      elif label == "clear" and value and field in product:
        del product[field]

    # For API proxies, resources, and environments, don't allow the user to
    # empty the list without explicitly stating that they intend to include ALL
    # proxies/resources/environments. Otherwise the user may get results they
    # didn't expect (removing a proxy -> the number of proxies exposed goes up).
    if "proxies" in emptied_lists:
      # User removed the last API proxy but didn't say to clear proxies. The
      # result may not be what the user expected.
      raise exceptions.BadArgumentException(
          "--remove-api",
          "An API product must include at least one API proxy, or else all "
          "API proxies will implicitly be included. If this was intended, use "
          "[--all-apis] instead of removing APIs individually.")

    if "apiResources" in emptied_lists:
      raise exceptions.BadArgumentException(
          "--remove-resource",
          "An API product must include at least one API resource, or else all "
          "resources will implicitly be included. If this was intended, use "
          "[--all-resources] instead of removing resources individually.")

    if "environments" in emptied_lists:
      raise exceptions.BadArgumentException(
          "--remove-environment",
          "An API product must include at least one environment, or else all "
          "environments will implicitly be included. If this was intended, use "
          "[--all-environments] instead of removing environments individually.")

    # Clean up the product structure; remove any irrelevant fields that might
    # have been populated by global gcloud args, and populate any empty fields
    # with None.
    product = {
        key: (product[key] if key in product else None)
        for key in apigee.ProductsInfo._fields
    }

    product["name"] = identifiers["apiproductsId"]

    updated_product = apigee.ProductsInfo(**product)
    return apigee.ProductsClient.Update(identifiers, updated_product)
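
The quota check in this example enforces an all-or-none invariant across three related fields and names the specific missing flag in the error. The any()/all() idiom in isolation:

def check_quota_fields(product):
    fields = ['quota', 'quotaInterval', 'quotaTimeUnit']
    present = [f in product for f in fields]
    if any(present) and not all(present):
        missing = [f for f, ok in zip(fields, present) if not ok]
        raise ValueError('Products with quotas must specify all three quota '
                         'settings; missing: {}'.format(', '.join(missing)))

check_quota_fields({})  # OK: quotas omitted entirely
check_quota_fields({'quota': '10', 'quotaInterval': '1',
                    'quotaTimeUnit': 'minute'})  # OK: all three set
try:
    check_quota_fields({'quota': '10'})
except ValueError as e:
    print(e)  # ... missing: quotaInterval, quotaTimeUnit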
Example #12
def RunBaseCreateCommand(args, release_track):
    """Creates a new Cloud SQL instance.

  Args:
    args: argparse.Namespace, The arguments that this command was invoked with.
    release_track: base.ReleaseTrack, the release track that this was run under.

  Returns:
    A dict object representing the operations resource describing the create
    operation if the create was successful.
  Raises:
    HttpException: A http error response was received while executing api
        request.
    ArgumentError: An argument supplied by the user was incorrect, such as
      specifying an invalid CMEK configuration or attempting to create a V1
      instance.
    RequiredArgumentException: A required argument was not supplied by the user,
      such as omitting --root-password on a SQL Server instance.
  """
    client = common_api_util.SqlClient(common_api_util.API_VERSION_DEFAULT)
    sql_client = client.sql_client
    sql_messages = client.sql_messages

    validate.ValidateInstanceName(args.instance)
    instance_ref = client.resource_parser.Parse(
        args.instance,
        params={'project': properties.VALUES.core.project.GetOrFail},
        collection='sql.instances')

    # Get the region, tier, and database version from the master if these fields
    # are not specified.
    # TODO(b/64266672): Remove once API does not require these fields.
    if args.IsSpecified('master_instance_name'):
        master_instance_ref = client.resource_parser.Parse(
            args.master_instance_name,
            params={'project': properties.VALUES.core.project.GetOrFail},
            collection='sql.instances')
        try:
            master_instance_resource = sql_client.instances.Get(
                sql_messages.SqlInstancesGetRequest(
                    project=instance_ref.project,
                    instance=master_instance_ref.instance))
        except apitools_exceptions.HttpError as error:
            # TODO(b/64292220): Remove once API gives helpful error message.
            log.debug('operation : %s', six.text_type(master_instance_ref))
            exc = exceptions.HttpException(error)
            if resource_property.Get(
                    exc.payload.content,
                    resource_lex.ParseKey('error.errors[0].reason'),
                    None) == 'notAuthorized':
                msg = (
                    'You are either not authorized to access the master instance or '
                    'it does not exist.')
                raise exceptions.HttpException(msg)
            raise
        if not args.IsSpecified('region'):
            args.region = master_instance_resource.region
        if not args.IsSpecified('database_version'):
            args.database_version = master_instance_resource.databaseVersion.name
        if not args.IsSpecified('tier') and not (
                args.IsSpecified('cpu') or args.IsSpecified('memory')
        ) and master_instance_resource.settings:
            args.tier = master_instance_resource.settings.tier

        # Validate master/replica CMEK configurations.
        if master_instance_resource.diskEncryptionConfiguration:
            if args.region == master_instance_resource.region:
                # Warn user that same-region replicas inherit their master's CMEK
                # configuration.
                command_util.ShowCmekWarning('replica', 'the master instance')
            elif not args.IsSpecified('disk_encryption_key'):
                # Raise error that cross-region replicas require their own CMEK key if
                # the master is CMEK.
                raise exceptions.RequiredArgumentException(
                    '--disk-encryption-key',
                    '`--disk-encryption-key` is required when creating a cross-region '
                    'replica of an instance with customer-managed encryption.')
            else:
                command_util.ShowCmekWarning('replica')
        elif args.IsSpecified('disk_encryption_key'):
            # Raise error that cross-region replicas cannot be CMEK encrypted if their
            # master is not.
            raise sql_exceptions.ArgumentError(
                '`--disk-encryption-key` cannot be specified when creating a replica '
                'of an instance without customer-managed encryption.')

    # --root-password is required when creating SQL Server instances
    if (args.IsSpecified('database_version')
            and args.database_version.startswith('SQLSERVER')
            and not args.IsSpecified('root_password')):
        raise exceptions.RequiredArgumentException(
            '--root-password',
            '`--root-password` is required when creating SQL Server instances.'
        )

    if not args.backup:
        if args.IsSpecified('enable_bin_log'):
            raise sql_exceptions.ArgumentError(
                '`--enable-bin-log` cannot be specified when --no-backup is '
                'specified')
        elif args.IsSpecified('enable_point_in_time_recovery'):
            raise sql_exceptions.ArgumentError(
                '`--enable-point-in-time-recovery` cannot be specified when '
                '--no-backup is specified')

    instance_resource = (
        command_util.InstancesV1Beta4.ConstructCreateInstanceFromArgs(
            sql_messages,
            args,
            instance_ref=instance_ref,
            release_track=release_track))

    # TODO(b/122660263): Remove when V1 instances are no longer supported.
    # V1 instances are deprecated.
    # Note that the exception type is intentionally vague because the user may not
    # have directly supplied the offending argument.  For example, creating a read
    # replica defaults its tier to that of its master.
    if api_util.IsInstanceV1(sql_messages, instance_resource):
        raise sql_exceptions.ArgumentError(
            'First Generation instances can no longer be created.')

    operation_ref = None
    try:
        result_operation = sql_client.instances.Insert(instance_resource)

        operation_ref = client.resource_parser.Create(
            'sql.operations',
            operation=result_operation.name,
            project=instance_ref.project)

        if args.async_:
            if not args.IsSpecified('format'):
                args.format = 'default'
            return sql_client.operations.Get(
                sql_messages.SqlOperationsGetRequest(
                    project=operation_ref.project,
                    operation=operation_ref.operation))

        operations.OperationsV1Beta4.WaitForOperation(
            sql_client,
            operation_ref,
            'Creating Cloud SQL instance',
            # TODO(b/138403566): Remove the override once we improve creation times.
            max_wait_seconds=680)

        log.CreatedResource(instance_ref)

        new_resource = sql_client.instances.Get(
            sql_messages.SqlInstancesGetRequest(
                project=instance_ref.project, instance=instance_ref.instance))
        return new_resource
    except apitools_exceptions.HttpError as error:
        log.debug('operation : %s', six.text_type(operation_ref))
        exc = exceptions.HttpException(error)
        if resource_property.Get(
                exc.payload.content,
                resource_lex.ParseKey('error.errors[0].reason'),
                None) == 'errorMaxInstancePerLabel':
            msg = resource_property.Get(exc.payload.content,
                                        resource_lex.ParseKey('error.message'),
                                        None)
            raise exceptions.HttpException(msg)
        raise
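
Both except blocks above dig a machine-readable `reason` out of the error payload before deciding whether to rewrap the exception or re-raise it. The dispatch pattern over a plain dict payload (illustrative; the SDK walks the payload with resource_property and resource_lex):

def friendly_message(payload):
    """Map a structured API error to a friendlier message, or None."""
    try:
        reason = payload['error']['errors'][0]['reason']
    except (KeyError, IndexError, TypeError):
        return None
    if reason == 'notAuthorized':
        return ('You are either not authorized to access the master instance '
                'or it does not exist.')
    if reason == 'errorMaxInstancePerLabel':
        return payload['error'].get('message')
    return None  # unknown reason: caller should re-raise the original error

payload = {'error': {'errors': [{'reason': 'notAuthorized'}]}}
assert friendly_message(payload).startswith('You are either not authorized')
assert friendly_message({'error': {}}) is None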
Example #13
  def Run(self, args):
    # We explicitly want to allow --networks='' as a valid option and we need
    # to differentiate between that option and not passing --networks at all.
    if args.visibility == 'public' and args.IsSpecified('networks'):
      raise exceptions.InvalidArgumentException(
          '--networks',
          'If --visibility is set to public (default), setting networks is '
          'not allowed.')
    if args.visibility == 'private' and args.networks is None:
      raise exceptions.RequiredArgumentException('--networks', ("""
          If --visibility is set to private, a list of networks must be
          provided.
          NOTE: You can provide an empty value ("") for private zones that
          have NO network binding.
          """))

    dns = util.GetApiClient('v1')
    messages = apis.GetMessagesModule('dns', 'v1')

    registry = util.GetRegistry('v1')

    zone_ref = registry.Parse(
        args.dns_zone,
        params={
            'project': properties.VALUES.core.project.GetOrFail,
        },
        collection='dns.managedZones')

    visibility = messages.ManagedZone.VisibilityValueValuesEnum(args.visibility)
    visibility_config = None
    if visibility == messages.ManagedZone.VisibilityValueValuesEnum.private:
      # Handle explicitly empty networks case (--networks='')
      networks = args.networks if args.networks != [''] else []

      def GetNetworkSelfLink(network):
        return registry.Parse(
            network,
            collection='compute.networks',
            params={
                'project': zone_ref.project
            }).SelfLink()

      network_urls = [GetNetworkSelfLink(n) for n in networks]
      network_configs = [
          messages.ManagedZonePrivateVisibilityConfigNetwork(networkUrl=nurl)
          for nurl in network_urls
      ]
      visibility_config = messages.ManagedZonePrivateVisibilityConfig(
          networks=network_configs)

    if args.forwarding_targets:
      forward_config = command_util.ParseManagedZoneForwardingConfig(
          args.forwarding_targets, messages)
    else:
      forward_config = None

    dnssec_config = _MakeDnssecConfig(args, messages)

    labels = labels_util.ParseCreateArgs(args, messages.ManagedZone.LabelsValue)

    peering_config = None
    if args.target_project and args.target_network:
      peering_network = 'https://www.googleapis.com/compute/v1/projects/{}/global/networks/{}'.format(
          args.target_project, args.target_network)
      peering_config = messages.ManagedZonePeeringConfig()
      peering_config.targetNetwork = messages.ManagedZonePeeringConfigTargetNetwork(
          networkUrl=peering_network)

    zone = messages.ManagedZone(
        name=zone_ref.managedZone,
        dnsName=util.AppendTrailingDot(args.dns_name),
        description=args.description,
        dnssecConfig=dnssec_config,
        labels=labels,
        visibility=visibility,
        forwardingConfig=forward_config,
        privateVisibilityConfig=visibility_config,
        peeringConfig=peering_config)

    result = dns.managedZones.Create(
        messages.DnsManagedZonesCreateRequest(managedZone=zone,
                                              project=zone_ref.project))
    log.CreatedResource(zone_ref)
    return [result]
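
Like the --data-file check in Example #2, this command must distinguish a flag passed with an empty value from a flag that was omitted. With plain argparse, that distinction falls out of the default being None:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--networks')  # default is None when the flag is omitted

omitted = parser.parse_args([])
explicit_empty = parser.parse_args(['--networks', ''])

assert omitted.networks is None       # not passed: private zones must set it
assert explicit_empty.networks == ''  # passed but empty: zone with no binding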
Example #14
    def ConstructCreateInstanceFromArgs(cls,
                                        sql_messages,
                                        args,
                                        original=None,
                                        instance_ref=None,
                                        release_track=DEFAULT_RELEASE_TRACK):
        """Constructs Instance for create request from base instance and args."""
        ShowZoneDeprecationWarnings(args)
        instance_resource = cls._ConstructBaseInstanceFromArgs(
            sql_messages, args, original, instance_ref)

        instance_resource.region = reducers.Region(args.region, _GetZone(args))
        instance_resource.databaseVersion = _ParseDatabaseVersion(
            sql_messages, args.database_version)
        instance_resource.masterInstanceName = args.master_instance_name
        instance_resource.rootPassword = args.root_password

        # BETA: Set the host port and return early if external master instance.
        if _IsBetaOrNewer(release_track) and args.IsSpecified(
                'source_ip_address'):
            on_premises_configuration = reducers.OnPremisesConfiguration(
                sql_messages, args.source_ip_address, args.source_port)
            instance_resource.onPremisesConfiguration = on_premises_configuration
            return instance_resource

        instance_resource.settings = cls._ConstructCreateSettingsFromArgs(
            sql_messages, args, original, release_track)

        if args.master_instance_name:
            replication = sql_messages.Settings.ReplicationTypeValueValuesEnum.ASYNCHRONOUS
            if args.replica_type == 'FAILOVER':
                instance_resource.replicaConfiguration = (
                    sql_messages.ReplicaConfiguration(
                        kind='sql#demoteMasterMysqlReplicaConfiguration',
                        failoverTarget=True))
        else:
            replication = sql_messages.Settings.ReplicationTypeValueValuesEnum.SYNCHRONOUS
        if not args.replication:
            instance_resource.settings.replicationType = replication

        if args.failover_replica_name:
            instance_resource.failoverReplica = (
                sql_messages.DatabaseInstance.FailoverReplicaValue(
                    name=args.failover_replica_name))

        # BETA: Config for creating a replica of an external master instance.
        if _IsBetaOrNewer(release_track) and args.IsSpecified(
                'master_username'):
            # Ensure that the master instance name is specified.
            if not args.IsSpecified('master_instance_name'):
                raise exceptions.RequiredArgumentException(
                    '--master-instance-name',
                    'To create a read replica of an external '
                    'master instance, [--master-instance-name] must be specified'
                )

            # TODO(b/78648703): Remove when mutex required status is fixed.
            # Ensure that the master replication user password is specified.
            if not (args.IsSpecified('master_password')
                    or args.IsSpecified('prompt_for_master_password')):
                raise exceptions.RequiredArgumentException(
                    '--master-password',
                    'To create a read replica of an external '
                    'master instance, [--master-password] or '
                    '[--prompt-for-master-password] must be specified')

            # Get password if not specified on command line.
            if args.prompt_for_master_password:
                args.master_password = getpass.getpass(
                    'Master Instance Password: ')

            # Reconstructed: the snippet was masked at this point. Per the
            # surrounding SDK code, it builds the external-master replica
            # configuration from the credentials gathered above.
            instance_resource.replicaConfiguration = reducers.ReplicaConfiguration(
                sql_messages, args.master_username, args.master_password,
                args.master_dump_file_path, args.master_ca_certificate_path,
                args.client_certificate_path, args.client_key_path)

        # Attach CMEK disk encryption when a key is supplied.
        key_name = _GetAndValidateCmekKeyName(args,
                                              not args.master_instance_name)
        if key_name:
            config = sql_messages.DiskEncryptionConfiguration(
                kind='sql#diskEncryptionConfiguration', kmsKeyName=key_name)
            instance_resource.diskEncryptionConfiguration = config

        return instance_resource
Example #15
    def Run(self, args):
        if args.enable and args.disable:
            raise exceptions.Error(
                '--enable and --disable cannot both be set.')

        # If neither flag is set, disable workload certificate management for the
        # memberships.
        enable = args.enable

        all_memberships = base.ListMemberships()
        if not all_memberships:
            raise exceptions.Error('No memberships available in the fleet.')
        memberships = []

        if args.all_memberships:
            memberships = all_memberships
        elif args.memberships:
            memberships = args.memberships.split(',')

        if not memberships:  # The user didn't provide --memberships.
            if console_io.CanPrompt():
                index = console_io.PromptChoice(
                    options=all_memberships,
                    message='Please specify a membership:\n',
                    cancel_option=True)
                memberships.append(all_memberships[index])
            else:
                raise calliope_exceptions.RequiredArgumentException(
                    '--memberships',
                    ('Cannot prompt a console for membership. Membership is required. '
                     'Please specify `--memberships` to select at least one membership.'
                     ))

        for membership in memberships:
            if membership not in all_memberships:
                raise exceptions.Error(
                    'Membership {} does not exist in the fleet.'.format(
                        membership))

        # All memberships in memberships are valid.
        f = self.GetFeature()
        membership_specs = {}
        for membership_str in memberships:
            membership = self.MembershipResourceName(membership_str)
            patch = self.messages.MembershipFeatureSpec()

            # Use current spec if it exists.
            for name, spec in self.hubclient.ToPyDict(
                    f.membershipSpecs).items():
                if name == membership and spec:
                    patch = spec
                    break

            if not patch.workloadcertificate:
                patch.workloadcertificate = self.messages.MembershipSpec()

            if enable:
                patch.workloadcertificate.certificateManagement = (
                    self.messages.MembershipSpec
                    .CertificateManagementValueValuesEnum.ENABLED)
            else:
                patch.workloadcertificate.certificateManagement = (
                    self.messages.MembershipSpec
                    .CertificateManagementValueValuesEnum.DISABLED)

            membership_specs[membership] = patch

        f = self.messages.Feature(
            membershipSpecs=self.hubclient.ToMembershipSpecs(membership_specs))
        self.Update(['membershipSpecs'], f)
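
The membership selection above prompts interactively only when a console is available and otherwise fails with RequiredArgumentException. A sketch of that prompt-or-fail fallback, with the console mocked by a boolean:

def pick_memberships(flag_value, all_memberships, can_prompt):
    if flag_value:
        chosen = flag_value.split(',')
    elif can_prompt:
        chosen = [all_memberships[0]]  # stand-in for console_io.PromptChoice
    else:
        raise ValueError('Cannot prompt for membership; '
                         'please specify --memberships.')
    for m in chosen:
        if m not in all_memberships:
            raise ValueError(
                'Membership {} does not exist in the fleet.'.format(m))
    return chosen

assert pick_memberships('a,b', ['a', 'b', 'c'], can_prompt=False) == ['a', 'b']
assert pick_memberships(None, ['a', 'b'], can_prompt=True) == ['a']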
Example #16
def CreateBuildConfig(tag, no_cache, messages, substitutions, arg_config,
                      is_specified_source, no_source, source,
                      gcs_source_staging_dir, ignore_file, arg_gcs_log_dir,
                      arg_machine_type, arg_disk_size):
    """Returns a build config."""
    # Get the build timeout.
    build_timeout = properties.VALUES.builds.timeout.Get()
    if build_timeout is not None:
        try:
            # A bare number is interpreted as seconds.
            build_timeout_secs = int(build_timeout)
        except ValueError:
            build_timeout_duration = times.ParseDuration(build_timeout)
            build_timeout_secs = int(build_timeout_duration.total_seconds)
        timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
        timeout_str = None

    if tag is not None:
        if (properties.VALUES.builds.check_tag.GetBool()
                and 'gcr.io/' not in tag):
            raise c_exceptions.InvalidArgumentException(
                '--tag',
                'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
        if properties.VALUES.builds.use_kaniko.GetBool():
            if no_cache:
                ttl = '0h'
            else:
                ttl = '{}h'.format(
                    properties.VALUES.builds.kaniko_cache_ttl.Get())
            build_config = messages.Build(
                steps=[
                    messages.BuildStep(
                        name=properties.VALUES.builds.kaniko_image.Get(),
                        args=[
                            '--destination',
                            tag,
                            '--cache',
                            '--cache-ttl',
                            ttl,
                            '--cache-dir',
                            '',
                        ],
                    ),
                ],
                timeout=timeout_str,
                substitutions=cloudbuild_util.EncodeSubstitutions(
                    substitutions, messages))
        else:
            if no_cache:
                raise c_exceptions.InvalidArgumentException(
                    'no-cache',
                    'Cannot specify --no-cache if builds/use_kaniko property is '
                    'False')
            build_config = messages.Build(
                images=[tag],
                steps=[
                    messages.BuildStep(
                        name='gcr.io/cloud-builders/docker',
                        args=[
                            'build', '--network', 'cloudbuild', '--no-cache',
                            '-t', tag, '.'
                        ],
                    ),
                ],
                timeout=timeout_str,
                substitutions=cloudbuild_util.EncodeSubstitutions(
                    substitutions, messages))
    elif arg_config is not None:
        if no_cache:
            raise c_exceptions.ConflictingArgumentsException(
                '--config', '--no-cache')
        if not arg_config:
            raise c_exceptions.InvalidArgumentException(
                '--config', 'Config file path must not be empty.')
        build_config = config.LoadCloudbuildConfigFromPath(
            arg_config, messages, params=substitutions)
    else:
        raise c_exceptions.OneOfArgumentsRequiredException(
            ['--tag', '--config'],
            'Requires either a docker tag or a config file.')

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
        build_config.timeout = timeout_str

    # Set the source for the build config.
    default_gcs_source = False
    default_bucket_name = None
    if gcs_source_staging_dir is None:
        default_gcs_source = True
        default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
        gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
    gcs_client = storage_api.StorageClient()

    # --no-source overrides the default --source.
    if not is_specified_source and no_source:
        source = None

    gcs_source_staging = None
    if source:
        suffix = '.tgz'
        if source.startswith('gs://') or os.path.isfile(source):
            _, suffix = os.path.splitext(source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}-{uuid}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            uuid=uuid.uuid4().hex,
            suffix=suffix,
        )
        gcs_source_staging_dir = resources.REGISTRY.Parse(
            gcs_source_staging_dir, collection='storage.objects')

        # We create the bucket (if it does not exist) first. If we do an existence
        # check and then create the bucket ourselves, it would be possible for an
        # attacker to get lucky and beat us to creating the bucket. Block on this
        # creation to avoid this race condition.
        gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

        # If no bucket is specified (for the source `default_gcs_source`), check
        # that the default bucket is also owned by the project (b/33046325).
        if default_gcs_source and not staging_bucket_util.BucketIsInProject(
                gcs_client, default_bucket_name):
            raise c_exceptions.RequiredArgumentException(
                'gcs-source-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-source-staging-dir.'.format(default_bucket_name))

        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object
        gcs_source_staging = resources.REGISTRY.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if source.startswith('gs://'):
            gcs_source = resources.REGISTRY.Parse(source,
                                                  collection='storage.objects')
            staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                   gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=source))
            if os.path.isdir(source):
                source_snapshot = snapshot.Snapshot(source,
                                                    ignore_file=ignore_file)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                log.status.Print(
                    'Creating temporary tarball archive of {num_files} file(s)'
                    ' totalling {size} before compression.'.format(
                        num_files=len(source_snapshot.files), size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client, gcs_source_staging, ignore_file=ignore_file)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(source):
                unused_root, ext = os.path.splitext(source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {exts}'.format(
                            src=source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
                log.status.Print('Uploading local file [{src}] to '
                                 '[gs://{bucket}/{object}].'.format(
                                     src=source,
                                     bucket=gcs_source_staging.bucket,
                                     object=gcs_source_staging.object,
                                 ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    source, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
    else:
        # No source
        if not no_source:
            raise c_exceptions.InvalidArgumentException(
                '--no-source', 'To omit source, use the --no-source flag.')

    # Set a Google Cloud Storage directory to hold build logs.
    if arg_gcs_log_dir:
        gcs_log_dir = resources.REGISTRY.Parse(arg_gcs_log_dir,
                                               collection='storage.objects')
        build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                   gcs_log_dir.object)

    # Set the machine type used to run the build.
    if arg_machine_type is not None:
        machine_type = flags.GetMachineType(arg_machine_type)
        if not build_config.options:
            build_config.options = messages.BuildOptions()
        build_config.options.machineType = machine_type

    # Set the disk size used to run the build.
    if arg_disk_size is not None:
        disk_size = compute_utils.BytesToGb(arg_disk_size)
        if not build_config.options:
            build_config.options = messages.BuildOptions()
        build_config.options.diskSizeGb = int(disk_size)

    return build_config
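
The timeout handling at the top of CreateBuildConfig accepts either a bare number (seconds) or a duration string and normalizes both to the 'Ns' form Cloud Build expects. A self-contained sketch of that fallback, with a simple 'NhNmNs' parser standing in for the internal times.ParseDuration (an assumption; the real helper accepts full ISO 8601 durations):

import re

def normalize_timeout(value):
    """Returns a Cloud Build timeout string such as '600s', or None."""
    if value is None:
        return None
    try:
        return '{}s'.format(int(value))  # a bare number is seconds
    except ValueError:
        pass
    match = re.fullmatch(r'(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s)?', value)
    if not match or not any(match.groups()):
        raise ValueError('unrecognized duration: {}'.format(value))
    hours, minutes, seconds = (int(g or 0) for g in match.groups())
    return '{}s'.format(hours * 3600 + minutes * 60 + seconds)

# normalize_timeout('600') == normalize_timeout('10m') == '600s'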
Example #17
  def _ConstructBaseSettingsFromArgs(cls,
                                     sql_messages,
                                     args,
                                     instance=None,
                                     release_track=DEFAULT_RELEASE_TRACK):
    """Constructs instance settings from the command line arguments.

    Args:
      sql_messages: module, The messages module that should be used.
      args: argparse.Namespace, The arguments that this command was invoked
          with.
      instance: sql_messages.DatabaseInstance, The original instance, for
          settings that depend on the previous state.
      release_track: base.ReleaseTrack, the release track that this was run
          under.

    Returns:
      A settings object representing the instance settings.

    Raises:
      ToolException: An error other than http error occurred while executing the
          command.
    """
    settings = sql_messages.Settings(
        tier=reducers.MachineType(instance, args.tier, args.memory, args.cpu),
        pricingPlan=args.pricing_plan,
        replicationType=args.replication,
        activationPolicy=_ParseActivationPolicy(args.activation_policy))

    if args.authorized_gae_apps:
      settings.authorizedGaeApplications = args.authorized_gae_apps

    if any([
        args.assign_ip is not None, args.require_ssl is not None,
        args.authorized_networks
    ]):
      settings.ipConfiguration = sql_messages.IpConfiguration()
      if args.assign_ip is not None:
        cls.SetIpConfigurationEnabled(settings, args.assign_ip)

      if args.authorized_networks:
        cls.SetAuthorizedNetworks(settings, args.authorized_networks,
                                  sql_messages.AclEntry)

      if args.require_ssl is not None:
        settings.ipConfiguration.requireSsl = args.require_ssl

    if any([args.follow_gae_app, args.gce_zone]):
      settings.locationPreference = sql_messages.LocationPreference(
          followGaeApplication=args.follow_gae_app, zone=args.gce_zone)

    if args.storage_size:
      settings.dataDiskSizeGb = int(args.storage_size / constants.BYTES_TO_GB)

    if args.storage_auto_increase is not None:
      settings.storageAutoResize = args.storage_auto_increase

    if args.IsSpecified('availability_type'):
      settings.availabilityType = args.availability_type.upper()

    # BETA args.
    if release_track == base.ReleaseTrack.BETA:
      if args.IsSpecified('storage_auto_increase_limit'):
        # Resize limit should be settable if the original instance has resize
        # turned on, or if the instance to be created has resize flag.
        if (instance and instance.settings.storageAutoResize) or (
            args.storage_auto_increase):
          # If the limit is set to None, we want it to be set to 0. This is a
          # backend requirement.
          settings.storageAutoResizeLimit = (args.storage_auto_increase_limit or
                                             0)
        else:
          raise exceptions.RequiredArgumentException(
              '--storage-auto-increase', 'To set the storage capacity limit '
              'using [--storage-auto-increase-limit], '
              '[--storage-auto-increase] must be enabled.')

    return settings
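
The BETA-only block encodes one rule: a resize limit is only settable while auto-resize is (or is being) enabled, and the backend wants 0 rather than None to mean "no limit". An illustrative distillation, not the gcloud helper itself:

def resolve_auto_resize_limit(limit_specified, limit, resize_enabled):
    """Mirrors the guard above for a single create/patch request."""
    if not limit_specified:
        return None  # leave the field untouched
    if not resize_enabled:
        raise ValueError(
            '[--storage-auto-increase] must be enabled to set a limit.')
    return limit or 0  # backend requirement: 0 stands for "no limit"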
Example #18
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.
    """

        if args.pull_request_preview:
            if args.repo_type != 'github':
                raise c_exceptions.InvalidArgumentException(
                    '--repo-type',
                    "Repo type must be 'github' to configure pull request previewing."
                )
            if args.namespace:
                raise c_exceptions.InvalidArgumentException(
                    '--namespace',
                    'Namespace must not be provided to configure pull request '
                    'previewing. --namespace must only be provided when configuring '
                    'automated deployments with the --branch-pattern or --tag-pattern '
                    'flags.')
            if args.preview_expiry <= 0:
                raise c_exceptions.InvalidArgumentException(
                    '--preview-expiry', 'Preview expiry must be > 0.')

        # Determine github or csr
        github_repo_name = None
        github_repo_owner = None
        csr_repo_name = None

        if args.repo_type == 'github':
            if not args.repo_owner:
                raise c_exceptions.RequiredArgumentException(
                    '--repo-owner',
                    'Repo owner is required for --repo-type=github.')
            github_repo_name = args.repo_name
            github_repo_owner = args.repo_owner
            # We do not have to verify that this repo exists because the request to
            # create the BuildTrigger will fail with the appropriate message asking
            # the user to connect their repo, if the repo is not found.

        elif args.repo_type == 'csr':
            if args.repo_owner:
                raise c_exceptions.InvalidArgumentException(
                    '--repo-owner',
                    'Repo owner must not be provided for --repo-type=csr.')
            csr_repo_name = args.repo_name
            self._VerifyCSRRepoExists(csr_repo_name)

        elif args.repo_type == 'bitbucket_mirrored':
            if not args.repo_owner:
                raise c_exceptions.RequiredArgumentException(
                    '--repo-owner',
                    'Repo owner is required for --repo-type=bitbucket_mirrored.'
                )
            csr_repo_name = 'bitbucket_{}_{}'.format(args.repo_owner,
                                                     args.repo_name)
            self._VerifyBitbucketCSRRepoExists(csr_repo_name, args.repo_owner,
                                               args.repo_name)

        elif args.repo_type == 'github_mirrored':
            if not args.repo_owner:
                raise c_exceptions.RequiredArgumentException(
                    '--repo-owner',
                    'Repo owner is required for --repo-type=github_mirrored.')
            csr_repo_name = 'github_{}_{}'.format(args.repo_owner,
                                                  args.repo_name)
            self._VerifyGitHubCSRRepoExists(csr_repo_name, args.repo_owner,
                                            args.repo_name)

        self._VerifyClusterExists(args.cluster, args.location)

        # Determine app_name
        if args.app_name:
            app_name = args.app_name
        else:
            app_name = args.repo_name

        # Determine gcs_config_staging_dir_bucket, gcs_config_staging_dir_object
        if args.gcs_config_staging_dir is None:
            gcs_config_staging_dir_bucket = (
                staging_bucket_util.GetDefaultStagingBucket())
            gcs_config_staging_dir_object = 'deploy/config'
        else:
            try:
                gcs_config_staging_dir_ref = resources.REGISTRY.Parse(
                    args.gcs_config_staging_dir, collection='storage.objects')
                gcs_config_staging_dir_object = gcs_config_staging_dir_ref.object
            except resources.WrongResourceCollectionException:
                gcs_config_staging_dir_ref = resources.REGISTRY.Parse(
                    args.gcs_config_staging_dir, collection='storage.buckets')
                gcs_config_staging_dir_object = None
            gcs_config_staging_dir_bucket = gcs_config_staging_dir_ref.bucket

        gcs_client = storage_api.StorageClient()
        gcs_client.CreateBucketIfNotExists(gcs_config_staging_dir_bucket)

        # If we are using a default bucket check that it is owned by user project
        # (b/33046325)
        if (args.gcs_config_staging_dir is None
                and not staging_bucket_util.BucketIsInProject(
                    gcs_client, gcs_config_staging_dir_bucket)):
            raise c_exceptions.RequiredArgumentException(
                '--gcs-config-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-config-staging-dir.'.format(
                    gcs_config_staging_dir_bucket))

        if gcs_config_staging_dir_object:
            gcs_config_staging_path = '{}/{}'.format(
                gcs_config_staging_dir_bucket, gcs_config_staging_dir_object)
        else:
            gcs_config_staging_path = gcs_config_staging_dir_bucket

        if args.pull_request_preview:
            log.status.Print(
                'Setting up previewing {} on pull requests.\n'.format(
                    github_repo_name))
            self._ConfigurePRPreview(
                repo_owner=github_repo_owner,
                repo_name=github_repo_name,
                pull_request_pattern=args.pull_request_pattern,
                preview_expiry=args.preview_expiry,
                comment_control=args.comment_control,
                dockerfile_path=args.dockerfile,
                app_name=app_name,
                config_path=args.config,
                expose_port=args.expose,
                gcs_config_staging_path=gcs_config_staging_path,
                cluster=args.cluster,
                location=args.location)
        else:
            log.status.Print(
                'Setting up automated deployments for {}.\n'.format(
                    args.repo_name))
            self._ConfigureGitPushBuildTrigger(
                repo_type=args.repo_type,
                csr_repo_name=csr_repo_name,
                github_repo_owner=github_repo_owner,
                github_repo_name=github_repo_name,
                branch_pattern=args.branch_pattern,
                tag_pattern=args.tag_pattern,
                dockerfile_path=args.dockerfile,
                app_name=app_name,
                config_path=args.config,
                namespace=args.namespace,
                expose_port=args.expose,
                gcs_config_staging_path=gcs_config_staging_path,
                cluster=args.cluster,
                location=args.location)
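
For the mirrored repo types the command derives a Cloud Source Repositories name from the type, owner, and repo, which is why --repo-owner is mandatory there. A hedged sketch of just that mapping (the function name is illustrative; the prefixes come from the branches above):

def mirrored_csr_repo_name(repo_type, repo_owner, repo_name):
    """Builds the CSR mirror name, e.g. 'github_octocat_hello-world'."""
    prefixes = {'bitbucket_mirrored': 'bitbucket',
                'github_mirrored': 'github'}
    if repo_type not in prefixes:
        raise ValueError('not a mirrored repo type: {}'.format(repo_type))
    if not repo_owner:
        raise ValueError('--repo-owner is required for {}'.format(repo_type))
    return '{}_{}_{}'.format(prefixes[repo_type], repo_owner, repo_name)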
Example #19
def _Run(args, track=None, enable_runtime=True, enable_traffic_control=False):
    """Run a function deployment with the given args."""
    # Check for labels that start with `deployment`, which is not allowed.
    labels_util.CheckNoDeploymentLabels('--remove-labels', args.remove_labels)
    labels_util.CheckNoDeploymentLabels('--update-labels', args.update_labels)

    # Check that exactly one trigger type is specified properly.
    trigger_util.ValidateTriggerArgs(args.trigger_event, args.trigger_resource,
                                     args.IsSpecified('retry'),
                                     args.IsSpecified('trigger_http'))
    trigger_params = trigger_util.GetTriggerEventParams(
        args.trigger_http, args.trigger_bucket, args.trigger_topic,
        args.trigger_event, args.trigger_resource)

    function_ref = args.CONCEPTS.name.Parse()
    function_url = function_ref.RelativeName()

    messages = api_util.GetApiMessagesModule(track)

    # Get an existing function or create a new one.
    function = api_util.GetFunction(function_url)
    is_new_function = function is None
    had_vpc_connector = bool(
        function.vpcConnector) if not is_new_function else False
    if is_new_function:
        trigger_util.CheckTriggerSpecified(args)
        function = messages.CloudFunction()
        function.name = function_url
    elif trigger_params:
        # If the new deployment would implicitly change the trigger_event type
        # raise error
        trigger_util.CheckLegacyTriggerUpdate(function.eventTrigger,
                                              trigger_params['trigger_event'])

    # Keep track of which fields are updated in the case of patching.
    updated_fields = []

    # Populate function properties based on args.
    if args.entry_point:
        function.entryPoint = args.entry_point
        updated_fields.append('entryPoint')
    if args.timeout:
        function.timeout = '{}s'.format(args.timeout)
        updated_fields.append('timeout')
    if args.memory:
        function.availableMemoryMb = utils.BytesToMb(args.memory)
        updated_fields.append('availableMemoryMb')
    if args.service_account:
        function.serviceAccountEmail = args.service_account
        updated_fields.append('serviceAccountEmail')
    if (args.IsSpecified('max_instances')
            or args.IsSpecified('clear_max_instances')):
        max_instances = 0 if args.clear_max_instances else args.max_instances
        function.maxInstances = max_instances
        updated_fields.append('maxInstances')
    if enable_runtime:
        if args.IsSpecified('runtime'):
            function.runtime = args.runtime
            updated_fields.append('runtime')
            if args.runtime in ['nodejs6']:
                log.warning(
                    'The Node.js 6 runtime is deprecated on Cloud Functions. '
                    'Please migrate to Node.js 8 (--runtime=nodejs8) or Node.js 10 '
                    '(--runtime=nodejs10). '
                    'See https://cloud.google.com/functions/docs/migrating/nodejs-runtimes'
                )
        elif is_new_function:
            raise exceptions.RequiredArgumentException(
                'runtime', 'Flag `--runtime` is required for new functions.')
    if args.vpc_connector or args.clear_vpc_connector:
        function.vpcConnector = ('' if args.clear_vpc_connector else
                                 args.vpc_connector)
        updated_fields.append('vpcConnector')
    if enable_traffic_control:
        if args.IsSpecified('egress_settings'):
            will_have_vpc_connector = ((had_vpc_connector
                                        and not args.clear_vpc_connector)
                                       or args.vpc_connector)
            if not will_have_vpc_connector:
                raise exceptions.RequiredArgumentException(
                    'vpc-connector', 'Flag `--vpc-connector` is '
                    'required for setting `egress-settings`.')
            egress_settings_enum = arg_utils.ChoiceEnumMapper(
                arg_name='egress_settings',
                message_enum=(
                    function.VpcConnectorEgressSettingsValueValuesEnum),
                custom_mappings=flags.EGRESS_SETTINGS_MAPPING
            ).GetEnumForChoice(args.egress_settings)
            function.vpcConnectorEgressSettings = egress_settings_enum
            updated_fields.append('vpcConnectorEgressSettings')
        if args.IsSpecified('ingress_settings'):
            ingress_settings_enum = arg_utils.ChoiceEnumMapper(
                arg_name='ingress_settings',
                message_enum=function.IngressSettingsValueValuesEnum,
                custom_mappings=flags.INGRESS_SETTINGS_MAPPING
            ).GetEnumForChoice(args.ingress_settings)
            function.ingressSettings = ingress_settings_enum
            updated_fields.append('ingressSettings')
    # Populate trigger properties of function based on trigger args.
    if args.trigger_http:
        function.httpsTrigger = messages.HttpsTrigger()
        function.eventTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if trigger_params:
        function.eventTrigger = trigger_util.CreateEventTrigger(
            **trigger_params)
        function.httpsTrigger = None
        updated_fields.extend(['eventTrigger', 'httpsTrigger'])
    if args.IsSpecified('retry'):
        updated_fields.append('eventTrigger.failurePolicy')
        if args.retry:
            function.eventTrigger.failurePolicy = messages.FailurePolicy()
            function.eventTrigger.failurePolicy.retry = messages.Retry()
        else:
            function.eventTrigger.failurePolicy = None
    elif function.eventTrigger:
        function.eventTrigger.failurePolicy = None

    # Populate source properties of function based on source args.
    # Only Add source to function if its explicitly provided, a new function,
    # using a stage bucket or deploy of an existing function that previously
    # used local source.
    if (args.source or args.stage_bucket or is_new_function
            or function.sourceUploadUrl):
        updated_fields.extend(
            source_util.SetFunctionSourceProps(function, function_ref,
                                               args.source, args.stage_bucket,
                                               args.ignore_file))

    # Apply label args to function
    if labels_util.SetFunctionLabels(function, args.update_labels,
                                     args.remove_labels, args.clear_labels):
        updated_fields.append('labels')

    # Apply environment variables args to function
    updated_fields.extend(_ApplyEnvVarsArgsToFunction(function, args))

    ensure_all_users_invoke = flags.ShouldEnsureAllUsersInvoke(args)
    deny_all_users_invoke = flags.ShouldDenyAllUsersInvoke(args)

    if is_new_function:
        if (not ensure_all_users_invoke and not deny_all_users_invoke and
                api_util.CanAddFunctionIamPolicyBinding(_GetProject(args))):
            ensure_all_users_invoke = console_io.PromptContinue(
                prompt_string=('Allow unauthenticated invocations of new '
                               'function [{}]?'.format(args.NAME)),
                default=False)

        op = api_util.CreateFunction(function,
                                     function_ref.Parent().RelativeName())
        if (not ensure_all_users_invoke and not deny_all_users_invoke):
            template = ('Function created with limited-access IAM policy. '
                        'To enable unauthenticated access consider "%s"')
            log.warning(template %
                        _CreateBindPolicyCommand(args.NAME, args.region))
            deny_all_users_invoke = True

    elif updated_fields:
        op = api_util.PatchFunction(function, updated_fields)

    else:
        op = None  # Nothing to wait for
        if not ensure_all_users_invoke and not deny_all_users_invoke:
            log.status.Print('Nothing to update.')
            return

    stop_trying_perm_set = [False]

    # The server asynchronously sets allUsers invoker permissions some time
    # after we create the function. That means, to remove it, we need to do so
    # after the server adds it. We can remove this mess after the default
    # changes.
    # TODO(b/139026575): Remove the "remove" path, only bother adding. Remove the
    # logic from the polling loop. Remove the ability to add logic like this to
    # the polling loop.
    def TryToSetInvokerPermission():
        """Try to make the invoker permission be what we said it should.

    This is for executing in the polling loop, and will stop trying as soon as
    it succeeds at making a change.
    """
        if stop_trying_perm_set[0]:
            return
        try:
            if ensure_all_users_invoke:
                api_util.AddFunctionIamPolicyBinding(function.name)
                stop_trying_perm_set[0] = True
            elif deny_all_users_invoke:
                stop_trying_perm_set[0] = (
                    api_util.RemoveFunctionIamPolicyBindingIfFound(
                        function.name))
        except exceptions.HttpException:
            stop_trying_perm_set[0] = True
            log.warning('Setting IAM policy failed, try "%s"' %
                        _CreateBindPolicyCommand(args.NAME, args.region))

    if op:
        api_util.WaitForFunctionUpdateOperation(
            op, do_every_poll=TryToSetInvokerPermission)
    return api_util.GetFunction(function.name)
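
stop_trying_perm_set is a one-element list rather than a bare boolean because the nested TryToSetInvokerPermission must write the flag, and assignment from a closure needs a mutable cell when nonlocal is unavailable. A minimal illustration of the pattern, detached from the IAM specifics:

def make_one_shot_poll(attempt):
    """Wraps attempt() so repeated polls stop once it reports success."""
    done = [False]  # mutable cell the closure can write without nonlocal

    def poll():
        if done[0]:
            return
        done[0] = attempt()  # attempt() returns True once it has succeeded

    return poll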
Example #20
    def Run(self, args):
        """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedDeployException: If the build is completed and not 'SUCCESS'.
    """

        if args.source is None:
            if args.tag or args.tag_default:
                raise c_exceptions.RequiredArgumentException(
                    'SOURCE',
                    'required to build container image provided by --tag or --tag-default.'
                )
            if args.config:
                raise c_exceptions.RequiredArgumentException(
                    'SOURCE',
                    'required because --config is a relative path in the '
                    'source directory.')

        if args.source and args.image and not args.config:
            raise c_exceptions.InvalidArgumentException(
                'SOURCE', 'Source must not be provided when no Kubernetes '
                'configs and no docker builds are required.')

        image = self._DetermineImageFromArgs(args)

        # Determine app_name
        if args.app_name:
            app_name = args.app_name
        else:
            app_name = self._ImageName(image)

        # Determine app_version
        app_version = None
        image_has_tag = '@' not in image and ':' in image
        if args.app_version:
            app_version = args.app_version
        elif image_has_tag:
            app_version = image.split(':')[-1]  # Set version to tag
        elif args.source:
            if git.IsGithubRepository(
                    args.source) and not git.HasPendingChanges(args.source):
                short_sha = git.GetShortGitHeadRevision(args.source)
                if short_sha:
                    app_version = short_sha

        # Validate expose
        if args.expose and args.expose < 0:
            raise c_exceptions.InvalidArgumentException(
                '--expose', 'port number is invalid')

        # Determine gcs_staging_dir_bucket and gcs_staging_dir_object
        if args.gcs_staging_dir is None:
            gcs_staging_dir_bucket = (
                staging_bucket_util.GetDefaultStagingBucket())
            gcs_staging_dir_object = 'deploy'
        else:
            try:
                gcs_staging_dir_ref = resources.REGISTRY.Parse(
                    args.gcs_staging_dir, collection='storage.objects')
                gcs_staging_dir_object = gcs_staging_dir_ref.object
            except resources.WrongResourceCollectionException:
                gcs_staging_dir_ref = resources.REGISTRY.Parse(
                    args.gcs_staging_dir, collection='storage.buckets')
                gcs_staging_dir_object = None
            gcs_staging_dir_bucket = gcs_staging_dir_ref.bucket

        gcs_client = storage_api.StorageClient()
        gcs_client.CreateBucketIfNotExists(gcs_staging_dir_bucket)

        # If we are using a default bucket check that it is owned by user project
        # (b/33046325)
        if (args.gcs_staging_dir is None
                and not staging_bucket_util.BucketIsInProject(
                    gcs_client, gcs_staging_dir_bucket)):
            raise c_exceptions.RequiredArgumentException(
                '--gcs-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-staging-dir.'.format(gcs_staging_dir_bucket))

        if gcs_staging_dir_object:
            gcs_config_staging_path = '{}/{}/config'.format(
                gcs_staging_dir_bucket, gcs_staging_dir_object)
        else:
            gcs_config_staging_path = gcs_staging_dir_bucket

        if args.source:
            staged_source = self._StageSource(args.source,
                                              gcs_staging_dir_bucket,
                                              gcs_staging_dir_object)
        else:
            staged_source = None

        messages = cloudbuild_util.GetMessagesModule()
        build_config = build_util.CreateBuild(
            messages,
            build_timeout=properties.VALUES.builds.timeout.Get(),
            build_and_push=(args.tag_default or args.tag),
            staged_source=staged_source,
            image=image,
            dockerfile_path='Dockerfile',
            app_name=app_name,
            app_version=app_version,
            config_path=args.config,
            namespace=args.namespace,
            expose_port=args.expose,
            gcs_config_staging_path=gcs_config_staging_path,
            cluster=args.cluster,
            location=args.location,
            build_tags=([] if not args.app_name else [args.app_name]))

        client = cloudbuild_util.GetClientInstance()
        self._SubmitBuild(client, messages, build_config,
                          gcs_config_staging_path, args.async_)
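
The version pick above prefers the explicit flag, then a tag embedded in the image name, then a clean git HEAD for local sources. A sketch of the image-tag branch alone; the '@' check keeps a sha256 digest, whose ':' is not a tag separator, from being misread:

def version_from_image(image):
    """Returns the tag of IMAGE, or None for untagged/digest references."""
    if '@' not in image and ':' in image:
        return image.split(':')[-1]
    return None

# version_from_image('gcr.io/p/app:v1') == 'v1'
# version_from_image('gcr.io/p/app@sha256:abc...') is None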
Example #21
  def Run(self, args):
    """This is what gets called when the user runs this command.

    Args:
      args: an argparse namespace. All the arguments that were provided to this
        command invocation.

    Returns:
      Some value that we want to have printed later.

    Raises:
      FailedBuildException: If the build is completed and not 'SUCCESS'.
    """

    default_gcs_source = False
    default_bucket_name = None
    if args.gcs_source_staging_dir is None:
      default_gcs_source = True
      default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
      args.gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)

    client = cloudbuild_util.GetClientInstance()
    messages = cloudbuild_util.GetMessagesModule()

    gcs_client = storage_api.StorageClient()

    # First, create the build request.
    build_timeout = properties.VALUES.builds.timeout.Get()

    if build_timeout is not None:
      try:
        # A bare number is interpreted as seconds.
        build_timeout_secs = int(build_timeout)
      except ValueError:
        build_timeout_duration = times.ParseDuration(build_timeout)
        build_timeout_secs = int(build_timeout_duration.total_seconds)
      timeout_str = six.text_type(build_timeout_secs) + 's'
    else:
      timeout_str = None

    if args.tag is not None:
      if (properties.VALUES.builds.check_tag.GetBool() and
          'gcr.io/' not in args.tag):
        raise c_exceptions.InvalidArgumentException(
            '--tag',
            'Tag value must be in the gcr.io/* or *.gcr.io/* namespace.')
      if properties.VALUES.builds.use_kaniko.GetBool():
        if args.no_cache:
          ttl = '0h'
        else:
          ttl = '{}h'.format(properties.VALUES.builds.kaniko_cache_ttl.Get())
        build_config = messages.Build(
            steps=[
                messages.BuildStep(
                    name=properties.VALUES.builds.kaniko_image.Get(),
                    args=[
                        '--destination', args.tag, '--cache', 'true',
                        '--cache-ttl', ttl
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
      else:
        if args.no_cache:
          raise c_exceptions.InvalidArgumentException(
              'no-cache',
              'Cannot specify --no-cache if builds/use_kaniko property is '
              'False')
        build_config = messages.Build(
            images=[args.tag],
            steps=[
                messages.BuildStep(
                    name='gcr.io/cloud-builders/docker',
                    args=[
                        'build', '--network', 'cloudbuild', '--no-cache', '-t',
                        args.tag, '.'
                    ],
                ),
            ],
            timeout=timeout_str,
            substitutions=cloudbuild_util.EncodeSubstitutions(
                args.substitutions, messages))
    elif args.config is not None:
      if args.no_cache:
        raise c_exceptions.ConflictingArgumentsException(
            '--config', '--no-cache')
      if not args.config:
        raise c_exceptions.InvalidArgumentException(
            '--config', 'Config file path must not be empty.')
      build_config = config.LoadCloudbuildConfigFromPath(
          args.config, messages, params=args.substitutions)
    else:
      raise c_exceptions.OneOfArgumentsRequiredException(
          ['--tag', '--config'],
          'Requires either a docker tag or a config file.')

    # If timeout was set by flag, overwrite the config file.
    if timeout_str:
      build_config.timeout = timeout_str

    # --no-source overrides the default --source.
    if not args.IsSpecified('source') and args.no_source:
      args.source = None

    gcs_source_staging = None
    if args.source:
      suffix = '.tgz'
      if args.source.startswith('gs://') or os.path.isfile(args.source):
        _, suffix = os.path.splitext(args.source)

      # Next, stage the source to Cloud Storage.
      staged_object = '{stamp}-{uuid}{suffix}'.format(
          stamp=times.GetTimeStampFromDateTime(times.Now()),
          uuid=uuid.uuid4().hex,
          suffix=suffix,
      )
      gcs_source_staging_dir = resources.REGISTRY.Parse(
          args.gcs_source_staging_dir, collection='storage.objects')

      # We create the bucket (if it does not exist) first. If we do an existence
      # check and then create the bucket ourselves, it would be possible for an
      # attacker to get lucky and beat us to creating the bucket. Block on this
      # creation to avoid this race condition.
      gcs_client.CreateBucketIfNotExists(gcs_source_staging_dir.bucket)

      # If no bucket is specified (for the source `default_gcs_source`), check
      # that the default bucket is also owned by the project (b/33046325).
      if default_gcs_source and not staging_bucket_util.BucketIsInProject(
          gcs_client, default_bucket_name):
        raise c_exceptions.RequiredArgumentException(
            'gcs-source-staging-dir',
            'A bucket with name {} already exists and is owned by '
            'another project. Specify a bucket using '
            '--gcs-source-staging-dir.'.format(default_bucket_name))

      if gcs_source_staging_dir.object:
        staged_object = gcs_source_staging_dir.object + '/' + staged_object
      gcs_source_staging = resources.REGISTRY.Create(
          collection='storage.objects',
          bucket=gcs_source_staging_dir.bucket,
          object=staged_object)

      if args.source.startswith('gs://'):
        gcs_source = resources.REGISTRY.Parse(
            args.source, collection='storage.objects')
        staged_source_obj = gcs_client.Rewrite(gcs_source, gcs_source_staging)
        build_config.source = messages.Source(
            storageSource=messages.StorageSource(
                bucket=staged_source_obj.bucket,
                object=staged_source_obj.name,
                generation=staged_source_obj.generation,
            ))
      else:
        if not os.path.exists(args.source):
          raise c_exceptions.BadFileException(
              'could not find source [{src}]'.format(src=args.source))
        if os.path.isdir(args.source):
          source_snapshot = snapshot.Snapshot(args.source,
                                              ignore_file=args.ignore_file)
          size_str = resource_transform.TransformSize(
              source_snapshot.uncompressed_size)
          log.status.Print(
              'Creating temporary tarball archive of {num_files} file(s)'
              ' totalling {size} before compression.'.format(
                  num_files=len(source_snapshot.files), size=size_str))
          staged_source_obj = source_snapshot.CopyTarballToGCS(
              gcs_client, gcs_source_staging, ignore_file=args.ignore_file)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
        elif os.path.isfile(args.source):
          unused_root, ext = os.path.splitext(args.source)
          if ext not in _ALLOWED_SOURCE_EXT:
            raise c_exceptions.BadFileException(
                'Local file [{src}] is none of {exts}'.format(
                    src=args.source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
          log.status.Print('Uploading local file [{src}] to '
                           '[gs://{bucket}/{object}].'.format(
                               src=args.source,
                               bucket=gcs_source_staging.bucket,
                               object=gcs_source_staging.object,
                           ))
          staged_source_obj = gcs_client.CopyFileToGCS(args.source,
                                                       gcs_source_staging)
          build_config.source = messages.Source(
              storageSource=messages.StorageSource(
                  bucket=staged_source_obj.bucket,
                  object=staged_source_obj.name,
                  generation=staged_source_obj.generation,
              ))
    else:
      # No source
      if not args.no_source:
        raise c_exceptions.InvalidArgumentException(
            '--no-source', 'To omit source, use the --no-source flag.')

    if args.gcs_log_dir:
      gcs_log_dir = resources.REGISTRY.Parse(
          args.gcs_log_dir, collection='storage.objects')

      build_config.logsBucket = ('gs://' + gcs_log_dir.bucket + '/' +
                                 gcs_log_dir.object)

    # Machine type.
    if args.machine_type is not None:
      machine_type = Submit._machine_type_flag_map.GetEnumForChoice(
          args.machine_type)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.machineType = machine_type

    # Disk size.
    if args.disk_size is not None:
      disk_size = compute_utils.BytesToGb(args.disk_size)
      if not build_config.options:
        build_config.options = messages.BuildOptions()
      build_config.options.diskSizeGb = int(disk_size)

    log.debug('submitting build: ' + repr(build_config))

    # Start the build.
    op = client.projects_builds.Create(
        messages.CloudbuildProjectsBuildsCreateRequest(
            build=build_config, projectId=properties.VALUES.core.project.Get()))
    json = encoding.MessageToJson(op.metadata)
    build = encoding.JsonToMessage(messages.BuildOperationMetadata, json).build

    build_ref = resources.REGISTRY.Create(
        collection='cloudbuild.projects.builds',
        projectId=build.projectId,
        id=build.id)

    log.CreatedResource(build_ref)
    if build.logUrl:
      log.status.Print(
          'Logs are available at [{log_url}].'.format(log_url=build.logUrl))
    else:
      log.status.Print('Logs are available in the Cloud Console.')

    # If the command is run --async, we just print out a reference to the build.
    if args.async_:
      return build

    mash_handler = execution.MashHandler(
        execution.GetCancelBuildHandler(client, messages, build_ref))

    # Otherwise, logs are streamed from GCS.
    with execution_utils.CtrlCSection(mash_handler):
      build = cb_logs.CloudBuildClient(client, messages).Stream(build_ref)

    if build.status == messages.Build.StatusValueValuesEnum.TIMEOUT:
      log.status.Print(
          'Your build timed out. Use the [--timeout=DURATION] flag to change '
          'the timeout threshold.')

    if build.status != messages.Build.StatusValueValuesEnum.SUCCESS:
      raise FailedBuildException(build)

    return build
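
Every submission stages source under a name built from a UTC timestamp plus a random UUID, so concurrent builds cannot clobber each other, while the original archive suffix is preserved for gs:// and local-file sources. A standalone sketch, assuming datetime in place of the internal times helpers:

import os
import uuid
from datetime import datetime, timezone

def staged_object_name(source, dir_object=None):
    """Illustrative collision-free staging-object name, as assembled above."""
    suffix = '.tgz'  # directories get tarballed
    if source.startswith('gs://') or os.path.isfile(source):
        _, suffix = os.path.splitext(source)
    name = '{stamp}-{uid}{suffix}'.format(
        stamp=datetime.now(timezone.utc).strftime('%Y%m%dT%H%M%S'),
        uid=uuid.uuid4().hex,
        suffix=suffix)
    return '{}/{}'.format(dir_object, name) if dir_object else name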
Example #22
    def ConstructInstanceFromArgs(cls,
                                  sql_messages,
                                  args,
                                  original=None,
                                  instance_ref=None):
        """Construct a Cloud SQL instance from command line args.

    Args:
      sql_messages: module, The messages module that should be used.
      args: argparse.Namespace, The CLI arg namespace.
      original: sql_messages.DatabaseInstance, The original instance, if some of
          it might be used to fill fields in the new one.
      instance_ref: reference to DatabaseInstance object, used to fill project
          and instance information.

    Returns:
      sql_messages.DatabaseInstance, The constructed (and possibly partial)
      database instance.

    Raises:
      ToolException: An error other than http error occurred while executing
          the command.
    """
        settings = cls._ConstructSettingsFromArgs(sql_messages, args, original)
        cls._SetBackupConfiguration(sql_messages, settings, args, original)
        cls._SetDatabaseFlags(sql_messages, settings, args)
        cls._SetMaintenanceWindow(sql_messages, settings, args, original)

        on_premises_host_port = getattr(args, 'on_premises_host_port', None)
        if on_premises_host_port:
            if args.require_ssl:
                raise exceptions.ToolException(
                    'Argument --on-premises-host-port not '
                    'allowed with --require_ssl')
            settings.onPremisesConfiguration = sql_messages.OnPremisesConfiguration(
                hostPort=on_premises_host_port)

        storage_size = getattr(args, 'storage_size', None)
        if storage_size:
            settings.dataDiskSizeGb = int(storage_size / (1 << 30))

        # these flags are only present for the create command
        region = getattr(args, 'region', None)
        database_version = getattr(args, 'database_version', None)

        instance_resource = sql_messages.DatabaseInstance(
            region=region,
            databaseVersion=database_version,
            masterInstanceName=getattr(args, 'master_instance_name', None),
            settings=settings)

        if hasattr(args, 'master_instance_name'):
            if args.master_instance_name:
                replication = 'ASYNCHRONOUS'
                if hasattr(args,
                           'replica_type') and args.replica_type == 'FAILOVER':
                    instance_resource.replicaConfiguration = (
                        sql_messages.ReplicaConfiguration(failoverTarget=True))
            else:
                replication = 'SYNCHRONOUS'
            if not args.replication:
                instance_resource.settings.replicationType = replication

        if instance_ref:
            cls.SetProjectAndInstanceFromRef(instance_resource, instance_ref)

        if hasattr(args, 'storage_type') and args.storage_type:
            instance_resource.settings.dataDiskType = 'PD_' + args.storage_type

        if hasattr(args,
                   'failover_replica_name') and args.failover_replica_name:
            instance_resource.failoverReplica = (
                sql_messages.DatabaseInstance.FailoverReplicaValue(
                    name=args.failover_replica_name))

        if (hasattr(args, 'storage_auto_increase')
                and args.storage_auto_increase is not None):
            instance_resource.settings.storageAutoResize = args.storage_auto_increase

        if (hasattr(args, 'storage_auto_increase_limit')
                and args.IsSpecified('storage_auto_increase_limit')):
            # Resize limit should be settable if the original instance has resize
            # turned on, or if the instance to be created has resize flag.
            if (original and original.settings.storageAutoResize) or (
                    args.storage_auto_increase):
                # If the limit is set to None, we want it to be set to 0. This is a
                # backend requirement.
                instance_resource.settings.storageAutoResizeLimit = (
                    args.storage_auto_increase_limit or 0)
            else:
                raise exceptions.RequiredArgumentException(
                    '--storage-auto-increase',
                    'To set the storage capacity limit '
                    'using [--storage-auto-increase-limit], [--storage-auto-increase] '
                    'must be enabled.')

        return instance_resource
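
ConstructInstanceFromArgs defaults replication by role: a replica (master_instance_name present) gets ASYNCHRONOUS, a primary SYNCHRONOUS, and an explicit --replication flag always wins. The branch distilled into an illustrative helper, not part of the module:

def default_replication_type(master_instance_name, explicit=None):
    """Returns the replication type the create command would apply."""
    if explicit:
        return explicit
    return 'ASYNCHRONOUS' if master_instance_name else 'SYNCHRONOUS'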
Example #23
def _SetSource(build_config,
               messages,
               is_specified_source,
               no_source,
               source,
               gcs_source_staging_dir,
               ignore_file,
               hide_logs=False):
    """Set the source for the build config."""
    default_gcs_source = False
    default_bucket_name = None
    if gcs_source_staging_dir is None:
        default_gcs_source = True
        default_bucket_name = staging_bucket_util.GetDefaultStagingBucket()
        gcs_source_staging_dir = 'gs://{}/source'.format(default_bucket_name)
    gcs_client = storage_api.StorageClient()

    # --no-source overrides the default --source.
    if not is_specified_source and no_source:
        source = None

    gcs_source_staging = None
    if source:
        suffix = '.tgz'
        if source.startswith('gs://') or os.path.isfile(source):
            _, suffix = os.path.splitext(source)

        # Next, stage the source to Cloud Storage.
        staged_object = '{stamp}-{uuid}{suffix}'.format(
            stamp=times.GetTimeStampFromDateTime(times.Now()),
            uuid=uuid.uuid4().hex,
            suffix=suffix,
        )
        gcs_source_staging_dir = resources.REGISTRY.Parse(
            gcs_source_staging_dir, collection='storage.objects')

        try:
            gcs_client.CreateBucketIfNotExists(
                gcs_source_staging_dir.bucket,
                check_ownership=default_gcs_source)
        except storage_api.BucketInWrongProjectError:
            # If we're using the default bucket but it already exists in a different
            # project, then it could belong to a malicious attacker (b/33046325).
            raise c_exceptions.RequiredArgumentException(
                'gcs-source-staging-dir',
                'A bucket with name {} already exists and is owned by '
                'another project. Specify a bucket using '
                '--gcs-source-staging-dir.'.format(default_bucket_name))

        if gcs_source_staging_dir.object:
            staged_object = gcs_source_staging_dir.object + '/' + staged_object
        gcs_source_staging = resources.REGISTRY.Create(
            collection='storage.objects',
            bucket=gcs_source_staging_dir.bucket,
            object=staged_object)

        if source.startswith('gs://'):
            gcs_source = resources.REGISTRY.Parse(source,
                                                  collection='storage.objects')
            staged_source_obj = gcs_client.Rewrite(gcs_source,
                                                   gcs_source_staging)
            build_config.source = messages.Source(
                storageSource=messages.StorageSource(
                    bucket=staged_source_obj.bucket,
                    object=staged_source_obj.name,
                    generation=staged_source_obj.generation,
                ))
        else:
            if not os.path.exists(source):
                raise c_exceptions.BadFileException(
                    'could not find source [{src}]'.format(src=source))
            if os.path.isdir(source):
                source_snapshot = snapshot.Snapshot(source,
                                                    ignore_file=ignore_file)
                size_str = resource_transform.TransformSize(
                    source_snapshot.uncompressed_size)
                if not hide_logs:
                    log.status.Print(
                        'Creating temporary tarball archive of {num_files} file(s)'
                        ' totalling {size} before compression.'.format(
                            num_files=len(source_snapshot.files),
                            size=size_str))
                staged_source_obj = source_snapshot.CopyTarballToGCS(
                    gcs_client,
                    gcs_source_staging,
                    ignore_file=ignore_file,
                    hide_logs=hide_logs)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
            elif os.path.isfile(source):
                unused_root, ext = os.path.splitext(source)
                if ext not in _ALLOWED_SOURCE_EXT:
                    raise c_exceptions.BadFileException(
                        'Local file [{src}] is none of {exts}'.format(
                            src=source, exts=', '.join(_ALLOWED_SOURCE_EXT)))
                if not hide_logs:
                    log.status.Print('Uploading local file [{src}] to '
                                     '[gs://{bucket}/{object}].'.format(
                                         src=source,
                                         bucket=gcs_source_staging.bucket,
                                         object=gcs_source_staging.object,
                                     ))
                staged_source_obj = gcs_client.CopyFileToGCS(
                    source, gcs_source_staging)
                build_config.source = messages.Source(
                    storageSource=messages.StorageSource(
                        bucket=staged_source_obj.bucket,
                        object=staged_source_obj.name,
                        generation=staged_source_obj.generation,
                    ))
    else:
        # No source
        if not no_source:
            raise c_exceptions.InvalidArgumentException(
                '--no-source', 'To omit source, use the --no-source flag.')

    return build_config
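
Unlike the earlier copies of this staging logic, _SetSource folds the ownership check into CreateBucketIfNotExists(check_ownership=...), so there is no separate existence probe for an attacker to race. A sketch of the calling pattern; BucketInWrongProjectError and the keyword come from the code above, while the wrapper itself is illustrative:

def ensure_staging_bucket(gcs_client, bucket, using_default_bucket):
    """Creates or reuses BUCKET, verifying ownership atomically."""
    try:
        # Ownership only matters for the shared default bucket name, which
        # another project could have claimed first (b/33046325).
        gcs_client.CreateBucketIfNotExists(
            bucket, check_ownership=using_default_bucket)
    except storage_api.BucketInWrongProjectError:
        raise c_exceptions.RequiredArgumentException(
            'gcs-source-staging-dir',
            'A bucket with name {} already exists and is owned by another '
            'project. Specify a bucket using '
            '--gcs-source-staging-dir.'.format(bucket))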
Example #24
    def _CreateRequests(self, args, instance_refs, project, zone,
                        compute_client, resource_parser, holder):
        # gcloud creates default values for some fields in Instance resource
        # when no value was specified on command line.
        # When --source-instance-template was specified, defaults are taken from
        # Instance Template and gcloud flags are used to override them - by default
        # fields should not be initialized.
        source_instance_template = self.GetSourceInstanceTemplate(
            args, resource_parser)
        skip_defaults = source_instance_template is not None

        source_machine_image = self.GetSourceMachineImage(
            args, resource_parser)
        skip_defaults = skip_defaults or source_machine_image is not None

        scheduling = instance_utils.GetScheduling(
            args,
            compute_client,
            skip_defaults,
            support_node_affinity=True,
            support_min_node_cpu=self._support_min_node_cpu,
            support_location_hint=self._support_location_hint)
        tags = instance_utils.GetTags(args, compute_client)
        labels = instance_utils.GetLabels(args, compute_client)
        metadata = instance_utils.GetMetadata(args, compute_client,
                                              skip_defaults)
        boot_disk_size_gb = instance_utils.GetBootDiskSizeGb(args)

        network_interfaces = self._GetNetworkInterfacesWithValidation(
            args, resource_parser, compute_client, holder, project, zone,
            skip_defaults)

        machine_type_uris = instance_utils.GetMachineTypeUris(
            args, compute_client, holder, instance_refs, skip_defaults)

        create_boot_disk = not instance_utils.UseExistingBootDisk(
            args.disk or [])
        image_uri = self._GetImageUri(args, compute_client, create_boot_disk,
                                      project, resource_parser)

        shielded_instance_config = self._BuildShieldedInstanceConfigMessage(
            messages=compute_client.messages, args=args)

        if self._support_confidential_compute:
            confidential_instance_config = (
                self._BuildConfidentialInstanceConfigMessage(
                    messages=compute_client.messages, args=args))

        csek_keys = csek_utils.CsekKeyStore.FromArgs(
            args, self._support_rsa_encrypted)
        disks_messages = self._GetDiskMessages(args, skip_defaults,
                                               instance_refs, compute_client,
                                               resource_parser,
                                               create_boot_disk,
                                               boot_disk_size_gb, image_uri,
                                               csek_keys)

        project_to_sa = self._GetProjectToServiceAccountMap(
            args, instance_refs, compute_client, skip_defaults)

        requests = []
        for instance_ref, machine_type_uri, disks in zip(
                instance_refs, machine_type_uris, disks_messages):

            can_ip_forward = instance_utils.GetCanIpForward(
                args, skip_defaults)
            guest_accelerators = instance_utils.GetAccelerators(
                args, compute_client, resource_parser, instance_ref.project,
                instance_ref.zone)

            instance = compute_client.messages.Instance(
                canIpForward=can_ip_forward,
                deletionProtection=args.deletion_protection,
                description=args.description,
                disks=disks,
                guestAccelerators=guest_accelerators,
                hostname=args.hostname,
                labels=labels,
                machineType=machine_type_uri,
                metadata=metadata,
                minCpuPlatform=args.min_cpu_platform,
                name=instance_ref.Name(),
                networkInterfaces=network_interfaces,
                serviceAccounts=project_to_sa[instance_ref.project],
                scheduling=scheduling,
                tags=tags)

            resource_policies = getattr(args, 'resource_policies', None)
            if resource_policies:
                parsed_resource_policies = []
                for policy in resource_policies:
                    resource_policy_ref = maintenance_util.ParseResourcePolicyWithZone(
                        resource_parser,
                        policy,
                        project=instance_ref.project,
                        zone=instance_ref.zone)
                    parsed_resource_policies.append(
                        resource_policy_ref.SelfLink())
                instance.resourcePolicies = parsed_resource_policies

            if shielded_instance_config:
                instance.shieldedInstanceConfig = shielded_instance_config

            if self._support_confidential_compute and confidential_instance_config:
                instance.confidentialInstanceConfig = confidential_instance_config

            if (self._support_erase_vss and
                    args.IsSpecified('erase_windows_vss_signature')):
                instance.eraseWindowsVssSignature = args.erase_windows_vss_signature

            if self._support_post_key_revocation_action_type and args.IsSpecified(
                    'post_key_revocation_action_type'):
                instance.postKeyRevocationActionType = arg_utils.ChoiceToEnum(
                    args.post_key_revocation_action_type,
                    compute_client.messages.Instance.
                    PostKeyRevocationActionTypeValueValuesEnum)

            request = compute_client.messages.ComputeInstancesInsertRequest(
                instance=instance,
                project=instance_ref.project,
                zone=instance_ref.zone)

            if source_instance_template:
                request.sourceInstanceTemplate = source_instance_template

            if source_machine_image:
                request.instance.sourceMachineImage = source_machine_image
                if args.IsSpecified('source_machine_image_csek_key_file'):
                    key = instance_utils.GetSourceMachineImageKey(
                        args, self.SOURCE_MACHINE_IMAGE, compute_client,
                        holder)
                    request.instance.sourceMachineImageEncryptionKey = key

            if (self._support_machine_image_key and
                    args.IsSpecified('source_machine_image_csek_key_file')):
                if not args.IsSpecified('source_machine_image'):
                    raise exceptions.RequiredArgumentException(
                        '`--source-machine-image`',
                        '`--source-machine-image-csek-key-file` requires '
                        '`--source-machine-image` to be specified.')

            if args.IsSpecified('enable_display_device'):
                request.instance.displayDevice = compute_client.messages.DisplayDevice(
                    enableDisplay=args.enable_display_device)

            request.instance.reservationAffinity = instance_utils.GetReservationAffinity(
                args, compute_client)

            requests.append(
                (compute_client.apitools_client.instances, 'Insert', request))
        return requests
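
A hedged sketch of how the (service, 'Insert', request) tuples built above are typically executed in gcloud compute commands. The Run method, the way instance_refs/project/zone are resolved, and the MakeRequests batching helper follow the usual command pattern and are assumptions here, not part of the original example.

    # Sketch only: illustrates the standard consumption of _CreateRequests.
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        # instance_refs, project and zone are placeholders; a real command
        # resolves them from resource arguments before this point.
        requests = self._CreateRequests(args, instance_refs, project, zone,
                                        holder.client, holder.resources,
                                        holder)
        # MakeRequests issues the batched Insert calls and waits for the
        # resulting zonal operations to finish.
        return holder.client.MakeRequests(requests)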
Example #25
    def validateFlags(self, args):
        if not args.no_service_account and not args.service_account:
            raise exceptions.RequiredArgumentException(
                '--service-account',
                'must be specified with a service account. To clear the '
                'default service account use [--no-service-account].')
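
The validator above presupposes a --service-account / --no-service-account flag pair. A minimal sketch of how such flags might be registered; the helper name and help text are illustrative assumptions, not taken from the gcloud source.

# Sketch only: hypothetical flag wiring that validateFlags expects.
def AddServiceAccountFlags(parser):
    parser.add_argument(
        '--service-account',
        help='Email address of the service account to attach.')
    parser.add_argument(
        '--no-service-account',
        action='store_true',
        help='Create the resource without a default service account.')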
Example #26
    def _ConstructBaseSettingsFromArgs(cls,
                                       sql_messages,
                                       args,
                                       instance=None,
                                       release_track=DEFAULT_RELEASE_TRACK):
        """Constructs instance settings from the command line arguments.

    Args:
      sql_messages: module, The messages module that should be used.
      args: argparse.Namespace, The arguments that this command was invoked
        with.
      instance: sql_messages.DatabaseInstance, The original instance, for
        settings that depend on the previous state.
      release_track: base.ReleaseTrack, the release track that this was run
        under.

    Returns:
      A settings object representing the instance settings.

    Raises:
      ToolException: An error other than http error occurred while executing the
          command.
    """

        # This code is shared by create and patch, but these args don't exist in
        # create anymore, so insert them here to avoid regressions below.
        if 'authorized_gae_apps' not in args:
            args.authorized_gae_apps = None
        if 'follow_gae_app' not in args:
            args.follow_gae_app = None
        if 'pricing_plan' not in args:
            args.pricing_plan = 'PER_USE'

        settings = sql_messages.Settings(
            kind='sql#settings',
            tier=reducers.MachineType(instance, args.tier, args.memory,
                                      args.cpu),
            pricingPlan=_ParsePricingPlan(sql_messages, args.pricing_plan),
            replicationType=_ParseReplicationType(sql_messages,
                                                  args.replication),
            activationPolicy=_ParseActivationPolicy(sql_messages,
                                                    args.activation_policy))

        if args.authorized_gae_apps:
            settings.authorizedGaeApplications = args.authorized_gae_apps

        if any([
                args.assign_ip is not None, args.require_ssl is not None,
                args.authorized_networks
        ]):
            settings.ipConfiguration = sql_messages.IpConfiguration()
            if args.assign_ip is not None:
                cls.SetIpConfigurationEnabled(settings, args.assign_ip)

            if args.authorized_networks:
                cls.SetAuthorizedNetworks(settings, args.authorized_networks,
                                          sql_messages.AclEntry)

            if args.require_ssl is not None:
                settings.ipConfiguration.requireSsl = args.require_ssl

        if any([args.follow_gae_app, _GetZone(args)]):
            settings.locationPreference = sql_messages.LocationPreference(
                kind='sql#locationPreference',
                followGaeApplication=args.follow_gae_app,
                zone=_GetZone(args))

        if args.storage_size:
            settings.dataDiskSizeGb = int(args.storage_size /
                                          constants.BYTES_TO_GB)

        if args.storage_auto_increase is not None:
            settings.storageAutoResize = args.storage_auto_increase

        if args.IsSpecified('availability_type'):
            settings.availabilityType = _ParseAvailabilityType(
                sql_messages, args.availability_type)

        # BETA args.
        if _IsBetaOrNewer(release_track):
            if args.IsSpecified('storage_auto_increase_limit'):
                # The resize limit is only settable if the original instance
                # has auto-resize turned on, or if the instance being created
                # has the resize flag set.
                if (instance and instance.settings.storageAutoResize) or (
                        args.storage_auto_increase):
                    # If the limit is set to None, we want it to be set to 0. This is a
                    # backend requirement.
                    settings.storageAutoResizeLimit = (
                        args.storage_auto_increase_limit or 0)
                else:
                    raise exceptions.RequiredArgumentException(
                        '--storage-auto-increase',
                        'To set the storage capacity limit '
                        'using [--storage-auto-increase-limit], '
                        '[--storage-auto-increase] must be enabled.')

            if args.IsSpecified('network'):
                if not settings.ipConfiguration:
                    settings.ipConfiguration = sql_messages.IpConfiguration()
                settings.ipConfiguration.privateNetwork = reducers.PrivateNetworkUrl(
                    args.network)

        return settings
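
One conversion worth noting in the settings construction: --storage-size arrives in bytes, so dataDiskSizeGb is obtained by dividing by constants.BYTES_TO_GB. A self-contained sketch of the arithmetic, assuming BYTES_TO_GB is 2**30:

# Sketch only: BYTES_TO_GB = 2**30 is an assumption standing in for
# constants.BYTES_TO_GB.
BYTES_TO_GB = 1 << 30

storage_size_bytes = 10 * BYTES_TO_GB   # e.g. --storage-size=10GB
data_disk_size_gb = int(storage_size_bytes / BYTES_TO_GB)
assert data_disk_size_gb == 10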
Example #27
    def Run(self, args):
        """Deploy a container to Cloud Run."""
        service_ref = flags.GetService(args)
        messages = None
        build_type = None
        build_config = None
        image = None
        include_build = flags.FlagIsExplicitlySet(args, 'source')
        operation_message = 'Deploying container'
        # Build an image from source if source specified
        if include_build:
            # Create a tag for the image creation
            if not args.IsSpecified('image'):
                args.image = 'gcr.io/{projectID}/cloud-run-source-deploy/{service}:{tag}'.format(
                    projectID=properties.VALUES.core.project.Get(
                        required=True),
                    service=service_ref.servicesId,
                    tag=uuid.uuid4().hex)
            # Use GCP Buildpacks if Dockerfile doesn't exist
            docker_file = os.path.join(args.source, 'Dockerfile')
            if os.path.exists(docker_file):
                build_type = BuildType.DOCKERFILE
            else:
                args.pack = [{'image': args.image}]
                build_type = BuildType.BUILDPACKS
            operation_message = 'Building using {build_type} and deploying container'.format(
                build_type=build_type.value)
            messages = cloudbuild_util.GetMessagesModule()
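            # When buildpacks are used, the image is carried inside the pack
            # config, so the top-level image passed to the build config must
            # be cleared.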
            image = None if args.pack else args.image
            build_config = submit_util.CreateBuildConfigAlpha(
                image, args.no_cache, messages, args.substitutions, None,
                args.IsSpecified('source'), False, args.source,
                args.gcs_source_staging_dir, args.ignore_file,
                args.gcs_log_dir, args.machine_type, args.disk_size,
                args.worker_pool, args.pack)
        elif not args.IsSpecified('image'):
            raise c_exceptions.RequiredArgumentException(
                '--image', 'Requires a container image to deploy (e.g. '
                '`gcr.io/cloudrun/hello:latest`) if no build source is provided.'
            )
        # Deploy a container with an image
        conn_context = connection_context.GetConnectionContext(
            args, flags.Product.RUN, self.ReleaseTrack())
        changes = flags.GetConfigurationChanges(args)
        changes.append(
            config_changes.SetLaunchStageAnnotationChange(self.ReleaseTrack()))

        with serverless_operations.Connect(conn_context) as operations:
            service = operations.GetService(service_ref)
            allow_unauth = GetAllowUnauth(args, operations, service_ref,
                                          service)
            resource_change_validators.ValidateClearVpcConnector(service, args)

            pretty_print.Info(
                messages_util.GetStartDeployMessage(conn_context, service_ref,
                                                    operation_message))
            has_latest = (service is None or traffic.LATEST_REVISION_KEY
                          in service.spec_traffic)
            deployment_stages = stages.ServiceStages(
                include_iam_policy_set=allow_unauth is not None,
                include_route=has_latest,
                include_build=include_build)
            header = ('Building and deploying'
                      if include_build else 'Deploying')
            if service is None:
                header += ' new service'
            header += '...'
            with progress_tracker.StagedProgressTracker(
                    header,
                    deployment_stages,
                    failure_message='Deployment failed',
                    suppress_output=args.async_) as tracker:
                service = operations.ReleaseService(
                    service_ref,
                    changes,
                    tracker,
                    asyn=args.async_,
                    allow_unauthenticated=allow_unauth,
                    prefetch=service,
                    build_config=build_config,
                    build_messages=messages)

            if args.async_:
                pretty_print.Success(
                    'Service [{{bold}}{serv}{{reset}}] is deploying '
                    'asynchronously.'.format(serv=service.name))
            else:
                service = operations.GetService(service_ref)
                pretty_print.Success(
                    messages_util.GetSuccessMessageForSynchronousDeploy(
                        service))
            return service
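
The deploy example reads build_type.value when composing its progress message. A minimal sketch of a BuildType enum consistent with that usage; the member values are assumptions, not taken from the original source.

import enum

# Sketch only: enum values chosen to match the progress message wording.
class BuildType(enum.Enum):
    DOCKERFILE = 'Dockerfile'
    BUILDPACKS = 'Buildpacks'

# Example: 'Building using Dockerfile and deploying container'
print('Building using {} and deploying container'.format(
    BuildType.DOCKERFILE.value))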