def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client

    instance_ref = flags.INSTANCE_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetInstanceZoneScopeLister(client))

    embedded_request = client.messages.InstancesSetMinCpuPlatformRequest(
        minCpuPlatform=args.min_cpu_platform or None)
    request = client.messages.ComputeInstancesSetMinCpuPlatformRequest(
        instance=instance_ref.instance,
        project=instance_ref.project,
        instancesSetMinCpuPlatformRequest=embedded_request,
        zone=instance_ref.zone)

    operation = client.apitools_client.instances.SetMinCpuPlatform(request)

    operation_ref = holder.resources.Parse(
        operation.selfLink, collection='compute.zoneOperations')

    if args.async_:
      log.UpdatedResource(
          operation_ref,
          kind='gce instance [{0}]'.format(instance_ref.Name()),
          is_async=True,
          details='Use [gcloud compute operations describe] command '
                  'to check the status of this operation.'
      )
      return operation

    operation_poller = poller.Poller(client.apitools_client.instances)
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Changing minimum CPU platform of instance [{0}]'.format(
            instance_ref.Name()))
Example 2
  def Run(self, args):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client

    instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
        args, holder.resources,
        scope_lister=flags.GetInstanceZoneScopeLister(client))

    requests = self._CreateRequests(client, instance_refs, args)

    errors_to_collect = []
    responses = client.BatchRequests(requests, errors_to_collect)
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    if args.async_:
      for operation_ref in operation_refs:
        log.status.Print('Stop instance in progress for [{}].'.format(
            operation_ref.SelfLink()))
      log.status.Print(
          'Use [gcloud compute operations describe URI] command to check the '
          'status of the operation(s).')
      return responses

    operation_poller = poller.BatchPoller(
        client, client.apitools_client.instances, instance_refs)
    waiter.WaitFor(
        operation_poller,
        poller.OperationBatch(operation_refs),
        'Stopping instance(s) {0}'.format(
            ', '.join(i.Name() for i in instance_refs)),
        max_wait_ms=None)

    for instance_ref in instance_refs:
      log.status.Print('Updated [{0}].'.format(instance_ref))
Example 3
    def Run(self, args):
        """This is what gets called when the user runs this command."""
        client = client_util.GetClientInstance()
        messages = client_util.GetMessagesModule()

        yaml_data = input_util.LoadYamlFromPath(args.file)
        workflow = workflow_input_util.CloudBuildYamlDataToWorkflow(yaml_data)

        project = properties.VALUES.core.project.Get(required=True)
        parent = 'projects/%s/locations/%s' % (project, args.region)
        name = '%s/workflows/%s' % (parent, args.WORKFLOW_ID)

        # Update workflow (or create if missing).
        workflow.name = name
        update_operation = client.projects_locations_workflows.Patch(
            messages.CloudbuildProjectsLocationsWorkflowsPatchRequest(
                name=name, workflow=workflow, allowMissing=True))

        update_operation_ref = resources.REGISTRY.ParseRelativeName(
            update_operation.name,
            collection='cloudbuild.projects.locations.operations')

        updated_workflow = waiter.WaitFor(
            waiter.CloudOperationPoller(client.projects_locations_workflows,
                                        client.projects_locations_operations),
            update_operation_ref, 'Updating Workflow')

        updated_workflow_ref = resources.REGISTRY.Parse(
            updated_workflow.name,
            collection='cloudbuild.projects.locations.workflows',
            api_version=client_util.RELEASE_TRACK_TO_API_VERSION[
                self.ReleaseTrack()],
        )

        log.status.Print('Apply result: {}'.format(updated_workflow_ref))
        return updated_workflow
    def Run(self, args):
        client = networkconnectivity_api.SpokesClient(
            release_track=self.ReleaseTrack())
        spoke_ref = args.CONCEPTS.spoke.Parse()

        console_io.PromptContinue(
            message=('You are about to delete spoke [{}]'.format(
                spoke_ref.Name())),
            cancel_on_no=True)

        op_ref = client.Delete(spoke_ref)

        log.status.Print('Delete request issued for: [{}]'.format(
            spoke_ref.Name()))

        if op_ref.done:
            log.DeletedResource(spoke_ref.Name(), kind='spoke')
            return op_ref

        if args.async_:
            log.status.Print('Check operation [{}] for status.'.format(
                op_ref.name))
            return op_ref

        api_version = networkconnectivity_util.VERSION_MAP[self.ReleaseTrack()]
        op_resource = resources.REGISTRY.ParseRelativeName(
            op_ref.name,
            collection='networkconnectivity.projects.locations.operations',
            api_version=api_version)
        poller = waiter.CloudOperationPollerNoResources(
            client.operation_service)
        res = waiter.WaitFor(
            poller, op_resource,
            'Waiting for operation [{}] to complete'.format(op_ref.name))
        log.DeletedResource(spoke_ref.Name(), kind='spoke')
        return res
Example 5
def WaitForOperation(resources, service, operation, collection, resource_ref,
                     message):
  """Waits for the operation to finish.

  Args:
    resources: The resource parser.
    service: apitools.base.py.base_api.BaseApiService, the service representing
      the target of the operation.
    operation: The operation to wait for.
    collection: The operations collection.
    resource_ref: The resource reference.
    message: The message to show.

  Returns:
    The operation result.
  """
  params = {'project': resource_ref.project}
  if collection == 'compute.regionOperations':
    params['region'] = resource_ref.region

  operation_ref = resources.Parse(
      operation.name, params=params, collection=collection)
  operation_poller = poller.Poller(service, resource_ref)
  return waiter.WaitFor(operation_poller, operation_ref, message)
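A hedged sketch of a call site for the helper above, assuming a compute API holder and an operation returned by an Insert request; the addresses service, address_ref, and the progress message are illustrative assumptions, not taken from the original.

# Illustrative call only: wait on a regional operation produced by an
# addresses Insert request (refs, service and message are hypothetical).
address = WaitForOperation(
    holder.resources,
    holder.client.apitools_client.addresses,
    operation,
    'compute.regionOperations',
    address_ref,
    'Creating address [{0}]'.format(address_ref.Name()))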
Example 6
def _UpdateShieldedInstanceConfig(holder, client, operation_poller,
                                  instance_ref, args):
    """Update the Shielded Instance Config."""
    if (args.shielded_vm_secure_boot is None and args.shielded_vm_vtpm is None
            and args.shielded_vm_integrity_monitoring is None):
        return None
    shielded_config_msg = client.messages.ShieldedInstanceConfig(
        enableSecureBoot=args.shielded_vm_secure_boot,
        enableVtpm=args.shielded_vm_vtpm,
        enableIntegrityMonitoring=args.shielded_vm_integrity_monitoring)
    request = client.messages.ComputeInstancesUpdateShieldedInstanceConfigRequest(
        instance=instance_ref.Name(),
        project=instance_ref.project,
        shieldedInstanceConfig=shielded_config_msg,
        zone=instance_ref.zone)

    operation = client.apitools_client.instances.UpdateShieldedInstanceConfig(
        request)
    operation_ref = holder.resources.Parse(operation.selfLink,
                                           collection='compute.zoneOperations')
    return waiter.WaitFor(
        operation_poller, operation_ref,
        'Setting shieldedInstanceConfig of instance [{0}]'.format(
            instance_ref.Name()))
    def Patch(self, pap_ref, status):
        """Updates public advertised prefix."""

        status = arg_utils.ChoiceToEnum(
            status, self.messages.PublicAdvertisedPrefix.StatusValueValuesEnum)
        original_pap = self._service.Get(
            self.client.messages.ComputePublicAdvertisedPrefixesGetRequest(
                **pap_ref.AsDict()))

        request = self.messages.ComputePublicAdvertisedPrefixesPatchRequest(
            project=pap_ref.project,
            publicAdvertisedPrefix=pap_ref.Name(),
            publicAdvertisedPrefixResource=self.messages.
            PublicAdvertisedPrefix(status=status,
                                   fingerprint=original_pap.fingerprint))

        operation = self._service.Patch(request)
        operation_ref = self.resources.Parse(
            operation.selfLink, collection='compute.globalOperations')

        operation_poller = poller.Poller(self._service)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating public advertised prefix [{}].'.format(pap_ref.Name()))
Example 8
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client.apitools_client
        messages = holder.client.messages

        image_ref = self.DISK_IMAGE_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))

        add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)

        image = client.images.Get(
            messages.ComputeImagesGetRequest(**image_ref.AsDict()))

        labels_update = labels_util.Diff(additions=add_labels).Apply(
            messages.GlobalSetLabelsRequest.LabelsValue, image.labels)

        if not labels_update.needs_update:
            return image

        request = messages.ComputeImagesSetLabelsRequest(
            project=image_ref.project,
            resource=image_ref.image,
            globalSetLabelsRequest=messages.GlobalSetLabelsRequest(
                labelFingerprint=image.labelFingerprint,
                labels=labels_update.labels))

        operation = client.images.SetLabels(request)
        operation_ref = holder.resources.Parse(
            operation.selfLink, collection='compute.globalOperations')

        operation_poller = poller.Poller(client.images)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating labels of image [{0}]'.format(image_ref.Name()))
Example 9
    def WaitForOperation(self, operation_ref, message=None, service=None):
        """Waits for the given google.longrunning.Operation to complete.

    Args:
      operation_ref: The operation to poll.
      message: String to display for default progress_tracker.
      service: The service to get the resource after the long running operation
        completes.

    Raises:
      apitools.base.py.HttpError: if the request returns an HTTP error

    Returns:
      The Operation or the Resource the Operation is associated with.
    """
        # Consumers of OperationsClient can be resource-aware and if so, they can
        # provide the service used for interacting with the Resource the Operation
        # is associated with.  In this case, OperationsClient#WaitForOperation  will
        # return the Resource the polled Operation is associated with.  Otherwise,
        # no service is provided and the Operation object itself is returned.
        #
        # Example: `gateways create` is resource-aware and returns an
        # ApigatewayGateway while `operations wait` is not resource-aware and will
        # return the Operation itself.
        if service is None:
            poller = waiter.CloudOperationPollerNoResources(
                self.client.projects_locations_operations)
        else:
            poller = waiter.CloudOperationPoller(
                service, self.client.projects_locations_operations)

        if message is None:
            message = 'Waiting for Operation [{}] to complete'.format(
                operation_ref.RelativeName())

        return waiter.WaitFor(poller, operation_ref, message)
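To illustrate the two calling patterns described in the comment above, a minimal hedged sketch; operations_client stands for an instance of this class, and the gateway service attribute is an assumption.

# Resource-aware: returns the ApigatewayGateway once the operation finishes.
gateway = operations_client.WaitForOperation(
    operation_ref,
    message='Waiting for API Gateway [my-gateway] to be created',
    service=client.projects_locations_gateways)  # assumed service attribute

# Not resource-aware: returns the Operation object itself.
operation = operations_client.WaitForOperation(operation_ref)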
Example 10
def DeleteFeature(name, feature_display_name, force=False):
    """Deletes a Feature resource in Hub.

  Args:
    name: the full resource name of the Feature to delete, e.g.,
      projects/foo/locations/global/features/name.
    feature_display_name: the FEATURE_DISPLAY_NAME of this Feature
    force: flag to trigger force deletion of the Feature.

  Raises:
    apitools.base.py.HttpError: if the request returns an HTTP error
  """

    client = core_apis.GetClientInstance('gkehub', 'v1alpha1')
    op = client.projects_locations_global_features.Delete(
        client.MESSAGES_MODULE.
        GkehubProjectsLocationsGlobalFeaturesDeleteRequest(name=name,
                                                           force=force))
    op_resource = resources.REGISTRY.ParseRelativeName(
        op.name, collection='gkehub.projects.locations.operations')
    waiter.WaitFor(
        waiter.CloudOperationPollerNoResources(
            client.projects_locations_operations), op_resource,
        'Waiting for Feature {} to be deleted'.format(feature_display_name))
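A short hedged example of calling DeleteFeature; the project, feature name, and display name are hypothetical and only show the resource-name pattern from the docstring.

# Illustrative only: force-delete a hypothetical Feature and block on the LRO.
DeleteFeature(
    'projects/my-project/locations/global/features/configmanagement',
    'Config Management',
    force=True)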
    def Run(self, args):
        """Run package import command."""
        client = apis.GetClientInstance('artifactregistry', self.api_version)
        messages = client.MESSAGES_MODULE

        client.additional_http_headers['X-Goog-Upload-Protocol'] = 'multipart'

        repo_ref = args.CONCEPTS.repository.Parse()

        upload_req = messages.UploadAptArtifactRequest
        upload_request = upload_req()

        request = messages.ArtifactregistryProjectsLocationsRepositoriesAptArtifactsUploadRequest(
            uploadAptArtifactRequest=upload_request,
            parent=repo_ref.RelativeName())

        upload = transfer.Upload.FromFile(
            args.source, mime_type='application/vnd.debian.binary-package')

        op_obj = client.projects_locations_repositories_aptArtifacts.Upload(
            request, upload=upload)

        op = op_obj.operation
        op_ref = resources.REGISTRY.ParseRelativeName(
            op.name,
            collection='artifactregistry.projects.locations.operations')

        if args.async_:
            return op_ref
        else:
            result = waiter.WaitFor(
                waiter.CloudOperationPollerNoResources(
                    client.projects_locations_operations), op_ref,
                'Uploading package')

            return result
Example 12
    def testError(self):
        with api_mock.Client(self.client_class) as client:
            poller = compute_poller.Poller(client.instances)
            operation_ref = resources.REGISTRY.Create(
                _GLOBAL_OPERATIONS_COLLECTION,
                project='mickey',
                operation='operationX')
            instance_ref = resources.REGISTRY.Create('compute.instances',
                                                     project='mickey',
                                                     zone='disney',
                                                     instance='Super-Cheese')

            self.ExpectOperation(client.globalOperations,
                                 operation_ref,
                                 client.instances,
                                 instance_ref,
                                 error_msg='Something happened')
            with self.assertRaisesRegex(compute_poller.OperationErrors,
                                        r'Something happened'):
                waiter.WaitFor(poller=poller,
                               operation_ref=operation_ref,
                               message='Making Cheese')
        self.AssertOutputEquals('')
        self.AssertErrContains('Making Cheese')
Example 13
    def Run(self, args):
        datafusion = df.Datafusion()
        instance_ref = args.CONCEPTS.instance.Parse()

        labels = args.labels or {}
        enable_stackdriver_logging = args.enable_stackdriver_logging or False
        enable_stackdriver_monitoring = args.enable_stackdriver_monitoring or False
        version = args.version
        instance = datafusion.messages.Instance(
            name=instance_ref.RelativeName(),
            version=version,
            enableStackdriverLogging=enable_stackdriver_logging,
            enableStackdriverMonitoring=enable_stackdriver_monitoring,
            labels=encoding.DictToAdditionalPropertyMessage(
                labels, datafusion.messages.Instance.LabelsValue, True))
        request = datafusion.messages.DatafusionProjectsLocationsInstancesPatchRequest(
            instance=instance, name=instance_ref.RelativeName())

        operation = datafusion.client.projects_locations_instances.Patch(
            request)

        if args.async_:
            log.CreatedResource(instance_ref.RelativeName(),
                                kind='instance',
                                is_async=True)
            return operation
        else:
            waiter.WaitFor(
                operation_poller.OperationPoller(),
                operation.name,
                'Waiting for [{}] to complete. This may take several minutes.'.
                format(operation.name),
                wait_ceiling_ms=df.OPERATION_TIMEOUT)
            log.UpdatedResource(instance_ref.RelativeName(),
                                kind='instance',
                                is_async=False)
Example 14
def CreateRepository(repo):
    """Creates an Artifact Registry repostiory and waits for the operation.

  Args:
    repo: googlecloudsdk.command_lib.artifacts.docker_util.DockerRepo defining
      the repository to be created.
  """
    messages = requests.GetMessages()
    repository_message = messages.Repository(
        name=repo.GetRepositoryName(),
        description='Cloud Run Source Deployments',
        format=messages.Repository.FormatValueValuesEnum.DOCKER,
    )

    op = requests.CreateRepository(repo.project, repo.location,
                                   repository_message)
    op_resource = resources.REGISTRY.ParseRelativeName(
        op.name, collection='artifactregistry.projects.locations.operations')

    client = requests.GetClient()
    waiter.WaitFor(
        waiter.CloudOperationPoller(client.projects_locations_repositories,
                                    client.projects_locations_operations),
        op_resource)
Example 15
  def Run(self, args):
    """This is what gets called when the user runs this command."""
    client = client_util.GetClientInstance()
    messages = client_util.GetMessagesModule()

    project = properties.VALUES.core.project.Get(required=True)
    parent = 'projects/%s/locations/%s' % (project, args.region)
    resource_name = '%s/workflows/%s' % (parent, args.WORKFLOW_ID)

    # Delete workflow.
    delete_operation = client.projects_locations_workflows.Delete(
        messages.CloudbuildProjectsLocationsWorkflowsDeleteRequest(
            name=resource_name))

    delete_operation_ref = resources.REGISTRY.ParseRelativeName(
        delete_operation.name,
        collection='cloudbuild.projects.locations.operations')

    waiter.WaitFor(
        waiter.CloudOperationPollerNoResources(
            client.projects_locations_operations), delete_operation_ref,
        'Deleting Workflow')

    log.DeletedResource(resource_name)
Example 16
def ParseReplaceAccessLevelsResponseBase(lro, version):
    """Parse the Long Running Operation response of the ReplaceAccessLevels call.

  Args:
    lro: Long Running Operation response of ReplaceAccessLevels.
    version: version of the API. e.g. 'v1beta', 'v1'.

  Returns:
    The replacement Access Levels created by the ReplaceAccessLevels call.

  Raises:
    ParseResponseError: if the response could not be parsed into the proper
    object.
  """
    client = util.GetClient(version=version)
    operation_ref = resources.REGISTRY.Parse(
        lro.name, collection='accesscontextmanager.operations')
    poller = common.BulkAPIOperationPoller(client.accessPolicies_accessLevels,
                                           client.operations, operation_ref)

    return waiter.WaitFor(
        poller, operation_ref,
        'Waiting for Replace Access Levels operation [{}]'.format(
            operation_ref.Name()))
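A hedged sketch of the intended flow: the long-running operation returned by a ReplaceAccessLevels call is passed straight to the parser, which blocks until the replacement levels exist. The lro variable is assumed to come from the API call issued elsewhere.

# Illustrative only: lro is the operation returned by ReplaceAccessLevels.
access_levels = ParseReplaceAccessLevelsResponseBase(lro, version='v1')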
  def Run(self, args):
    """Issues the request to delete Signed URL key from the backend service."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    api_client = holder.client.apitools_client
    messages = holder.client.messages
    service = api_client.backendBuckets

    backend_bucket_ref = self.BACKEND_BUCKET_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=compute_flags.GetDefaultScopeLister(holder.client))
    request = messages.ComputeBackendBucketsDeleteSignedUrlKeyRequest(
        project=backend_bucket_ref.project,
        backendBucket=backend_bucket_ref.Name(),
        keyName=args.key_name)

    operation = service.DeleteSignedUrlKey(request)
    operation_ref = holder.resources.Parse(
        operation.selfLink, collection='compute.globalOperations')

    operation_poller = poller.Poller(service)
    return waiter.WaitFor(operation_poller, operation_ref,
                          'Deleting Cloud CDN Signed URL key from [{0}]'.format(
                              backend_bucket_ref.Name()))
Example 18
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client.apitools_client
        messages = holder.client.messages

        snapshot_ref = SnapshotsAddLabels.SnapshotArg.ResolveAsResource(
            args, holder.resources)

        add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)

        snapshot = client.snapshots.Get(
            messages.ComputeSnapshotsGetRequest(**snapshot_ref.AsDict()))

        replacement = labels_util.UpdateLabels(
            snapshot.labels,
            messages.GlobalSetLabelsRequest.LabelsValue,
            update_labels=add_labels)

        if not replacement:
            return snapshot

        request = messages.ComputeSnapshotsSetLabelsRequest(
            project=snapshot_ref.project,
            resource=snapshot_ref.snapshot,
            globalSetLabelsRequest=messages.GlobalSetLabelsRequest(
                labelFingerprint=snapshot.labelFingerprint,
                labels=replacement))

        operation = client.snapshots.SetLabels(request)
        operation_ref = holder.resources.Parse(
            operation.selfLink, collection='compute.globalOperations')

        operation_poller = poller.Poller(client.snapshots)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating labels of snapshot [{0}]'.format(snapshot_ref.Name()))
    def Run(self, args):
        datafusion = df.Datafusion()
        instance_ref = args.CONCEPTS.instance.Parse()

        # Default unspecified arguments to empty values.
        version = args.version
        if not version:
            version = ''
        zone = args.zone
        if not zone:
            zone = ''
        options = args.options
        if not options:
            options = {}
        labels = args.labels
        if not labels:
            labels = {}
        enable_stackdriver_logging = args.enable_stackdriver_logging
        if not enable_stackdriver_logging:
            enable_stackdriver_logging = False
        enable_stackdriver_monitoring = args.enable_stackdriver_monitoring
        if not enable_stackdriver_monitoring:
            enable_stackdriver_monitoring = False
        enable_rbac = args.enable_rbac
        if not enable_rbac:
            enable_rbac = False
        edition_mapper = arg_utils.ChoiceEnumMapper(
            'edition_enum',
            df.Datafusion().messages.Instance.TypeValueValuesEnum)
        edition = edition_mapper.GetEnumForChoice(args.edition)
        instance = datafusion.messages.Instance(
            zone=zone,
            type=edition,
            version=version,
            enableStackdriverLogging=enable_stackdriver_logging,
            enableStackdriverMonitoring=enable_stackdriver_monitoring,
            enableRbac=enable_rbac,
            options=encoding.DictToAdditionalPropertyMessage(
                options, datafusion.messages.Instance.OptionsValue, True),
            labels=encoding.DictToAdditionalPropertyMessage(
                labels, datafusion.messages.Instance.LabelsValue, True))

        req = datafusion.messages.DatafusionProjectsLocationsInstancesCreateRequest(
            instance=instance,
            instanceId=instance_ref.Name(),
            parent=instance_ref.Parent().RelativeName())

        operation = datafusion.client.projects_locations_instances.Create(req)

        if args.async_:
            log.CreatedResource(instance_ref.RelativeName(),
                                kind='instance',
                                is_async=True)
            return operation
        else:
            waiter.WaitFor(
                operation_poller.OperationPoller(),
                operation.name,
                'Waiting for [{}] to complete. This may take several minutes.'.
                format(operation.name),
                max_wait_ms=df.OPERATION_TIMEOUT,
                wait_ceiling_ms=df.OPERATION_TIMEOUT)
            log.CreatedResource(instance_ref.RelativeName(),
                                kind='instance',
                                is_async=False)
Example 20
    def Patch(self,
              perimeter_ref,
              description=None,
              title=None,
              perimeter_type=None,
              resources=None,
              restricted_services=None,
              unrestricted_services=None,
              levels=None,
              ingress_allowed_services=None,
              vpc_allowed_services=None,
              bridge_allowed_services=None,
              enable_ingress_service_restriction=None,
              enable_vpc_service_restriction=None,
              enable_bridge_service_restriction=None):
        """Patch a service perimeter.

    Any non-None fields will be included in the update mask.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch
      description: str, description of the zone or None if not updating
      title: str, title of the zone or None if not updating
      perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level
        or None if not updating
      resources: list of str, the names of resources (for now, just
        'projects/...') in the zone or None if not updating.
      restricted_services: list of str, the names of services
        ('example.googleapis.com') that *are* restricted by the access zone or
        None if not updating.
      unrestricted_services: list of str, the names of services
        ('example.googleapis.com') that *are not* restricted by the access zone
        or None if not updating.
      levels: list of Resource, the access levels (in the same policy) that must
        be satisfied for calls into this zone or None if not updating.
      ingress_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to use Access Levels to
        make a cross access zone boundary call, or None if not updating.
      vpc_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to be made within the
        access zone, or None if not updating.
      bridge_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to use the bridge access
        zone, or None if not updating.
      enable_ingress_service_restriction: bool, whether to restrict the set of
        APIs callable outside the access zone via Access Levels, or None if not
        updating.
      enable_vpc_service_restriction: bool, whether to restrict the set of APIs
        callable within the access zone, or None if not updating.
      enable_bridge_service_restriction: bool, whether to restrict the set of
        APIs callable using the bridge access zone, or None if not updating.

    Returns:
      AccessZone, the updated access zone
    """
        m = self.messages
        perimeter = m.ServicePerimeter()

        update_mask = []

        if description is not None:
            update_mask.append('description')
            perimeter.description = description
        if title is not None:
            update_mask.append('title')
            perimeter.title = title
        if perimeter_type is not None:
            update_mask.append('perimeterType')
            perimeter.perimeterType = perimeter_type
        status = m.ServicePerimeterConfig()
        status_mutated = False
        if resources is not None:
            update_mask.append('status.resources')
            status.resources = resources
            status_mutated = True
        if self.include_unrestricted_services and unrestricted_services is not None:
            update_mask.append('status.unrestrictedServices')
            status.unrestrictedServices = unrestricted_services
            status_mutated = True
        if restricted_services is not None:
            update_mask.append('status.restrictedServices')
            status.restrictedServices = restricted_services
            status_mutated = True
        if levels is not None:
            update_mask.append('status.accessLevels')
            status.accessLevels = [l.RelativeName() for l in levels]
            status_mutated = True

        def AddServiceRestrictionFields(allowed_services, enable_restriction,
                                        restriction_type):
            """Utility function for adding service restriction fields."""
            if allowed_services is None and enable_restriction is None:
                return False
            full_restriction_name = restriction_type + 'ServiceRestriction'

            # Set empty message if absent.
            if getattr(status, full_restriction_name) is None:
                restriction_message = getattr(
                    m,
                    restriction_type.capitalize() + 'ServiceRestriction')()
                setattr(status, full_restriction_name, restriction_message)

            if allowed_services is not None:
                update_mask.append('status.' + full_restriction_name +
                                   '.allowedServices')
                restriction_message = getattr(status, full_restriction_name)
                restriction_message.allowedServices = allowed_services

            if enable_restriction is not None:
                update_mask.append('status.' + full_restriction_name +
                                   '.enableRestriction')
                restriction_message = getattr(status, full_restriction_name)
                restriction_message.enableRestriction = enable_restriction

            return True

        status_mutated |= AddServiceRestrictionFields(
            allowed_services=ingress_allowed_services,
            enable_restriction=enable_ingress_service_restriction,
            restriction_type='ingress')
        status_mutated |= AddServiceRestrictionFields(
            allowed_services=vpc_allowed_services,
            enable_restriction=enable_vpc_service_restriction,
            restriction_type='vpc')
        status_mutated |= AddServiceRestrictionFields(
            allowed_services=bridge_allowed_services,
            enable_restriction=enable_bridge_service_restriction,
            restriction_type='bridge')

        if status_mutated:
            perimeter.status = status

        update_mask.sort()  # For ease-of-testing

        # No update mask implies no fields were actually edited, so this is a no-op.
        if not update_mask:
            log.warning(
                'The update specified results in an identical resource. Skipping request.'
            )
            return perimeter

        request_type = (
            m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest)
        request = request_type(
            servicePerimeter=perimeter,
            name=perimeter_ref.RelativeName(),
            updateMask=','.join(update_mask),
        )

        operation = self.client.accessPolicies_servicePerimeters.Patch(request)
        poller = util.OperationPoller(
            self.client.accessPolicies_servicePerimeters,
            self.client.operations, perimeter_ref)
        operation_ref = core_resources.REGISTRY.Parse(
            operation.name, collection='accesscontextmanager.operations')
        return waiter.WaitFor(
            poller, operation_ref,
            'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))
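A hedged usage sketch of the Patch method above: only arguments passed as non-None enter the update mask, so the call below (perimeters_client and the values are illustrative assumptions) patches just the title and the restricted services list.

# Illustrative only: everything left as None stays out of the update mask.
updated_perimeter = perimeters_client.Patch(
    perimeter_ref,
    title='updated perimeter title',
    restricted_services=['bigquery.googleapis.com', 'storage.googleapis.com'])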
Example 21
def DeployConnectAgent(args,
                       service_account_key_data,
                       docker_credential_data,
                       upgrade=False):
  """Deploys the GKE Connect agent to the cluster.

  Args:
    args: arguments of the command.
    service_account_key_data: The contents of a Google IAM service account JSON
      file
    docker_credential_data: A credential that can be used to access Docker, to
      be stored in a secret and referenced from pod.spec.ImagePullSecrets.
    upgrade: whether to attempt to upgrade the agent, rather than replacing it.

  Raises:
    exceptions.Error: If the agent cannot be deployed properly
    calliope_exceptions.MinimumArgumentException: If the agent cannot be
    deployed properly
  """
  kube_client = KubernetesClient(args)

  image = args.docker_image
  if not image:
    # Get the SHA for the default image.
    try:
      digest = ImageDigestForContainerImage(DEFAULT_CONNECT_AGENT_IMAGE,
                                            DEFAULT_CONNECT_AGENT_TAG)
      image = '{}@{}'.format(DEFAULT_CONNECT_AGENT_IMAGE, digest)
    except Exception as exp:
      raise exceptions.Error(
          'could not determine image digest for {}:{}: {}'.format(
              DEFAULT_CONNECT_AGENT_IMAGE, DEFAULT_CONNECT_AGENT_TAG, exp))

  project_id = properties.VALUES.core.project.GetOrFail()
  namespace = _GKEConnectNamespace(kube_client, project_id)

  full_manifest, agent_install_deployment_name = GenerateInstallManifest(
      project_id, namespace, image, service_account_key_data,
      docker_credential_data, args.CLUSTER_NAME, args.proxy)

  # Generate a manifest file if necessary.
  if args.manifest_output_file:
    try:
      files.WriteFileContents(
          files.ExpandHomeDir(args.manifest_output_file),
          full_manifest,
          private=True)
    except files.Error as e:
      raise exceptions.Error('could not create manifest file: {}'.format(e))

    log.status.Print(MANIFEST_SAVED_MESSAGE.format(args.manifest_output_file))
    return

  log.status.Print('Deploying GKE Connect agent to cluster...')

  # During an upgrade, the namespace should not be deleted.
  if not upgrade:
    # Delete the ns if necessary
    if kube_client.NamespaceExists(namespace):
      console_io.PromptContinue(
          message='Namespace [{namespace}] already exists in the cluster. This '
          'may be from a previous installation of the agent. If you want to '
          'investigate, enter "n" and run\n\n'
          '  kubectl \\\n'
          '    --kubeconfig={kubeconfig} \\\n'
          '    --context={context} \\\n'
          '    get all -n {namespace}\n\n'
          'Continuing will delete namespace [{namespace}].'.format(
              namespace=namespace,
              kubeconfig=kube_client.kubeconfig,
              context=kube_client.context),
          cancel_on_no=True)
      try:
        succeeded, error = waiter.WaitFor(
            KubernetesPoller(),
            NamespaceDeleteOperation(namespace, kube_client),
            'Deleting namespace [{}] in the cluster'.format(namespace),
            pre_start_sleep_ms=NAMESPACE_DELETION_INITIAL_WAIT_MS,
            max_wait_ms=NAMESPACE_DELETION_TIMEOUT_MS,
            wait_ceiling_ms=NAMESPACE_DELETION_MAX_POLL_INTERVAL_MS,
            sleep_ms=NAMESPACE_DELETION_INITIAL_POLL_INTERVAL_MS)
      except waiter.TimeoutError as e:
        # waiter.TimeoutError assumes that the operation is a Google API
        # operation, and prints a debugging string to that effect.
        raise exceptions.Error(
            'Could not delete namespace [{}] from cluster.'.format(namespace))

      if not succeeded:
        raise exceptions.Error(
            'Could not delete namespace [{}] from cluster. Error: {}'.format(
                namespace, error))

  # Create or update the agent install deployment and related resources.
  err = kube_client.Apply(full_manifest)
  if err:
    raise exceptions.Error(
        'Failed to apply manifest to cluster: {}'.format(err))

  kubectl_log_cmd = (
      'kubectl --kubeconfig={} --context={} logs -n {} -l app={}'.format(
          kube_client.kubeconfig, kube_client.context, namespace,
          AGENT_INSTALL_APP_LABEL))

  def _WriteAgentLogs():
    """Writes logs from the agent install deployment to a temporary file."""
    logs, err = kube_client.Logs(
        namespace, 'deployment/{}'.format(agent_install_deployment_name))
    if err:
      log.warning(
          'Could not fetch Connect agent installation deployment logs: {}'
          .format(err))
      return

    _, tmp_file = tempfile.mkstemp(
        suffix='_{}.log'.format(times.Now().strftime('%Y%m%d-%H%M%S')),
        prefix='gke_connect_',
    )
    files.WriteFileContents(tmp_file, logs, private=True)
    log.status.Print(
        'Connect agent installation deployment logs saved to [{}]'.format(
            tmp_file))

  try:
    succeeded, error = waiter.WaitFor(
        KubernetesPoller(),
        DeploymentPodsAvailableOperation(namespace,
                                         RUNTIME_CONNECT_AGENT_DEPLOYMENT_NAME,
                                         image, kube_client),
        'Waiting for Connect agent to be installed',
        pre_start_sleep_ms=AGENT_INSTALL_INITIAL_WAIT_MS,
        max_wait_ms=AGENT_INSTALL_TIMEOUT_MS,
        wait_ceiling_ms=AGENT_INSTALL_MAX_POLL_INTERVAL_MS,
        sleep_ms=AGENT_INSTALL_INITIAL_POLL_INTERVAL_MS)
  except waiter.TimeoutError:
    # waiter.TimeoutError assumes that the operation is a Google API operation,
    # and prints a debugging string to that effect.
    _WriteAgentLogs()
    raise exceptions.Error(
        'Connect agent installation timed out. Leaving deployment in cluster '
        'for further debugging.\nTo view logs from the cluster:\n\n'
        '{}\n'.format(kubectl_log_cmd))

  _WriteAgentLogs()

  if not succeeded:
    raise exceptions.Error(
        'Connect agent installation did not succeed. To view logs from the '
        'cluster: {}\nKubectl error log: {}'.format(kubectl_log_cmd, error))

  log.status.Print('Connect agent installation succeeded.')
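The KubernetesPoller used above is not a Google API operation poller: anything handed to waiter.WaitFor only needs to satisfy the waiter.OperationPoller interface. A minimal hedged sketch of such a poller follows; the Update, done, succeeded and error members of the operation object are assumptions modeled on the calls above, not the real implementation.

from googlecloudsdk.api_lib.util import waiter


class KubernetesPollerSketch(waiter.OperationPoller):
  """Illustrative poller driving plain Python 'operation' objects."""

  def IsDone(self, operation):
    # The operation object is expected to keep a done flag current.
    return operation.done

  def Poll(self, operation_ref):
    # For cluster-side operations the "ref" is the operation itself;
    # Update() is assumed to re-check cluster state and set done.
    operation_ref.Update()
    return operation_ref

  def GetResult(self, operation):
    # waiter.WaitFor returns this value; the callers above unpack it as
    # (succeeded, error).
    return (operation.succeeded, operation.error)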
Example 22
    def Patch(self,
              perimeter_ref,
              description=None,
              title=None,
              perimeter_type=None,
              resources=None,
              restricted_services=None,
              unrestricted_services=None,
              levels=None,
              vpc_allowed_services=None,
              enable_vpc_accessible_services=None,
              apply_to_dry_run_config=False,
              clear_dry_run=False):
        """Patch a service perimeter.

    Args:
      perimeter_ref: resources.Resource, reference to the perimeter to patch
      description: str, description of the zone or None if not updating
      title: str, title of the zone or None if not updating
      perimeter_type: PerimeterTypeValueValuesEnum type enum value for the level
        or None if not updating
      resources: list of str, the names of resources (for now, just
        'projects/...') in the zone or None if not updating.
      restricted_services: list of str, the names of services
        ('example.googleapis.com') that *are* restricted by the access zone or
        None if not updating.
      unrestricted_services: list of str, the names of services
        ('example.googleapis.com') that *are not* restricted by the access zone
        or None if not updating.
      levels: list of Resource, the access levels (in the same policy) that must
        be satisfied for calls into this zone or None if not updating.
      vpc_allowed_services: list of str, the names of services
        ('example.googleapis.com') that *are* allowed to be made within the
        access zone, or None if not updating.
      enable_vpc_accessible_services: bool, whether to restrict the set of APIs
        callable within the access zone, or None if not updating.
      apply_to_dry_run_config: When true, the configuration will be placed in the
        'spec' field instead of the 'status' field of the Service Perimeter.
      clear_dry_run: When true, the ServicePerimeterConfig field for dry-run
        (i.e. 'spec') will be cleared and dryRun will be set to False.

    Returns:
      ServicePerimeter, the updated Service Perimeter.
    """
        m = self.messages
        perimeter = m.ServicePerimeter()

        update_mask = []

        if description is not None:
            update_mask.append('description')
            perimeter.description = description
        if title is not None:
            update_mask.append('title')
            perimeter.title = title
        if perimeter_type is not None:
            update_mask.append('perimeterType')
            perimeter.perimeterType = perimeter_type

        if not clear_dry_run:
            mask_prefix = 'status' if not apply_to_dry_run_config else 'spec'

            config, config_mask_additions = _CreateServicePerimeterConfig(
                m, mask_prefix, self.include_unrestricted_services, resources,
                restricted_services, unrestricted_services, levels,
                vpc_allowed_services, enable_vpc_accessible_services)

            if not apply_to_dry_run_config:
                perimeter.status = config
            else:
                perimeter.useExplicitDryRunSpec = True
                perimeter.spec = config

            update_mask += config_mask_additions

            if apply_to_dry_run_config and config_mask_additions:
                update_mask.append('useExplicitDryRunSpec')

        else:
            update_mask.append('spec')
            update_mask.append('useExplicitDryRunSpec')
            perimeter.spec = None
            perimeter.useExplicitDryRunSpec = False

        update_mask.sort()  # For ease-of-testing

        # No update mask implies no fields were actually edited, so this is a no-op.
        if not update_mask:
            log.warning(
                'The update specified results in an identical resource. Skipping request.'
            )
            return perimeter

        request_type = (
            m.AccesscontextmanagerAccessPoliciesServicePerimetersPatchRequest)
        request = request_type(
            servicePerimeter=perimeter,
            name=perimeter_ref.RelativeName(),
            updateMask=','.join(update_mask),
        )

        operation = self.client.accessPolicies_servicePerimeters.Patch(request)
        poller = util.OperationPoller(
            self.client.accessPolicies_servicePerimeters,
            self.client.operations, perimeter_ref)
        operation_ref = core_resources.REGISTRY.Parse(
            operation.name, collection='accesscontextmanager.operations')
        return waiter.WaitFor(
            poller, operation_ref,
            'Waiting for PATCH operation [{}]'.format(operation_ref.Name()))
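A hedged example of the dry-run path described in the docstring: with apply_to_dry_run_config=True the configuration is written to the spec field and useExplicitDryRunSpec joins the update mask. The client instance and service name are illustrative assumptions.

# Illustrative only: stage a dry-run config instead of changing enforcement.
updated_perimeter = perimeters_client.Patch(
    perimeter_ref,
    restricted_services=['storage.googleapis.com'],
    apply_to_dry_run_config=True)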
Example 23
  def _Run(self, args, support_keepalive_interval=False):
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    messages = holder.client.messages
    service = holder.client.apitools_client.routers

    router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)
    network_ref = self.NETWORK_ARG.ResolveAsResource(args, holder.resources)

    router_resource = messages.Router(
        name=router_ref.Name(),
        description=args.description,
        network=network_ref.SelfLink())

    if support_keepalive_interval:
      # Add bgp field with the assigned asn and/or keepalive_interval
      if args.asn is not None or args.keepalive_interval is not None:
        router_resource.bgp = (
            messages.RouterBgp(
                asn=args.asn, keepaliveInterval=args.keepalive_interval))
    else:
      # Add bgp field with the assigned asn.
      if args.asn is not None:
        router_resource.bgp = messages.RouterBgp(asn=args.asn)

    if router_utils.HasReplaceAdvertisementFlags(args):
      mode, groups, ranges = router_utils.ParseAdvertisements(
          messages=messages, resource_class=messages.RouterBgp, args=args)

      attrs = {
          'advertiseMode': mode,
          'advertisedGroups': groups,
          'advertisedIpRanges': ranges,
      }
      # Create an empty bgp field if not generated yet.
      if args.asn is None:
        router_resource.bgp = messages.RouterBgp()
      for attr, value in six.iteritems(attrs):
        if value is not None:
          setattr(router_resource.bgp, attr, value)

    result = service.Insert(
        messages.ComputeRoutersInsertRequest(
            router=router_resource,
            region=router_ref.region,
            project=router_ref.project))

    operation_ref = resources.REGISTRY.Parse(
        result.name,
        collection='compute.regionOperations',
        params={
            'project': router_ref.project,
            'region': router_ref.region,
        })

    if args.async_:
      # Override the networks list format with the default operations format
      if not args.IsSpecified('format'):
        args.format = 'none'
      log.CreatedResource(
          operation_ref,
          kind='router [{0}]'.format(router_ref.Name()),
          is_async=True,
          details='Run the [gcloud compute operations describe] command '
          'to check the status of this operation.')
      return result

    target_router_ref = holder.resources.Parse(
        router_ref.Name(),
        collection='compute.routers',
        params={
            'project': router_ref.project,
            'region': router_ref.region,
        })

    operation_poller = poller.Poller(service, target_router_ref)
    return waiter.WaitFor(operation_poller, operation_ref,
                          'Creating router [{0}]'.format(router_ref.Name()))
Example 24
  def _WaitForResult(self, operation_poller, operation_ref, message):
    if operation_ref:
      return waiter.WaitFor(operation_poller, operation_ref, message)
    return None
Example 25
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client.apitools_client
        messages = holder.client.messages

        disk_ref = self.DISK_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))

        add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)

        if disk_ref.Collection() == 'compute.disks':
            service = client.disks
            request_type = messages.ComputeDisksGetRequest
        elif disk_ref.Collection() == 'compute.regionDisks':
            service = client.regionDisks
            request_type = messages.ComputeRegionDisksGetRequest
        else:
            raise ValueError('Unexpected resource argument of {}'.format(
                disk_ref.Collection()))

        disk = service.Get(request_type(**disk_ref.AsDict()))

        if disk_ref.Collection() == 'compute.disks':
            operation_collection = 'compute.zoneOperations'
            replacement = labels_util.UpdateLabels(
                disk.labels,
                messages.ZoneSetLabelsRequest.LabelsValue,
                update_labels=add_labels)
            request = messages.ComputeDisksSetLabelsRequest(
                project=disk_ref.project,
                resource=disk_ref.disk,
                zone=disk_ref.zone,
                zoneSetLabelsRequest=messages.ZoneSetLabelsRequest(
                    labelFingerprint=disk.labelFingerprint,
                    labels=replacement))
        else:
            operation_collection = 'compute.regionOperations'
            replacement = labels_util.UpdateLabels(
                disk.labels,
                messages.RegionSetLabelsRequest.LabelsValue,
                update_labels=add_labels)
            request = messages.ComputeRegionDisksSetLabelsRequest(
                project=disk_ref.project,
                resource=disk_ref.disk,
                region=disk_ref.region,
                regionSetLabelsRequest=messages.RegionSetLabelsRequest(
                    labelFingerprint=disk.labelFingerprint,
                    labels=replacement))

        if not replacement:
            return disk

        operation = service.SetLabels(request)
        operation_ref = holder.resources.Parse(operation.selfLink,
                                               collection=operation_collection)

        operation_poller = poller.Poller(service)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating labels of disk [{0}]'.format(disk_ref.Name()))
Example 26
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        replacement = service.Get(request_type(**router_ref.AsDict()))

        # Retrieve specified NAT and update base fields.
        existing_nat = nats_utils.FindNatOrRaise(replacement, args.name)
        nat = nats_utils.UpdateNatMessage(existing_nat,
                                          args,
                                          holder,
                                          with_logging=self.with_logging)

        cleared_fields = []
        if args.clear_min_ports_per_vm:
            cleared_fields.append('minPortsPerVm')
        if args.clear_udp_idle_timeout:
            cleared_fields.append('udpIdleTimeoutSec')
        if args.clear_icmp_idle_timeout:
            cleared_fields.append('icmpIdleTimeoutSec')
        if args.clear_tcp_transitory_idle_timeout:
            cleared_fields.append('tcpTransitoryIdleTimeoutSec')
        if args.clear_tcp_established_idle_timeout:
            cleared_fields.append('tcpEstablishedIdleTimeoutSec')

        with holder.client.apitools_client.IncludeFields(cleared_fields):
            request_type = messages.ComputeRoutersPatchRequest
            result = service.Patch(
                request_type(project=router_ref.project,
                             region=router_ref.region,
                             router=router_ref.Name(),
                             routerResource=replacement))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            log.UpdatedResource(
                operation_ref,
                kind='nat [{0}] in router [{1}]'.format(
                    nat.name, router_ref.Name()),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating nat [{0}] in router [{1}]'.format(
                nat.name, router_ref.Name()))
Example 27
    def _Run(self, args, support_bfd=False, support_enable=False):
        # Manually ensure replace/incremental flags are mutually exclusive.
        router_utils.CheckIncompatibleFlagsOrRaise(args)

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        replacement = service.Get(request_type(**router_ref.AsDict()))

        # Retrieve specified peer and update base fields.
        peer = _UpdateBgpPeerMessage(messages,
                                     replacement,
                                     args,
                                     support_bfd=support_bfd,
                                     support_enable=support_enable)

        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, ranges = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgpPeer,
                args=args)

            router_utils.PromptIfSwitchToDefaultMode(
                messages=messages,
                resource_class=messages.RouterBgpPeer,
                existing_mode=peer.advertiseMode,
                new_mode=mode)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedIpRanges': ranges,
            }

            for attr, value in attrs.items():
                if value is not None:
                    setattr(peer, attr, value)

        if router_utils.HasIncrementalAdvertisementFlags(args):
            # This operation should only be run on custom mode peers.
            router_utils.ValidateCustomMode(
                messages=messages,
                resource_class=messages.RouterBgpPeer,
                resource=peer)

            # These arguments are guaranteed to be mutually exclusive in args.
            if args.add_advertisement_groups:
                groups_to_add = routers_utils.ParseGroups(
                    resource_class=messages.RouterBgpPeer,
                    groups=args.add_advertisement_groups)
                peer.advertisedGroups.extend(groups_to_add)

            if args.remove_advertisement_groups:
                groups_to_remove = routers_utils.ParseGroups(
                    resource_class=messages.RouterBgpPeer,
                    groups=args.remove_advertisement_groups)
                router_utils.RemoveGroupsFromAdvertisements(
                    messages=messages,
                    resource_class=messages.RouterBgpPeer,
                    resource=peer,
                    groups=groups_to_remove)

            if args.add_advertisement_ranges:
                ip_ranges_to_add = routers_utils.ParseIpRanges(
                    messages=messages, ip_ranges=args.add_advertisement_ranges)
                peer.advertisedIpRanges.extend(ip_ranges_to_add)

            if args.remove_advertisement_ranges:
                router_utils.RemoveIpRangesFromAdvertisements(
                    messages=messages,
                    resource_class=messages.RouterBgpPeer,
                    resource=peer,
                    ip_ranges=args.remove_advertisement_ranges)

        request_type = messages.ComputeRoutersPatchRequest
        result = service.Patch(
            request_type(project=router_ref.project,
                         region=router_ref.region,
                         router=router_ref.Name(),
                         routerResource=replacement))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            log.UpdatedResource(
                operation_ref,
                kind='peer [{0}] in router [{1}]'.format(
                    peer.name, router_ref.Name()),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Updating peer [{0}] in router [{1}]'.format(
                peer.name, router_ref.Name()))
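Note that the incremental branch above never talks to the API while editing: it only mutates the advertisedGroups and advertisedIpRanges lists on the in-memory peer, and the whole router is then patched back in a single request. A minimal stand-in sketch of that list bookkeeping with plain Python objects (FakePeer and the literal group/range values are illustrative only, not the real messages.RouterBgpPeer class):

class FakePeer(object):
    """Stand-in for messages.RouterBgpPeer; holds only the two advertisement lists."""

    def __init__(self):
        self.advertisedGroups = ['ALL_SUBNETS']
        self.advertisedIpRanges = ['10.10.0.0/24']


peer = FakePeer()

# --add-advertisement-groups extends the existing list in place.
peer.advertisedGroups.extend(['CUSTOM_GROUP'])

# --remove-advertisement-groups filters the list, keeping everything not removed.
to_remove = {'ALL_SUBNETS'}
peer.advertisedGroups = [g for g in peer.advertisedGroups if g not in to_remove]

# --add/--remove-advertisement-ranges edit advertisedIpRanges the same way.
print(peer.advertisedGroups)      # ['CUSTOM_GROUP']
print(peer.advertisedIpRanges)    # ['10.10.0.0/24']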
Example 28
    def Run(self, args):
        """See base.UpdateCommand."""

        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        messages = holder.client.messages
        service = holder.client.apitools_client.routers

        router_ref = self.ROUTER_ARG.ResolveAsResource(args, holder.resources)

        request_type = messages.ComputeRoutersGetRequest
        replacement = service.Get(request_type(**router_ref.AsDict()))

        peer = _CreateBgpPeerMessage(messages, args)

        if router_utils.HasReplaceAdvertisementFlags(args):
            mode, groups, ranges = router_utils.ParseAdvertisements(
                messages=messages,
                resource_class=messages.RouterBgpPeer,
                args=args)

            attrs = {
                'advertiseMode': mode,
                'advertisedGroups': groups,
                'advertisedIpRanges': ranges,
            }

            for attr, value in six.iteritems(attrs):
                if value is not None:
                    setattr(peer, attr, value)

        replacement.bgpPeers.append(peer)

        result = service.Patch(
            messages.ComputeRoutersPatchRequest(project=router_ref.project,
                                                region=router_ref.region,
                                                router=router_ref.Name(),
                                                routerResource=replacement))

        operation_ref = resources.REGISTRY.Parse(
            result.name,
            collection='compute.regionOperations',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        if args.async_:
            log.UpdatedResource(
                operation_ref,
                kind='router [{0}] to add peer [{1}]'.format(
                    router_ref.Name(), peer.name),
                is_async=True,
                details='Run the [gcloud compute operations describe] command '
                'to check the status of this operation.')
            return result

        target_router_ref = holder.resources.Parse(
            router_ref.Name(),
            collection='compute.routers',
            params={
                'project': router_ref.project,
                'region': router_ref.region,
            })

        operation_poller = poller.Poller(service, target_router_ref)
        return waiter.WaitFor(
            operation_poller, operation_ref,
            'Creating peer [{0}] in router [{1}]'.format(
                peer.name, router_ref.Name()))
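This command follows the same read-modify-write shape as the other router commands: Get the whole router, append the new peer to the local copy, Patch the full resource back, then either return the operation (--async) or poll it. A minimal sketch of that pattern against an in-memory stand-in (FakeRouterService and the dict-based router are illustrative only, not the gcloud client):

class FakeRouterService(object):
    """Stand-in for the routers API: Get returns a copy, Patch replaces the stored resource."""

    def __init__(self):
        self._router = {'name': 'my-router', 'bgpPeers': [{'name': 'peer-a'}]}

    def Get(self):
        return dict(self._router, bgpPeers=list(self._router['bgpPeers']))

    def Patch(self, replacement):
        self._router = replacement
        return {'status': 'DONE'}


service = FakeRouterService()

replacement = service.Get()                            # 1. read the full resource
replacement['bgpPeers'].append({'name': 'peer-b'})     # 2. modify the local copy
operation = service.Patch(replacement)                 # 3. write the whole thing back

print(operation['status'])                               # DONE
print([p['name'] for p in service.Get()['bgpPeers']])    # ['peer-a', 'peer-b']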
Example 29
    def Run(self, args):
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        csek_key_file = args.csek_key_file
        request_list = []
        instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetInstanceZoneScopeLister(client))

        # If csek_key_file is supplied, we must first get a reference to the
        # instances specified in the file to ensure that they exist.
        # Only then can we verify that the key specified in the file matches what
        # was used to create the instance.
        if csek_key_file:
            instances = self.GetInstances(client, instance_refs)
        else:
            instances = [None for _ in instance_refs]

        for instance_ref, instance in zip(instance_refs, instances):
            disks = []

            if instance:
                allow_rsa_encrypted = self.ReleaseTrack() in [
                    base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
                ]
                csek_keys = csek_utils.CsekKeyStore.FromArgs(
                    args, allow_rsa_encrypted)
                for disk in instance.disks:
                    disk_resource = resources.REGISTRY.Parse(disk.source)

                    disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                        csek_keys, disk_resource, client.apitools_client)

                    if disk_key_or_none:
                        disks.append(
                            client.messages.CustomerEncryptionKeyProtectedDisk(
                                diskEncryptionKey=disk_key_or_none,
                                source=disk.source))
            if disks:
                encryption_req = client.messages.InstancesResumeRequest(
                    disks=disks)

                request = (client.apitools_client.instances, 'Resume',
                           client.messages.ComputeInstancesResumeRequest(
                               instance=instance_ref.Name(),
                               instancesResumeRequest=encryption_req,
                               project=instance_ref.project,
                               zone=instance_ref.zone))

            else:
                request = (client.apitools_client.instances, 'Resume',
                           client.messages.ComputeInstancesResumeRequest(
                               instance=instance_ref.Name(),
                               project=instance_ref.project,
                               zone=instance_ref.zone))

            request_list.append(request)

        errors_to_collect = []
        responses = client.BatchRequests(request_list, errors_to_collect)
        if errors_to_collect:
            raise exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        if args.async_:
            for operation_ref in operation_refs:
                log.status.Print(
                    'Resume instance in progress for [{}].'.format(
                        operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command to check the '
                'status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(client,
                                              client.apitools_client.instances,
                                              instance_refs)

        result = waiter.WaitFor(operation_poller,
                                poller.OperationBatch(operation_refs),
                                'Resuming instance(s) {0}'.format(', '.join(
                                    i.Name() for i in instance_refs)),
                                max_wait_ms=None)

        for instance_ref in instance_refs:
            log.status.Print('Updated [{0}].'.format(instance_ref))

        return result
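The zip at the top of the request-building loop is what keeps a single code path for both cases: when no --csek-key-file is supplied, the instances list is just None placeholders, so every instance_ref still pairs with something and the per-disk key lookup is simply skipped. A small illustration of that placeholder pattern (the ref and instance values below are made up for the sketch):

instance_refs = ['zone-a/vm-1', 'zone-a/vm-2']

csek_key_file = None    # pretend the flag was not passed

if csek_key_file:
    instances = ['<full instance resource>'] * len(instance_refs)   # would be fetched from the API
else:
    instances = [None for _ in instance_refs]

for ref, instance in zip(instance_refs, instances):
    if instance:
        print('building encrypted-disk resume request for', ref)
    else:
        print('building plain resume request for', ref)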
Example 30
def PrepareEnvironment(args):
    """Ensures that the user's environment is ready to accept SSH connections."""

    # Load Cloud Shell API
    client = apis.GetClientInstance('cloudshell', 'v1alpha1')
    messages = apis.GetMessagesModule('cloudshell', 'v1alpha1')
    operations_client = apis.GetClientInstance('cloudshell', 'v1')

    # Ensure we have a key pair on the local machine
    ssh_env = ssh.Environment.Current()
    ssh_env.RequireSSH()
    keys = ssh.Keys.FromFilename(filename=args.ssh_key_file)
    keys.EnsureKeysExist(overwrite=args.force_key_file_overwrite)

    # Look up the Cloud Shell environment
    environment = client.users_environments.Get(
        messages.CloudshellUsersEnvironmentsGetRequest(
            name=DEFAULT_ENVIRONMENT_NAME))

    # If the environment doesn't have the public key, push it
    key_parts = keys.GetPublicKey().ToEntry().split(' ')
    key = messages.PublicKey(
        format=messages.PublicKey.FormatValueValuesEnum(key_parts[0].replace(
            '-', '_').upper()),
        key=base64.b64decode(key_parts[1]),
    )
    has_key = False
    for candidate in environment.publicKeys:
        if key.format == candidate.format and key.key == candidate.key:
            has_key = True
            break
    if not has_key:
        log.Print('Pushing your public key to Cloud Shell...')
        client.users_environments_publicKeys.Create(
            messages.CloudshellUsersEnvironmentsPublicKeysCreateRequest(
                parent=DEFAULT_ENVIRONMENT_NAME,
                createPublicKeyRequest=messages.CreatePublicKeyRequest(
                    key=key),
            ))

    # If the environment isn't running, start it
    if environment.state != messages.Environment.StateValueValuesEnum.RUNNING:
        log.Print('Starting your Cloud Shell machine...')
        start_operation = client.users_environments.Start(
            messages.CloudshellUsersEnvironmentsStartRequest(
                name=DEFAULT_ENVIRONMENT_NAME))

        environment = waiter.WaitFor(
            StartEnvironmentPoller(client.users_environments,
                                   operations_client.operations),
            start_operation,
            'Waiting for your Cloud Shell machine to start',
            sleep_ms=500,
            max_wait_ms=None)

    return ConnectionInfo(
        ssh_env=ssh_env,
        user=environment.sshUsername,
        host=environment.sshHost,
        port=environment.sshPort,
        key=keys.key_file,
    )
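The public-key comparison above relies on the single-line OpenSSH entry format, '<type> <base64 blob> [comment]': splitting on spaces yields the key type (mapped to the format enum by upper-casing and swapping '-' for '_') and the raw key bytes after base64-decoding. A self-contained sketch of just that parsing step (the entry is synthesized inside the script rather than being a real key, which would come from keys.GetPublicKey().ToEntry()):

import base64

# Build a fake ed25519-style entry so the example runs without any real key material.
fake_blob = base64.b64encode(b'\x00\x00\x00\x0bssh-ed25519' + b'\x00' * 36).decode('ascii')
entry = 'ssh-ed25519 {} user@example'.format(fake_blob)

key_parts = entry.split(' ')
format_enum_name = key_parts[0].replace('-', '_').upper()   # 'SSH_ED25519'
raw_key_bytes = base64.b64decode(key_parts[1])              # bytes compared against the keys already on the environment

print(format_enum_name, len(raw_key_bytes))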