# Example no. 1
    def testTwoOperations_OneWithErrorInGetResult(self):
        """Polling two done operations where fetching one result errors out."""
        # Both operations are done immediately; fetching Y's instance fails.
        self.batch_fake.AddInstance(
            self.instance_x_ref, self.operation_x_ref,
            number_of_polls_to_done=0)
        self.batch_fake.AddInstance(
            self.instance_y_ref, self.operation_y_ref,
            number_of_polls_to_done=0, error_on_instance=True)

        patched = mock.patch.object(
            client_adapter.ClientAdapter, 'BatchRequests',
            side_effect=self.batch_fake.BatchRequests)
        with patched as batch_requests:
            batch_poller = compute_poller.BatchPoller(
                self.adapter, self.adapter.apitools_client.instances)
            operation_batch = compute_poller.OperationBatch(
                [self.operation_x_ref, self.operation_y_ref])
            # The failed result fetch must surface as a MultiError.
            with self.assertRaisesRegex(core_exceptions.MultiError,
                                        'HTTPError 444: Fake http error'):
                waiter.WaitFor(poller=batch_poller,
                               operation_ref=operation_batch,
                               message='Making Cheese')
        # Two calls expected: one poll and one result fetch.
        self.assertEqual(2, batch_requests.call_count)
        self.AssertOutputEquals('')
        self.AssertErrContains('Making Cheese')
# Example no. 2
    def testTwoOperations_FirstFinish(self):
        """Waits on two operations that complete after different poll counts."""
        # X finishes after one poll, Y after two.
        self.batch_fake.AddInstance(
            self.instance_x_ref, self.operation_x_ref,
            number_of_polls_to_done=1)
        self.batch_fake.AddInstance(
            self.instance_y_ref, self.operation_y_ref,
            number_of_polls_to_done=2)

        patched = mock.patch.object(
            client_adapter.ClientAdapter, 'BatchRequests',
            side_effect=self.batch_fake.BatchRequests)
        with patched as batch_requests:
            batch_poller = compute_poller.BatchPoller(
                self.adapter, self.adapter.apitools_client.instances)
            results = waiter.WaitFor(
                poller=batch_poller,
                operation_ref=compute_poller.OperationBatch(
                    [self.operation_x_ref, self.operation_y_ref]),
                message='Making Cheese')
        # Expected batch calls and their responses:
        #  1. X pending, Y pending
        #  2. X done,    Y pending
        #  3. X done,    Y done
        #  4. Get X, Get Y
        self.assertEqual(4, batch_requests.call_count)
        self.assertEqual('Super-Cheese-X', results[0].name)
        self.assertEqual('Super-Cheese-Y', results[1].name)
        self.AssertOutputEquals('')
        self.AssertErrContains('Making Cheese')
# Example no. 3
    def Run(self, args):
        """Suspends the requested instances and reports progress."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetInstanceZoneScopeLister(client))

        # --discard-local-ssd is only read off args outside the GA track.
        is_ga = self.ReleaseTrack() == base.ReleaseTrack.GA
        suspend_requests = []
        for instance_ref in instance_refs:
            discard_local_ssd = None if is_ga else args.discard_local_ssd
            suspend_requests.append(
                (client.apitools_client.instances, 'Suspend',
                 self._CreateSuspendRequest(client, instance_ref,
                                            discard_local_ssd)))

        errors_to_collect = []
        responses = client.BatchRequests(suspend_requests, errors_to_collect)
        if errors_to_collect:
            raise exceptions.MultiError(errors_to_collect)

        operation_refs = [holder.resources.Parse(response.selfLink)
                          for response in responses]

        if args.async_:
            # Print the pending operations and return without waiting.
            for operation_ref in operation_refs:
                log.status.Print(
                    'Suspend instance in progress for [{}].'.format(
                        operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command to check the '
                'status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(
            client, client.apitools_client.instances, instance_refs)

        result = waiter.WaitFor(
            operation_poller,
            poller.OperationBatch(operation_refs),
            'Suspending instance(s) {0}'.format(
                ', '.join(i.Name() for i in instance_refs)),
            max_wait_ms=None)

        for instance_ref in instance_refs:
            log.status.Print('Updated [{0}].'.format(instance_ref))

        return result
  def Run(self, args):
    """Issues the request to delete a SSL policy."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    helper = ssl_policies_utils.SslPolicyHelper(holder)
    client = holder.client.apitools_client

    refs = _SSL_POLICY_ARG.ResolveAsResource(args, holder.resources)
    utils.PromptForDeletion(refs)

    # One delete operation per policy; wait on the whole batch.
    operation_refs = [helper.Delete(ref) for ref in refs]
    noun = 'policies' if len(operation_refs) > 1 else 'policy'
    operation_poller = DeleteBatchPoller(holder.client, client.sslPolicies)
    return waiter.WaitFor(operation_poller,
                          poller.OperationBatch(operation_refs),
                          'Deleting SSL {}'.format(noun))
    def Run(self, args):
        """Simulates a maintenance event on the requested instances.

        Args:
          args: argparse.Namespace, the parsed command-line flags.

        Returns:
          The raw batch responses when --async is given, otherwise the
          polled operation results.

        Raises:
          core_exceptions.MultiError: if any batched request failed or any
            returned operation carries an error payload.
        """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client.apitools_client
        messages = holder.client.messages

        instance_refs = instance_flags.INSTANCES_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))

        requests = []
        for instance_ref in instance_refs:
            request = messages.ComputeInstancesSimulateMaintenanceEventRequest(
                **instance_ref.AsDict())
            requests.append(
                (client.instances, 'SimulateMaintenanceEvent', request))

        errors_to_collect = []
        responses = holder.client.BatchRequests(requests, errors_to_collect)
        # Surface operation-level errors in addition to transport errors.
        for r in responses:
            err = getattr(r, 'error', None)
            if err:
                errors_to_collect.append(poller.OperationErrors(err.errors))
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        # BUG FIX: `args. async` is a syntax error in Python 3 (`async` is a
        # reserved keyword since 3.7); the flag value lives on `async_`, as
        # the sibling commands in this file read it.
        if args.async_:
            for i, operation_ref in enumerate(operation_refs):
                log.UpdatedResource(
                    operation_ref,
                    kind='gce instance [{0}]'.format(instance_refs[i].Name()),
                    is_async=True,
                    details='Use [gcloud compute operations describe] command '
                    'to check the status of this operation.')
            return responses

        operation_poller = poller.BatchPoller(holder.client, client.instances,
                                              instance_refs)
        return waiter.WaitFor(
            operation_poller,
            poller.OperationBatch(operation_refs),
            'Simulating maintenance on instance(s) [{0}]'.format(', '.join(
                i.SelfLink() for i in instance_refs)),
            max_wait_ms=SIMULATE_MAINTENANCE_EVENT_TIMEOUT_MS,
            wait_ceiling_ms=SIMULATE_MAINTENANCE_EVENT_TIMEOUT_MS)
# Example no. 6
  def Run(self, args):
    """Issues the request to delete a VPN Gateway."""
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    helper = vpn_gateways_utils.VpnGatewayHelper(holder)
    client = holder.client.apitools_client

    refs = _VPN_GATEWAY_ARG.ResolveAsResource(args, holder.resources)
    utils.PromptForDeletion(refs)

    # One delete operation per gateway; wait on the whole batch.
    operation_refs = [helper.Delete(ref) for ref in refs]
    noun = 'Gateways' if len(operation_refs) > 1 else 'Gateway'
    operation_poller = DeleteBatchPoller(holder.client, client.vpnGateways)
    return waiter.WaitFor(operation_poller,
                          poller.OperationBatch(operation_refs),
                          'Deleting VPN {}'.format(noun))
# Example no. 7
    def testNoOperations(self):
        """An empty operation batch still performs one poll and one fetch."""
        patched = mock.patch.object(
            client_adapter.ClientAdapter, 'BatchRequests',
            side_effect=self.batch_fake.BatchRequests)
        with patched as batch_requests:
            batch_poller = compute_poller.BatchPoller(
                self.adapter, self.adapter.apitools_client.instances)
            results = waiter.WaitFor(
                poller=batch_poller,
                operation_ref=compute_poller.OperationBatch([]),
                message='Making Cheese')
        # One call from Poll, one from GetResult.
        self.assertEqual(2, batch_requests.call_count)
        self.assertEqual(0, len(results))
        self.AssertOutputEquals('')
        self.AssertErrContains('Making Cheese')
# Example no. 8
    def Run(self, args):
        """Deletes the requested VPN tunnels after prompting the user."""
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client
        helper = vpn_tunnels_utils.VpnTunnelHelper(holder)

        vpn_tunnel_refs = _VPN_TUNNEL_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=compute_flags.GetDefaultScopeLister(client))
        utils.PromptForDeletion(vpn_tunnel_refs, 'region')

        # One delete operation per tunnel; wait on the whole batch.
        operation_refs = [helper.Delete(ref) for ref in vpn_tunnel_refs]
        noun = 'tunnels' if len(operation_refs) > 1 else 'tunnel'
        operation_poller = DeleteBatchPoller(
            client, client.apitools_client.vpnTunnels)
        return waiter.WaitFor(operation_poller,
                              poller.OperationBatch(operation_refs),
                              'Deleting VPN {}'.format(noun))
# Example no. 9
    def Run(self, args):
        """Resumes suspended instances, re-supplying CSEK disk keys if given.

        Args:
          args: argparse.Namespace, the parsed command-line flags.

        Returns:
          The raw batch responses when --async is given, otherwise the
          polled operation results.

        Raises:
          exceptions.MultiError: if any batched resume request failed.
        """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
        client = holder.client

        csek_key_file = args.csek_key_file
        request_list = []
        instance_refs = flags.INSTANCES_ARG.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetInstanceZoneScopeLister(client))

        # If csek_key_file is supplied, we must first get a reference to the
        # instances specified in the file to ensure that they exist.
        # Only then can we verify that the key specified in the file matches what
        # was used to create the instance.
        if csek_key_file:
            instances = self.GetInstances(client, instance_refs)
        else:
            # Placeholder per instance so the zip below stays aligned.
            instances = [None for _ in instance_refs]

        for instance_ref, instance in zip(instance_refs, instances):
            # Keys for this instance's disks; stays empty without a key file.
            disks = []

            if instance:
                # RSA-wrapped keys are only accepted on alpha/beta tracks.
                allow_rsa_encrypted = self.ReleaseTrack() in [
                    base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
                ]
                csek_keys = csek_utils.CsekKeyStore.FromArgs(
                    args, allow_rsa_encrypted)
                for disk in instance.disks:
                    disk_resource = resources.REGISTRY.Parse(disk.source)

                    # None when the key store has no entry for this disk.
                    disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                        csek_keys, disk_resource, client.apitools_client)

                    if disk_key_or_none:
                        disks.append(
                            client.messages.CustomerEncryptionKeyProtectedDisk(
                                diskEncryptionKey=disk_key_or_none,
                                source=disk.source))
            if disks:
                # Attach the per-disk keys to the resume request body.
                encryption_req = client.messages.InstancesResumeRequest(
                    disks=disks)

                request = (client.apitools_client.instances, 'Resume',
                           client.messages.ComputeInstancesResumeRequest(
                               instance=instance_ref.Name(),
                               instancesResumeRequest=encryption_req,
                               project=instance_ref.project,
                               zone=instance_ref.zone))

            else:
                # No encrypted disks: plain resume request.
                request = (client.apitools_client.instances, 'Resume',
                           client.messages.ComputeInstancesResumeRequest(
                               instance=instance_ref.Name(),
                               project=instance_ref.project,
                               zone=instance_ref.zone))

            request_list.append(request)

        errors_to_collect = []
        responses = client.BatchRequests(request_list, errors_to_collect)
        if errors_to_collect:
            raise exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        if args.async_:
            # Report the pending operations and return without waiting.
            for operation_ref in operation_refs:
                log.status.Print(
                    'Resume instance in progress for [{}].'.format(
                        operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command to check the '
                'status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(client,
                                              client.apitools_client.instances,
                                              instance_refs)

        # max_wait_ms=None: wait indefinitely for the resumes to finish.
        result = waiter.WaitFor(operation_poller,
                                poller.OperationBatch(operation_refs),
                                'Resuming instance(s) {0}'.format(', '.join(
                                    i.Name() for i in instance_refs)),
                                max_wait_ms=None)

        for instance_ref in instance_refs:
            log.status.Print('Updated [{0}].'.format(instance_ref))

        return result
# Example no. 10
    def Run(self, args):
        """Returns a list of requests necessary for snapshotting disks.

        Args:
          args: argparse.Namespace, the parsed command-line flags.

        Returns:
          The raw batch responses when --async is given, otherwise the
          polled snapshot results.

        Raises:
          exceptions.ToolException: if --snapshot-names has a different
            length than the list of disks.
          core_exceptions.MultiError: if any batched request failed or any
            returned operation carries an error payload.
        """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())

        disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))
        if args.snapshot_names:
            if len(disk_refs) != len(args.snapshot_names):
                raise exceptions.ToolException(
                    '[--snapshot-names] must have the same number of values as disks '
                    'being snapshotted.')
            snapshot_names = args.snapshot_names
        else:
            # Generates names like "d52jsqy3db4q".
            snapshot_names = [
                name_generator.GenerateRandomName() for _ in disk_refs
            ]

        snapshot_refs = [
            holder.resources.Parse(
                snapshot_name,
                params={
                    'project': properties.VALUES.core.project.GetOrFail,
                },
                collection='compute.snapshots')
            for snapshot_name in snapshot_names
        ]

        client = holder.client.apitools_client
        messages = holder.client.messages

        requests = []

        for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
            # This feature is only exposed in alpha/beta
            allow_rsa_encrypted = self.ReleaseTrack() in [
                base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
            ]
            csek_keys = csek_utils.CsekKeyStore.FromArgs(
                args, allow_rsa_encrypted)
            # None when no CSEK key is registered for this disk.
            disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                csek_keys, disk_ref, client)

            snapshot_message = messages.Snapshot(
                name=snapshot_ref.Name(),
                description=args.description,
                sourceDiskEncryptionKey=disk_key_or_none)
            if (hasattr(args, 'storage_location')
                    and args.IsSpecified('storage_location')):
                snapshot_message.storageLocations = [args.storage_location]
            if (hasattr(args, 'labels') and args.IsSpecified('labels')):
                snapshot_message.labels = labels_util.ParseCreateArgs(
                    args, messages.Snapshot.LabelsValue)

            if disk_ref.Collection() == 'compute.disks':
                request = messages.ComputeDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    zone=disk_ref.zone,
                    guestFlush=args.guest_flush)
                requests.append((client.disks, 'CreateSnapshot', request))
            elif disk_ref.Collection() == 'compute.regionDisks':
                request = messages.ComputeRegionDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=snapshot_message,
                    project=disk_ref.project,
                    region=disk_ref.region)
                if hasattr(request,
                           'guestFlush'):  # only available in alpha API
                    guest_flush = getattr(args, 'guest_flush', None)
                    if guest_flush is not None:
                        request.guestFlush = guest_flush
                requests.append(
                    (client.regionDisks, 'CreateSnapshot', request))

        errors_to_collect = []
        responses = holder.client.BatchRequests(requests, errors_to_collect)
        # Surface operation-level errors in addition to transport errors.
        for r in responses:
            err = getattr(r, 'error', None)
            if err:
                errors_to_collect.append(poller.OperationErrors(err.errors))
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        # BUG FIX: `args. async` is a syntax error in Python 3 (`async` is a
        # reserved keyword since 3.7); the flag value lives on `async_`, as
        # the sibling commands in this file read it.
        if args.async_:
            for operation_ref in operation_refs:
                log.status.Print('Disk snapshot in progress for [{}].'.format(
                    operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command '
                'to check the status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(holder.client, client.snapshots,
                                              snapshot_refs)
        # max_wait_ms=None: wait indefinitely for snapshot creation.
        return waiter.WaitFor(operation_poller,
                              poller.OperationBatch(operation_refs),
                              'Creating snapshot(s) {0}'.format(', '.join(
                                  s.Name() for s in snapshot_refs)),
                              max_wait_ms=None)
# Example no. 11
    def Run(self, args):
        """Returns a list of requests necessary for snapshotting disks.

        Args:
          args: argparse.Namespace, the parsed command-line flags.

        Returns:
          The raw batch responses when --async is given, otherwise the
          polled snapshot results.

        Raises:
          exceptions.ToolException: if --snapshot-names has a different
            length than the list of disks.
          core_exceptions.MultiError: if any batched request failed.
        """
        holder = base_classes.ComputeApiHolder(self.ReleaseTrack())

        disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
            args,
            holder.resources,
            scope_lister=flags.GetDefaultScopeLister(holder.client))
        if args.snapshot_names:
            if len(disk_refs) != len(args.snapshot_names):
                raise exceptions.ToolException(
                    '[--snapshot-names] must have the same number of values as disks '
                    'being snapshotted.')
            snapshot_names = args.snapshot_names
        else:
            # Generates names like "d52jsqy3db4q".
            snapshot_names = [
                name_generator.GenerateRandomName() for _ in disk_refs
            ]

        snapshot_refs = [
            holder.resources.Parse(snapshot_name,
                                   collection='compute.snapshots')
            for snapshot_name in snapshot_names
        ]

        client = holder.client.apitools_client
        messages = holder.client.messages

        requests = []

        for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
            # This feature is only exposed in alpha/beta
            allow_rsa_encrypted = self.ReleaseTrack() in [
                base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
            ]
            csek_keys = csek_utils.CsekKeyStore.FromArgs(
                args, allow_rsa_encrypted)
            # None when no CSEK key is registered for this disk.
            disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
                csek_keys, disk_ref, client)

            # TODO(user) drop test after 'guestFlush' goes GA
            if hasattr(args, 'guest_flush') and args.guest_flush:
                request_kwargs = {'guestFlush': True}
            else:
                request_kwargs = {}

            if disk_ref.Collection() == 'compute.disks':
                request = messages.ComputeDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=messages.Snapshot(
                        name=snapshot_ref.Name(),
                        description=args.description,
                        sourceDiskEncryptionKey=disk_key_or_none),
                    project=disk_ref.project,
                    zone=disk_ref.zone,
                    **request_kwargs)
                requests.append((client.disks, 'CreateSnapshot', request))
            elif disk_ref.Collection() == 'compute.regionDisks':
                request = messages.ComputeRegionDisksCreateSnapshotRequest(
                    disk=disk_ref.Name(),
                    snapshot=messages.Snapshot(
                        name=snapshot_ref.Name(),
                        description=args.description,
                        sourceDiskEncryptionKey=disk_key_or_none),
                    project=disk_ref.project,
                    region=disk_ref.region,
                    **request_kwargs)
                requests.append(
                    (client.regionDisks, 'CreateSnapshot', request))

        errors_to_collect = []
        responses = holder.client.BatchRequests(requests, errors_to_collect)
        if errors_to_collect:
            raise core_exceptions.MultiError(errors_to_collect)

        operation_refs = [
            holder.resources.Parse(r.selfLink) for r in responses
        ]

        # BUG FIX: `args. async` is a syntax error in Python 3 (`async` is a
        # reserved keyword since 3.7); the flag value lives on `async_`, as
        # the sibling commands in this file read it.
        if args.async_:
            for operation_ref in operation_refs:
                log.status.Print('Disk snapshot in progress for [{}].'.format(
                    operation_ref.SelfLink()))
            log.status.Print(
                'Use [gcloud compute operations describe URI] command '
                'to check the status of the operation(s).')
            return responses

        operation_poller = poller.BatchPoller(holder.client, client.snapshots,
                                              snapshot_refs)
        return waiter.WaitFor(
            operation_poller, poller.OperationBatch(operation_refs),
            'Creating snapshot(s) {0}'.format(', '.join(
                s.Name() for s in snapshot_refs)))