def Run(self, args):
    """Creates VM instances and returns the API responses.

    Validates the command-line flags, builds one insert request per instance,
    and either waits for completion (default) or returns immediately with the
    pending operations (--async).

    Args:
      args: argparse.Namespace with the parsed command-line flags.

    Returns:
      The list of API responses (instances when synchronous, operations when
      --async was given).

    Raises:
      exceptions.ToolException: on an invalid machine type (with a hint
        appended) or any other API failure surfaced by MakeRequests.
      core_exceptions.MultiError: when batched async requests fail.
    """
    instances_flags.ValidateDiskFlags(
        args, enable_kms=self.ReleaseTrack() in [base.ReleaseTrack.ALPHA])
    instances_flags.ValidateLocalSsdFlags(args)
    instances_flags.ValidateNicFlags(args)
    instances_flags.ValidateServiceAccountAndScopeArgs(args)
    instances_flags.ValidateAcceleratorArgs(args)
    if self._support_network_tier:
      instances_flags.ValidateNetworkTierArgs(args)

    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    compute_client = holder.client
    resource_parser = holder.resources

    instance_refs = instances_flags.INSTANCES_ARG.ResolveAsResource(
        args,
        resource_parser,
        scope_lister=flags.GetDefaultScopeLister(compute_client))

    requests = self._CreateRequests(args, instance_refs, compute_client,
                                    resource_parser)

    # 'async' is a reserved keyword in Python 3, so args.async is a syntax
    # error there; getattr reads the same attribute portably.
    if not getattr(args, 'async'):
      # TODO(b/63664449): Replace this with poller + progress tracker.
      try:
        # Using legacy MakeRequests (which also does polling) here until
        # replaced by api_lib.utils.waiter.
        return compute_client.MakeRequests(requests)
      except exceptions.ToolException as e:
        # Note: the two raw strings concatenate into a single-line pattern;
        # the API error message is one line, so no newline belongs here.
        invalid_machine_type_message_regex = (
            r'Invalid value for field \'resource.machineType\': .+. '
            r'Machine type with name \'.+\' does not exist in zone \'.+\'\.')
        # str(e) instead of the Python-2-only e.message attribute.
        message = str(e)
        if re.search(invalid_machine_type_message_regex, message):
          raise exceptions.ToolException(
              message +
              '\nUse `gcloud compute machine-types list --zones` to see the '
              'available machine types.')
        raise

    # --async path: fire all requests in one batch and collect errors.
    errors_to_collect = []
    responses = compute_client.BatchRequests(requests, errors_to_collect)
    for r in responses:
      err = getattr(r, 'error', None)
      if err:
        errors_to_collect.append(poller.OperationErrors(err.errors))
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    for instance_ref, operation_ref in zip(instance_refs, operation_refs):
      log.status.Print('Instance creation in progress for [{}]: {}'
                       .format(instance_ref.instance,
                               operation_ref.SelfLink()))
    log.status.Print('Use [gcloud compute operations describe URI] command '
                     'to check the status of the operation(s).')
    if not args.IsSpecified('format'):
      # For async output we need a separate format. Since we already printed in
      # the status messages information about operations there is nothing else
      # needs to be printed.
      args.format = 'disable'
    return responses
def Run(self, args):
    """Simulates a maintenance event on the given instances.

    Issues one SimulateMaintenanceEvent request per resolved instance in a
    single batch, then either returns the operations (--async) or polls them
    to completion.

    Args:
      args: argparse.Namespace with the parsed command-line flags.

    Returns:
      The batch responses (--async) or the final polled result.

    Raises:
      core_exceptions.MultiError: if any batched request or operation failed.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client.apitools_client
    messages = holder.client.messages

    instance_refs = instance_flags.INSTANCES_ARG.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    requests = []
    for instance_ref in instance_refs:
      # instance_ref.AsDict() supplies project/zone/instance fields.
      request = messages.ComputeInstancesSimulateMaintenanceEventRequest(
          **instance_ref.AsDict())
      requests.append((client.instances, 'SimulateMaintenanceEvent', request))

    errors_to_collect = []
    responses = holder.client.BatchRequests(requests, errors_to_collect)
    for r in responses:
      err = getattr(r, 'error', None)
      if err:
        errors_to_collect.append(poller.OperationErrors(err.errors))
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    # 'async' is a reserved keyword in Python 3, so the flag must be read
    # with getattr rather than args.async.
    if getattr(args, 'async'):
      for i, operation_ref in enumerate(operation_refs):
        log.UpdatedResource(
            operation_ref,
            kind='gce instance [{0}]'.format(instance_refs[i].Name()),
            is_async=True,
            details='Use [gcloud compute operations describe] command '
            'to check the status of this operation.')
      return responses

    operation_poller = poller.BatchPoller(holder.client, client.instances,
                                          instance_refs)
    return waiter.WaitFor(
        operation_poller,
        poller.OperationBatch(operation_refs),
        'Simulating maintenance on instance(s) [{0}]'.format(', '.join(
            i.SelfLink() for i in instance_refs)),
        max_wait_ms=SIMULATE_MAINTENANCE_EVENT_TIMEOUT_MS,
        wait_ceiling_ms=SIMULATE_MAINTENANCE_EVENT_TIMEOUT_MS)
def Run(self, args):
    """Returns a list of requests necessary for snapshotting disks.

    Builds one CreateSnapshot request per disk (zonal or regional), sends
    them as a batch, and either returns the operations (--async) or polls
    them until the snapshots exist.

    Args:
      args: argparse.Namespace with the parsed command-line flags.

    Returns:
      The batch responses (--async) or the final polled snapshot resources.

    Raises:
      exceptions.ToolException: if --snapshot-names count mismatches disks.
      core_exceptions.MultiError: if any batched request or operation failed.
    """
    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())

    disk_refs = SnapshotDisks.disks_arg.ResolveAsResource(
        args,
        holder.resources,
        scope_lister=flags.GetDefaultScopeLister(holder.client))

    if args.snapshot_names:
      if len(disk_refs) != len(args.snapshot_names):
        raise exceptions.ToolException(
            '[--snapshot-names] must have the same number of values as disks '
            'being snapshotted.')
      snapshot_names = args.snapshot_names
    else:
      # Generates names like "d52jsqy3db4q".
      snapshot_names = [name_generator.GenerateRandomName()
                        for _ in disk_refs]

    snapshot_refs = [
        holder.resources.Parse(
            snapshot_name,
            params={
                'project': properties.VALUES.core.project.GetOrFail,
            },
            collection='compute.snapshots')
        for snapshot_name in snapshot_names
    ]

    client = holder.client.apitools_client
    messages = holder.client.messages

    # RSA-encrypted CSEK keys are only exposed in alpha/beta. Neither value
    # depends on the disk being iterated, so compute them once up front
    # instead of once per disk.
    allow_rsa_encrypted = self.ReleaseTrack() in [
        base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA
    ]
    csek_keys = csek_utils.CsekKeyStore.FromArgs(args, allow_rsa_encrypted)

    requests = []
    for disk_ref, snapshot_ref in zip(disk_refs, snapshot_refs):
      disk_key_or_none = csek_utils.MaybeLookupKeyMessage(
          csek_keys, disk_ref, client)

      snapshot_message = messages.Snapshot(
          name=snapshot_ref.Name(),
          description=args.description,
          sourceDiskEncryptionKey=disk_key_or_none)
      # hasattr guards: these flags are only registered on some tracks.
      if (hasattr(args, 'storage_location') and
          args.IsSpecified('storage_location')):
        snapshot_message.storageLocations = [args.storage_location]
      if hasattr(args, 'labels') and args.IsSpecified('labels'):
        snapshot_message.labels = labels_util.ParseCreateArgs(
            args, messages.Snapshot.LabelsValue)

      if disk_ref.Collection() == 'compute.disks':
        request = messages.ComputeDisksCreateSnapshotRequest(
            disk=disk_ref.Name(),
            snapshot=snapshot_message,
            project=disk_ref.project,
            zone=disk_ref.zone,
            guestFlush=args.guest_flush)
        requests.append((client.disks, 'CreateSnapshot', request))
      elif disk_ref.Collection() == 'compute.regionDisks':
        request = messages.ComputeRegionDisksCreateSnapshotRequest(
            disk=disk_ref.Name(),
            snapshot=snapshot_message,
            project=disk_ref.project,
            region=disk_ref.region)
        if hasattr(request, 'guestFlush'):
          # only available in alpha API
          guest_flush = getattr(args, 'guest_flush', None)
          if guest_flush is not None:
            request.guestFlush = guest_flush
        requests.append((client.regionDisks, 'CreateSnapshot', request))

    errors_to_collect = []
    responses = holder.client.BatchRequests(requests, errors_to_collect)
    for r in responses:
      err = getattr(r, 'error', None)
      if err:
        errors_to_collect.append(poller.OperationErrors(err.errors))
    if errors_to_collect:
      raise core_exceptions.MultiError(errors_to_collect)

    operation_refs = [holder.resources.Parse(r.selfLink) for r in responses]

    # 'async' is a reserved keyword in Python 3, so the flag must be read
    # with getattr rather than args.async.
    if getattr(args, 'async'):
      for operation_ref in operation_refs:
        log.status.Print('Disk snapshot in progress for [{}].'.format(
            operation_ref.SelfLink()))
      log.status.Print(
          'Use [gcloud compute operations describe URI] command '
          'to check the status of the operation(s).')
      return responses

    operation_poller = poller.BatchPoller(holder.client, client.snapshots,
                                          snapshot_refs)
    return waiter.WaitFor(operation_poller,
                          poller.OperationBatch(operation_refs),
                          'Creating snapshot(s) {0}'.format(', '.join(
                              s.Name() for s in snapshot_refs)),
                          max_wait_ms=None)
def Run(self, args):
    """Creates VM instances and returns the create responses.

    Runs all flag validation first, then issues one insert request per
    instance. In synchronous mode (default) the legacy MakeRequests path
    polls to completion; with --async the batched operations are returned
    immediately after printing their URIs.
    """
    # All flag validation happens before any API traffic.
    instances_flags.ValidateDiskFlags(
        args,
        enable_kms=self._support_kms,
        enable_snapshots=True,
        enable_source_snapshot_csek=self._support_source_snapshot_csek,
        enable_image_csek=self._support_image_csek)
    instances_flags.ValidateImageFlags(args)
    instances_flags.ValidateLocalSsdFlags(args)
    instances_flags.ValidateNicFlags(args)
    instances_flags.ValidateServiceAccountAndScopeArgs(args)
    instances_flags.ValidateAcceleratorArgs(args)
    instances_flags.ValidateNetworkTierArgs(args)
    instances_flags.ValidateReservationAffinityGroup(args)

    holder = base_classes.ComputeApiHolder(self.ReleaseTrack())
    client = holder.client
    parser = holder.resources

    instance_refs = instance_utils.GetInstanceRefs(args, client, holder)
    requests = self._CreateRequests(args, instance_refs,
                                    instance_refs[0].project,
                                    instance_refs[0].zone, client, parser,
                                    holder)

    if not args.async_:
      # TODO(b/63664449): Replace this with poller + progress tracker.
      try:
        # Using legacy MakeRequests (which also does polling) here until
        # replaced by api_lib.utils.waiter.
        return client.MakeRequests(requests)
      except exceptions.ToolException as e:
        bad_machine_type_pattern = (
            r'Invalid value for field \'resource.machineType\': .+. '
            r'Machine type with name \'.+\' does not exist in zone \'.+\'\.')
        # Anything other than a bad machine type propagates unchanged.
        if not re.search(bad_machine_type_pattern, six.text_type(e)):
          raise
        raise exceptions.ToolException(
            six.text_type(e) +
            '\nUse `gcloud compute machine-types list --zones` to see the '
            'available machine types.')

    # --async path: send everything in one batch and collect failures.
    collected_errors = []
    responses = client.BatchRequests(requests, collected_errors)
    for response in responses:
      error = getattr(response, 'error', None)
      if error:
        collected_errors.append(poller.OperationErrors(error.errors))
    if collected_errors:
      raise core_exceptions.MultiError(collected_errors)

    operation_refs = [parser.Parse(response.selfLink)
                      for response in responses]

    log.status.Print(
        'NOTE: The users will be charged for public IPs when VMs '
        'are created.')

    for instance_ref, operation_ref in zip(instance_refs, operation_refs):
      log.status.Print('Instance creation in progress for [{}]: {}'.format(
          instance_ref.instance, operation_ref.SelfLink()))
    log.status.Print(
        'Use [gcloud compute operations describe URI] command '
        'to check the status of the operation(s).')

    if not args.IsSpecified('format'):
      # For async output we need a separate format. Since we already printed in
      # the status messages information about operations there is nothing else
      # needs to be printed.
      args.format = 'disable'
    return responses