def CreateRequests(self, args): """Creates and returns an InstanceTemplates.Insert request. Args: args: the argparse arguments that this command was invoked with. Returns: request: a ComputeInstanceTemplatesInsertRequest message object """ self.ValidateDiskFlags(args) instance_utils.ValidateLocalSsdFlags(args) boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size) utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type) instance_template_ref = self.CreateGlobalReference(args.name) metadata = metadata_utils.ConstructMetadataMessage( self.messages, metadata=args.metadata, metadata_from_file=args.metadata_from_file) network_interface = self.CreateNetworkInterfaceMessage(args) if args.maintenance_policy: on_host_maintenance = ( self.messages.Scheduling.OnHostMaintenanceValueValuesEnum( args.maintenance_policy)) else: on_host_maintenance = None # Note: We always specify automaticRestart=False for preemptible VMs. This # makes sense, since no-restart-on-failure is defined as "store-true", and # thus can't be given an explicit value. Hence it either has its default # value (in which case we override it for convenience's sake to the only # setting that makes sense for preemptible VMs), or the user actually # specified no-restart-on-failure, the only usable setting. if args.preemptible: scheduling = self.messages.Scheduling( automaticRestart=False, onHostMaintenance=on_host_maintenance, preemptible=True) else: scheduling = self.messages.Scheduling( automaticRestart=args.restart_on_failure, onHostMaintenance=on_host_maintenance) service_accounts = self.CreateServiceAccountMessages(args) create_boot_disk = not self.UseExistingBootDisk(args) if create_boot_disk: image_uri, _ = self.ExpandImageFlag(args, return_image_resource=True) else: image_uri = None if args.tags: tags = self.messages.Tags(items=args.tags) else: tags = None persistent_disks = self.CreateAttachedPersistentDiskMessages(args) if create_boot_disk: boot_disk_list = [ self.CreateDefaultBootAttachedDiskMessage( args, boot_disk_size_gb, image_uri) ] else: boot_disk_list = [] local_ssds = [ instance_utils.CreateLocalSsdMessage(self, x.get('device-name'), x.get('interface')) for x in args.local_ssd or [] ] disks = boot_disk_list + persistent_disks + local_ssds request = self.messages.ComputeInstanceTemplatesInsertRequest( instanceTemplate=self.messages.InstanceTemplate( properties=self.messages.InstanceProperties( machineType=args.machine_type, disks=disks, canIpForward=args.can_ip_forward, metadata=metadata, networkInterfaces=[network_interface], serviceAccounts=service_accounts, scheduling=scheduling, tags=tags, ), description=args.description, name=instance_template_ref.Name(), ), project=self.context['project']) return [request]
def CreateRequests(self, args):
  _ValidateDiskFlags(args)
  instance_utils.ValidateLocalSsdFlags(args)

  # TODO(user) drop test after CSEK goes GA
  if hasattr(args, 'csek_key_file'):
    self.csek_keys = csek_utils.CsekKeyStore.FromArgs(args)
  else:
    self.csek_keys = None

  if args.maintenance_policy:
    on_host_maintenance = (
        self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
            args.maintenance_policy))
  else:
    on_host_maintenance = None

  # Note: We always specify automaticRestart=False for preemptible VMs. This
  # makes sense, since no-restart-on-failure is defined as "store-true", and
  # thus can't be given an explicit value. Hence it either has its default
  # value (in which case we override it for convenience's sake to the only
  # setting that makes sense for preemptible VMs), or the user actually
  # specified no-restart-on-failure, the only usable setting.
  if args.preemptible:
    scheduling = self.messages.Scheduling(
        automaticRestart=False,
        onHostMaintenance=on_host_maintenance,
        preemptible=True)
  else:
    scheduling = self.messages.Scheduling(
        automaticRestart=not args.no_restart_on_failure,
        onHostMaintenance=on_host_maintenance)

  service_accounts = self.CreateServiceAccountMessages(args)

  if args.tags:
    tags = self.messages.Tags(items=args.tags)
  else:
    tags = None

  metadata = metadata_utils.ConstructMetadataMessage(
      self.messages,
      metadata=args.metadata,
      metadata_from_file=args.metadata_from_file)

  # If the user already provided an initial Windows password and
  # username through metadata, then there is no need to check
  # whether the image or the boot disk is Windows.

  boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
  utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

  instance_refs = self.CreateZonalReferences(args.names, args.zone)

  # Check if the zone is deprecated or has maintenance coming.
  self.WarnForZonalCreation(instance_refs)

  network_interface = self.CreateNetworkInterfaceMessage(args, instance_refs)

  # The element at index i is the machine type URI for instance
  # i. We build this list here because we want to delay work that
  # requires API calls as much as possible. This leads to a better
  # user experience because the tool can fail fast upon a spelling
  # mistake instead of delaying the user by making API calls whose
  # purpose has already been rendered moot by the spelling mistake.
  machine_type_uris = []
  for instance_ref in instance_refs:
    machine_type_uris.append(self.CreateZonalReference(
        args.machine_type, instance_ref.zone,
        resource_type='machineTypes').SelfLink())

  create_boot_disk = not _UseExistingBootDisk(args)
  if create_boot_disk:
    image_uri, _ = self.ExpandImageFlag(
        args, return_image_resource=False)
  else:
    image_uri = None

  # A list of lists where the element at index i contains a list of
  # disk messages that should be set for the instance at index i.
  disks_messages = []

  # A mapping of zone to boot disk references for all existing boot
  # disks that are being attached.
  # TODO(user): Simplify this once resources.Resource becomes
  # hashable.
  existing_boot_disks = {}
  for instance_ref in instance_refs:
    persistent_disks, boot_disk_ref = (
        self.CreatePersistentAttachedDiskMessages(args, instance_ref))
    local_ssds = [
        instance_utils.CreateLocalSsdMessage(
            self, x.get('device-name'), x.get('interface'), instance_ref.zone)
        for x in args.local_ssd or []]
    if create_boot_disk:
      boot_disk = self.CreateDefaultBootAttachedDiskMessage(
          args, boot_disk_size_gb, image_uri, instance_ref)
      persistent_disks = [boot_disk] + persistent_disks
    else:
      existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
    disks_messages.append(persistent_disks + local_ssds)

  requests = []
  for instance_ref, machine_type_uri, disks in zip(
      instance_refs, machine_type_uris, disks_messages):
    requests.append(self.messages.ComputeInstancesInsertRequest(
        instance=self.messages.Instance(
            canIpForward=args.can_ip_forward,
            disks=disks,
            description=args.description,
            machineType=machine_type_uri,
            metadata=metadata,
            name=instance_ref.Name(),
            networkInterfaces=[network_interface],
            serviceAccounts=service_accounts,
            scheduling=scheduling,
            tags=tags,
        ),
        project=self.project,
        zone=instance_ref.zone))

  return requests

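# A minimal sketch (not part of the command above) of how the request fan-out
# pairs parallel lists by index: instance_refs, machine_type_uris, and
# disks_messages are built so that position i describes instance i, and the
# zip() in CreateRequests emits one insert request per triple. The names below
# are hypothetical placeholders, not real resources.
_EXAMPLE_NAMES = ['i-1', 'i-2']
_EXAMPLE_MACHINE_TYPE_URIS = ['.../zones/zone-1/machineTypes/n1-standard-1',
                              '.../zones/zone-2/machineTypes/n1-standard-1']
_EXAMPLE_DISKS = [['boot-disk-1', 'data-disk', 'local-ssd'],  # for i-1
                  ['boot-disk-2']]                            # for i-2
_EXAMPLE_REQUEST_TRIPLES = list(zip(
    _EXAMPLE_NAMES, _EXAMPLE_MACHINE_TYPE_URIS, _EXAMPLE_DISKS))
# -> one (name, machine type URI, disk list) triple per insert request, with
#    each disk list ordered boot disk first, then other persistent disks,
#    then local SSDs, matching the concatenation in the loop above.
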
def CreateRequests(self, args):
  _ValidateDiskFlags(args)
  instance_utils.ValidateLocalSsdFlags(args)

  # TODO(user) drop test after CSEK goes GA
  if hasattr(args, 'csek_key_file'):
    self.csek_keys = csek_utils.CsekKeyStore.FromArgs(args)
  else:
    self.csek_keys = None

  if args.maintenance_policy:
    on_host_maintenance = (
        self.messages.Scheduling.OnHostMaintenanceValueValuesEnum(
            args.maintenance_policy))
  else:
    on_host_maintenance = None

  # Note: We always specify automaticRestart=False for preemptible VMs. This
  # makes sense, since no-restart-on-failure is defined as "store-true", and
  # thus can't be given an explicit value. Hence it either has its default
  # value (in which case we override it for convenience's sake to the only
  # setting that makes sense for preemptible VMs), or the user actually
  # specified no-restart-on-failure, the only usable setting.
  if args.preemptible:
    scheduling = self.messages.Scheduling(
        automaticRestart=False,
        onHostMaintenance=on_host_maintenance,
        preemptible=True)
  else:
    scheduling = self.messages.Scheduling(
        automaticRestart=not args.no_restart_on_failure,
        onHostMaintenance=on_host_maintenance)

  service_accounts = self.CreateServiceAccountMessages(args)

  if args.tags:
    tags = self.messages.Tags(items=args.tags)
  else:
    tags = None

  metadata = metadata_utils.ConstructMetadataMessage(
      self.messages,
      metadata=args.metadata,
      metadata_from_file=args.metadata_from_file)

  # If the user already provided an initial Windows password and
  # username through metadata, then there is no need to check
  # whether the image or the boot disk is Windows.
  windows_username_present = False
  windows_password_present = False
  for kv in metadata.items:
    if kv.key == constants.INITIAL_WINDOWS_USER_METADATA_KEY_NAME:
      windows_username_present = True
    if kv.key == constants.INITIAL_WINDOWS_PASSWORD_METADATA_KEY_NAME:
      windows_password_present = True
  check_for_windows_image = (not windows_username_present or
                             not windows_password_present)

  boot_disk_size_gb = utils.BytesToGb(args.boot_disk_size)
  utils.WarnIfDiskSizeIsTooSmall(boot_disk_size_gb, args.boot_disk_type)

  instance_refs = self.CreateZonalReferences(args.names, args.zone)

  # Check if the zone is deprecated or has maintenance coming.
  self.WarnForZonalCreation(instance_refs)

  network_interface = self.CreateNetworkInterfaceMessage(args, instance_refs)

  # The element at index i is the machine type URI for instance
  # i. We build this list here because we want to delay work that
  # requires API calls as much as possible. This leads to a better
  # user experience because the tool can fail fast upon a spelling
  # mistake instead of delaying the user by making API calls whose
  # purpose has already been rendered moot by the spelling mistake.
  machine_type_uris = []
  for instance_ref in instance_refs:
    machine_type_uris.append(self.CreateZonalReference(
        args.machine_type, instance_ref.zone,
        resource_type='machineTypes').SelfLink())

  create_boot_disk = not _UseExistingBootDisk(args)
  add_windows_credentials_to_metadata = False
  if create_boot_disk:
    image_uri, image_resource = self.ExpandImageFlag(
        args, return_image_resource=check_for_windows_image)
    if (check_for_windows_image and
        image_utils.HasWindowsLicense(image_resource, self.resources)):
      log.debug('[%s] is a Windows image.', image_resource.selfLink)
      add_windows_credentials_to_metadata = True

    # TODO(user): Rip this section out along with the other windows
    # password stuff on ~20150506 (also remove 'import re' at the top)
    # We should only match on images that are named exactly the way the
    # images team names the images.
    p = re.compile(r'windows-server-20\d\d(-r2)*-(dc|ent-internal)'
                   r'-v(\d\d\d\d\d\d\d\d)')
    if image_resource and p.match(image_resource.name):
      datestamp = int(p.match(image_resource.name).group(3))
      # Don't set Windows credentials for any Windows image built after 4/20.
      if datestamp > 20150420:
        add_windows_credentials_to_metadata = False
  else:
    image_uri = None

  # A list of lists where the element at index i contains a list of
  # disk messages that should be set for the instance at index i.
  disks_messages = []

  # A mapping of zone to boot disk references for all existing boot
  # disks that are being attached.
  # TODO(user): Simplify this once resources.Resource becomes
  # hashable.
  existing_boot_disks = {}
  for instance_ref in instance_refs:
    persistent_disks, boot_disk_ref = (
        self.CreatePersistentAttachedDiskMessages(args, instance_ref))
    local_ssds = [
        instance_utils.CreateLocalSsdMessage(
            self, x.get('device-name'), x.get('interface'), instance_ref.zone)
        for x in args.local_ssd or []]
    if create_boot_disk:
      boot_disk = self.CreateDefaultBootAttachedDiskMessage(
          args, boot_disk_size_gb, image_uri, instance_ref)
      persistent_disks = [boot_disk] + persistent_disks
    else:
      existing_boot_disks[boot_disk_ref.zone] = boot_disk_ref
    disks_messages.append(persistent_disks + local_ssds)

  # Now for every existing boot disk being attached, we have to
  # figure out whether it has a Windows license.
  if check_for_windows_image and existing_boot_disks:
    # Sorts the disk references by zone, so the code behaves
    # deterministically.
    disk_resources = self.FetchDiskResources(
        disk_ref for _, disk_ref in sorted(existing_boot_disks.iteritems()))
    for disk_resource in disk_resources:
      if image_utils.HasWindowsLicense(disk_resource, self.resources):
        log.debug('[%s] has a Windows image.', disk_resource.selfLink)
        add_windows_credentials_to_metadata = True

  # When the user creates more than one instance, it's possible for
  # some instances to have boot disks that have been initialized
  # with Windows images while others have boot disks without Windows
  # images:
  #
  #   gcloud compute instances create
  #       https://.../zone-1/instances/i-1
  #       https://.../zone-2/instances/i-2
  #       --disk name=x boot=yes
  #
  # In the above, disk "x" in zone-1 could be a Windows disk while
  # "x" in zone-2 could be a non-Windows disk. When this case is
  # encountered, we add the Windows username and password to all
  # instances, because writing code to deal with such a rare case is
  # simply not worth the added complexity.
  if add_windows_credentials_to_metadata:
    if not windows_username_present:
      username = self.project.split(':')[-1][
          :constants.MAX_WINDOWS_USERNAME_LENGTH]
      metadata.items.append(self.messages.Metadata.ItemsValueListEntry(
          key=constants.INITIAL_WINDOWS_USER_METADATA_KEY_NAME,
          value=username))
    if not windows_password_present:
      metadata.items.append(self.messages.Metadata.ItemsValueListEntry(
          key=constants.INITIAL_WINDOWS_PASSWORD_METADATA_KEY_NAME,
          value=windows_password.Generate()))

  requests = []
  for instance_ref, machine_type_uri, disks in zip(
      instance_refs, machine_type_uris, disks_messages):
    requests.append(self.messages.ComputeInstancesInsertRequest(
        instance=self.messages.Instance(
            canIpForward=args.can_ip_forward,
            disks=disks,
            description=args.description,
            machineType=machine_type_uri,
            metadata=metadata,
            name=instance_ref.Name(),
            networkInterfaces=[network_interface],
            serviceAccounts=service_accounts,
            scheduling=scheduling,
            tags=tags,
        ),
        project=self.project,
        zone=instance_ref.zone))

  return requests

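# A minimal sketch (not part of the command above) of the image-name check in
# CreateRequests. The regex and the 20150420 cutoff are copied from the code;
# the helper name and the sample image names are hypothetical and exist only
# to illustrate the behavior. 're' is already imported at the top of this
# module, per the TODO in CreateRequests.
def _SketchSkipWindowsCredentials(image_name):
  """Sketch: True when the image-name datestamp is after 2015-04-20."""
  pattern = re.compile(r'windows-server-20\d\d(-r2)*-(dc|ent-internal)'
                       r'-v(\d\d\d\d\d\d\d\d)')
  match = pattern.match(image_name)
  return bool(match and int(match.group(3)) > 20150420)

# _SketchSkipWindowsCredentials('windows-server-2012-r2-dc-v20150401')
#   -> False  (older image: username/password metadata would still be added)
# _SketchSkipWindowsCredentials('windows-server-2012-r2-dc-v20150506')
#   -> True   (newer image: add_windows_credentials_to_metadata is reset)
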