def test_validate_okay(self):
    test_res = self._get_test_resource(self.kp_template)
    self.m.StubOutWithMock(nova_utils, 'get_keypair')
    nova_utils.get_keypair(mox.IgnoreArg(), 'key_pair').AndRaise(
        exception.UserKeyPairMissing(key_name='foo'))
    self.m.ReplayAll()
    self.assertIsNone(test_res.validate())
def handle_create(self):
    security_groups = self.properties.get(self.SECURITY_GROUPS)
    user_data_format = self.properties.get(self.USER_DATA_FORMAT)
    userdata = nova_utils.build_userdata(
        self,
        self.properties.get(self.USER_DATA),
        instance_user=self.properties[self.ADMIN_USER],
        user_data_format=user_data_format)

    flavor = self.properties[self.FLAVOR]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]

    key_name = self.properties[self.KEY_NAME]
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image = self.properties.get(self.IMAGE)
    if image:
        image = nova_utils.get_image_id(self.nova(), image)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    instance_meta = self.properties.get(self.METADATA)
    if instance_meta is not None:
        instance_meta = nova_utils.meta_serialize(instance_meta)

    scheduler_hints = self.properties.get(self.SCHEDULER_HINTS)
    nics = self._build_nics(self.properties.get(self.NETWORKS))
    block_device_mapping = self._build_block_device_mapping(
        self.properties.get(self.BLOCK_DEVICE_MAPPING))
    reservation_id = self.properties.get(self.RESERVATION_ID)
    config_drive = self.properties.get(self.CONFIG_DRIVE)
    disk_config = self.properties.get(self.DISK_CONFIG)

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=userdata,
            meta=instance_meta,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping,
            reservation_id=reservation_id,
            config_drive=config_drive,
            disk_config=disk_config)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def validate_with_client(self, client, value):
    if not value:
        # Don't validate empty key, which can happen when you use a KeyPair
        # resource
        return True
    nova_client = client.nova()
    nova_utils.get_keypair(nova_client, value)
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get('key_name', None)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # either volume_id or snapshot_id needs to be specified, but not both
    # for block device mapping.
    bdm = self.properties.get('block_device_mapping') or []
    bootable_vol = False
    for mapping in bdm:
        if mapping['device_name'] == 'vda':
            bootable_vol = True

        if mapping.get('volume_id') and mapping.get('snapshot_id'):
            raise exception.ResourcePropertyConflict('volume_id',
                                                     'snapshot_id')
        if not mapping.get('volume_id') and not mapping.get('snapshot_id'):
            msg = _('Either volume_id or snapshot_id must be specified for'
                    ' device mapping %s') % mapping['device_name']
            raise exception.StackValidationFailed(message=msg)

    # make sure the image exists if specified.
    image = self.properties.get('image', None)
    if image:
        nova_utils.get_image_id(self.nova(), image)
    elif not image and not bootable_vol:
        msg = _('Neither image nor bootable volume is specified for'
                ' instance %s') % self.name
        raise exception.StackValidationFailed(message=msg)
def validate_with_client(self, client, value):
    if not value:
        # Don't validate empty key, which can happen when you use a KeyPair
        # resource
        return True
    nova_client = client.client('nova')
    nova_utils.get_keypair(nova_client, value)
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get('key_name', None)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # either volume_id or snapshot_id needs to be specified, but not both
    # for block device mapping.
    bdm = self.properties.get('block_device_mapping') or []
    bootable_vol = False
    for mapping in bdm:
        if mapping['device_name'] == 'vda':
            bootable_vol = True

        if mapping.get('volume_id') and mapping.get('snapshot_id'):
            raise exception.ResourcePropertyConflict(
                'volume_id', 'snapshot_id')
        if not mapping.get('volume_id') and not mapping.get('snapshot_id'):
            msg = _('Either volume_id or snapshot_id must be specified for'
                    ' device mapping %s') % mapping['device_name']
            raise exception.StackValidationFailed(message=msg)

    # make sure the image exists if specified.
    image = self.properties.get('image', None)
    if image:
        nova_utils.get_image_id(self.nova(), image)
    elif not image and not bootable_vol:
        msg = _('Neither image nor bootable volume is specified for'
                ' instance %s') % self.name
        raise exception.StackValidationFailed(message=msg)
def test_validate_failure_key_exists(self):
    test_res = self._get_test_resource(self.kp_template)
    self.m.StubOutWithMock(nova_utils, 'get_keypair')
    nova_utils.get_keypair(mox.IgnoreArg(), 'key_pair').AndReturn('foo')
    self.m.ReplayAll()
    exc = self.assertRaises(exception.StackValidationFailed,
                            test_res.validate)
    self.assertIn('Cannot create KeyPair resource with a name of '
                  '"key_pair"', str(exc))
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties['UserData'] or ''
    flavor = self.properties['InstanceType']
    availability_zone = self.properties['AvailabilityZone']

    key_name = self.properties['KeyName']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties['ImageId']
    image_id = nova_utils.get_image_id(self.nova(), image_name)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    tags = {}
    if self.properties['Tags']:
        for tm in self.properties['Tags']:
            tags[tm['Key']] = tm['Value']
    else:
        tags = None

    scheduler_hints = {}
    if self.properties['NovaSchedulerHints']:
        for tm in self.properties['NovaSchedulerHints']:
            scheduler_hints[tm['Key']] = tm['Value']
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties['NetworkInterfaces'],
                            security_groups=security_groups,
                            subnet_id=self.properties['SubnetId'])

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=self.get_mime_string(userdata),
            meta=tags,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_create(self):
    security_groups = self.properties.get('security_groups', [])

    user_data_format = self.properties.get('user_data_format')
    if user_data_format == 'HEAT_CFNTOOLS':
        userdata = self.get_mime_string(
            self.properties.get('user_data', ''))
    else:
        userdata = self.properties.get('user_data', '')

    flavor = self.properties['flavor']
    availability_zone = self.properties['availability_zone']

    key_name = self.properties['key_name']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image = self.properties.get('image')
    if image:
        image = nova_utils.get_image_id(self.nova(), image)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    instance_meta = self.properties.get('metadata')
    scheduler_hints = self.properties.get('scheduler_hints')
    nics = self._build_nics(self.properties.get('networks'))
    block_device_mapping = self._build_block_device_mapping(
        self.properties.get('block_device_mapping'))
    reservation_id = self.properties.get('reservation_id')
    config_drive = self.properties.get('config_drive')
    disk_config = self.properties.get('diskConfig')

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=userdata,
            meta=instance_meta,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping,
            reservation_id=reservation_id,
            config_drive=config_drive,
            disk_config=disk_config)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def handle_create(self):
    security_groups = self.properties.get('security_groups', [])
    user_data_format = self.properties.get('user_data_format')
    userdata = nova_utils.build_userdata(
        self,
        self.properties.get('user_data', ''),
        instance_user=self.properties['admin_user'],
        user_data_format=user_data_format)

    flavor = self.properties['flavor']
    availability_zone = self.properties['availability_zone']

    key_name = self.properties['key_name']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image = self.properties.get('image')
    if image:
        image = nova_utils.get_image_id(self.nova(), image)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    instance_meta = self.properties.get('metadata')
    scheduler_hints = self.properties.get('scheduler_hints')
    nics = self._build_nics(self.properties.get('networks'))
    block_device_mapping = self._build_block_device_mapping(
        self.properties.get('block_device_mapping'))
    reservation_id = self.properties.get('reservation_id')
    config_drive = self.properties.get('config_drive')
    disk_config = self.properties.get('diskConfig')

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=userdata,
            meta=instance_meta,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping,
            reservation_id=reservation_id,
            config_drive=config_drive,
            disk_config=disk_config)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def test_validate_failure_key_exists(self):
    test_res = self._get_test_resource(self.kp_template)
    self.m.StubOutWithMock(nova_utils, 'get_keypair')
    nova_utils.get_keypair(mox.IgnoreArg(), 'key_pair').AndReturn('foo')
    self.m.ReplayAll()
    exc = self.assertRaises(exception.StackValidationFailed,
                            test_res.validate)
    self.assertIn(
        'Cannot create KeyPair resource with a name of '
        '"key_pair"', str(exc))
def validate(self, value, context):
    if not value:
        # Don't validate empty key, which can happen when you use a KeyPair
        # resource
        return True
    try:
        nova_utils.get_keypair(Clients(context).nova(), value)
    except exception.UserKeyPairMissing:
        return False
    else:
        return True
def validate(self):
    super(KeyPair, self).validate()
    name = self.properties[self.NAME]
    try:
        nova_utils.get_keypair(self.nova(), name)
    except exception.UserKeyPairMissing:
        pass
    else:
        msg = _('Cannot create KeyPair resource with a name of "%s" (a '
                'keypair with that name already exists)') % name
        raise exception.StackValidationFailed(message=msg)
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties['UserData'] or ''
    flavor = self.properties['InstanceType']
    availability_zone = self.properties['AvailabilityZone']

    key_name = self.properties['KeyName']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties['ImageId']
    image_id = nova_utils.get_image_id(self.nova(), image_name)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    tags = {}
    if self.properties['Tags']:
        for tm in self.properties['Tags']:
            tags[tm['Key']] = tm['Value']
    else:
        tags = None

    scheduler_hints = {}
    if self.properties['NovaSchedulerHints']:
        for tm in self.properties['NovaSchedulerHints']:
            scheduler_hints[tm['Key']] = tm['Value']
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties['NetworkInterfaces'],
                            subnet_id=self.properties['SubnetId'])

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=self.get_mime_string(userdata),
            meta=tags,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get('key_name')
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # either volume_id or snapshot_id needs to be specified, but not both
    # for block device mapping.
    bdm = self.properties.get('block_device_mapping') or []
    bootable_vol = False
    for mapping in bdm:
        if mapping['device_name'] == 'vda':
            bootable_vol = True

        if mapping.get('volume_id') and mapping.get('snapshot_id'):
            raise exception.ResourcePropertyConflict('volume_id',
                                                     'snapshot_id')
        if not mapping.get('volume_id') and not mapping.get('snapshot_id'):
            msg = _('Either volume_id or snapshot_id must be specified for'
                    ' device mapping %s') % mapping['device_name']
            raise exception.StackValidationFailed(message=msg)

    # make sure the image exists if specified.
    image = self.properties.get('image')
    if image:
        nova_utils.get_image_id(self.nova(), image)
    elif not image and not bootable_vol:
        msg = _('Neither image nor bootable volume is specified for'
                ' instance %s') % self.name
        raise exception.StackValidationFailed(message=msg)

    # network properties 'uuid' and 'network' shouldn't be used
    # both at once for all networks
    networks = self.properties.get('networks') or []
    for network in networks:
        if network.get('uuid') and network.get('network'):
            msg = _('Properties "uuid" and "network" are both set '
                    'to the network "%(network)s" for the server '
                    '"%(server)s". The "uuid" property is deprecated. '
                    'Use only "network" property.'
                    '') % dict(network=network['network'],
                               server=self.name)
            raise exception.StackValidationFailed(message=msg)
        elif network.get('uuid'):
            logger.info(_('For the server "%(server)s" the "uuid" '
                          'property is set to network "%(network)s". '
                          '"uuid" property is deprecated. Use "network" '
                          'property instead.'
                          '') % dict(network=network['network'],
                                     server=self.name))
def validate(self):
    '''
    Validate any of the provided params
    '''
    # check validity of key
    key_name = self.properties.get('KeyName', None)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # make sure the image exists.
    image_name = self.properties['ImageId']
    live_image = self._lookup_live_image(image_name)
    if not live_image:
        return "Live-Image not found: %s" % image_name
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties[self.USER_DATA] or ''
    flavor = self.properties[self.INSTANCE_TYPE]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]

    key_name = self.properties[self.KEY_NAME]
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties[self.IMAGE_ID]
    image_id = nova_utils.get_image_id(self.nova(), image_name)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    scheduler_hints = {}
    if self.properties[self.NOVA_SCHEDULER_HINTS]:
        for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
            scheduler_hints[tm[self.TAG_KEY]] = tm[self.TAG_VALUE]
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                            security_groups=security_groups,
                            subnet_id=self.properties[self.SUBNET_ID])

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=nova_utils.build_userdata(self, userdata),
            meta=self._get_nova_metadata(self.properties),
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_create(self): """Create a Rackspace Cloud Servers container. Rackspace Cloud Servers does not have the metadata service running, so we have to transfer the user-data file to the server and then trigger cloud-init. """ # Generate SSH public/private keypair if self._private_key is not None: rsa = RSA.importKey(self._private_key) else: rsa = RSA.generate(1024) self.private_key = rsa.exportKey() public_keys = [rsa.publickey().exportKey('OpenSSH')] if self.properties.get('key_name'): key_name = self.properties['key_name'] public_keys.append( nova_utils.get_keypair(self.nova(), key_name).public_key) personality_files = { "/root/.ssh/authorized_keys": '\n'.join(public_keys) } # Create server client = self.nova().servers logger.debug("Calling nova().servers.create()") server = client.create(self.physical_resource_name(), self.image, self.flavor, files=personality_files) # Save resource ID to db self.resource_id_set(server.id) return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_create(self): """Create a Rackspace Cloud Servers container. Rackspace Cloud Servers does not have the metadata service running, so we have to transfer the user-data file to the server and then trigger cloud-init. """ # Generate SSH public/private keypair if self._private_key is not None: rsa = RSA.importKey(self._private_key) else: rsa = RSA.generate(1024) self.private_key = rsa.exportKey() public_keys = [rsa.publickey().exportKey("OpenSSH")] if self.properties.get("key_name"): key_name = self.properties["key_name"] public_keys.append(nova_utils.get_keypair(self.nova(), key_name).public_key) personality_files = {"/root/.ssh/authorized_keys": "\n".join(public_keys)} # Create server client = self.nova().servers logger.debug("Calling nova().servers.create()") server = client.create(self.physical_resource_name(), self.image, self.flavor, files=personality_files) # Save resource ID to db self.resource_id_set(server.id) return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties['UserData'] or ''
    availability_zone = self.properties['AvailabilityZone']

    key_name = self.properties['KeyName']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties['ImageId']
    live_image = self._lookup_live_image(image_name)

    guest_params = {}
    if self.properties['GuestParams']:
        for tm in self.properties['GuestParams']:
            guest_params[tm['Key']] = tm['Value']
    else:
        guest_params = None

    scheduler_hints = {}
    if self.properties['NovaSchedulerHints']:
        for tm in self.properties['NovaSchedulerHints']:
            scheduler_hints[tm['Key']] = tm['Value']
    else:
        scheduler_hints = None

    server = None
    try:
        server = live_image.start_live_image(
            name=self.physical_resource_name(),
            guest_params=guest_params,
            key_name=key_name,
            security_groups=security_groups,
            user_data=self.get_mime_string(userdata),
            availability_zone=availability_zone,
            scheduler_hints=scheduler_hints)[0]
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def public_key(self):
    """Return the public SSH key for the resource."""
    if not self._public_key:
        if self.properties["public_key"]:
            self._public_key = self.properties["public_key"]
        elif self.resource_id:
            nova_key = nova_utils.get_keypair(self.nova(),
                                              self.resource_id)
            self._public_key = nova_key.public_key
    return self._public_key
def public_key(self):
    """Return the public SSH key for the resource."""
    if not self._public_key:
        if self.properties['public_key']:
            self._public_key = self.properties['public_key']
        elif self.resource_id:
            nova_key = nova_utils.get_keypair(self.nova(),
                                              self.resource_id)
            self._public_key = nova_key.public_key
    return self._public_key
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get('key_name', None)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # make sure the image exists if specified.
    image = self.properties.get('image', None)
    if image:
        nova_utils.get_image_id(self.nova(), image)
    else:
        # TODO(sbaker) confirm block_device_mapping is populated
        # for boot-by-volume (see LP bug #1215267)
        pass
def validate(self): """ Validate any of the provided params """ res = super(Instance, self).validate() if res: return res # check validity of key key_name = self.properties.get("KeyName", None) if key_name: nova_utils.get_keypair(self.nova(), key_name) # check validity of security groups vs. network interfaces security_groups = self._get_security_groups() if security_groups and self.properties.get("NetworkInterfaces"): raise exception.ResourcePropertyConflict("SecurityGroups/SecurityGroupIds", "NetworkInterfaces") # make sure the image exists. nova_utils.get_image_id(self.nova(), self.properties["ImageId"])
def validate(self):
    '''
    Validate any of the provided params
    '''
    res = super(Instance, self).validate()
    if res:
        return res

    # check validity of key
    key_name = self.properties.get('KeyName', None)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # check validity of security groups vs. network interfaces
    security_groups = self._get_security_groups()
    if security_groups and self.properties.get('NetworkInterfaces'):
        raise exception.ResourcePropertyConflict(
            'SecurityGroups/SecurityGroupIds',
            'NetworkInterfaces')

    # make sure the image exists.
    nova_utils.get_image_id(self.nova(), self.properties['ImageId'])
def validate(self):
    '''
    Validate any of the provided params
    '''
    res = super(Instance, self).validate()
    if res:
        return res

    # check validity of key
    key_name = self.properties.get(self.KEY_NAME)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # check validity of security groups vs. network interfaces
    security_groups = self._get_security_groups()
    if security_groups and self.properties.get(self.NETWORK_INTERFACES):
        raise exception.ResourcePropertyConflict(
            '/'.join([self.SECURITY_GROUPS, self.SECURITY_GROUP_IDS]),
            self.NETWORK_INTERFACES)

    # make sure the image exists.
    nova_utils.get_image_id(self.nova(), self.properties[self.IMAGE_ID])
def test_get_keypair(self):
    """Tests the get_keypair function."""
    my_pub_key = "a cool public key string"
    my_key_name = "mykey"
    my_key = self.m.CreateMockAnything()
    my_key.public_key = my_pub_key
    my_key.name = my_key_name
    self.nova_client.keypairs = self.m.CreateMockAnything()
    self.nova_client.keypairs.list().MultipleTimes().AndReturn([my_key])
    self.m.ReplayAll()
    self.assertEqual(my_key, nova_utils.get_keypair(self.nova_client,
                                                    my_key_name))
    self.assertRaises(exception.UserKeyPairMissing,
                      nova_utils.get_keypair, self.nova_client, "notakey")
    self.m.VerifyAll()
def test_get_keypair(self):
    """Tests the get_keypair function."""
    my_pub_key = 'a cool public key string'
    my_key_name = 'mykey'
    my_key = self.m.CreateMockAnything()
    my_key.public_key = my_pub_key
    my_key.name = my_key_name
    self.nova_client.keypairs = self.m.CreateMockAnything()
    self.nova_client.keypairs.list().MultipleTimes().AndReturn([my_key])
    self.m.ReplayAll()
    self.assertEqual(my_key, nova_utils.get_keypair(self.nova_client,
                                                    my_key_name))
    self.assertRaises(exception.UserKeyPairMissing,
                      nova_utils.get_keypair, self.nova_client, 'notakey')
    self.m.VerifyAll()
def test_get_keypair(self):
    """Tests the get_keypair function."""
    my_pub_key = 'a cool public key string'
    my_key_name = 'mykey'
    my_key = self.m.CreateMockAnything()
    my_key.public_key = my_pub_key
    my_key.name = my_key_name
    self.nova_client.keypairs = self.m.CreateMockAnything()
    self.nova_client.keypairs.get(
        my_key_name).AndReturn(my_key)
    self.nova_client.keypairs.get(
        'notakey').AndRaise(fakes.fake_exception())
    self.m.ReplayAll()
    self.assertEqual(my_key, nova_utils.get_keypair(self.nova_client,
                                                    my_key_name))
    self.assertRaises(exception.UserKeyPairMissing,
                      nova_utils.get_keypair, self.nova_client, 'notakey')
    self.m.VerifyAll()
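The test variants above pin down the helper's contract: look the key up via the Nova client and raise exception.UserKeyPairMissing when it is absent. A minimal sketch of such a helper is shown below, consistent with the keypairs.get()-based test; the import paths and the novaclient NotFound exception class are assumptions for illustration, not the verbatim implementation from these snippets.

# Sketch only (assumed imports): fetch a keypair by name from Nova and
# translate the client's "not found" error into Heat's UserKeyPairMissing.
from novaclient import exceptions as nova_exceptions

from heat.common import exception


def get_keypair(nova_client, key_name):
    """Return the keypair named key_name, or raise UserKeyPairMissing."""
    try:
        return nova_client.keypairs.get(key_name)
    except nova_exceptions.NotFound:
        raise exception.UserKeyPairMissing(key_name=key_name)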
def _personality(self):
    # Generate SSH public/private keypair for the engine to use
    if self._private_key is not None:
        rsa = RSA.importKey(self._private_key)
    else:
        rsa = RSA.generate(1024)
    self.private_key = rsa.exportKey()
    public_keys = [rsa.publickey().exportKey('OpenSSH')]

    # Add the user-provided key_name to the authorized_keys file
    key_name = self.properties.get(self.KEY_NAME)
    if key_name:
        user_keypair = nova_utils.get_keypair(self.nova(), key_name)
        public_keys.append(user_keypair.public_key)
    personality = {"/root/.ssh/authorized_keys": '\n'.join(public_keys)}

    # Add any user-provided personality files
    user_personality = self.properties.get(self.PERSONALITY)
    if user_personality:
        personality.update(user_personality)

    return personality
def handle_create(self):
    security_groups = self.properties.get('security_groups', [])
    userdata = self.properties.get('user_data', '')
    flavor = self.properties['flavor']
    availability_zone = self.properties['availability_zone']

    key_name = self.properties['key_name']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image = self.properties.get('image')
    if image:
        image = nova_utils.get_image_id(self.nova(), image)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    instance_meta = self.properties.get('metadata')
    scheduler_hints = self.properties.get('scheduler_hints')
    nics = self._build_nics(self.properties.get('networks'))
    block_device_mapping = self._build_block_device_mapping(
        self.properties.get('block_device_mapping'))
    reservation_id = self.properties.get('reservation_id')
    config_drive = self.properties.get('config_drive')
    disk_config = self.properties.get('diskConfig')

    # TODO(sdake/shardy) ensure physical_resource_name() never returns a
    # string longer than 63 characters, as this is pretty inconvenient
    # behavior for autoscaling groups and nested stacks where instance
    # names can easily become quite long even with terse names.
    physical_resource_name_len = len(self.physical_resource_name())
    if physical_resource_name_len > 63:
        raise exception.Error(_('Server %(server)s length %(length)d > 63'
                                ' characters, please reduce the length of'
                                ' stack or resource names') %
                              dict(server=self.physical_resource_name(),
                                   length=physical_resource_name_len))

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=self.get_mime_string(userdata),
            meta=instance_meta,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping,
            reservation_id=reservation_id,
            config_drive=config_drive,
            disk_config=disk_config)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get(self.KEY_NAME)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # either volume_id or snapshot_id needs to be specified, but not both
    # for block device mapping.
    bdm = self.properties.get(self.BLOCK_DEVICE_MAPPING) or []
    bootable_vol = False
    for mapping in bdm:
        device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
        if device_name == 'vda':
            bootable_vol = True

        volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
        snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
        if volume_id and snapshot_id:
            raise exception.ResourcePropertyConflict(
                self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
                self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
        if not volume_id and not snapshot_id:
            msg = _('Either volume_id or snapshot_id must be specified for'
                    ' device mapping %s') % device_name
            raise exception.StackValidationFailed(message=msg)

    # make sure the image exists if specified.
    image = self.properties.get(self.IMAGE)
    if image:
        nova_utils.get_image_id(self.nova(), image)
    elif not image and not bootable_vol:
        msg = _('Neither image nor bootable volume is specified for'
                ' instance %s') % self.name
        raise exception.StackValidationFailed(message=msg)

    # network properties 'uuid' and 'network' shouldn't be used
    # both at once for all networks
    networks = self.properties.get(self.NETWORKS) or []
    for network in networks:
        if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
            msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
                    'to the network "%(network)s" for the server '
                    '"%(server)s". The "%(uuid)s" property is deprecated. '
                    'Use only "%(id)s" property.'
                    '') % dict(uuid=self.NETWORK_UUID,
                               id=self.NETWORK_ID,
                               network=network[self.NETWORK_ID],
                               server=self.name)
            raise exception.StackValidationFailed(message=msg)
        elif network.get(self.NETWORK_UUID):
            logger.info(_('For the server "%(server)s" the "%(uuid)s" '
                          'property is set to network "%(network)s". '
                          '"%(uuid)s" property is deprecated. Use '
                          '"%(id)s" property instead.'
                          '') % dict(uuid=self.NETWORK_UUID,
                                     id=self.NETWORK_ID,
                                     network=network[self.NETWORK_ID],
                                     server=self.name))

    # verify that the number of metadata entries is not greater
    # than the maximum number allowed in the provider's absolute
    # limits
    metadata = self.properties.get('metadata')
    if metadata is not None:
        limits = nova_utils.absolute_limits(self.nova())
        if len(metadata) > limits['maxServerMeta']:
            msg = _('Instance metadata must not contain greater than %s '
                    'entries. This is the maximum number allowed by your '
                    'service provider') % limits['maxServerMeta']
            raise exception.StackValidationFailed(message=msg)
def validate(self):
    '''
    Validate any of the provided params
    '''
    super(Server, self).validate()

    # check validity of key
    key_name = self.properties.get(self.KEY_NAME)
    if key_name:
        nova_utils.get_keypair(self.nova(), key_name)

    # either volume_id or snapshot_id needs to be specified, but not both
    # for block device mapping.
    bdm = self.properties.get(self.BLOCK_DEVICE_MAPPING) or []
    bootable_vol = False
    for mapping in bdm:
        device_name = mapping[self.BLOCK_DEVICE_MAPPING_DEVICE_NAME]
        if device_name == 'vda':
            bootable_vol = True

        volume_id = mapping.get(self.BLOCK_DEVICE_MAPPING_VOLUME_ID)
        snapshot_id = mapping.get(self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
        if volume_id and snapshot_id:
            raise exception.ResourcePropertyConflict(
                self.BLOCK_DEVICE_MAPPING_VOLUME_ID,
                self.BLOCK_DEVICE_MAPPING_SNAPSHOT_ID)
        if not volume_id and not snapshot_id:
            msg = _('Either volume_id or snapshot_id must be specified for'
                    ' device mapping %s') % device_name
            raise exception.StackValidationFailed(message=msg)

    # make sure the image exists if specified.
    image = self.properties.get(self.IMAGE)
    if image:
        nova_utils.get_image_id(self.nova(), image)
    elif not image and not bootable_vol:
        msg = _('Neither image nor bootable volume is specified for'
                ' instance %s') % self.name
        raise exception.StackValidationFailed(message=msg)

    # network properties 'uuid' and 'network' shouldn't be used
    # both at once for all networks
    networks = self.properties.get(self.NETWORKS) or []
    for network in networks:
        if network.get(self.NETWORK_UUID) and network.get(self.NETWORK_ID):
            msg = _('Properties "%(uuid)s" and "%(id)s" are both set '
                    'to the network "%(network)s" for the server '
                    '"%(server)s". The "%(uuid)s" property is deprecated. '
                    'Use only "%(id)s" property.'
                    '') % dict(uuid=self.NETWORK_UUID,
                               id=self.NETWORK_ID,
                               network=network[self.NETWORK_ID],
                               server=self.name)
            raise exception.StackValidationFailed(message=msg)
        elif network.get(self.NETWORK_UUID):
            logger.info(
                _('For the server "%(server)s" the "%(uuid)s" '
                  'property is set to network "%(network)s". '
                  '"%(uuid)s" property is deprecated. Use '
                  '"%(id)s" property instead.'
                  '') % dict(uuid=self.NETWORK_UUID,
                             id=self.NETWORK_ID,
                             network=network[self.NETWORK_ID],
                             server=self.name))

    # verify that the number of metadata entries is not greater
    # than the maximum number allowed in the provider's absolute
    # limits
    metadata = self.properties.get('metadata')
    if metadata is not None:
        limits = nova_utils.absolute_limits(self.nova())
        if len(metadata) > limits['maxServerMeta']:
            msg = _('Instance metadata must not contain greater than %s '
                    'entries. This is the maximum number allowed by your '
                    'service provider') % limits['maxServerMeta']
            raise exception.StackValidationFailed(message=msg)
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties['UserData'] or ''
    flavor = self.properties['InstanceType']
    availability_zone = self.properties['AvailabilityZone']

    key_name = self.properties['KeyName']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties['ImageId']
    image_id = nova_utils.get_image_id(self.nova(), image_name)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    tags = {}
    if self.properties['Tags']:
        for tm in self.properties['Tags']:
            tags[tm['Key']] = tm['Value']
    else:
        tags = None

    scheduler_hints = {}
    if self.properties['NovaSchedulerHints']:
        for tm in self.properties['NovaSchedulerHints']:
            scheduler_hints[tm['Key']] = tm['Value']
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties['NetworkInterfaces'],
                            security_groups=security_groups,
                            subnet_id=self.properties['SubnetId'])

    server = None

    # TODO(sdake/shardy) ensure physical_resource_name() never returns a
    # string longer than 63 characters, as this is pretty inconvenient
    # behavior for autoscaling groups and nested stacks where instance
    # names can easily become quite long even with terse names.
    physical_resource_name_len = len(self.physical_resource_name())
    if physical_resource_name_len > 63:
        raise exception.Error(_('Server %(server)s length %(length)d > 63'
                                ' characters, please reduce the length of'
                                ' stack or resource names') %
                              dict(server=self.physical_resource_name(),
                                   length=physical_resource_name_len))

    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=self.get_mime_string(userdata),
            meta=tags,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())
def handle_create(self):
    security_groups = self.properties.get('security_groups', [])

    user_data_format = self.properties.get('user_data_format')
    if user_data_format == 'HEAT_CFNTOOLS':
        userdata = self.get_mime_string(
            self.properties.get('user_data', ''))
    else:
        userdata = self.properties.get('user_data', '')

    flavor = self.properties['flavor']
    availability_zone = self.properties['availability_zone']

    key_name = self.properties['key_name']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image = self.properties.get('image')
    if image:
        image = nova_utils.get_image_id(self.nova(), image)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    instance_meta = self.properties.get('metadata')
    scheduler_hints = self.properties.get('scheduler_hints')
    nics = self._build_nics(self.properties.get('networks'))
    block_device_mapping = self._build_block_device_mapping(
        self.properties.get('block_device_mapping'))
    reservation_id = self.properties.get('reservation_id')
    config_drive = self.properties.get('config_drive')
    disk_config = self.properties.get('diskConfig')

    # TODO(sdake/shardy) ensure physical_resource_name() never returns a
    # string longer than 63 characters, as this is pretty inconvenient
    # behavior for autoscaling groups and nested stacks where instance
    # names can easily become quite long even with terse names.
    physical_resource_name_len = len(self.physical_resource_name())
    if physical_resource_name_len > 63:
        raise exception.Error(
            _('Server %(server)s length %(length)d > 63'
              ' characters, please reduce the length of'
              ' stack or resource names') %
            dict(server=self.physical_resource_name(),
                 length=physical_resource_name_len))

    server = None
    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=userdata,
            meta=instance_meta,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping,
            reservation_id=reservation_id,
            config_drive=config_drive,
            disk_config=disk_config)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server
def handle_create(self):
    security_groups = self._get_security_groups()

    userdata = self.properties['UserData'] or ''
    flavor = self.properties['InstanceType']
    availability_zone = self.properties['AvailabilityZone']

    key_name = self.properties['KeyName']
    if key_name:
        # confirm keypair exists
        nova_utils.get_keypair(self.nova(), key_name)

    image_name = self.properties['ImageId']
    image_id = nova_utils.get_image_id(self.nova(), image_name)

    flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)

    tags = {}
    if self.properties['Tags']:
        for tm in self.properties['Tags']:
            tags[tm['Key']] = tm['Value']
    else:
        tags = None

    scheduler_hints = {}
    if self.properties['NovaSchedulerHints']:
        for tm in self.properties['NovaSchedulerHints']:
            scheduler_hints[tm['Key']] = tm['Value']
    else:
        scheduler_hints = None

    nics = self._build_nics(self.properties['NetworkInterfaces'],
                            security_groups=security_groups,
                            subnet_id=self.properties['SubnetId'])

    server = None

    # TODO(sdake/shardy) ensure physical_resource_name() never returns a
    # string longer than 63 characters, as this is pretty inconvenient
    # behavior for autoscaling groups and nested stacks where instance
    # names can easily become quite long even with terse names.
    physical_resource_name_len = len(self.physical_resource_name())
    if physical_resource_name_len > 63:
        raise exception.Error(
            _('Server %(server)s length %(length)d > 63'
              ' characters, please reduce the length of'
              ' stack or resource names') %
            dict(server=self.physical_resource_name(),
                 length=physical_resource_name_len))

    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=key_name,
            security_groups=security_groups,
            userdata=self.get_mime_string(userdata),
            meta=tags,
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    return server, scheduler.TaskRunner(self._attach_volumes_task())