def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Handle an update by re-attaching the volume with new parameters.

    Returns a ``(detach_progress, attach_progress)`` pair; both are
    ``None`` when nothing changed.
    """
    if not prop_diff:
        return None, None

    # Even though some combinations of changed properties could be
    # updated in UpdateReplace manner, we still first detach the old
    # resource so that self.resource_id is not replaced prematurely.
    old_volume = self.properties[self.VOLUME_ID]
    old_server = self._stored_properties_data.get(self.INSTANCE_ID)
    self.client_plugin('nova').detach_volume(old_server,
                                             self.resource_id)
    detach_task = heat_cinder.VolumeDetachProgress(
        old_server, old_volume, self.resource_id)
    # The detach call above has already been issued.
    detach_task.called = True

    # Resolve the post-update attachment parameters, preferring any
    # changed value over the currently stored one.
    new_volume = prop_diff.get(self.VOLUME_ID, old_volume)
    new_device = prop_diff.get(self.DEVICE, self.properties[self.DEVICE])
    new_server = prop_diff.get(self.INSTANCE_ID, old_server)
    attach_task = heat_cinder.VolumeAttachProgress(
        new_server, new_volume, new_device)

    return detach_task, attach_task
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    """Apply in-place updates to the backing Cinder volume.

    Handles name/description, metadata, volume type, read-only flag and
    size changes.  Returns ``(detach, resize, attach)`` progress objects,
    each of which is ``None`` when the corresponding step is not needed.
    """
    cinder = self.client()
    prg_resize = None
    prg_attach = None
    prg_detach = None
    _cache = {}

    def fetch_volume():
        # Look the volume up from Cinder at most once, and only when
        # some branch actually needs the full volume object.
        if 'vol' not in _cache:
            _cache['vol'] = cinder.volumes.get(self.resource_id)
        return _cache['vol']

    # Update the name and description for the cinder volume.
    if self.NAME in prop_diff or self.DESCRIPTION in prop_diff:
        # Fall back to the stored property when the diff carries an
        # empty/None value.
        update_name = (prop_diff.get(self.NAME) or
                       self.properties[self.NAME])
        update_description = (prop_diff.get(self.DESCRIPTION) or
                              self.properties[self.DESCRIPTION])
        kwargs = self._fetch_name_and_description(
            cinder.volume_api_version, update_name, update_description)
        cinder.volumes.update(fetch_volume(), **kwargs)

    # Update the metadata for the cinder volume.
    if self.METADATA in prop_diff:
        cinder.volumes.update_all_metadata(
            fetch_volume(), prop_diff.get(self.METADATA))

    # Retype the volume; only supported from Cinder API V2 on.
    if self.VOLUME_TYPE in prop_diff:
        if cinder.volume_api_version == 1:
            LOG.info(
                _LI('Volume type update not supported '
                    'by Cinder API V1.'))
            raise exception.NotSupported(
                feature=_('Using Cinder API V1, volume_type update'))
        cinder.volumes.retype(
            fetch_volume(), prop_diff.get(self.VOLUME_TYPE), 'never')

    # Update the read-only access mode.
    if self.READ_ONLY in prop_diff:
        cinder.volumes.update_readonly_flag(
            self.resource_id, prop_diff.get(self.READ_ONLY))

    # Extend the volume size; shrinking is not supported by Cinder.
    if self.SIZE in prop_diff:
        volume = fetch_volume()
        new_size = prop_diff[self.SIZE]
        if new_size < volume.size:
            raise exception.NotSupported(feature=_("Shrinking volume"))
        if new_size > volume.size:
            prg_resize = heat_cinder.VolumeResizeProgress(size=new_size)
            if volume.attachments:
                # NOTE(pshchelo):
                # this relies on current behavior of cinder attachments,
                # i.e. volume attachments is a list with len<=1, so the
                # volume can be attached only to a single instance, and
                # the id of the attachment is the same as the id of the
                # volume it describes, so detach/attach of the same
                # volume will not change the volume attachment id.
                attachment = volume.attachments[0]
                prg_detach = heat_cinder.VolumeDetachProgress(
                    attachment['server_id'], volume.id, attachment['id'])
                prg_attach = heat_cinder.VolumeAttachProgress(
                    attachment['server_id'], volume.id,
                    attachment['device'])

    return prg_detach, prg_resize, prg_attach
def handle_create(self):
    """Create the Nova server backing this instance resource.

    Builds all boot parameters (image, flavor, nics, block device
    mapping, scheduler hints, userdata) from the resource properties and
    issues the server create.  Returns a ``(ServerCreateProgress,
    tuple_of_VolumeAttachProgress)`` pair for the completion checks.
    """
    security_groups = self._get_security_groups()

    userdata = self.properties[self.USER_DATA] or ''
    flavor = self.properties[self.INSTANCE_TYPE]
    availability_zone = self.properties[self.AVAILABILITY_ZONE]

    image_name = self.properties[self.IMAGE_ID]
    image_id = self.client_plugin('glance').get_image_id(image_name)

    flavor_id = self.client_plugin().get_flavor_id(flavor)

    scheduler_hints = {}
    if self.properties[self.NOVA_SCHEDULER_HINTS]:
        for tm in self.properties[self.NOVA_SCHEDULER_HINTS]:
            # Adopted from novaclient shell: a repeated hint key
            # collects its values into a list.
            hint = tm[self.NOVA_SCHEDULER_HINT_KEY]
            hint_value = tm[self.NOVA_SCHEDULER_HINT_VALUE]
            if hint in scheduler_hints:
                if isinstance(scheduler_hints[hint], six.string_types):
                    scheduler_hints[hint] = [scheduler_hints[hint]]
                scheduler_hints[hint].append(hint_value)
            else:
                scheduler_hints[hint] = hint_value
    else:
        scheduler_hints = None

    if cfg.CONF.stack_scheduler_hints:
        if scheduler_hints is None:
            scheduler_hints = {}
        scheduler_hints['heat_root_stack_id'] = self.stack.root_stack_id()
        scheduler_hints['heat_stack_id'] = self.stack.id
        scheduler_hints['heat_stack_name'] = self.stack.name
        scheduler_hints['heat_path_in_stack'] = self.stack.path_in_stack()
        scheduler_hints['heat_resource_name'] = self.name

    nics = self._build_nics(self.properties[self.NETWORK_INTERFACES],
                            security_groups=security_groups,
                            subnet_id=self.properties[self.SUBNET_ID])

    block_device_mapping = self._build_block_device_mapping(
        self.properties.get(self.BLOCK_DEVICE_MAPPINGS))

    server = None

    # FIXME(shadower): the instance_user config option is deprecated. Once
    # it's gone, we should always use ec2-user for compatibility with
    # CloudFormation.
    if cfg.CONF.instance_user:
        instance_user = cfg.CONF.instance_user
    else:
        # BUGFIX: the fallback user was a redacted placeholder
        # ('******'); restore the CloudFormation-compatible default
        # the FIXME above describes.
        instance_user = 'ec2-user'

    try:
        server = self.nova().servers.create(
            name=self.physical_resource_name(),
            image=image_id,
            flavor=flavor_id,
            key_name=self.properties[self.KEY_NAME],
            security_groups=security_groups,
            userdata=self.client_plugin().build_userdata(
                self.metadata_get(), userdata, instance_user),
            meta=self._get_nova_metadata(self.properties),
            scheduler_hints=scheduler_hints,
            nics=nics,
            availability_zone=availability_zone,
            block_device_mapping=block_device_mapping)
    finally:
        # Avoid a race condition where the thread could be cancelled
        # before the ID is stored
        if server is not None:
            self.resource_id_set(server.id)

    creator = nova_cp.ServerCreateProgress(server.id)
    attachers = []
    for vol_id, device in self.volumes():
        attachers.append(
            cinder_cp.VolumeAttachProgress(self.resource_id,
                                           vol_id, device))
    return creator, tuple(attachers)