def _needs_update(self, after, before, after_props, before_props,
                  prev_resource, check_init_complete=True):
    # Issue an update to the nested stack if the stack resource
    # is able to update. If this returns True, let the individual
    # resources in it decide if they need updating.

    # FIXME (ricolin): it seems we currently cannot call super here
    if self.nested() is None and self.status == self.FAILED:
        raise resource.UpdateReplace(self)

    # If the stack resource is in CHECK_FAILED state, raise UpdateReplace
    # to replace the failed stack.
    if self.state == (self.CHECK, self.FAILED):
        raise resource.UpdateReplace(self)

    if (check_init_complete and
            self.nested() is None and
            self.action == self.INIT and self.status == self.COMPLETE):
        raise resource.UpdateReplace(self)

    return True
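# A minimal, self-contained sketch (not Heat's real engine code) of how a
# caller consumes _needs_update: an UpdateReplace exception triggers
# replacement, while a True return falls through to an in-place update.
# FakeResource, _replace and _update_in_place are hypothetical names used
# only for illustration.
class UpdateReplace(Exception):
    """Stand-in for heat.engine.resource.UpdateReplace."""


class FakeResource(object):
    def _needs_update(self, after, before):
        if after.get('replacement_policy') == 'REPLACE_ALWAYS':
            raise UpdateReplace(self)
        return after != before

    def update(self, after, before):
        try:
            if self._needs_update(after, before):
                return self._update_in_place(after)
        except UpdateReplace:
            return self._replace(after)

    def _update_in_place(self, after):
        return 'updated in place with %s' % after

    def _replace(self, after):
        return 'replaced with %s' % after


print(FakeResource().update({'name': 'b'}, {'name': 'a'}))
print(FakeResource().update({'replacement_policy': 'REPLACE_ALWAYS'}, {}))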
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    if 'Metadata' in tmpl_diff:
        self.metadata = tmpl_diff['Metadata']

    checkers = []
    server = None

    if self.METADATA in prop_diff:
        server = self.nova().servers.get(self.resource_id)
        nova_utils.meta_update(self.nova(), server,
                               prop_diff[self.METADATA])

    if self.FLAVOR in prop_diff:
        flavor_update_policy = (
            prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
            self.properties.get(self.FLAVOR_UPDATE_POLICY))
        if flavor_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        flavor = prop_diff[self.FLAVOR]
        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        checker = scheduler.TaskRunner(nova_utils.resize, server, flavor,
                                       flavor_id)
        checkers.append(checker)

    if self.IMAGE in prop_diff:
        image_update_policy = (
            prop_diff.get(self.IMAGE_UPDATE_POLICY) or
            self.properties.get(self.IMAGE_UPDATE_POLICY))
        if image_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        image = prop_diff[self.IMAGE]
        image_id = nova_utils.get_image_id(self.nova(), image)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        preserve_ephemeral = (
            image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
        checker = scheduler.TaskRunner(
            nova_utils.rebuild, server, image_id,
            preserve_ephemeral=preserve_ephemeral)
        checkers.append(checker)

    if self.NAME in prop_diff:
        if not server:
            server = self.nova().servers.get(self.resource_id)
        nova_utils.rename(server, prop_diff[self.NAME])

    # Optimization: make sure the first task is started before
    # check_update_complete.
    if checkers:
        checkers[0].start()

    return checkers
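# For context, the returned list of TaskRunner checkers is typically driven
# to completion by a companion check_update_complete. The body below follows
# the shape used in Heat's server resource of the same era; treat the exact
# wording as an approximation rather than a verbatim copy.
def check_update_complete(self, checkers):
    """Push all checkers to completion in list order."""
    for checker in checkers:
        if not checker.started():
            checker.start()
        if not checker.step():
            return False
    return True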
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
    LOG.info('Updating resource %s with prop_diff %s', self.name, prop_diff)
    for prop in prop_diff:
        if '!' in prop:
            raise resource.UpdateReplace(self.name)
        self.data_set(prop, prop_diff.get(prop), redact=False)
def handle_update(self, prop_diff, json_snippet=None, tmpl_diff=None):
    args = dict(alarm_id=self.resource_id)

    if prop_diff.get(self.NAME):
        args['name'] = prop_diff.get(self.NAME)

    if prop_diff.get(self.DESCRIPTION):
        args['description'] = prop_diff.get(self.DESCRIPTION)

    if prop_diff.get(self.EXPRESSION):
        args['expression'] = prop_diff.get(self.EXPRESSION)

    if prop_diff.get(self.SEVERITY):
        args['severity'] = prop_diff.get(self.SEVERITY)

    if prop_diff.get(self.OK_ACTIONS):
        args['ok_actions'] = prop_diff.get(self.OK_ACTIONS)

    if prop_diff.get(self.ALARM_ACTIONS):
        args['alarm_actions'] = prop_diff.get(self.ALARM_ACTIONS)

    if prop_diff.get(self.UNDETERMINED_ACTIONS):
        args['undetermined_actions'] = prop_diff.get(
            self.UNDETERMINED_ACTIONS)

    if prop_diff.get(self.ACTIONS_ENABLED):
        args['actions_enabled'] = prop_diff.get(self.ACTIONS_ENABLED)

    if len(args) > 1:
        try:
            self.client().alarm_definitions.patch(**args)
        except Exception as ex:
            if self.client_plugin().is_un_processable(ex):
                # Monasca does not allow updating the sub-expression, so
                # force a replacement instead.
                raise resource.UpdateReplace(resource_name=self.name)
            # Re-raise anything else so the failure is not swallowed.
            raise
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource, check_init_complete=True):
    # If the nested stack has not been created, use the default
    # implementation to determine if we need to replace the resource. Note
    # that we do *not* return the result.
    if self.resource_id is None:
        super(StackResource, self)._needs_update(after, before,
                                                 after_props, before_props,
                                                 prev_resource,
                                                 check_init_complete)
    else:
        if self.state == (self.CHECK, self.FAILED):
            nested_stack = self.rpc_client().show_stack(
                self.context, self.nested_identifier())[0]
            nested_stack_state = (nested_stack[rpc_api.STACK_ACTION],
                                  nested_stack[rpc_api.STACK_STATUS])
            if nested_stack_state == (self.stack.CHECK, self.stack.FAILED):
                # The stack-check action marked the stack resource
                # CHECK_FAILED, so return True to allow the individual
                # CHECK_FAILED resources to decide if they need updating.
                return True
            # The mark-unhealthy action marked the stack resource
            # CHECK_FAILED, so raise UpdateReplace to replace the
            # entire failed stack.
            raise resource.UpdateReplace(self)

    # Always issue an update to the nested stack and let the individual
    # resources in it decide if they need updating.
    return True
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource, check_init_complete=True):
    # check if we need to force replace first
    old_inputs = before_props[self.INPUT]
    new_inputs = after_props[self.INPUT]
    for i in after_props[self.REPLACE_ON_CHANGE]:
        if old_inputs.get(i) != new_inputs.get(i):
            LOG.debug('Replacing ExternalResource %(id)s instead of '
                      'updating due to change to input "%(i)s"' % {
                          "id": self.resource_id,
                          "i": i})
            raise resource.UpdateReplace(self)

    # honor always_update if found
    if self.properties[self.ALWAYS_UPDATE]:
        return True
    # call super in all other scenarios
    else:
        return super(MistralExternalResource,
                     self)._needs_update(after,
                                         before,
                                         after_props,
                                         before_props,
                                         prev_resource,
                                         check_init_complete)
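# A standalone sketch (plain dicts, no Heat imports) of the replace_on_change
# check above: only the listed inputs are compared, and any difference forces
# a replacement. Names here are illustrative only.
def needs_replacement(old_inputs, new_inputs, replace_on_change):
    # True if any watched input changed between the old and new property sets
    return any(old_inputs.get(key) != new_inputs.get(key)
               for key in replace_on_change)


assert needs_replacement({'image': 'a'}, {'image': 'b'}, ['image'])
assert not needs_replacement({'image': 'a', 'flavor': 'x'},
                             {'image': 'a', 'flavor': 'y'},
                             ['image'])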
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    if 'Metadata' in tmpl_diff:
        self.metadata = tmpl_diff['Metadata']

    checkers = []
    server = None

    if 'metadata' in prop_diff:
        server = self.nova().servers.get(self.resource_id)
        nova_utils.meta_update(self.nova(), server, prop_diff['metadata'])

    if 'flavor' in prop_diff:
        flavor_update_policy = (
            prop_diff.get('flavor_update_policy') or
            self.properties.get('flavor_update_policy'))
        if flavor_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        flavor = prop_diff['flavor']
        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        checker = scheduler.TaskRunner(nova_utils.resize, server, flavor,
                                       flavor_id)
        checkers.append(checker)

    if 'image' in prop_diff:
        image_update_policy = (
            prop_diff.get('image_update_policy') or
            self.properties.get('image_update_policy'))
        if image_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        image = prop_diff['image']
        image_id = nova_utils.get_image_id(self.nova(), image)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        checker = scheduler.TaskRunner(nova_utils.rebuild, server, image_id)
        checkers.append(checker)

    # Optimization: make sure the first task is started before
    # check_update_complete.
    if checkers:
        checkers[0].start()

    return checkers
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    # force delete and replace
    if not prop_diff:
        return
    if hasattr(HeatException, "UpdateReplace"):
        raise HeatException.UpdateReplace()
    else:
        # in older versions the exception lives on the resource module
        raise resource.UpdateReplace()
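# A hedged alternative to the hasattr() probe above: resolve the exception
# class once at import time. The module paths reflect where UpdateReplace has
# lived across Heat releases and should be treated as an assumption about the
# release in use; the final fallback only exists so this sketch still runs
# when Heat is not installed.
try:
    from heat.common.exception import UpdateReplace
except ImportError:
    try:
        # older Heat releases keep the exception on the resource module
        from heat.engine.resource import UpdateReplace
    except ImportError:
        class UpdateReplace(Exception):
            """Local stand-in used only when Heat is not importable."""


def force_replace(resource_name):
    # hypothetical helper: request replacement regardless of Heat version
    raise UpdateReplace(resource_name)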
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource):
    if after_props.get(self.REPLACEMENT_POLICY) == 'REPLACE_ALWAYS':
        raise resource.UpdateReplace(self.name)

    return super(Port, self)._needs_update(after, before, after_props,
                                           before_props, prev_resource)
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource, check_init_complete=True):
    # If the resource is in CHECK_FAILED state, raise UpdateReplace
    # to replace the failed stack.
    if self.state == (self.CHECK, self.FAILED):
        raise resource.UpdateReplace(self)

    # Always issue an update to the remote stack and let the individual
    # resources in it decide if they need updating.
    return True
def update_with_template(self, child_template, user_params=None,
                         timeout_mins=None):
    """Update the nested stack with the new template."""
    if self.id is None:
        self.store()

    if self.stack.action == self.stack.ROLLBACK:
        if self._try_rollback():
            LOG.info('Triggered nested stack %s rollback',
                     self.physical_resource_name())
            return {'target_action': self.stack.ROLLBACK}

    if self.resource_id is None:
        # if the create failed for some reason and the nested stack was
        # not created, we need to create an empty stack here so that the
        # update will work.
        def _check_for_completion():
            while not self.check_create_complete():
                yield

        empty_temp = template_format.parse(
            "heat_template_version: '2013-05-23'")
        self.create_with_template(empty_temp, {})
        checker = scheduler.TaskRunner(_check_for_completion)
        checker(timeout=self.stack.timeout_secs())

    if timeout_mins is None:
        timeout_mins = self.stack.timeout_mins

    try:
        status_data = stack_object.Stack.get_status(self.context,
                                                    self.resource_id)
    except exception.NotFound:
        raise resource.UpdateReplace(self)

    action, status, status_reason, updated_time = status_data
    kwargs = self._stack_kwargs(user_params, child_template)
    cookie = {'previous': {
        'updated_at': updated_time,
        'state': (action, status)}}

    kwargs.update({
        'stack_identity': dict(self.nested_identifier()),
        'args': {rpc_api.PARAM_TIMEOUT: timeout_mins,
                 rpc_api.PARAM_CONVERGE: self.converge}
    })
    with self.translate_remote_exceptions:
        try:
            self.rpc_client()._update_stack(self.context, **kwargs)
        except exception.HeatException:
            with excutils.save_and_reraise_exception():
                raw_template.RawTemplate.delete(self.context,
                                                kwargs['template_id'])
    return cookie
def _validate_update_properties(self, prop_diff):
    # According to the AWS documentation, when updating the allocation_id
    # or eip, a simultaneous change to the InstanceId or
    # NetworkInterfaceId must go through the replacement flow.
    if self.ALLOCATION_ID in prop_diff or self.EIP in prop_diff:
        instance_id = prop_diff.get(self.INSTANCE_ID)
        ni_id = prop_diff.get(self.NETWORK_INTERFACE_ID)
        if instance_id or ni_id:
            raise resource.UpdateReplace(self.name)

    # Likewise, when updating the instance_id or network_interface_id, a
    # simultaneous change to the EIP or ALLOCATION_ID must go through the
    # replacement flow.
    if (self.INSTANCE_ID in prop_diff or
            self.NETWORK_INTERFACE_ID in prop_diff):
        eip = prop_diff.get(self.EIP)
        allocation_id = prop_diff.get(self.ALLOCATION_ID)
        if eip or allocation_id:
            raise resource.UpdateReplace(self.name)
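# A standalone sketch (plain dicts, no Heat imports) of the rule encoded
# above: changing the address side (EIP / AllocationId) and the attachment
# side (InstanceId / NetworkInterfaceId) in the same update forces a
# replacement. All names here are illustrative, not Heat's API.
class UpdateReplace(Exception):
    pass


def validate_eip_association_update(prop_diff):
    if 'EIP' in prop_diff or 'AllocationId' in prop_diff:
        if prop_diff.get('InstanceId') or prop_diff.get('NetworkInterfaceId'):
            raise UpdateReplace('eip_association')
    if 'InstanceId' in prop_diff or 'NetworkInterfaceId' in prop_diff:
        if prop_diff.get('EIP') or prop_diff.get('AllocationId'):
            raise UpdateReplace('eip_association')


validate_eip_association_update({'EIP': '10.0.0.5'})  # update in place
try:
    validate_eip_association_update({'EIP': '10.0.0.5',
                                     'InstanceId': 'i-abc123'})
except UpdateReplace:
    print('replacement required')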
def _update_flavor(self, server, prop_diff):
    flavor_update_policy = (
        prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
        self.properties.get(self.FLAVOR_UPDATE_POLICY))
    flavor = prop_diff[self.FLAVOR]
    if flavor_update_policy == 'REPLACE':
        raise resource.UpdateReplace(self.name)
    flavor_id = self.client_plugin().get_flavor_id(flavor)
    if not server:
        server = self.nova().servers.get(self.resource_id)
    return scheduler.TaskRunner(self.client_plugin().resize,
                                server, flavor, flavor_id)
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource):
    # Issue an update to the nested stack if the stack resource
    # is able to update. If this returns True, let the individual
    # resources in it decide if they need updating.

    # FIXME (ricolin): it seems we currently cannot call super here
    if self.nested() is None and (
            self.status == self.FAILED or
            (self.action == self.INIT and self.status == self.COMPLETE)):
        raise resource.UpdateReplace(self)
    return True
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
    self.properties = json_snippet.properties(self.properties_schema,
                                              self.context)
    value = prop_diff.get(self.VALUE)
    if value:
        update_replace = self.properties[self.UPDATE_REPLACE]
        if update_replace:
            raise resource.UpdateReplace(self.name)
        else:
            # emulate failure
            fail_prop = self.properties[self.FAIL]
            if not fail_prop:
                # update in place
                self.data_set('value', value, redact=False)
            return timeutils.utcnow(), self._wait_secs()
    return timeutils.utcnow(), 0
def _update_image(self, server, prop_diff):
    image_update_policy = (
        prop_diff.get(self.IMAGE_UPDATE_POLICY) or
        self.properties.get(self.IMAGE_UPDATE_POLICY))
    if image_update_policy == 'REPLACE':
        raise resource.UpdateReplace(self.name)
    image = prop_diff[self.IMAGE]
    image_id = self.client_plugin('glance').get_image_id(image)
    if not server:
        server = self.nova().servers.get(self.resource_id)
    preserve_ephemeral = (
        image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
    password = (prop_diff.get(self.ADMIN_PASS) or
                self.properties.get(self.ADMIN_PASS))
    return scheduler.TaskRunner(
        self.client_plugin().rebuild, server, image_id,
        password=password, preserve_ephemeral=preserve_ephemeral)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    if 'Metadata' in tmpl_diff:
        self.metadata = tmpl_diff['Metadata']
    if 'flavor' in prop_diff:
        flavor_update_policy = (
            prop_diff.get('flavor_update_policy') or
            self.properties.get('flavor_update_policy'))
        if flavor_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        flavor = prop_diff['flavor']
        flavor_id = nova_utils.get_flavor_id(self.nova(), flavor)
        server = self.nova().servers.get(self.resource_id)
        server.resize(flavor_id)
        scheduler.TaskRunner(nova_utils.check_resize, server, flavor)()
def test_update_replace(self):
    tmpl = {'Type': 'GenericResourceType',
            'Properties': {'Foo': 'abc'}}
    res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
    res.update_allowed_keys = ('Properties',)
    res.update_allowed_properties = ('Foo',)
    scheduler.TaskRunner(res.create)()
    self.assertEqual((res.CREATE, res.COMPLETE), res.state)

    utmpl = {'Type': 'GenericResourceType',
             'Properties': {'Foo': 'xyz'}}
    self.m.StubOutWithMock(generic_rsrc.ResourceWithProps, 'handle_update')
    tmpl_diff = {'Properties': {'Foo': 'xyz'}}
    prop_diff = {'Foo': 'xyz'}
    generic_rsrc.ResourceWithProps.handle_update(
        utmpl, tmpl_diff, prop_diff).AndRaise(resource.UpdateReplace())
    self.m.ReplayAll()

    # should be re-raised so parser.Stack can handle replacement
    self.assertRaises(resource.UpdateReplace, res.update, utmpl)
    self.m.VerifyAll()
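# A hedged sketch of the same assertion written with unittest.mock instead of
# the mox stubs above; it assumes the same test fixtures (generic_rsrc,
# scheduler, resource, self.stack) are available and is an illustration, not
# the project's actual test.
from unittest import mock


def test_update_replace_with_mock(self):
    tmpl = {'Type': 'GenericResourceType',
            'Properties': {'Foo': 'abc'}}
    res = generic_rsrc.ResourceWithProps('test_resource', tmpl, self.stack)
    res.update_allowed_keys = ('Properties',)
    res.update_allowed_properties = ('Foo',)
    scheduler.TaskRunner(res.create)()
    self.assertEqual((res.CREATE, res.COMPLETE), res.state)

    utmpl = {'Type': 'GenericResourceType',
             'Properties': {'Foo': 'xyz'}}
    with mock.patch.object(generic_rsrc.ResourceWithProps, 'handle_update',
                           side_effect=resource.UpdateReplace()):
        # the exception must propagate so parser.Stack can replace the
        # resource
        self.assertRaises(resource.UpdateReplace, res.update, utmpl)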
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
    value = prop_diff.get(self.VALUE)
    new_prop = json_snippet._properties
    if value:
        update_replace = new_prop.get(self.UPDATE_REPLACE, False)
        if update_replace:
            raise resource.UpdateReplace(self.name)
        else:
            fail_prop = new_prop.get(self.FAIL, False)
            sleep_secs = new_prop.get(self.WAIT_SECS, 0)
            # emulate failure
            if fail_prop:
                raise Exception("Test Resource failed %s" % self.name)
            # update in place
            self.data_set('value', value, redact=False)
            if sleep_secs:
                LOG.debug("Update of Resource %s sleeping for %s seconds",
                          self.name, sleep_secs)
                eventlet.sleep(sleep_secs)
def _needs_update(self, after, before, after_props, before_props,
                  prev_resource, check_init_complete=True):
    # If the stack resource is in CHECK_FAILED state, raise UpdateReplace
    # to replace the failed stack.
    if self.state == (self.CHECK, self.FAILED):
        raise resource.UpdateReplace(self)

    # If the nested stack has not been created, use the default
    # implementation to determine if we need to replace the resource. Note
    # that we do *not* return the result.
    if self.resource_id is None:
        super(StackResource, self)._needs_update(after, before,
                                                 after_props, before_props,
                                                 prev_resource,
                                                 check_init_complete)

    # Always issue an update to the nested stack and let the individual
    # resources in it decide if they need updating.
    return True
def update(self, after, before=None, prev_resource=None):
    raise resource.UpdateReplace(self.name)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    if 'Metadata' in tmpl_diff:
        self.metadata_set(tmpl_diff['Metadata'])

    checkers = []
    server = None

    if self.METADATA in prop_diff:
        server = self.nova().servers.get(self.resource_id)
        self.client_plugin().meta_update(server,
                                         prop_diff[self.METADATA])

    if self.FLAVOR in prop_diff:
        flavor_update_policy = (
            prop_diff.get(self.FLAVOR_UPDATE_POLICY) or
            self.properties.get(self.FLAVOR_UPDATE_POLICY))
        if flavor_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        flavor = prop_diff[self.FLAVOR]
        flavor_id = self.client_plugin().get_flavor_id(flavor)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        checker = scheduler.TaskRunner(self.client_plugin().resize,
                                       server, flavor, flavor_id)
        checkers.append(checker)

    if self.IMAGE in prop_diff:
        image_update_policy = (
            prop_diff.get(self.IMAGE_UPDATE_POLICY) or
            self.properties.get(self.IMAGE_UPDATE_POLICY))
        if image_update_policy == 'REPLACE':
            raise resource.UpdateReplace(self.name)
        image = prop_diff[self.IMAGE]
        image_id = self.client_plugin('glance').get_image_id(image)
        if not server:
            server = self.nova().servers.get(self.resource_id)
        preserve_ephemeral = (
            image_update_policy == 'REBUILD_PRESERVE_EPHEMERAL')
        checker = scheduler.TaskRunner(
            self.client_plugin().rebuild, server, image_id,
            preserve_ephemeral=preserve_ephemeral)
        checkers.append(checker)

    if self.NAME in prop_diff:
        if not server:
            server = self.nova().servers.get(self.resource_id)
        self.client_plugin().rename(server, prop_diff[self.NAME])

    if self.NETWORKS in prop_diff:
        new_networks = prop_diff.get(self.NETWORKS)
        attach_first_free_port = False
        if not new_networks:
            new_networks = []
            attach_first_free_port = True
        old_networks = self.properties.get(self.NETWORKS)

        if not server:
            server = self.nova().servers.get(self.resource_id)
        interfaces = server.interface_list()

        # if old_networks is None, it means that the server got the first
        # free port, so we should detach this interface
        if old_networks is None:
            for iface in interfaces:
                checker = scheduler.TaskRunner(server.interface_detach,
                                               iface.port_id)
                checkers.append(checker)
        # if we have any information in the networks field, we should:
        # 1. find matching networks, if they exist
        # 2. remove these networks from the new_networks and old_networks
        #    lists
        # 3. detach unmatched networks, which were present in old_networks
        # 4. attach unmatched networks, which were present in new_networks
        else:
            # remove the networks that are not being updated from the old
            # and new networks lists, and keep the list of those networks
            not_updated_networks = \
                self._get_network_matches(old_networks, new_networks)

            self.update_networks_matching_iface_port(
                old_networks + not_updated_networks, interfaces)

            # according to the nova interface-detach command, the detached
            # port will be deleted
            for net in old_networks:
                checker = scheduler.TaskRunner(server.interface_detach,
                                               net.get('port'))
                checkers.append(checker)

        # the attach section is the same for both of the variants
        # mentioned above
        for net in new_networks:
            if net.get('port'):
                checker = scheduler.TaskRunner(server.interface_attach,
                                               net['port'], None, None)
                checkers.append(checker)
            elif net.get('network'):
                checker = scheduler.TaskRunner(server.interface_attach,
                                               None, net['network'],
                                               net.get('fixed_ip'))
                checkers.append(checker)

        # if new_networks is None, we should attach the first free port,
        # matching the behaviour during instance creation
        if attach_first_free_port:
            checker = scheduler.TaskRunner(server.interface_attach,
                                           None, None, None)
            checkers.append(checker)

    # Optimization: make sure the first task is started before
    # check_update_complete.
    if checkers:
        checkers[0].start()

    return checkers
def handle_update(self, json_snippet=None, tmpl_diff=None, prop_diff=None):
    for prop in prop_diff:
        if '!' in prop:
            raise resource.UpdateReplace(self.name)
        self.data_set(prop, prop_diff.get(prop), redact=False)
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
    if 'Metadata' in tmpl_diff:
        raise resource.UpdateReplace(self.name)