def test_wait_meta(self):
    '''
    1 create stack
    2 assert empty instance metadata
    3 service.metadata_update()
    4 assert valid waitcond metadata
    5 assert valid instance metadata
    '''
    self.stack = self.create_stack()
    watch = self.stack['WC']
    inst = self.stack['S2']

    def check_empty(sleep_time):
        self.assertEqual(watch.FnGetAtt('Data'), '{}')
        self.assertEqual(inst.metadata['test'], None)

    def update_metadata(id, data, reason):
        self.man.metadata_update(utils.dummy_context(),
                                 dict(self.stack.identifier()),
                                 'WH',
                                 {'Data': data, 'Reason': reason,
                                  'Status': 'SUCCESS', 'UniqueId': id})

    def post_success(sleep_time):
        update_metadata('123', 'foo', 'bar')

    scheduler.TaskRunner._sleep(mox.IsA(int)).WithSideEffects(check_empty)
    scheduler.TaskRunner._sleep(mox.IsA(int)).WithSideEffects(post_success)
    db_api.user_creds_get(mox.IgnoreArg()).MultipleTimes().AndReturn(
        self.stack.context.to_dict())
    scheduler.TaskRunner._sleep(mox.IsA(int)).AndReturn(None)
    self.m.ReplayAll()

    self.stack.create()
    self.assertEqual(self.stack.state,
                     (self.stack.CREATE, self.stack.COMPLETE))
    self.assertEqual(watch.FnGetAtt('Data'), '{"123": "foo"}')
    self.assertEqual(inst.metadata['test'], '{"123": "foo"}')

    update_metadata('456', 'blarg', 'wibble')
    self.assertEqual(watch.FnGetAtt('Data'),
                     '{"123": "foo", "456": "blarg"}')
    self.assertEqual(inst.metadata['test'],
                     '{"123": "foo", "456": "blarg"}')

    self.m.VerifyAll()
def metadata_update(self, cnxt, stack_identity, resource_name, metadata):
    """
    Update the metadata for the given resource.
    """
    s = self._get_stack(cnxt, stack_identity)
    stack = parser.Stack.load(cnxt, stack=s)
    if resource_name not in stack:
        raise exception.ResourceNotFound(resource_name=resource_name,
                                         stack_name=stack.name)
    resource = stack[resource_name]
    resource.metadata_update(new_metadata=metadata)

    # Converting to the stored context here is not "nice", but it is
    # necessary because the keystone user associated with the
    # WaitCondition doesn't have permission to read the secret key of
    # the user associated with the cfn-credentials file
    user_creds = db_api.user_creds_get(s.user_creds_id)
    stack_context = context.RequestContext.from_dict(user_creds)
    refresh_stack = parser.Stack.load(stack_context, stack=s)

    # Refresh the metadata for all other resources, since we expect
    # resource_name to be a WaitCondition resource, and other
    # resources may refer to WaitCondition Fn::GetAtt Data, which
    # is updated here.
    for res in refresh_stack:
        if res.name != resource_name:
            res.metadata_update()

    return resource.metadata
def _delete_credentials(self, stack_status, reason, abandon):
    # Cleanup stored user_creds so they aren't accessible via
    # the soft-deleted stack which remains in the DB.
    # The stack_status and reason passed in are current values, which
    # may get rewritten and returned from this method
    if self.user_creds_id:
        user_creds = db_api.user_creds_get(self.user_creds_id)
        # If we created a trust, delete it
        if user_creds is not None:
            trust_id = user_creds.get('trust_id')
            if trust_id:
                try:
                    # If the trustor doesn't match the context user then
                    # we have to use the stored context to clean up the
                    # trust: although the user evidently has permission
                    # to delete the stack, they don't have rights to
                    # delete the trust unless they are an admin
                    trustor_id = user_creds.get('trustor_user_id')
                    if self.context.user_id != trustor_id:
                        LOG.debug('Context user_id doesn\'t match '
                                  'trustor, using stored context')
                        sc = self.stored_context()
                        sc.clients.client('keystone').delete_trust(
                            trust_id)
                    else:
                        self.clients.client('keystone').delete_trust(
                            trust_id)
                except Exception as ex:
                    LOG.exception(ex)
                    stack_status = self.FAILED
                    reason = ("Error deleting trust: %s" %
                              six.text_type(ex))

        # Delete the stored credentials
        try:
            db_api.user_creds_delete(self.context, self.user_creds_id)
        except exception.NotFound:
            LOG.info(_LI("Tried to delete user_creds that do not exist "
                         "(stack=%(stack)s user_creds_id=%(uc)s)"),
                     {'stack': self.id, 'uc': self.user_creds_id})

        try:
            self.user_creds_id = None
            self.store()
        except exception.NotFound:
            LOG.info(_LI("Tried to store a stack that does not exist %s"),
                     self.id)

    # If the stack has a domain project, delete it
    if self.stack_user_project_id and not abandon:
        try:
            keystone = self.clients.client('keystone')
            keystone.delete_stack_domain_project(
                project_id=self.stack_user_project_id)
        except Exception as ex:
            LOG.exception(ex)
            stack_status = self.FAILED
            reason = "Error deleting project: %s" % six.text_type(ex)

    return stack_status, reason
def run_rule(self, context, wr, now=None):
    # Defaulting to None and resolving utcnow() at call time avoids the
    # Python pitfall of a mutable/stateful default argument, which is
    # evaluated only once, at function definition.
    now = now or timeutils.utcnow()
    action_map = {'ALARM': 'AlarmActions',
                  'NORMAL': 'OKActions',
                  'NODATA': 'InsufficientDataActions'}

    watcher = watchrule.WatchRule(wr.rule, wr.watch_data,
                                  wr.last_evaluated, now)
    new_state = watcher.get_alarm_state()

    if new_state != wr.state:
        logger.warn('WATCH: stack:%s, watch_name:%s %s',
                    wr.stack_name, wr.name, new_state)
        if action_map[new_state] not in wr.rule:
            logger.info('no action for new state %s', new_state)
            wr.state = new_state
            wr.save()
        else:
            s = db_api.stack_get_by_name(None, wr.stack_name)
            if s and s.status in ('CREATE_COMPLETE', 'UPDATE_COMPLETE'):
                user_creds = db_api.user_creds_get(s.user_creds_id)
                ctxt = ctxtlib.RequestContext.from_dict(dict(user_creds))
                stack = parser.Stack.load(ctxt, s.id)
                for a in wr.rule[action_map[new_state]]:
                    greenpool.spawn_n(stack[a].alarm)
                wr.state = new_state
                wr.save()

    wr.last_evaluated = now
def metadata_update(self, cnxt, stack_identity, resource_name, metadata):
    """
    Update the metadata for the given resource.
    """
    s = self._get_stack(cnxt, stack_identity)
    stack = parser.Stack.load(cnxt, stack=s)
    if resource_name not in stack:
        raise exception.ResourceNotFound(resource_name=resource_name,
                                         stack_name=stack.name)
    resource = stack[resource_name]
    resource.metadata_update(new_metadata=metadata)

    # Converting to the stored context here is not "nice", but it is
    # necessary because the keystone user associated with the
    # WaitCondition doesn't have permission to read the secret key of
    # the user associated with the cfn-credentials file
    user_creds = db_api.user_creds_get(s.user_creds_id)
    stack_context = context.RequestContext.from_dict(user_creds)
    refresh_stack = parser.Stack.load(stack_context, stack=s)

    # Refresh the metadata for all other resources, since we expect
    # resource_name to be a WaitCondition resource, and other
    # resources may refer to WaitCondition Fn::GetAtt Data, which
    # is updated here.
    for res in refresh_stack:
        if res.name != resource_name and res.id is not None:
            res.metadata_update()

    return resource.metadata
def _periodic_watcher_task(self, sid):
    """
    Periodic task, created for each stack, triggers watch-rule
    evaluation for all rules defined for the stack
    sid = stack ID
    """
    # Retrieve the stored credentials & create context.
    # Require admin=True for the stack_get to defeat tenant
    # scoping, otherwise we fail to retrieve the stack
    logger.debug("Periodic watcher task for stack %s" % sid)
    admin_context = context.get_admin_context()
    stack = db_api.stack_get(admin_context, sid, admin=True)
    if not stack:
        logger.error("Unable to retrieve stack %s for periodic task" % sid)
        return
    user_creds = db_api.user_creds_get(stack.user_creds_id)
    stack_context = context.RequestContext.from_dict(user_creds)

    # Get all watchrules for this stack and evaluate them
    try:
        wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid)
    except Exception as ex:
        logger.warn('periodic_task db error (%s) %s'
                    % ('watch rule removed?', str(ex)))
        return

    for wr in wrs:
        rule = watchrule.WatchRule.load(stack_context, watch=wr)
        actions = rule.evaluate()
        for action in actions:
            self._start_in_thread(sid, action)
def _periodic_watcher_task(self, sid):
    """
    Periodic task, created for each stack, triggers watch-rule
    evaluation for all rules defined for the stack
    sid = stack ID
    """
    # Retrieve the stored credentials & create context.
    # Require admin=True for the stack_get to defeat tenant
    # scoping, otherwise we fail to retrieve the stack
    logger.debug("Periodic watcher task for stack %s" % sid)
    admin_context = context.get_admin_context()
    stack = db_api.stack_get(admin_context, sid, admin=True)
    if not stack:
        logger.error("Unable to retrieve stack %s for periodic task" % sid)
        return
    user_creds = db_api.user_creds_get(stack.user_creds_id)
    stack_context = context.RequestContext.from_dict(user_creds)

    # Get all watchrules for this stack and evaluate them
    try:
        wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid)
    except Exception as ex:
        logger.warn('periodic_task db error (%s) %s'
                    % ('watch rule removed?', str(ex)))
        return

    for wr in wrs:
        rule = watchrule.WatchRule.load(stack_context, watch=wr)
        rule.evaluate()
def delete(self, action=DELETE):
    '''
    Delete all of the resources, and then the stack itself.
    The action parameter is used to differentiate between a user
    initiated delete and an automatic stack rollback after a failed
    create, which amount to the same thing, but the states are recorded
    differently.
    '''
    if action not in (self.DELETE, self.ROLLBACK):
        logger.error(_("Unexpected action %s passed to delete!") % action)
        self.state_set(self.DELETE, self.FAILED,
                       "Invalid action %s" % action)
        return

    stack_status = self.COMPLETE
    reason = 'Stack %s completed successfully' % action.lower()
    self.state_set(action, self.IN_PROGRESS, 'Stack %s started' % action)

    backup_stack = self._backup_stack(False)
    if backup_stack is not None:
        backup_stack.delete()
        if backup_stack.status != backup_stack.COMPLETE:
            errs = backup_stack.status_reason
            failure = 'Error deleting backup resources: %s' % errs
            self.state_set(action, self.FAILED,
                           'Failed to %s : %s' % (action, failure))
            return

    action_task = scheduler.DependencyTaskGroup(self.dependencies,
                                                resource.Resource.destroy,
                                                reverse=True)
    try:
        scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
    except exception.ResourceFailure as ex:
        stack_status = self.FAILED
        reason = 'Resource %s failed: %s' % (action.lower(), str(ex))
    except scheduler.Timeout:
        stack_status = self.FAILED
        reason = '%s timed out' % action.title()

    if stack_status != self.FAILED:
        # If we created a trust, delete it
        stack = db_api.stack_get(self.context, self.id)
        user_creds = db_api.user_creds_get(stack.user_creds_id)
        trust_id = user_creds.get('trust_id')
        if trust_id:
            try:
                self.clients.keystone().delete_trust(trust_id)
            except Exception as ex:
                logger.exception(ex)
                stack_status = self.FAILED
                reason = "Error deleting trust: %s" % str(ex)

    self.state_set(action, stack_status, reason)

    if stack_status != self.FAILED:
        # delete the stack
        db_api.stack_delete(self.context, self.id)
        self.id = None
def delete(self, action=DELETE, backup=False):
    '''
    Delete all of the resources, and then the stack itself.
    The action parameter is used to differentiate between a user
    initiated delete and an automatic stack rollback after a failed
    create, which amount to the same thing, but the states are recorded
    differently.
    '''
    if action not in (self.DELETE, self.ROLLBACK):
        logger.error(_("Unexpected action %s passed to delete!") % action)
        self.state_set(self.DELETE, self.FAILED,
                       "Invalid action %s" % action)
        return

    stack_status = self.COMPLETE
    reason = 'Stack %s completed successfully' % action.lower()
    self.state_set(action, self.IN_PROGRESS, 'Stack %s started' % action)

    backup_stack = self._backup_stack(False)
    if backup_stack is not None:
        backup_stack.delete(backup=True)
        if backup_stack.status != backup_stack.COMPLETE:
            errs = backup_stack.status_reason
            failure = 'Error deleting backup resources: %s' % errs
            self.state_set(action, self.FAILED,
                           'Failed to %s : %s' % (action, failure))
            return

    action_task = scheduler.DependencyTaskGroup(self.dependencies,
                                                resource.Resource.destroy,
                                                reverse=True)
    try:
        scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
    except exception.ResourceFailure as ex:
        stack_status = self.FAILED
        reason = 'Resource %s failed: %s' % (action.lower(), str(ex))
    except scheduler.Timeout:
        stack_status = self.FAILED
        reason = '%s timed out' % action.title()

    if stack_status != self.FAILED and not backup:
        # If we created a trust, delete it
        stack = db_api.stack_get(self.context, self.id)
        user_creds = db_api.user_creds_get(stack.user_creds_id)
        trust_id = user_creds.get('trust_id')
        if trust_id:
            try:
                self.clients.keystone().delete_trust(trust_id)
            except Exception as ex:
                logger.exception(ex)
                stack_status = self.FAILED
                reason = "Error deleting trust: %s" % str(ex)

    self.state_set(action, stack_status, reason)

    if stack_status != self.FAILED:
        # delete the stack
        db_api.stack_delete(self.context, self.id)
        self.id = None
def _load_user_creds(self, creds_id):
    user_creds = db_api.user_creds_get(creds_id)
    stored_context = context.RequestContext.from_dict(user_creds)
    # heat_keystoneclient populates the context with an auth_token
    # either via the stored user/password or trust_id, depending
    # on how deferred_auth_method is configured in the conf file
    kc = hkc.KeystoneClient(stored_context)
    return stored_context
def load_user_creds(creds_id):
    user_creds = db_api.user_creds_get(creds_id)
    stored_context = context.RequestContext.from_dict(user_creds)
    # heat_keystoneclient populates the context with an auth_token
    # either via the stored user/password or trust_id, depending
    # on how deferred_auth_method is configured in the conf file
    hkc.KeystoneClient(stored_context)
    return stored_context
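# Usage sketch (an assumption, not from the original source): a caller
# holding a stack DB row could rebuild the stored context and reload the
# stack with deferred credentials. `stack_row` is a hypothetical name;
# the parser.Stack.load(...) call mirrors its usages elsewhere in this
# collection.
def refresh_with_stored_creds(stack_row):
    # Rebuild a context from the credentials persisted at stack-create time
    stored_context = load_user_creds(stack_row.user_creds_id)
    # Reload the stack under that context for deferred operations
    return parser.Stack.load(stored_context, stack=stack_row)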
def _try_get_user_creds(self, user_creds_id):
    # There are cases where the user_creds cannot be returned
    # because the credentials were truncated when saved to the DB.
    # Ignore this error instead of blocking stack deletion.
    user_creds = None
    try:
        # Use the user_creds_id passed in rather than self.user_creds_id,
        # so the parameter is actually honoured
        user_creds = db_api.user_creds_get(user_creds_id)
    except exception.Error as err:
        LOG.exception(err)
    return user_creds
def stored_context(self):
    if self.user_creds_id:
        creds = db_api.user_creds_get(self.user_creds_id)
        # Maintain request_id from self.context so we retain traceability
        # in situations where servicing a request requires switching from
        # the request context to the stored context
        creds['request_id'] = self.context.request_id
        # We don't store roles in the user_creds table, so disable the
        # policy check for admin by setting is_admin=False.
        creds['is_admin'] = False
        return common_context.RequestContext.from_dict(creds)
    else:
        msg = _("Attempt to use stored_context with no user_creds")
        raise exception.Error(msg)
def resource_signal(self, cnxt, stack_identity, resource_name, details):
    s = self._get_stack(cnxt, stack_identity)

    # Converting to the stored context here is not "nice", but it is
    # necessary because the keystone user associated with the signal
    # doesn't have permission to read the secret key of the user
    # associated with the cfn-credentials file
    user_creds = db_api.user_creds_get(s.user_creds_id)
    stack_context = context.RequestContext.from_dict(user_creds)
    stack = parser.Stack.load(stack_context, stack=s)

    if resource_name not in stack:
        raise exception.ResourceNotFound(resource_name=resource_name,
                                         stack_name=stack.name)

    resource = stack[resource_name]
    if resource.id is None:
        raise exception.ResourceNotAvailable(resource_name=resource_name)

    if callable(stack[resource_name].signal):
        stack[resource_name].signal(details)
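# Caller-side sketch (hypothetical, not from the original source): an API
# layer forwarding a signal through the engine service method above.
# `self.rpc_client`, `req`, `identity`, and `body` are assumptions used
# only for illustration.
def signal(self, req, identity, resource_name, body):
    # Delegate to the engine, which swaps to the stored context as needed
    self.rpc_client.resource_signal(req.context,
                                    stack_identity=identity,
                                    resource_name=resource_name,
                                    details=body)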
def rule_action(self, new_state):
    logger.warn('WATCH: stack:%s, watch_name:%s %s',
                self.stack_name, self.name, new_state)

    actioned = False
    if self.ACTION_MAP[new_state] not in self.rule:
        logger.info('no action for new state %s', new_state)
        actioned = True
    else:
        s = db_api.stack_get_by_name(None, self.stack_name)
        if s and s.status in (parser.Stack.CREATE_COMPLETE,
                              parser.Stack.UPDATE_COMPLETE):
            user_creds = db_api.user_creds_get(s.user_creds_id)
            ctxt = ctxtlib.RequestContext.from_dict(user_creds)
            stack = parser.Stack.load(ctxt, s.id)
            for a in self.rule[self.ACTION_MAP[new_state]]:
                greenpool.spawn_n(stack[a].alarm)
            actioned = True
        else:
            logger.warning("Could not process watch state %s for stack"
                           % new_state)
    return actioned
def get_by_id(cls, context_id):
    user_creds_db = db_api.user_creds_get(context_id)
    user_creds = cls._from_db_object(cls(), user_creds_db)
    return user_creds
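# Usage sketch (an assumption, not from the original source): the
# versioned-object accessor above lets callers avoid touching db_api
# directly. `UserCreds` is assumed to be the enclosing object class and
# `creds_id` a hypothetical identifier.
def fetch_creds(creds_id):
    # Returns a UserCreds object populated from the matching DB row
    return UserCreds.get_by_id(creds_id)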
def delete(self, action=DELETE, backup=False):
    '''
    Delete all of the resources, and then the stack itself.
    The action parameter is used to differentiate between a user
    initiated delete and an automatic stack rollback after a failed
    create, which amount to the same thing, but the states are recorded
    differently.
    '''
    if action not in (self.DELETE, self.ROLLBACK):
        LOG.error(_("Unexpected action %s passed to delete!") % action)
        self.state_set(self.DELETE, self.FAILED,
                       "Invalid action %s" % action)
        return

    stack_status = self.COMPLETE
    reason = 'Stack %s completed successfully' % action
    self.state_set(action, self.IN_PROGRESS, 'Stack %s started' % action)

    backup_stack = self._backup_stack(False)
    if backup_stack:
        for key, backup_resource in backup_stack.resources.items():
            # If UpdateReplace failed, we must restore backup_resource
            # to the existing stack, since it may have dependencies in
            # these stacks. current_resource is the resource that was
            # just created and failed, so put it into the backup_stack
            # to be deleted anyway.
            backup_resource_id = backup_resource.resource_id
            current_resource = self.resources[key]
            current_resource_id = current_resource.resource_id
            if backup_resource_id:
                child_failed = False
                for child in self.dependencies[current_resource]:
                    # If a child resource failed to update,
                    # current_resource should be replaced to resolve
                    # dependencies. This is not a fundamental solution:
                    # if some children failed to update while others
                    # succeeded, the stack cannot be deleted.
                    if (child.status == child.FAILED and
                            child.action == child.CREATE):
                        child_failed = True
                if (current_resource.status == current_resource.FAILED or
                        child_failed):
                    # The Stack class owns dependencies as a set of
                    # resource objects, so we swap the members of the
                    # resource that is needed to delete it.
                    self.resources[key].resource_id = backup_resource_id
                    self.resources[
                        key].properties = backup_resource.properties
                    backup_stack.resources[
                        key].resource_id = current_resource_id
                    backup_stack.resources[
                        key].properties = current_resource.properties

        backup_stack.delete(backup=True)
        if backup_stack.status != backup_stack.COMPLETE:
            errs = backup_stack.status_reason
            failure = 'Error deleting backup resources: %s' % errs
            self.state_set(action, self.FAILED,
                           'Failed to %s : %s' % (action, failure))
            return

    action_task = scheduler.DependencyTaskGroup(self.dependencies,
                                                resource.Resource.destroy,
                                                reverse=True)
    try:
        scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
    except exception.ResourceFailure as ex:
        stack_status = self.FAILED
        reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
    except scheduler.Timeout:
        stack_status = self.FAILED
        reason = '%s timed out' % action.title()

    if stack_status != self.FAILED and not backup:
        # Cleanup stored user_creds so they aren't accessible via
        # the soft-deleted stack which remains in the DB
        if self.user_creds_id:
            user_creds = db_api.user_creds_get(self.user_creds_id)
            # If we created a trust, delete it
            if user_creds is not None:
                trust_id = user_creds.get('trust_id')
                if trust_id:
                    try:
                        self.clients.client('keystone').delete_trust(
                            trust_id)
                    except Exception as ex:
                        LOG.exception(ex)
                        stack_status = self.FAILED
                        reason = ("Error deleting trust: %s" %
                                  six.text_type(ex))

            # Delete the stored credentials
            try:
                db_api.user_creds_delete(self.context, self.user_creds_id)
            except exception.NotFound:
                LOG.info(_("Tried to delete user_creds that do not exist "
                           "(stack=%(stack)s user_creds_id=%(uc)s)") %
                         {'stack': self.id, 'uc': self.user_creds_id})

            try:
                self.user_creds_id = None
                self.store()
            except exception.NotFound:
                LOG.info(_("Tried to store a stack that does not exist "
                           "%s ") % self.id)

        # If the stack has a domain project, delete it
        if self.stack_user_project_id:
            try:
                keystone = self.clients.client('keystone')
                keystone.delete_stack_domain_project(
                    project_id=self.stack_user_project_id)
            except Exception as ex:
                LOG.exception(ex)
                stack_status = self.FAILED
                reason = "Error deleting project: %s" % six.text_type(ex)

    try:
        self.state_set(action, stack_status, reason)
    except exception.NotFound:
        LOG.info(_("Tried to delete stack that does not exist "
                   "%s ") % self.id)

    if stack_status != self.FAILED:
        # delete the stack
        try:
            db_api.stack_delete(self.context, self.id)
        except exception.NotFound:
            LOG.info(_("Tried to delete stack that does not exist "
                       "%s ") % self.id)
        self.id = None
def delete(self, action=DELETE, backup=False):
    '''
    Delete all of the resources, and then the stack itself.
    The action parameter is used to differentiate between a user
    initiated delete and an automatic stack rollback after a failed
    create, which amount to the same thing, but the states are recorded
    differently.
    '''
    if action not in (self.DELETE, self.ROLLBACK):
        LOG.error(_("Unexpected action %s passed to delete!") % action)
        self.state_set(self.DELETE, self.FAILED,
                       "Invalid action %s" % action)
        return

    stack_status = self.COMPLETE
    reason = 'Stack %s completed successfully' % action
    self.state_set(action, self.IN_PROGRESS, 'Stack %s started' % action)

    backup_stack = self._backup_stack(False)
    if backup_stack:
        for key, backup_resource in backup_stack.resources.items():
            # If UpdateReplace failed, we must restore backup_resource
            # to the existing stack, since it may have dependencies in
            # these stacks. current_resource is the resource that was
            # just created and failed, so put it into the backup_stack
            # to be deleted anyway.
            backup_resource_id = backup_resource.resource_id
            current_resource = self.resources[key]
            current_resource_id = current_resource.resource_id
            if backup_resource_id:
                child_failed = False
                for child in self.dependencies[current_resource]:
                    # If a child resource failed to update,
                    # current_resource should be replaced to resolve
                    # dependencies. This is not a fundamental solution:
                    # if some children failed to update while others
                    # succeeded, the stack cannot be deleted.
                    if (child.status == child.FAILED and
                            child.action == child.CREATE):
                        child_failed = True
                if (current_resource.status == current_resource.FAILED or
                        child_failed):
                    # The Stack class owns dependencies as a set of
                    # resource objects, so we swap the members of the
                    # resource that is needed to delete it.
                    self.resources[key].resource_id = backup_resource_id
                    self.resources[
                        key].properties = backup_resource.properties
                    backup_stack.resources[
                        key].resource_id = current_resource_id
                    backup_stack.resources[
                        key].properties = current_resource.properties

        backup_stack.delete(backup=True)
        if backup_stack.status != backup_stack.COMPLETE:
            errs = backup_stack.status_reason
            failure = 'Error deleting backup resources: %s' % errs
            self.state_set(action, self.FAILED,
                           'Failed to %s : %s' % (action, failure))
            return

    action_task = scheduler.DependencyTaskGroup(self.dependencies,
                                                resource.Resource.destroy,
                                                reverse=True)
    try:
        scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
    except exception.ResourceFailure as ex:
        stack_status = self.FAILED
        reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
    except scheduler.Timeout:
        stack_status = self.FAILED
        reason = '%s timed out' % action.title()

    if stack_status != self.FAILED and not backup:
        # Cleanup stored user_creds so they aren't accessible via
        # the soft-deleted stack which remains in the DB
        if self.user_creds_id:
            user_creds = db_api.user_creds_get(self.user_creds_id)
            # If we created a trust, delete it
            if user_creds is not None:
                trust_id = user_creds.get('trust_id')
                if trust_id:
                    try:
                        self.clients.keystone().delete_trust(trust_id)
                    except Exception as ex:
                        LOG.exception(ex)
                        stack_status = self.FAILED
                        reason = ("Error deleting trust: %s" %
                                  six.text_type(ex))

            # Delete the stored credentials
            try:
                db_api.user_creds_delete(self.context, self.user_creds_id)
            except exception.NotFound:
                LOG.info(_("Tried to delete user_creds that do not exist "
                           "(stack=%(stack)s user_creds_id=%(uc)s)") %
                         {'stack': self.id, 'uc': self.user_creds_id})
            self.user_creds_id = None
            self.store()

        # If the stack has a domain project, delete it
        if self.stack_user_project_id:
            try:
                self.clients.keystone().delete_stack_domain_project(
                    project_id=self.stack_user_project_id)
            except Exception as ex:
                LOG.exception(ex)
                stack_status = self.FAILED
                reason = "Error deleting project: %s" % six.text_type(ex)

    self.state_set(action, stack_status, reason)

    if stack_status != self.FAILED:
        # delete the stack
        db_api.stack_delete(self.context, self.id)
        self.id = None
def delete(self, action=DELETE, backup=False, abandon=False):
    '''
    Delete all of the resources, and then the stack itself.
    The action parameter is used to differentiate between a user
    initiated delete and an automatic stack rollback after a failed
    create, which amount to the same thing, but the states are recorded
    differently.

    Note abandon is a delete where all resources have been set to a
    RETAIN deletion policy, but we also don't want to delete anything
    required for those resources, e.g. the stack_user_project.
    '''
    if action not in (self.DELETE, self.ROLLBACK):
        LOG.error(_("Unexpected action %s passed to delete!") % action)
        self.state_set(self.DELETE, self.FAILED,
                       "Invalid action %s" % action)
        return

    stack_status = self.COMPLETE
    reason = 'Stack %s completed successfully' % action
    self.state_set(action, self.IN_PROGRESS, 'Stack %s started' % action)

    backup_stack = self._backup_stack(False)
    if backup_stack:
        def failed(child):
            return (child.action == child.CREATE and
                    child.status in (child.FAILED, child.IN_PROGRESS))

        for key, backup_resource in backup_stack.resources.items():
            # If UpdateReplace failed, we must restore backup_resource
            # to the existing stack, since it may have dependencies in
            # these stacks. current_resource is the resource that was
            # just created and failed, so put it into the backup_stack
            # to be deleted anyway.
            backup_resource_id = backup_resource.resource_id
            current_resource = self.resources[key]
            current_resource_id = current_resource.resource_id
            if backup_resource_id:
                if (any(failed(child) for child in
                        self.dependencies[current_resource]) or
                        current_resource.status in
                        (current_resource.FAILED,
                         current_resource.IN_PROGRESS)):
                    # If a child resource failed to update,
                    # current_resource should be replaced to resolve
                    # dependencies. This is not a fundamental solution:
                    # if some children failed to update while others
                    # succeeded, the stack cannot be deleted.
                    # The Stack class owns dependencies as a set of
                    # resource objects, so we swap the members of the
                    # resource that is needed to delete it.
                    self.resources[key].resource_id = backup_resource_id
                    self.resources[
                        key].properties = backup_resource.properties
                    backup_stack.resources[
                        key].resource_id = current_resource_id
                    backup_stack.resources[
                        key].properties = current_resource.properties

        backup_stack.delete(backup=True)
        if backup_stack.status != backup_stack.COMPLETE:
            errs = backup_stack.status_reason
            failure = 'Error deleting backup resources: %s' % errs
            self.state_set(action, self.FAILED,
                           'Failed to %s : %s' % (action, failure))
            return

    snapshots = db_api.snapshot_get_all(self.context, self.id)
    for snapshot in snapshots:
        self.delete_snapshot(snapshot)

    if not backup:
        try:
            lifecycle_plugin_utils.do_pre_ops(self.context, self,
                                              None, action)
        except Exception as e:
            self.state_set(action, self.FAILED,
                           e.args[0] if e.args else
                           'Failed stack pre-ops: %s' % six.text_type(e))
            return

    action_task = scheduler.DependencyTaskGroup(self.dependencies,
                                                resource.Resource.destroy,
                                                reverse=True)
    try:
        scheduler.TaskRunner(action_task)(timeout=self.timeout_secs())
    except exception.ResourceFailure as ex:
        stack_status = self.FAILED
        reason = 'Resource %s failed: %s' % (action, six.text_type(ex))
    except scheduler.Timeout:
        stack_status = self.FAILED
        reason = '%s timed out' % action.title()

    # If the stack delete succeeded, this is not a backup stack, and
    # it's not a nested stack, we should delete the credentials
    if stack_status != self.FAILED and not backup and not self.owner_id:
        # Cleanup stored user_creds so they aren't accessible via
        # the soft-deleted stack which remains in the DB
        if self.user_creds_id:
            user_creds = db_api.user_creds_get(self.user_creds_id)
            # If we created a trust, delete it
            if user_creds is not None:
                trust_id = user_creds.get('trust_id')
                if trust_id:
                    try:
                        # If the trustor doesn't match the context user
                        # then we have to use the stored context to
                        # clean up the trust: although the user
                        # evidently has permission to delete the stack,
                        # they don't have rights to delete the trust
                        # unless they are an admin
                        trustor_id = user_creds.get('trustor_user_id')
                        if self.context.user_id != trustor_id:
                            LOG.debug('Context user_id doesn\'t match '
                                      'trustor, using stored context')
                            sc = self.stored_context()
                            sc.clients.client('keystone').delete_trust(
                                trust_id)
                        else:
                            self.clients.client('keystone').delete_trust(
                                trust_id)
                    except Exception as ex:
                        LOG.exception(ex)
                        stack_status = self.FAILED
                        reason = ("Error deleting trust: %s" %
                                  six.text_type(ex))

            # Delete the stored credentials
            try:
                db_api.user_creds_delete(self.context, self.user_creds_id)
            except exception.NotFound:
                LOG.info(_("Tried to delete user_creds that do not exist "
                           "(stack=%(stack)s user_creds_id=%(uc)s)") %
                         {'stack': self.id, 'uc': self.user_creds_id})

            try:
                self.user_creds_id = None
                self.store()
            except exception.NotFound:
                LOG.info(_("Tried to store a stack that does not exist "
                           "%s ") % self.id)

        # If the stack has a domain project, delete it
        if self.stack_user_project_id and not abandon:
            try:
                keystone = self.clients.client('keystone')
                keystone.delete_stack_domain_project(
                    project_id=self.stack_user_project_id)
            except Exception as ex:
                LOG.exception(ex)
                stack_status = self.FAILED
                reason = "Error deleting project: %s" % six.text_type(ex)

    try:
        self.state_set(action, stack_status, reason)
    except exception.NotFound:
        LOG.info(_("Tried to delete stack that does not exist "
                   "%s ") % self.id)

    if not backup:
        lifecycle_plugin_utils.do_post_ops(self.context, self, None,
                                           action,
                                           (self.status == self.FAILED))

    if stack_status != self.FAILED:
        # delete the stack
        try:
            db_api.stack_delete(self.context, self.id)
        except exception.NotFound:
            LOG.info(_("Tried to delete stack that does not exist "
                       "%s ") % self.id)
        self.id = None