def check_create_complete(self, data):
    attributes = self._show_resource()
    status = attributes['status']
    if status == 'PENDING_CREATE':
        return False
    elif status == 'ACTIVE':
        vip_attributes = self.neutron().show_vip(
            self.metadata_get()['vip'])['vip']
        vip_status = vip_attributes['status']
        if vip_status == 'PENDING_CREATE':
            return False
        if vip_status == 'ACTIVE':
            return True
        if vip_status == 'ERROR':
            raise resource.ResourceInError(
                resource_status=vip_status,
                status_reason=_('error in vip'))
        raise resource.ResourceUnknownStatus(
            resource_status=vip_status,
            result=_('Pool creation failed due to vip'))
    elif status == 'ERROR':
        raise resource.ResourceInError(
            resource_status=status,
            status_reason=_('error in pool'))
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=status,
            result=_('Pool creation failed'))

def _check_status_complete(self, action, show_deleted=False, cookie=None):
    try:
        nested = self.nested(force_reload=True, show_deleted=show_deleted)
    except exception.NotFound:
        if action == resource.Resource.DELETE:
            return True
        # It's possible the engine handling the create hasn't persisted
        # the stack to the DB when we first start polling for state
        return False

    if nested is None:
        return True

    if nested.action != action:
        return False

    # Has the action really started?
    #
    # The rpc call to update does not guarantee that the stack will be
    # placed into IN_PROGRESS by the time it returns (it runs stack.update
    # in a thread) so you could also have a situation where we get into
    # this method and the update hasn't even started.
    #
    # So we are using a mixture of state (action+status) and updated_at
    # to see if the action has actually progressed.
    # - very fast updates (like something with one RandomString) we will
    #   probably miss the state change, but we should catch the updated_at.
    # - very slow updates we won't see the updated_at for quite a while,
    #   but should see the state change.
    if cookie is not None:
        prev_state = cookie['previous']['state']
        prev_updated_at = cookie['previous']['updated_at']
        if (prev_updated_at == nested.updated_time and
                prev_state == nested.state):
            return False

    if nested.status == resource.Resource.IN_PROGRESS:
        return False
    elif nested.status == resource.Resource.COMPLETE:
        return True
    elif nested.status == resource.Resource.FAILED:
        raise resource.ResourceUnknownStatus(
            resource_status=nested.status,
            status_reason=nested.status_reason)
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=nested.status,
            result=_('Stack unknown status'))

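The cookie consulted above is a plain dict keyed by 'previous'. A minimal caller-side sketch, assuming code in the same class snapshots the nested stack's state before starting the action; only nested.state and nested.updated_time come from the snippet, the driver loop is hypothetical:

    # Hypothetical setup for _check_status_complete: capture the nested
    # stack's pre-action state so later polls can detect progress.
    nested = self.nested(force_reload=True)
    cookie = {'previous': {'state': nested.state,  # (action, status) pair
                           'updated_at': nested.updated_time}}
    # ... kick off the action over RPC, then poll until done:
    # while not self._check_status_complete(action, cookie=cookie):
    #     <wait and retry>
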
def __call__(self):
    LOG.debug(str(self))
    cinder = self.clients.client('cinder').volumes
    vol = cinder.get(self.volume_id)
    try:
        cinder.extend(self.volume_id, self.size)
    except Exception as ex:
        if self.clients.client_plugin('cinder').is_client_exception(ex):
            raise exception.Error(_(
                "Failed to extend volume %(vol)s - %(err)s") % {
                    'vol': vol.id, 'err': str(ex)})
        else:
            raise

    yield
    vol.get()
    while vol.status == 'extending':
        LOG.debug("Volume %s is being extended" % self.volume_id)
        yield
        vol.get()

    if vol.status != 'available':
        LOG.info(_("Resize failed: Volume %(vol)s is in %(status)s "
                   "state.") % {'vol': vol.id, 'status': vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume resize failed'))

    LOG.info(_('%s - complete') % str(self))

def check_detach_volume_complete(self, vol_id):
    try:
        vol = self.client().volumes.get(vol_id)
    except Exception as ex:
        self.ignore_not_found(ex)
        return True

    if vol.status in ('in-use', 'detaching'):
        LOG.debug('%s - volume still in use' % vol_id)
        return False

    LOG.debug('Volume %(id)s - status: %(status)s' % {
        'id': vol.id, 'status': vol.status})

    if vol.status not in ('available', 'deleting'):
        LOG.debug("Detachment failed - volume %(vol)s "
                  "is in %(status)s status" % {
                      "vol": vol.id, "status": vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume detachment failed'))
    else:
        return True

def __call__(self):
    """Return a co-routine which runs the task."""
    LOG.debug(str(self))
    va = self.clients.client('nova').volumes.create_server_volume(
        server_id=self.server_id,
        volume_id=self.volume_id,
        device=self.device)
    self.attachment_id = va.id
    yield

    cinder = self.clients.client('cinder')
    vol = cinder.volumes.get(self.volume_id)
    while vol.status == 'available' or vol.status == 'attaching':
        LOG.debug('%(name)s - volume status: %(status)s' % {
            'name': str(self), 'status': vol.status})
        yield
        vol = cinder.volumes.get(self.volume_id)

    if vol.status != 'in-use':
        LOG.info(_LI("Attachment failed - volume %(vol)s "
                     "is in %(status)s status"),
                 {"vol": vol.id, "status": vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume attachment failed'))

    LOG.info(_LI('%s - complete'), str(self))

def check_create_complete(self, *args):
    share_status = self._request_share().status
    if share_status == self.STATUS_CREATING:
        return False
    elif share_status == self.STATUS_AVAILABLE:
        LOG.info(_LI('Applying access rules to created Share.'))
        # Apply access rules to the created share. Note that it is not
        # possible to define rules for a share while its status is
        # still 'creating'.
        access_rules = self.properties.get(self.ACCESS_RULES)
        try:
            if access_rules:
                for rule in access_rules:
                    self.client().shares.allow(
                        share=self.resource_id,
                        access_type=rule.get(self.ACCESS_TYPE),
                        access=rule.get(self.ACCESS_TO),
                        access_level=rule.get(self.ACCESS_LEVEL))
            return True
        except Exception as ex:
            reason = _(
                'Error during applying access rules to share "{0}". '
                'The root cause of the problem is the following: {1}.'
            ).format(self.resource_id, six.text_type(ex))
            raise resource.ResourceInError(status_reason=reason)
    elif share_status == self.STATUS_ERROR:
        reason = _('Error during creation of share "{0}"').format(
            self.resource_id)
        raise resource.ResourceInError(status_reason=reason,
                                       resource_status=share_status)
    else:
        reason = _(
            'Unknown share_status during creation of share "{0}"').format(
                self.resource_id)
        raise resource.ResourceUnknownStatus(status_reason=reason,
                                             resource_status=share_status)

def _backup(self):
    backup = self.cinder().backups.create(self.resource_id)
    while backup.status == 'creating':
        yield
        backup.get()
    if backup.status != 'available':
        raise resource.ResourceUnknownStatus(
            resource_status=backup.status,
            result=_('Volume backup failed'))

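_backup, like the __call__ co-routines elsewhere in this section, is a generator: each yield marks a point where the engine pauses before re-checking remote state. A minimal stand-alone driver illustrating that contract; run_task and its poll interval are hypothetical, not Heat API:

    import time


    def run_task(task_gen, poll_interval=1.0):
        """Step a task generator to completion, sleeping between yields."""
        try:
            while True:
                next(task_gen)
                time.sleep(poll_interval)
        except StopIteration:
            # Generator returned: the task finished successfully.
            # Failures surface as exceptions raised inside the generator.
            pass
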
def _check_create_backup_complete(self, prg):
    backup = self.client().backups.get(prg.backup_id)
    if backup.status == 'creating':
        return False
    elif backup.status == 'available':
        return True
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=backup.status,
            result=_('Volume backup failed'))

def is_built(attributes):
    status = attributes['status']
    if status == 'BUILD':
        return False
    if status in ('ACTIVE', 'DOWN'):
        return True
    elif status == 'ERROR':
        raise resource.ResourceInError(
            resource_status=status)
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=status,
            result=_('Resource is not built'))

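is_built centralizes the BUILD/ACTIVE/DOWN/ERROR decision so per-resource completion checks can stay one-liners. A hedged sketch of such a wrapper; the check_create_complete/_show_resource pairing mirrors the pool snippet above, while the exact class it lives on is assumed:

    def check_create_complete(self, *args):
        # _show_resource() is assumed to return a dict with a 'status'
        # key, as in the pool example at the top of this section.
        attributes = self._show_resource()
        return self.is_built(attributes)
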
def verify_resize(self, server_id):
    server = self.fetch_server(server_id)
    if not server:
        return False
    status = self.get_status(server)
    if status == 'VERIFY_RESIZE':
        server.confirm_resize()
        return True
    else:
        msg = _("Could not confirm resize of server %s") % server_id
        raise resource.ResourceUnknownStatus(
            result=msg, resource_status=status)

def check_create_complete(self, vol):
    vol.get()
    if vol.status == 'available':
        return True
    if vol.status in self._volume_creating_status:
        return False
    if vol.status == 'error':
        raise resource.ResourceInError(
            resource_status=vol.status)
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume create failed'))

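All of the check_*_complete methods in this section share one contract: return False to keep polling, True once the resource reaches its goal state, and raise ResourceInError or ResourceUnknownStatus to abort. A generic, hypothetical driver over that contract; the function name, timeout, and interval are illustrative:

    import time


    def poll_until_complete(check, args=(), timeout=600, interval=2):
        """Drive a False/True/raise completion check until it settles."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            if check(*args):
                return  # goal state reached
            time.sleep(interval)
        raise RuntimeError(
            'resource did not settle within %s seconds' % timeout)
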
def check_verify_resize(self, server_id):
    server = self.fetch_server(server_id)
    if not server:
        return False
    status = self.get_status(server)
    if status == 'ACTIVE':
        return True
    elif status == 'VERIFY_RESIZE':
        return False
    else:
        msg = _("Confirm resize for server %s failed") % server_id
        raise resource.ResourceUnknownStatus(
            result=msg, resource_status=status)

def _check_action_complete(self, action):
    stack = self.heat().stacks.get(stack_id=self.resource_id)
    if stack.action == action:
        if stack.status == self.IN_PROGRESS:
            return False
        elif stack.status == self.COMPLETE:
            return True
        elif stack.status == self.FAILED:
            raise resource.ResourceInError(
                resource_status=stack.stack_status,
                status_reason=stack.stack_status_reason)
        else:
            # Note: this should never happen, so if it does, the
            # resource/engine has a serious problem.
            raise resource.ResourceUnknownStatus(
                resource_status=stack.stack_status,
                status_reason=stack.stack_status_reason)
    else:
        msg = _('Resource action mismatch detected: expected=%(expected)s '
                'actual=%(actual)s') % dict(expected=action,
                                            actual=stack.action)
        raise resource.ResourceUnknownStatus(
            resource_status=stack.stack_status,
            status_reason=msg)

def check_suspend_complete(self, server_id):
    cp = self.client_plugin()
    server = cp.fetch_server(server_id)
    if not server:
        return False
    status = cp.get_status(server)
    LOG.debug('%(name)s check_suspend_complete status = %(status)s' % {
        'name': self.name, 'status': status})
    if status in list(cp.deferred_server_statuses + ['ACTIVE']):
        return status == 'SUSPENDED'
    else:
        exc = resource.ResourceUnknownStatus(
            result=_('Suspend of instance %s failed') % server.name,
            resource_status=status)
        raise exc

def _check_extend_volume_complete(self):
    vol = self.client().volumes.get(self.resource_id)
    if vol.status == 'extending':
        LOG.debug("Volume %s is being extended" % vol.id)
        return False

    if vol.status != 'available':
        LOG.info(_LI("Resize failed: Volume %(vol)s "
                     "is in %(status)s state."),
                 {'vol': vol.id, 'status': vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume resize failed'))

    LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id})
    return True

def check_attach_volume_complete(self, vol_id):
    vol = self.client().volumes.get(vol_id)
    if vol.status in ('available', 'attaching'):
        LOG.debug("Volume %(id)s is being attached - "
                  "volume status: %(status)s" % {
                      'id': vol_id, 'status': vol.status})
        return False

    if vol.status != 'in-use':
        LOG.debug("Attachment failed - volume %(vol)s is "
                  "in %(status)s status" % {
                      "vol": vol_id, "status": vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume attachment failed'))

    LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id})
    return True

def _check_backup_restore_complete(self):
    vol = self.client().volumes.get(self.resource_id)
    if vol.status == 'restoring-backup':
        LOG.debug("Volume %s is being restored from backup" % vol.id)
        return False

    if vol.status != 'available':
        LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s "
                     "state."),
                 {'vol': vol.id, 'status': vol.status})
        raise resource.ResourceUnknownStatus(
            resource_status=vol.status,
            result=_('Volume backup restore failed'))

    LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id})
    return True

def _check_action_complete(self, action):
    stack = self.heat().stacks.get(stack_id=self.resource_id)
    if stack.action != action:
        return False

    if stack.status == self.IN_PROGRESS:
        return False
    elif stack.status == self.COMPLETE:
        return True
    elif stack.status == self.FAILED:
        raise resource.ResourceInError(
            resource_status=stack.stack_status,
            status_reason=stack.stack_status_reason)
    else:
        # Note: this should never happen, so if it does, the
        # resource/engine has a serious problem.
        raise resource.ResourceUnknownStatus(
            resource_status=stack.stack_status,
            status_reason=stack.stack_status_reason)

def _check_active(self, server, res_name='Server'):
    """Check server status.

    Accepts both server IDs and server objects.

    Returns True if the server is ACTIVE, False while it is in a
    transient (deferred) state, and raises an error if the server is
    in ERROR or in a status unknown to Heat.

    :param res_name: name of the resource to use in the exception message
    """
    # not checking with is_uuid_like as most tests use strings e.g. '1234'
    if isinstance(server, six.string_types):
        server = self.fetch_server(server)
        if server is None:
            return False
        else:
            status = self.get_status(server)
    else:
        status = self.get_status(server)

    if status != 'ACTIVE':
        self.refresh_server(server)
        status = self.get_status(server)

    if status in self.deferred_server_statuses:
        return False
    elif status == 'ACTIVE':
        return True
    elif status == 'ERROR':
        fault = getattr(server, 'fault', {})
        raise resource.ResourceInError(
            resource_status=status,
            status_reason=_("Message: %(message)s, Code: %(code)s") % {
                'message': fault.get('message', _('Unknown')),
                'code': fault.get('code', _('Unknown'))})
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=server.status,
            result=_('%s is not active') % res_name)

def _check_active(self, server):
    cp = self.client_plugin()
    status = cp.get_status(server)
    if status != 'ACTIVE':
        cp.refresh_server(server)
        status = cp.get_status(server)

    if status in cp.deferred_server_statuses:
        return False
    elif status == 'ACTIVE':
        return True
    elif status == 'ERROR':
        fault = getattr(server, 'fault', {})
        raise resource.ResourceInError(
            resource_status=status,
            status_reason=_("Message: %(message)s, Code: %(code)s") % {
                'message': fault.get('message', _('Unknown')),
                'code': fault.get('code', _('Unknown'))})
    else:
        raise resource.ResourceUnknownStatus(
            resource_status=server.status,
            result=_('Server is not active'))

def check_delete_complete(self, *args):
    if not self.resource_id:
        return True

    try:
        share = self._request_share()
    except Exception as ex:
        self.client_plugin().ignore_not_found(ex)
        return True
    else:
        # keep polling while share deletion has not finished
        if share.status == self.STATUS_DELETING:
            return False
        elif share.status in (self.STATUS_ERROR,
                              self.STATUS_ERROR_DELETING):
            raise resource.ResourceInError(
                status_reason=_(
                    'Error during deleting share "{0}".').format(
                        self.resource_id),
                resource_status=share.status)
        else:
            reason = _('Unknown status during deleting share '
                       '"{0}"').format(self.resource_id)
            raise resource.ResourceUnknownStatus(
                status_reason=reason, resource_status=share.status)

def __call__(self):
    """Return a co-routine which runs the task."""
    LOG.debug(str(self))
    nova_plugin = self.clients.client_plugin('nova')
    cinder_plugin = self.clients.client_plugin('cinder')
    server_api = self.clients.client('nova').volumes
    cinder = self.clients.client('cinder')

    # get a reference to the volume while it is attached
    try:
        nova_vol = server_api.get_server_volume(self.server_id,
                                                self.attachment_id)
        vol = cinder.volumes.get(nova_vol.id)
    except Exception as ex:
        if (cinder_plugin.is_not_found(ex) or
                nova_plugin.is_not_found(ex) or
                nova_plugin.is_bad_request(ex)):
            return
        else:
            raise

    if vol.status == 'deleting':
        return

    # detach the volume using volume_attachment
    try:
        server_api.delete_server_volume(self.server_id, self.attachment_id)
    except Exception as ex:
        if nova_plugin.is_not_found(ex) or nova_plugin.is_bad_request(ex):
            pass
        else:
            raise

    yield

    try:
        while vol.status in ('in-use', 'detaching'):
            LOG.debug('%s - volume still in use' % str(self))
            yield
            vol = cinder.volumes.get(nova_vol.id)

        LOG.info(_LI('%(name)s - status: %(status)s'),
                 {'name': str(self), 'status': vol.status})
        if vol.status not in ('available', 'deleting'):
            LOG.info(_LI("Detachment failed - volume %(vol)s "
                         "is in %(status)s status"),
                     {"vol": vol.id, "status": vol.status})
            raise resource.ResourceUnknownStatus(
                resource_status=vol.status,
                result=_('Volume detachment failed'))
    except Exception as ex:
        cinder_plugin.ignore_not_found(ex)

    # The next check is needed for immediate reattachment when updating:
    # there might be some time between cinder marking the volume as
    # 'available' and nova removing the attachment from its own objects,
    # so we check that nova already knows the volume is detached.
    def server_has_attachment(server_id, attachment_id):
        try:
            server_api.get_server_volume(server_id, attachment_id)
        except Exception as ex:
            nova_plugin.ignore_not_found(ex)
            return False
        return True

    while server_has_attachment(self.server_id, self.attachment_id):
        LOG.info(_LI("Server %(srv)s still has attachment %(att)s."),
                 {'att': self.attachment_id, 'srv': self.server_id})
        yield

    LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
             {'vol': vol.id, 'srv': self.server_id})