def _determine_version_cap(self, target):
    """Determine the compute RPC version cap for this deployment.

    Looks up the minimum nova-compute service version in the DB and maps
    it through SERVICE_VERSION_HISTORY to a compute RPC version. The
    result is cached in the module-level LAST_VERSION global so the DB
    lookup happens only once per process.

    :param target: messaging target whose default version is returned as
                   a fallback when the history has no 'compute_rpc' entry.
    :returns: RPC version string to cap compute RPC clients at.
    :raises exception.ServiceTooOld: if the deployment minimum version is
            newer than anything in this node's version history.
    """
    global LAST_VERSION
    # Fast path: a previous call already resolved and cached the cap.
    if LAST_VERSION:
        return LAST_VERSION
    service_version = objects.Service.get_minimum_version(
        context.get_admin_context(), 'nova-compute')
    # SERVICE_VERSION_HISTORY is indexed by service version number, so a
    # too-new minimum raises IndexError, while a missing 'compute_rpc'
    # key in an existing entry raises KeyError — handled separately.
    history = service_obj.SERVICE_VERSION_HISTORY
    try:
        version_cap = history[service_version]['compute_rpc']
    except IndexError:
        # This node is older than the deployment minimum: refuse to run.
        LOG.error(_LE('Failed to extract compute RPC version from '
                      'service history because I am too '
                      'old (minimum version is now %(version)i)'),
                  {'version': service_version})
        raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
                                      minver=service_version)
    except KeyError:
        # History entry exists but lacks a compute_rpc mapping; fall back
        # to the caller-provided target version instead of failing.
        LOG.error(_LE('Failed to extract compute RPC version from '
                      'service history for version %(version)i'),
                  {'version': service_version})
        return target.version
    LAST_VERSION = version_cap
    LOG.info(_LI('Automatically selected compute RPC version %(rpc)s '
                 'from minimum service version %(service)i'),
             {'rpc': version_cap, 'service': service_version})
    return version_cap
def create_volume(self, volume):
    """Create a volume in the sub (provider) OpenStack cloud.

    Builds provider-side volume arguments from the local volume, tags the
    provider volume with the local volume id, waits for it to become
    available, then records the id mapping in the CAA DB.

    :param volume: local cinder volume object to mirror in the sub OS.
    :returns: dict carrying the provider_location for the new volume.
    :raises: re-raises any failure from the availability wait or from the
             mapper creation, after deleting the provider volume.
    """
    LOG.debug('start to create volume')
    # FIX: pass log arguments lazily instead of eager %-formatting, so
    # the string is only rendered when the debug level is enabled.
    LOG.debug('volume glance image metadata: %s',
              volume.volume_glance_metadata)
    volume_args = {}
    volume_args['size'] = volume.size
    volume_args['display_description'] = volume.display_description
    volume_args['display_name'] = self._get_provider_volume_name(
        volume.display_name, volume.id)
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    volume_type_id = volume.volume_type_id
    volume_type_name = None
    LOG.debug('volume type id %s ', volume_type_id)
    if volume_type_id:
        volume_type_name = self._get_sub_type_name(
            req_context.get_admin_context(), volume_type_id)
    if volume_type_name:
        volume_args['volume_type'] = volume_type_name
    # Copy optional attributes through only when present and truthy.
    optionals = ('shareable', 'metadata', 'multiattach')
    volume_args.update((prop, getattr(volume, prop))
                       for prop in optionals
                       if getattr(volume, prop, None))
    if 'metadata' not in volume_args:
        volume_args['metadata'] = {}
    # Tag the provider volume so it can be found by local volume id.
    volume_args['metadata']['tag:caa_volume_id'] = volume.id
    sub_volume = self.os_cinderclient(context).create_volume(**volume_args)
    LOG.debug('submit create-volume task to sub os. '
              'sub volume id: %s', sub_volume.id)
    LOG.debug('start to wait for volume %s in status '
              'available', sub_volume.id)
    try:
        self.os_cinderclient(context).check_create_volume_complete(
            sub_volume)
    except Exception as ex:
        LOG.exception(_LE("volume(%s), check_create_volume_complete "
                          "failed! ex = %s"), volume.id, ex)
        # Clean up the orphaned provider volume, then re-raise.
        with excutils.save_and_reraise_exception():
            sub_volume.delete()
    try:
        # create volume mapper
        values = {"provider_volume_id": sub_volume.id}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.exception(_LE("volume_mapper_create failed! ex = %s"), ex)
        sub_volume.delete()
        raise
    LOG.debug('create volume %s success.', volume.id)
    return {'provider_location': 'SUB-FusionSphere'}
def _get_meta_by_instance_id(self, instance_id, tenant_id, remote_address):
    """Fetch instance metadata and validate tenant ownership.

    Returns None when metadata is missing or when the requesting tenant
    does not own the instance (the caller turns None into a 404).
    """
    try:
        md = self.get_metadata_by_instance_id(instance_id,
                                              remote_address)
    except Exception:
        LOG.exception(_LE('Failed to get metadata for instance id: %s'),
                      instance_id)
        msg = _('An unknown error has occurred. '
                'Please try your request again.')
        raise webob.exc.HTTPInternalServerError(
            explanation=six.text_type(msg))

    if md is None:
        LOG.error(_LE('Failed to get metadata for instance id: %s'),
                  instance_id)
        return None
    if md.instance.project_id != tenant_id:
        LOG.warning(_LW("Tenant_id %(tenant_id)s does not match tenant_id "
                        "of instance %(instance_id)s."),
                    {'tenant_id': tenant_id,
                     'instance_id': instance_id})
        # causes a 404 to be raised
        return None
    return md
def initialize_connection(self, context, volume_id, connector):
    """Initialize a volume attachment via cinder.

    On failure the original exception is re-raised, but only after a
    best-effort attempt to terminate the half-initialized connection.

    :param context: request context passed to the cinder client.
    :param volume_id: id of the volume to attach.
    :param connector: host connector dict; echoed back into the result.
    :returns: connection_info dict from cinder, with 'connector' added.
    """
    try:
        connection_info = cinderclient(
            context).volumes.initialize_connection(volume_id, connector)
        # Callers expect the connector echoed back in the result.
        connection_info['connector'] = connector
        return connection_info
    except cinder_exception.ClientException as ex:
        # Preserve and re-raise the original failure after cleanup.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Initialize connection failed for volume '
                          '%(vol)s on host %(host)s. Error: %(msg)s '
                          'Code: %(code)s. Attempting to terminate '
                          'connection.'),
                      {'vol': volume_id,
                       'host': connector.get('host'),
                       'msg': six.text_type(ex),
                       'code': ex.code})
            try:
                self.terminate_connection(context, volume_id, connector)
            except Exception as exc:
                # Cleanup failure is logged but must not mask the
                # original initialize error.
                LOG.error(_LE('Connection between volume %(vol)s and host '
                              '%(host)s might have succeeded, but attempt '
                              'to terminate connection has failed. '
                              'Validate the connection and determine if '
                              'manual cleanup is needed. Error: %(msg)s '
                              'Code: %(code)s.'),
                          {'vol': volume_id,
                           'host': connector.get('host'),
                           'msg': six.text_type(exc),
                           'code': (
                               exc.code if hasattr(exc, 'code') else None)})
def _run(self, name, method_type, args, kwargs, func=None):
    """Run every extension's hook method of the given type.

    Fatal hook failures propagate; any other hook exception is logged
    and swallowed so the remaining hooks still run.
    """
    if method_type not in ('pre', 'post'):
        msg = _("Wrong type of hook method. "
                "Only 'pre' and 'post' type allowed")
        raise ValueError(msg)

    for extension in self.extensions:
        obj = extension.obj
        hook_method = getattr(obj, method_type, None)
        if not hook_method:
            continue
        LOG.warning(_LW("Hooks are deprecated as of Nova 13.0 and "
                        "will be removed in a future release"))
        LOG.debug("Running %(name)s %(type)s-hook: %(obj)s",
                  {'name': name, 'type': method_type, 'obj': obj})
        try:
            if func:
                hook_method(func, *args, **kwargs)
            else:
                hook_method(*args, **kwargs)
        except FatalHookException:
            msg = _LE("Fatal Exception running %(name)s "
                      "%(type)s-hook: %(obj)s")
            LOG.exception(msg, {'name': name, 'type': method_type,
                                'obj': obj})
            raise
        except Exception:
            msg = _LE("Exception running %(name)s "
                      "%(type)s-hook: %(obj)s")
            LOG.exception(msg, {'name': name, 'type': method_type,
                                'obj': obj})
def remove_from_instance(self, context, instance, security_group_name):
    """Remove the security group associated with the instance.

    Resolves the group by name (honouring the project id), then strips
    its id from the security_groups list of every port attached to the
    instance. Raises not-found if no port carried the group.
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") %
                   {'name': security_group_name,
                    'project': context.project_id})
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    found_security_group = False
    for port in ports:
        try:
            # Mutates the port dict in place; ValueError means this port
            # did not carry the group.
            port.get('security_groups', []).remove(security_group_id)
        except ValueError:
            # When removing a security group from an instance the security
            # group should be on both ports since it was added this way if
            # done through the compute api. In case it is not a 404 is only
            # raised if the security group is not found on any of the
            # ports on the instance.
            continue
        updated_port = {'security_groups': port['security_groups']}
        try:
            # NOTE(review): this message says "Adding" although this is
            # the removal path — looks like a copy-paste from
            # add_to_instance; confirm before changing the runtime string.
            LOG.info(_LI("Adding security group %(security_group_id)s to "
                         "port %(port_id)s"),
                     {'security_group_id': security_group_id,
                      'port_id': port['id']})
            neutron.update_port(port['id'], {'port': updated_port})
            found_security_group = True
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
    if not found_security_group:
        msg = (_("Security group %(security_group_name)s not associated "
                 "with the instance %(instance)s") %
               {'security_group_name': security_group_name,
                'instance': instance.uuid})
        self.raise_not_found(msg)
def get(self, context, name=None, id=None, map_exception=False):
    """Look up a security group by name or id.

    Returns the group converted to nova's security-group dict format.
    """
    neutron = neutronapi.get_client(context)
    try:
        if not id and name:
            # NOTE(flwang): The project id should be honoured so as to get
            # the correct security group id when user(with admin role but
            # non-admin project) try to query by name, so as to avoid
            # getting more than duplicated records with the same name.
            id = neutronv20.find_resourceid_by_name_or_id(
                neutron, 'security_group', name, context.project_id)
        group = neutron.show_security_group(id).get('security_group')
        return self._convert_to_nova_security_group_format(group)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            LOG.debug("Neutron security group %s not found", name)
            raise exception.SecurityGroupNotFound(six.text_type(e))
        LOG.error(_LE("Neutron Error: %s"), e)
        six.reraise(*exc_info)
    except TypeError as e:
        # A non-string id (e.g. None) ends up here.
        LOG.error(_LE("Neutron Error: %s"), e)
        msg = _("Invalid security group name: %(name)s.") % {"name": name}
        raise exception.SecurityGroupNotFound(six.text_type(msg))
def initialize_connection(self, context, volume_id, connector):
    """Initialize a volume attachment via cinder.

    Duplicate of the other initialize_connection in this file. On
    failure the original exception is re-raised, but only after a
    best-effort attempt to terminate the half-initialized connection.
    """
    try:
        connection_info = cinderclient(
            context).volumes.initialize_connection(volume_id, connector)
        # Callers expect the connector echoed back in the result.
        connection_info['connector'] = connector
        return connection_info
    except cinder_exception.ClientException as ex:
        # Preserve and re-raise the original failure after cleanup.
        with excutils.save_and_reraise_exception():
            LOG.error(
                _LE('Initialize connection failed for volume '
                    '%(vol)s on host %(host)s. Error: %(msg)s '
                    'Code: %(code)s. Attempting to terminate '
                    'connection.'), {
                    'vol': volume_id,
                    'host': connector.get('host'),
                    'msg': six.text_type(ex),
                    'code': ex.code
                })
            try:
                self.terminate_connection(context, volume_id, connector)
            except Exception as exc:
                # Cleanup failure is logged but must not mask the
                # original initialize error.
                LOG.error(
                    _LE('Connection between volume %(vol)s and host '
                        '%(host)s might have succeeded, but attempt '
                        'to terminate connection has failed. '
                        'Validate the connection and determine if '
                        'manual cleanup is needed. Error: %(msg)s '
                        'Code: %(code)s.'), {
                        'vol': volume_id,
                        'host': connector.get('host'),
                        'msg': six.text_type(exc),
                        'code': (exc.code
                                 if hasattr(exc, 'code') else None)
                    })
def volume_delete(self, context, instance, volume_id):
    """Delete specified volume.

    Resolves the provider (AWS) volume id via the mapper DB, falling
    back to a tag-based lookup that may match several provider volumes.
    The mapper entry removal at the end is best-effort.
    """
    try:
        aws_volume_id = self._get_provider_volume_id(context, volume_id)
        if not aws_volume_id:
            # No mapper entry; fall back to tag-based lookup, which may
            # return several provider volumes for one local id.
            aws_volumes = self._get_provider_volume(context, volume_id)
            if not aws_volumes:
                LOG.error('the volume %s not found' % volume_id)
                return
            volume_ids = []
            for aws_volume in aws_volumes:
                volume_ids.append(aws_volume.get('VolumeId'))
            self.aws_client.get_aws_client(context)\
                .delete_volume(VolumeIds=volume_ids)
        else:
            LOG.debug('Delete the volume %s on aws', aws_volume_id)
            self.aws_client.get_aws_client(context)\
                .delete_volume(VolumeIds=[aws_volume_id])
    except Exception as e:
        # Log and re-raise any provider-side failure.
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error from delete volume. '
                          'Error=%(e)s'), {'e': e},
                      instance=instance)
    try:
        # delete volume mapper (best-effort; failure is only logged)
        self.caa_db_api.volume_mapper_delete(context, volume_id,
                                             context.project_id)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_delete failed! ex = %s"), ex)
def add_rules(self, context, id, name, vals):
    """Add security group rule(s) to security group.

    Note: the Nova security group API doesn't support adding multiple
    security group rules at once but the EC2 one does. Therefore,
    this function is written to support both. Multiple rules are
    installed to a security group in neutron using bulk support.
    """
    neutron = neutronapi.get_client(context)
    body = self._make_neutron_security_group_rules_list(vals)
    try:
        rules = neutron.create_security_group_rule(
            body).get('security_group_rules')
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        status = e.status_code
        if status == 404:
            LOG.exception(_LE("Neutron Error getting security group %s"),
                          name)
            self.raise_not_found(six.text_type(e))
        elif status == 409:
            LOG.exception(_LE("Neutron Error adding rules to security "
                              "group %s"), name)
            self.raise_over_quota(six.text_type(e))
        elif status == 400:
            LOG.exception(_LE("Neutron Error: %s"), six.text_type(e))
            self.raise_invalid_property(six.text_type(e))
        else:
            LOG.exception(_LE("Neutron Error:"))
            six.reraise(*exc_info)
    return [self._convert_to_nova_security_group_rule_format(rule)
            for rule in rules]
def add_to_instance(self, context, instance, security_group_name):
    """Add security group to the instance.

    Resolves the group by name (honouring the project id) and appends
    its id to the security_groups list of every port attached to the
    instance, validating port requirements first.
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") % {
                       'name': security_group_name,
                       'project': context.project_id
                   })
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    for port in ports:
        # Every port must meet the security requirements, otherwise the
        # whole operation is aborted.
        if not self._has_security_group_requirements(port):
            LOG.warning(
                _LW("Cannot add security group %(name)s to "
                    "%(instance)s since the port %(port_id)s "
                    "does not meet security requirements"), {
                    'name': security_group_name,
                    'instance': instance.uuid,
                    'port_id': port['id']
                })
            raise exception.SecurityGroupCannotBeApplied()
        if 'security_groups' not in port:
            port['security_groups'] = []
        port['security_groups'].append(security_group_id)
        updated_port = {'security_groups': port['security_groups']}
        try:
            LOG.info(
                _LI("Adding security group %(security_group_id)s to "
                    "port %(port_id)s"), {
                    'security_group_id': security_group_id,
                    'port_id': port['id']
                })
            neutron.update_port(port['id'], {'port': updated_port})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
def create_volume_from_snapshot(self, volume, snapshot):
    """Create a volume from a snapshot."""
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    failure_msg = (_("create_volume_from_snapshot failed! volume:%s")
                   % volume.id)
    try:
        provider_snap = self._get_provider_snapshot_id(context,
                                                       snapshot.id)
        aws_vol = self._create_volume(volume, context,
                                      snapshot=provider_snap)
    except Exception as ex:
        LOG.error(_LE('create_volume_from_snapshot failed,'
                      'snapshot:%(id)s,ex:%(ex)s'),
                  {'id': snapshot.id, 'ex': ex})
        raise cinder_ex.VolumeBackendAPIException(data=failure_msg)
    # create local volume mapper
    try:
        values = {'provider_volume_id': aws_vol['VolumeId']}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.error(_LE("volume_mapper_create failed! ex = %s"), ex)
        # Roll back: remove the provider volume we just created.
        self._aws_client.get_aws_client(context).\
            delete_volume(VolumeId=aws_vol['VolumeId'])
        raise cinder_ex.VolumeBackendAPIException(data=failure_msg)
    LOG.debug('create volume %s success.' % volume.id)
def create_volume(self, volume):
    """Create a volume in the sub (provider) OpenStack cloud.

    Builds provider-side volume arguments from the local volume, tags the
    provider volume with the local volume id, waits for it to become
    available, then records the id mapping in the CAA DB.

    :param volume: local cinder volume object to mirror in the sub OS.
    :returns: dict carrying the provider_location for the new volume.
    :raises: re-raises any failure from the availability wait or from the
             mapper creation, after deleting the provider volume.
    """
    LOG.debug('start to create volume')
    # FIX: pass log arguments lazily instead of eager %-formatting, so
    # the string is only rendered when the debug level is enabled.
    LOG.debug('volume glance image metadata: %s',
              volume.volume_glance_metadata)
    volume_args = {}
    volume_args['size'] = volume.size
    volume_args['display_description'] = volume.display_description
    volume_args['display_name'] = self._get_provider_volume_name(
        volume.display_name, volume.id)
    context = req_context.RequestContext(is_admin=True,
                                         project_id=volume.project_id)
    volume_type_id = volume.volume_type_id
    volume_type_name = None
    LOG.debug('volume type id %s ', volume_type_id)
    if volume_type_id:
        volume_type_name = self._get_sub_type_name(
            req_context.get_admin_context(), volume_type_id)
    if volume_type_name:
        volume_args['volume_type'] = volume_type_name
    # Copy optional attributes through only when present and truthy.
    optionals = ('shareable', 'metadata', 'multiattach')
    volume_args.update((prop, getattr(volume, prop))
                       for prop in optionals
                       if getattr(volume, prop, None))
    if 'metadata' not in volume_args:
        volume_args['metadata'] = {}
    # Tag the provider volume so it can be found by local volume id.
    volume_args['metadata']['tag:caa_volume_id'] = volume.id
    sub_volume = self.os_cinderclient(context).create_volume(**volume_args)
    LOG.debug('submit create-volume task to sub os. '
              'sub volume id: %s', sub_volume.id)
    LOG.debug('start to wait for volume %s in status '
              'available', sub_volume.id)
    try:
        self.os_cinderclient(context).check_create_volume_complete(
            sub_volume)
    except Exception as ex:
        LOG.exception(_LE("volume(%s), check_create_volume_complete "
                          "failed! ex = %s"), volume.id, ex)
        # Clean up the orphaned provider volume, then re-raise.
        with excutils.save_and_reraise_exception():
            sub_volume.delete()
    try:
        # create volume mapper
        values = {"provider_volume_id": sub_volume.id}
        self.caa_db_api.volume_mapper_create(context, volume.id,
                                             context.project_id, values)
    except Exception as ex:
        LOG.exception(_LE("volume_mapper_create failed! ex = %s"), ex)
        sub_volume.delete()
        raise
    LOG.debug('create volume %s success.', volume.id)
    return {'provider_location': 'SUB-FusionSphere'}
def destroy(self, context, instance, network_info, block_device_info=None,
            destroy_disks=True, migrate_data=None):
    """Destroy the specified instance from the Hypervisor.

    Resolves the provider (AWS) instance via the mapper DB or, failing
    that, via the caa_instance_id tag; deletes it on AWS; then removes
    the instance mapper entry (best-effort).

    :raises exception.InstanceTerminationFailure: on unrecoverable AWS
            errors or delete-wait timeouts.
    """
    LOG.debug('Start to delete server: %s' % instance.uuid)
    instance_ids = []
    try:
        aws_instance_id = self._get_provider_instance_id(
            context, instance.uuid)
        if not aws_instance_id:
            # No mapper entry; fall back to finding the instance by tag.
            filters = [{'Name': 'tag:caa_instance_id',
                        'Values': [instance.uuid]}]
            instances = self.aws_client.get_aws_client(context)\
                .describe_instances(Filters=filters)
            if not instances:
                # FIX: LOG.warn is a deprecated alias of LOG.warning.
                LOG.warning('Instance %s not found on aws'
                            % instance.uuid)
            else:
                for node in instances:
                    instance_ids.append(node.get('InstanceId'))
        else:
            LOG.debug('delete the instance %s on aws', aws_instance_id)
            instance_ids = [aws_instance_id]
        if instance_ids:
            self.aws_client.get_aws_client(context)\
                .delete_instances(InstanceIds=instance_ids)
    except botocore.exceptions.ClientError as e:
        # FIX: 'Unknown' was misspelled 'Unkown' in the fallback message.
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        LOG.error('Delete instance failed, the error is: %s' % reason)
        error_code = e.response.get('Error', {}).get('Code', 'Unknown')
        if error_code == 'InvalidInstanceID.NotFound':
            # Already gone on AWS; treat as success.
            LOG.warning('The instance %s not found on aws'
                        % instance.uuid)
        else:
            raise exception.InstanceTerminationFailure(reason=reason)
    except botocore.exceptions.WaiterError as e:
        # FIX: Exception.message does not exist on Python 3; use the
        # exception's string form instead.
        reason = str(e)
        LOG.warning('Cannot delete instance,operation time out')
        raise exception.InstanceTerminationFailure(reason=reason)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE('Error from delete instance. '
                          'Error=%(e)s'), {'e': e}, instance=instance)
    try:
        # delete instance mapper (best-effort; failure is only logged)
        self.caa_db_api.instance_mapper_delete(context,
                                               instance.uuid,
                                               instance.project_id)
    except Exception as ex:
        # FIX: a warning-level log should use the _LW marker, not _LE.
        LOG.warning(_LW("Instance_mapper_delete failed! ex = %s"), ex)
    LOG.debug('Success to delete instance: %s' % instance.uuid)
def add_to_instance(self, context, instance, security_group_name):
    """Add security group to the instance.

    Duplicate of the other add_to_instance in this file. Resolves the
    group by name (honouring the project id) and appends its id to the
    security_groups list of every port attached to the instance.
    """
    neutron = neutronapi.get_client(context)
    try:
        security_group_id = neutronv20.find_resourceid_by_name_or_id(
            neutron, 'security_group',
            security_group_name,
            context.project_id)
    except n_exc.NeutronClientNoUniqueMatch as e:
        raise exception.NoUniqueMatch(six.text_type(e))
    except n_exc.NeutronClientException as e:
        exc_info = sys.exc_info()
        if e.status_code == 404:
            msg = (_("Security group %(name)s is not found for "
                     "project %(project)s") %
                   {'name': security_group_name,
                    'project': context.project_id})
            self.raise_not_found(msg)
        else:
            LOG.exception(_LE("Neutron Error:"))
            six.reraise(*exc_info)
    params = {'device_id': instance.uuid}
    try:
        ports = neutron.list_ports(**params).get('ports')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error:"))
    if not ports:
        msg = (_("instance_id %s could not be found as device id on"
                 " any ports") % instance.uuid)
        self.raise_not_found(msg)
    for port in ports:
        # Every port must meet the security requirements, otherwise the
        # whole operation is aborted.
        if not self._has_security_group_requirements(port):
            LOG.warning(_LW("Cannot add security group %(name)s to "
                            "%(instance)s since the port %(port_id)s "
                            "does not meet security requirements"),
                        {'name': security_group_name,
                         'instance': instance.uuid,
                         'port_id': port['id']})
            raise exception.SecurityGroupCannotBeApplied()
        if 'security_groups' not in port:
            port['security_groups'] = []
        port['security_groups'].append(security_group_id)
        updated_port = {'security_groups': port['security_groups']}
        try:
            LOG.info(_LI("Adding security group %(security_group_id)s to "
                         "port %(port_id)s"),
                     {'security_group_id': security_group_id,
                      'port_id': port['id']})
            neutron.update_port(port['id'], {'port': updated_port})
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(_LE("Neutron Error:"))
def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """This method create shadow table for table with name ``table_name``
    or table instance ``table``.
    :param table_name: Autoload table with this name and create shadow
                       table
    :param table: Autoloaded table, so just create corresponding shadow
                  table.
    :param col_name_col_instance: contains pair column_name=column_instance.
                                  column_instance is instance of Column.
                                  These params are required only for
                                  columns that have unsupported types by
                                  sqlite. For example BigInteger.
    :returns: The created shadow_table object.
    """
    meta = MetaData(bind=migrate_engine)
    # Exactly one of table_name / table must be supplied.
    if table_name is None and table is None:
        raise exception.NovaException(
            _("Specify `table_name` or `table` "
              "param"))
    if not (table_name is None or table is None):
        raise exception.NovaException(
            _("Specify only one param `table_name` "
              "`table`"))
    if table is None:
        table = Table(table_name, meta, autoload=True)
    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            # sqlite reflects unsupported types as NullType; substitute
            # the caller-provided replacement column.
            new_column = oslodbutils._get_not_supported_column(
                col_name_col_instance, column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())
    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (db_exc.DBError, OperationalError):
        # NOTE(ekudryashova): At the moment there is a case in oslo.db code,
        # which raises unwrapped OperationalError, so we should catch it until
        # oslo.db would wraps all such exceptions
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        # NOTE(review): this handler logs but does not re-raise, so the
        # function implicitly returns None here — confirm callers expect
        # that before changing it.
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
def detail(self, req):
    """Returns a detailed list of flavors mapper.

    :param req: the WSGI request carrying the jacket context.
    :returns: dict with the sub volume types under 'sub_volume_types'.
    :raises exc.HTTPBadRequest: when the worker API call fails.
    """
    context = req.environ['jacket.context']
    try:
        volume_types = self.worker_api.sub_vol_type_detail(context)
    except exception.DriverNotSupported as ex:
        # FIX: logging does not accept arbitrary keyword format args;
        # the old `ex=...` kwarg meant %(ex)s was never substituted.
        LOG.exception(_LE("get sub volume types failed, ex = %(ex)s"),
                      {'ex': ex.format_message()})
        raise exc.HTTPBadRequest(explanation=ex.format_message())
    except Exception as ex:
        # FIX: a generic Exception has no format_message()/message; the
        # old code raised AttributeError while handling the real failure.
        LOG.exception(_LE("get sub volume types failed, ex = %(ex)s"),
                      {'ex': ex})
        raise exc.HTTPBadRequest(explanation=str(ex))
    return {'sub_volume_types': volume_types}
def create_shadow_table(migrate_engine, table_name=None, table=None,
                        **col_name_col_instance):
    """This method create shadow table for table with name ``table_name``
    or table instance ``table``.
    :param table_name: Autoload table with this name and create shadow
                       table
    :param table: Autoloaded table, so just create corresponding shadow
                  table.
    :param col_name_col_instance: contains pair column_name=column_instance.
                                  column_instance is instance of Column.
                                  These params are required only for
                                  columns that have unsupported types by
                                  sqlite. For example BigInteger.
    :returns: The created shadow_table object.
    """
    meta = MetaData(bind=migrate_engine)
    # Exactly one of table_name / table must be supplied.
    if table_name is None and table is None:
        raise exception.NovaException(_("Specify `table_name` or `table` "
                                        "param"))
    if not (table_name is None or table is None):
        raise exception.NovaException(_("Specify only one param `table_name` "
                                        "`table`"))
    if table is None:
        table = Table(table_name, meta, autoload=True)
    columns = []
    for column in table.columns:
        if isinstance(column.type, NullType):
            # sqlite reflects unsupported types as NullType; substitute
            # the caller-provided replacement column.
            new_column = oslodbutils._get_not_supported_column(
                col_name_col_instance, column.name)
            columns.append(new_column)
        else:
            columns.append(column.copy())
    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
    shadow_table = Table(shadow_table_name, meta, *columns,
                         mysql_engine='InnoDB')
    try:
        shadow_table.create()
        return shadow_table
    except (db_exc.DBError, OperationalError):
        # NOTE(ekudryashova): At the moment there is a case in oslo.db code,
        # which raises unwrapped OperationalError, so we should catch it until
        # oslo.db would wraps all such exceptions
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
        raise exception.ShadowTableExists(name=shadow_table_name)
    except Exception:
        # NOTE(review): this handler logs but does not re-raise, so the
        # function implicitly returns None here — confirm callers expect
        # that before changing it.
        LOG.info(repr(shadow_table))
        LOG.exception(_LE('Exception while creating table.'))
def create(self, req, body):
    """Creates a new flavor mapper.

    :param req: the WSGI request carrying the jacket context.
    :param body: request body; must contain 'flavor_mapper' with at
                 least 'flavor_id' and 'dest_flavor_id'.
    :returns: dict with the created mapper under 'flavor_mapper'.
    :raises exc.HTTPUnprocessableEntity: on malformed body.
    :raises exc.HTTPBadRequest: when the config API call fails.
    """
    context = req.environ['jacket.context']
    if not self.is_valid_body(body, 'flavor_mapper'):
        raise exc.HTTPUnprocessableEntity()
    flavor_mapper = body['flavor_mapper']
    if 'flavor_id' not in flavor_mapper:
        raise exc.HTTPUnprocessableEntity()
    if 'dest_flavor_id' not in flavor_mapper:
        raise exc.HTTPUnprocessableEntity()
    flavor_id = flavor_mapper.pop('flavor_id')
    project_id = flavor_mapper.pop('project_id', None)
    try:
        flavor = self.config_api.flavor_mapper_create(context,
                                                      flavor_id,
                                                      project_id,
                                                      flavor_mapper)
    except Exception as ex:
        # FIX: logging does not accept arbitrary keyword format args
        # (flavor_id=..., ex=... raised TypeError at log time); pass a
        # mapping so %(flavor_id)s / %(ex)s are substituted.
        LOG.exception(
            _LE("create flavor(%(flavor_id)s) mapper failed, "
                "ex = %(ex)s"),
            {'flavor_id': flavor_id, 'ex': ex})
        # FIX: explanation must be a string, not an exception object.
        raise exc.HTTPBadRequest(explanation=str(ex))
    return {'flavor_mapper': flavor}
def _poll_shelved_instances(self, context):
    """Offload SHELVED instances whose shelved_offload_time has expired.

    No-op when shelved offloading is disabled (<= 0).
    """
    if CONF.shelved_offload_time <= 0:
        return

    filters = {'vm_state': vm_states.SHELVED,
               'task_state': None,
               'host': self.host}
    shelved_instances = objects.InstanceList.get_by_filters(
        context, filters=filters, expected_attrs=['system_metadata'],
        use_slave=True)

    # Collect instances shelved longer ago than the configured window.
    to_gc = [
        inst for inst in shelved_instances
        if timeutils.is_older_than(
            timeutils.parse_strtime(
                inst.system_metadata['shelved_at']),
            CONF.shelved_offload_time)
    ]

    for instance in to_gc:
        try:
            instance.task_state = task_states.SHELVING_OFFLOADING
            instance.save(expected_task_state=(None,))
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)
        except Exception:
            LOG.exception(_LE('Periodic task failed to offload instance.'),
                          instance=instance)
def _report_state(self, service):
    """Update the state of this service in the datastore.

    Bumps and saves the service's report_count; tracks connection loss
    and recovery via the service.model_disconnected flag.
    """
    try:
        service.service_ref.report_count += 1
        service.service_ref.save()
        # TODO(termie): make this pattern be more elegant.
        if getattr(service, 'model_disconnected', False):
            service.model_disconnected = False
            LOG.info(_LI('Recovered from being unable to report status.'))
    except messaging.MessagingTimeout:
        # NOTE(johngarbutt) during upgrade we will see messaging timeouts
        # as compute-conductor is restarted, so only log this error once.
        if not getattr(service, 'model_disconnected', False):
            service.model_disconnected = True
            # FIX: LOG.warn is a deprecated alias of LOG.warning.
            LOG.warning(
                _LW('Lost connection to compute-conductor '
                    'for reporting service status.'))
    except Exception:
        # NOTE(rpodolyaka): we'd like to avoid catching of all possible
        # exceptions here, but otherwise it would become possible for
        # the state reporting thread to stop abruptly, and thus leave
        # the service unusable until it's restarted.
        LOG.exception(
            _LE('Unexpected error while reporting service status'))
        # trigger the recovery log message, if this error goes away
        service.model_disconnected = True
def _error(self, inner, req):
    """Map an unhandled exception onto a webob Fault response."""
    if not isinstance(inner, exception.QuotaError):
        LOG.exception(_LE("Caught error: %(type)s %(error)s"),
                      {'type': type(inner), 'error': inner})
    safe = getattr(inner, 'safe', False)
    headers = getattr(inner, 'headers', None)
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500
    msg_dict = dict(url=req.url, status=status)
    LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
    outer = self.status_to_type(status)
    if headers:
        outer.headers = headers
    # NOTE(johannes): We leave the explanation empty here on
    # purpose. It could possibly have sensitive information
    # that should not be returned back to the user. See
    # bugs 868360 and 874472
    # NOTE(eglynn): However, it would be over-conservative and
    # inconsistent with the EC2 API to hide every exception,
    # including those that are safe to expose, see bug 1021373
    if safe:
        if isinstance(inner, exception.JacketException):
            msg = inner.msg
        else:
            msg = six.text_type(inner)
        params = {'exception': inner.__class__.__name__,
                  'explanation': msg}
        outer.explanation = _('%(exception)s: %(explanation)s') % params
    return wsgi.Fault(outer)
def get_volume_encryptor(connection_info, **kwargs):
    """Creates a VolumeEncryptor used to encrypt the specified volume.

    :param: the connection information used to attach the volume
    :returns VolumeEncryptor: the VolumeEncryptor for the volume
    """
    encryptor = nop.NoOpEncryptor(connection_info, **kwargs)
    location = kwargs.get('control_location', None)
    if location and location.lower() == 'front-end':  # case insensitive
        provider = kwargs.get('provider')
        # Map the short provider aliases onto their full import paths.
        prefixes = {
            'LuksEncryptor': 'jacket.compute.volume.encryptors.luks.',
            'CryptsetupEncryptor':
                'jacket.compute.volume.encryptors.cryptsetup.',
            'NoOpEncryptor': 'jacket.compute.volume.encryptors.nop.',
        }
        if provider in prefixes:
            provider = prefixes[provider] + provider
        try:
            encryptor = importutils.import_object(provider,
                                                  connection_info,
                                                  **kwargs)
        except Exception as e:
            LOG.error(_LE("Error instantiating %(provider)s: "
                          "%(exception)s"),
                      {'provider': provider, 'exception': e})
            raise
    msg = ("Using volume encryptor '%(encryptor)s' for connection: "
           "%(connection_info)s" %
           {'encryptor': encryptor, 'connection_info': connection_info})
    LOG.debug(strutils.mask_password(msg))
    return encryptor
def _reset_state(self, req, id, body):
    """Permit admins to reset the state of a server."""
    context = req.environ["compute.context"]
    authorize(context, 'resetState')

    # Identify the desired state from the body
    try:
        state = state_map[body["os-resetState"]["state"]]
    except (TypeError, KeyError):
        valid_states = ', '.join(sorted(state_map.keys()))
        msg = _("Desired state must be specified. Valid states "
                "are: %s") % valid_states
        raise exc.HTTPBadRequest(explanation=msg)

    instance = common.get_instance(self.compute_api, context, id)
    try:
        instance.vm_state = state
        instance.task_state = None
        instance.save(admin_state_reset=True)
    except exception.InstanceNotFound:
        raise exc.HTTPNotFound(explanation=_("Server not found"))
    except Exception:
        readable = traceback.format_exc()
        LOG.exception(_LE("Compute.api::resetState %s"), readable)
        raise exc.HTTPUnprocessableEntity()
    return webob.Response(status_int=202)
def load_transfer_modules():
    """Discover image-download modules and index them by URL scheme.

    A scheme registered by an earlier module is never overridden; the
    duplicate registration is logged and skipped.
    """
    module_dictionary = {}
    manager = stevedore.extension.ExtensionManager(
        'jacket.compute.image.download.modules')
    for module_name in manager.names():
        mgr = stevedore.driver.DriverManager(
            namespace='jacket.compute.image.download.modules',
            name=module_name,
            invoke_on_load=False)
        for scheme in mgr.driver.get_schemes():
            if scheme in module_dictionary:
                LOG.error(_LE('%(scheme)s is registered as a module '
                              'twice. %(module_name)s is not being '
                              'used.'),
                          {'scheme': scheme,
                           'module_name': module_name})
            else:
                module_dictionary[scheme] = mgr.driver
    return module_dictionary
def reboot(self, context, instance, network_info, reboot_type,
           block_device_info=None, bad_volumes_callback=None):
    """Reboot the specified instance.

    Resolves the provider (AWS) instance id via the mapper DB and issues
    a reboot on AWS.

    :raises exception.InstanceNotFound: when no provider instance maps
            to this instance or AWS reports it missing.
    :raises exception.InstanceRebootFailure: on other AWS errors.
    """
    LOG.debug('Start to reboot server: %s' % instance.uuid)
    try:
        aws_instance_id = self._get_provider_instance_id(
            context, instance.uuid)
        if aws_instance_id:
            LOG.debug('Reboot the instance %s on aws', aws_instance_id)
            instance_ids = [aws_instance_id]
            self.aws_client.get_aws_client(context)\
                .reboot_instances(InstanceIds=instance_ids)
            LOG.debug('Reboot server: %s success' % instance.uuid)
        else:
            # FIX: '% s' had a stray space in the format spec.
            LOG.error('Cannot get the aws_instance_id of %s'
                      % instance.uuid)
            raise exception.InstanceNotFound(instance_id=instance.uuid)
    except botocore.exceptions.ClientError as e:
        # FIX: 'Unknown' was misspelled 'Unkown' in the fallback message.
        reason = e.response.get('Error', {}).get('Message', 'Unknown')
        # FIX: message said 'Power on' — copy-paste from the power_on
        # path; this is the reboot path.
        LOG.error('Reboot instance failed, the error is: %s' % reason)
        error_code = e.response.get('Error', {}).get('Code', 'Unknown')
        if error_code == 'InvalidInstanceID.NotFound':
            raise exception.InstanceNotFound(instance_id=instance.uuid)
        else:
            raise exception.InstanceRebootFailure(reason=reason)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            # FIX: same copy-paste — this logs a reboot failure.
            LOG.error(_LE('Error from reboot instance. '
                          'Error=%(e)s'), {'e': e}, instance=instance)
def __exit__(self, ex_type, ex_value, ex_traceback):
    """Translate exceptions escaping the resource into WSGI Faults."""
    if not ex_value:
        # No exception in flight; nothing to suppress or translate.
        return True

    if isinstance(ex_value, exception.Forbidden):
        raise Fault(webob.exc.HTTPForbidden(
            explanation=ex_value.format_message()))
    if isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
        # Propagate unchanged so API version negotiation can handle it.
        raise
    if isinstance(ex_value, exception.Invalid):
        converted = exception.ConvertedException(
            code=ex_value.code,
            explanation=ex_value.format_message())
        raise Fault(converted)
    if isinstance(ex_value, TypeError):
        LOG.error(_LE('Exception handling resource: %s'), ex_value,
                  exc_info=(ex_type, ex_value, ex_traceback))
        raise Fault(webob.exc.HTTPBadRequest())
    if isinstance(ex_value, Fault):
        LOG.info(_LI("Fault thrown: %s"), ex_value)
        raise ex_value
    if isinstance(ex_value, webob.exc.HTTPException):
        LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
        raise Fault(ex_value)

    # Not a type we translate; let it propagate.
    return False
def delete(self, req, id):
    """Delete a network and return its quota reservation on success."""
    context = req.environ['compute.context']
    authorize(context)
    reservation = None
    try:
        if CONF.enable_network_quota:
            # Reserve a negative delta so the usage count is decremented
            # once the delete succeeds and the reservation is committed.
            reservation = QUOTAS.reserve(context, networks=-1)
    except Exception:
        # Best-effort: a quota bookkeeping failure must not block the
        # actual network delete.
        reservation = None
        LOG.exception(_LE("Failed to update usages deallocating "
                          "network."))

    def _rollback_quota(reservation):
        # Undo the pending quota decrement when the delete fails.
        if CONF.enable_network_quota and reservation:
            QUOTAS.rollback(context, reservation)

    try:
        self.network_api.disassociate(context, id)
        self.network_api.delete(context, id)
    except exception.PolicyNotAuthorized as e:
        _rollback_quota(reservation)
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.NetworkInUse as e:
        _rollback_quota(reservation)
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.NetworkNotFound:
        _rollback_quota(reservation)
        msg = _("Network not found")
        raise exc.HTTPNotFound(explanation=msg)

    # Delete succeeded: commit the quota decrement.
    if CONF.enable_network_quota and reservation:
        QUOTAS.commit(context, reservation)
def _refresh_default_networks(self):
    """Re-query Neutron for the default networks and cache the result."""
    # Clear the cache first so a failed refresh leaves it empty rather
    # than stale.
    self._default_networks = []
    # NOTE(review): the option is compared to the string "True", so it
    # is evidently a StrOpt rather than a BoolOpt in this codebase.
    if CONF.use_neutron_default_nets != "True":
        return
    try:
        self._default_networks = self._get_default_networks()
    except Exception:
        LOG.exception(_LE("Failed to get default networks"))
def __init__(self, cell_state_cls=None):
    """Build the cell state manager and perform the initial DB sync.

    :param cell_state_cls: class used to represent a cell's state;
        defaults to CellState.
    """
    super(CellStateManager, self).__init__()
    if not cell_state_cls:
        cell_state_cls = CellState
    self.cell_state_cls = cell_state_cls

    self.my_cell_state = cell_state_cls(CONF.cells.name, is_me=True)
    self.parent_cells = {}
    self.child_cells = {}
    # datetime.min guarantees the first sync sees "stale" data and runs.
    self.last_cell_db_check = datetime.datetime.min
    self.servicegroup_api = servicegroup.API()

    attempts = 0
    while True:
        try:
            self._cell_data_sync(force=True)
            break
        except db_exc.DBError:
            # Retry for up to an hour (120 attempts x 30s sleep) before
            # giving up on the database.
            attempts += 1
            if attempts > 120:
                raise
            LOG.exception(_LE('DB error'))
            time.sleep(30)

    my_cell_capabs = {}
    for cap in CONF.cells.capabilities:
        # Each capability is "name=value"; the value may itself be a
        # ';'-separated list of values.
        name, value = cap.split('=', 1)
        if ';' in value:
            values = set(value.split(';'))
        else:
            values = set([value])
        my_cell_capabs[name] = values
    self.my_cell_state.update_capabilities(my_cell_capabs)
def delete(self, req, id):
    """Delete a network, committing the quota reservation on success."""
    context = req.environ['compute.context']
    authorize(context)
    reservation = None
    try:
        if CONF.enable_network_quota:
            # Negative reserve: decrements usage once committed below.
            reservation = QUOTAS.reserve(context, networks=-1)
    except Exception:
        # Quota failure is logged but never blocks the delete itself.
        reservation = None
        LOG.exception(
            _LE("Failed to update usages deallocating "
                "network."))

    def _rollback_quota(reservation):
        # Cancel the pending quota decrement on a failed delete.
        if CONF.enable_network_quota and reservation:
            QUOTAS.rollback(context, reservation)

    try:
        self.network_api.disassociate(context, id)
        self.network_api.delete(context, id)
    except exception.PolicyNotAuthorized as e:
        _rollback_quota(reservation)
        raise exc.HTTPForbidden(explanation=six.text_type(e))
    except exception.NetworkInUse as e:
        _rollback_quota(reservation)
        raise exc.HTTPConflict(explanation=e.format_message())
    except exception.NetworkNotFound:
        _rollback_quota(reservation)
        msg = _("Network not found")
        raise exc.HTTPNotFound(explanation=msg)

    if CONF.enable_network_quota and reservation:
        QUOTAS.commit(context, reservation)
def _load_file(self, handle):
    """Parse scheduler options JSON from an open file handle.

    Returns an empty dict (after logging) when the content is not
    valid JSON. Broken out as its own method for ease of testing.
    """
    try:
        options = jsonutils.load(handle)
    except ValueError:
        LOG.exception(_LE("Could not decode scheduler options"))
        return {}
    return options
def do_associate():
    # Closure: context, floating_address, fixed_address, interface,
    # instance_uuid and self come from the enclosing method's scope.
    # associate floating ip
    floating = objects.FloatingIP.associate(context, floating_address,
                                            fixed_address, self.host)
    fixed = floating.fixed_ip
    if not fixed:
        # NOTE(vish): ip was already associated
        return
    try:
        # gogo driver time
        self.l3driver.add_floating_ip(floating_address, fixed_address,
                                      interface, fixed['network'])
    except processutils.ProcessExecutionError as e:
        # Driver failed: undo the DB association, then re-raise (or
        # raise NoFloatingIpInterface for the missing-device case).
        with excutils.save_and_reraise_exception():
            try:
                objects.FloatingIP.disassociate(context,
                                                floating_address)
            except Exception:
                # Best-effort cleanup; log and continue to re-raise the
                # original driver error.
                LOG.warning(_LW('Failed to disassociated floating '
                                'address: %s'), floating_address)
                pass
            if "Cannot find device" in six.text_type(e):
                try:
                    LOG.error(_LE('Interface %s not found'), interface)
                except Exception:
                    pass
                # Replaces the ProcessExecutionError being re-raised.
                raise exception.NoFloatingIpInterface(
                    interface=interface)
    payload = dict(project_id=context.project_id,
                   instance_id=instance_uuid,
                   floating_ip=floating_address)
    self.notifier.info(context,
                       'network.floating_ip.associate', payload)
def delete(self, req, id):
    """Delete an server group."""
    context = _authorize_context(req)
    try:
        sg = objects.InstanceGroup.get_by_uuid(context, id)
    except jacket.compute.exception.InstanceGroupNotFound as e:
        raise webob.exc.HTTPNotFound(explanation=e.format_message())

    quotas = objects.Quotas(context=context)
    # Quota is charged to the group's creator, not the caller.
    project_id, user_id = objects.quotas.ids_from_server_group(context,
                                                               sg)
    try:
        # We have to add the quota back to the user that created
        # the server group
        quotas.reserve(project_id=project_id,
                       user_id=user_id, server_groups=-1)
    except Exception:
        # Best-effort: quota bookkeeping failure must not block delete.
        quotas = None
        LOG.exception(
            _LE("Failed to update usages deallocating "
                "server group"))

    try:
        sg.destroy()
    except jacket.compute.exception.InstanceGroupNotFound as e:
        # Group vanished concurrently; cancel the pending quota change.
        if quotas:
            quotas.rollback()
        raise webob.exc.HTTPNotFound(explanation=e.format_message())

    if quotas:
        quotas.commit()
def create_key(self, ctxt, expiration=None, name='Nova Compute Key',
               payload_content_type='application/octet-stream', mode='CBC',
               algorithm='AES', length=256):
    """Create a new key through a Barbican key order.

    :param ctxt: contains information of the user and the environment
        for the request (compute/context.py)
    :param expiration: the date the key will expire
    :param name: a friendly name for the secret
    :param payload_content_type: the format/type of the secret data
    :param mode: the algorithm mode (e.g. CBC or CTR mode)
    :param algorithm: the algorithm associated with the secret
    :param length: the bit length of the secret
    :return: the UUID of the new key
    :raises Exception: if key creation fails
    """
    client = self._get_barbican_client(ctxt)
    try:
        order = client.orders.create_key(name, algorithm, length, mode,
                                         payload_content_type, expiration)
        order_ref = order.submit()
        submitted = client.orders.get(order_ref)
        return self._retrieve_secret_uuid(submitted.secret_ref)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error creating key: %s"), e)
def _get_compute_nodes_in_db(self, context, use_slave=False):
    """Return the ComputeNode records for self.host ([] when missing)."""
    host = self.host
    try:
        nodes = objects.ComputeNodeList.get_all_by_host(
            context, host, use_slave=use_slave)
    except exception.NotFound:
        LOG.error(_LE("No compute node record for host %s"), host)
        return []
    return nodes
def copy_key(self, ctxt, key_id):
    """Copies (i.e., clones) a key stored by barbican.

    :param ctxt: contains information of the user and the environment
        for the request (compute/context.py)
    :param key_id: the UUID of the key to copy
    :return: the UUID of the key copy
    :raises Exception: if key copying fails
    """
    try:
        secret = self._get_secret(ctxt, key_id)
        con_type = secret.content_types['default']
        secret_data = self._get_secret_data(secret,
                                            payload_content_type=con_type)
        key = keymgr_key.SymmetricKey(secret.algorithm, secret_data)
        # Store the clone under the same metadata as the original.
        # NOTE(review): the trailing 'base64' and True arguments mirror
        # store_key()'s positional signature -- confirm against it.
        copy_uuid = self.store_key(ctxt, key, secret.expiration,
                                   secret.name, con_type, 'base64',
                                   secret.algorithm, secret.bit_length,
                                   secret.mode, True)
        return copy_uuid
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error copying key: %s"), e)
def _error(self, inner, req):
    """Convert an unhandled exception into a wsgi.Fault response.

    Logs the error, emits an API-fault notification, and maps the
    exception's ``code`` attribute (default 500) to an HTTP error type.
    """
    LOG.exception(_LE("Caught error: %s"), six.text_type(inner))

    # 'safe' marks exceptions whose message may be shown to the user.
    safe = getattr(inner, 'safe', False)
    headers = getattr(inner, 'headers', None)
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500

    msg_dict = dict(url=req.url, status=status)
    LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
    outer = self.status_to_type(status)
    if headers:
        outer.headers = headers
    # NOTE(johannes): We leave the explanation empty here on
    # purpose. It could possibly have sensitive information
    # that should not be returned back to the user. See
    # bugs 868360 and 874472
    # NOTE(eglynn): However, it would be over-conservative and
    # inconsistent with the EC2 API to hide every exception,
    # including those that are safe to expose, see bug 1021373
    if safe:
        user_locale = req.best_match_language()
        # NOTE(review): ``inner.message`` is the Python 2 exception
        # attribute -- confirm before running under Python 3.
        inner_msg = translate(inner.message, user_locale)
        outer.explanation = '%s: %s' % (inner.__class__.__name__,
                                        inner_msg)

    notifications.send_api_fault(req.url, status, inner)
    return wsgi.Fault(outer)
def get_key(self, ctxt, key_id,
            payload_content_type='application/octet-stream'):
    """Retrieve the key identified by key_id from Barbican.

    :param ctxt: contains information of the user and the environment
        for the request (compute/context.py)
    :param key_id: the UUID of the key to retrieve
    :param payload_content_type: The format/type of the secret data
    :return: SymmetricKey representation of the key
    :raises Exception: if key retrieval fails
    """
    try:
        secret = self._get_secret(ctxt, key_id)
        data = self._get_secret_data(secret, payload_content_type)
        if payload_content_type != 'application/octet-stream':
            key_data = data
        else:
            # Decode the base64 payload into a list of unsigned byte
            # values, one int per byte.
            key_data = array.array('B',
                                   base64.b64decode(data)).tolist()
        return keymgr_key.SymmetricKey(secret.algorithm, key_data)
    except Exception as e:
        with excutils.save_and_reraise_exception():
            LOG.error(_LE("Error getting key: %s"), e)
def create(self, req, body):
    """Create a new instance mapper.

    :param req: WSGI request; context is read from req.environ
    :param body: dict with an 'instance_mapper' entry that must contain
        'instance_id' and 'dest_instance_id' (and optionally
        'project_id')
    :return: {'instance_mapper': <created mapper>}
    :raises HTTPUnprocessableEntity: on a malformed body
    :raises HTTPBadRequest: when the mapper creation fails
    """
    context = req.environ['jacket.context']
    if not self.is_valid_body(body, 'instance_mapper'):
        raise exc.HTTPUnprocessableEntity()

    instance_mapper = body['instance_mapper']
    if 'instance_id' not in instance_mapper:
        raise exc.HTTPUnprocessableEntity()
    if 'dest_instance_id' not in instance_mapper:
        raise exc.HTTPUnprocessableEntity()

    instance_id = instance_mapper.pop('instance_id')
    project_id = instance_mapper.pop('project_id', None)

    try:
        instance = self.config_api.instance_mapper_create(context,
                                                          instance_id,
                                                          project_id,
                                                          instance_mapper)
    except Exception as ex:
        # BUG FIX: substitutions must be a single dict positional arg;
        # keyword args to LOG.exception() are not interpolated into the
        # message, so %(instance_id)s/%(ex)s were never filled in.
        LOG.exception(_LE("create instance(%(instance_id)s) mapper "
                          "failed, ex = %(ex)s"),
                      {'instance_id': instance_id, 'ex': ex})
        # BUG FIX: explanation must be a string, not an exception object.
        raise exc.HTTPBadRequest(explanation=str(ex))

    return {'instance_mapper': instance}
def _get_barbican_client(self, ctxt): """Creates a client to connect to the Barbican service. :param ctxt: the user context for authentication :return: a Barbican Client object :raises Forbidden: if the ctxt is None """ # Confirm context is provided, if not raise forbidden if not ctxt: msg = _("User is not authorized to use key manager.") LOG.error(msg) raise exception.Forbidden(msg) if not hasattr(ctxt, 'project_id') or ctxt.project_id is None: msg = _("Unable to create Barbican Client without project_id.") LOG.error(msg) raise exception.KeyManagerError(msg) # If same context, return cached barbican client if self._barbican_client and self._current_context == ctxt: return self._barbican_client try: _SESSION = ks_loading.load_session_from_conf_options( CONF, BARBICAN_OPT_GROUP) auth = ctxt.get_auth_plugin() service_type, service_name, interface = (CONF. barbican. catalog_info. split(':')) region_name = CONF.barbican.os_region_name service_parameters = {'service_type': service_type, 'service_name': service_name, 'interface': interface, 'region_name': region_name} if CONF.barbican.endpoint_template: self._base_url = (CONF.barbican.endpoint_template % ctxt.to_dict()) else: self._base_url = _SESSION.get_endpoint( auth, **service_parameters) # the barbican endpoint can't have the '/v1' on the end self._barbican_endpoint = self._base_url.rpartition('/')[0] sess = session.Session(auth=auth) self._barbican_client = barbican_client.Client( session=sess, endpoint=self._barbican_endpoint) self._current_context = ctxt except Exception as e: with excutils.save_and_reraise_exception(): LOG.error(_LE("Error creating Barbican client: %s"), e) return self._barbican_client
def parse_server_string(server_str):
    """Parses the given server_string and returns a tuple of host and
    port. If it's not a combination of host part and port, the port
    element is an empty string. If the input is invalid expression,
    return a tuple of two empty strings.
    """
    try:
        # Pure IPv6 address without a port.
        if netaddr.valid_ipv6(server_str):
            return (server_str, '')

        # "[v6addr]:port" form.
        if "]:" in server_str:
            host, port = server_str.replace('[', '', 1).split(']:')
            return (host, port)

        # Bare host with no port at all.
        if ':' not in server_str:
            return (server_str, '')

        # Plain "host:port" form.
        host, port = server_str.split(':')
        return (host, port)
    except (ValueError, netaddr.AddrFormatError):
        LOG.error(_LE('Invalid server_string: %s'), server_str)
        return ('', '')
def _error(self, inner, req):
    """Convert an unhandled exception into a wsgi.Fault response.

    Quota errors are expected and not logged as exceptions; the HTTP
    status comes from the exception's ``code`` attribute (default 500).
    """
    if not isinstance(inner, exception.QuotaError):
        LOG.exception(_LE("Caught error: %(type)s %(error)s"),
                      {'type': type(inner), 'error': inner})
    # 'safe' marks exceptions whose message may be shown to the user.
    safe = getattr(inner, 'safe', False)
    headers = getattr(inner, 'headers', None)
    status = getattr(inner, 'code', 500)
    if status is None:
        status = 500
    msg_dict = dict(url=req.url, status=status)
    LOG.info(_LI("%(url)s returned with HTTP %(status)d"), msg_dict)
    outer = self.status_to_type(status)
    if headers:
        outer.headers = headers
    # NOTE(johannes): We leave the explanation empty here on
    # purpose. It could possibly have sensitive information
    # that should not be returned back to the user. See
    # bugs 868360 and 874472
    # NOTE(eglynn): However, it would be over-conservative and
    # inconsistent with the EC2 API to hide every exception,
    # including those that are safe to expose, see bug 1021373
    if safe:
        msg = (inner.msg if isinstance(inner, exception.JacketException)
               else six.text_type(inner))
        params = {'exception': inner.__class__.__name__,
                  'explanation': msg}
        outer.explanation = _('%(exception)s: %(explanation)s') % params
    return wsgi.Fault(outer)
def create(self, req, body):
    """Create a new flavor mapper.

    :param req: WSGI request; context is read from req.environ
    :param body: dict with a 'flavor_mapper' entry that must contain
        'flavor_id' and 'dest_flavor_id' (and optionally 'project_id')
    :return: {'flavor_mapper': <created mapper>}
    :raises HTTPUnprocessableEntity: on a malformed body
    :raises HTTPBadRequest: when the mapper creation fails
    """
    context = req.environ['jacket.context']
    if not self.is_valid_body(body, 'flavor_mapper'):
        raise exc.HTTPUnprocessableEntity()

    flavor_mapper = body['flavor_mapper']
    if 'flavor_id' not in flavor_mapper:
        raise exc.HTTPUnprocessableEntity()
    if 'dest_flavor_id' not in flavor_mapper:
        raise exc.HTTPUnprocessableEntity()

    flavor_id = flavor_mapper.pop('flavor_id')
    project_id = flavor_mapper.pop('project_id', None)

    try:
        flavor = self.config_api.flavor_mapper_create(context,
                                                      flavor_id,
                                                      project_id,
                                                      flavor_mapper)
    except Exception as ex:
        # BUG FIX: substitutions must be a single dict positional arg;
        # keyword args to LOG.exception() are not interpolated into the
        # message, so %(flavor_id)s/%(ex)s were never filled in.
        LOG.exception(_LE("create flavor(%(flavor_id)s) mapper failed, "
                          "ex = %(ex)s"),
                      {'flavor_id': flavor_id, 'ex': ex})
        # BUG FIX: explanation must be a string, not an exception object.
        raise exc.HTTPBadRequest(explanation=str(ex))

    return {'flavor_mapper': flavor}
def __exit__(self, ex_type, ex_value, ex_traceback):
    """Convert known exception types escaping the resource to Faults."""
    if not ex_value:
        return True

    if isinstance(ex_value, exception.Forbidden):
        msg = ex_value.format_message()
        raise Fault(webob.exc.HTTPForbidden(explanation=msg))
    elif isinstance(ex_value, exception.VersionNotFoundForAPIMethod):
        # Re-raise unchanged so version negotiation can deal with it.
        raise
    elif isinstance(ex_value, exception.Invalid):
        raise Fault(exception.ConvertedException(
            code=ex_value.code,
            explanation=ex_value.format_message()))
    elif isinstance(ex_value, TypeError):
        LOG.error(_LE('Exception handling resource: %s'), ex_value,
                  exc_info=(ex_type, ex_value, ex_traceback))
        raise Fault(webob.exc.HTTPBadRequest())
    elif isinstance(ex_value, Fault):
        LOG.info(_LI("Fault thrown: %s"), ex_value)
        raise ex_value
    elif isinstance(ex_value, webob.exc.HTTPException):
        LOG.info(_LI("HTTP exception thrown: %s"), ex_value)
        raise Fault(ex_value)

    # Not one of ours -- tell the interpreter we didn't handle it.
    return False
def list(self, context, names=None, ids=None, project=None,
         search_opts=None):
    """Returns list of security group rules owned by tenant."""
    neutron = neutronapi.get_client(context)
    search_opts = search_opts or {}

    params = {}
    if names:
        params['name'] = names
    if ids:
        params['id'] = ids

    # Admins asking for all_tenants (without name/id filters) get every
    # security group. Neutron has no all-tenants flag: omitting
    # tenant_id from the query returns everything.
    list_all_tenants = (context.is_admin and
                        'all_tenants' in search_opts and
                        not any([names, ids]))
    if project and not list_all_tenants:
        params['tenant_id'] = project

    try:
        security_groups = neutron.list_security_groups(**params).get(
            'security_groups')
    except n_exc.NeutronClientException:
        with excutils.save_and_reraise_exception():
            LOG.exception(_LE("Neutron Error getting security groups"))

    return [self._convert_to_nova_security_group_format(group)
            for group in security_groups]
def do_associate():
    # Closure: context, floating_address, fixed_address, interface,
    # instance_uuid and self are taken from the enclosing method.
    # associate floating ip
    floating = objects.FloatingIP.associate(context, floating_address,
                                            fixed_address, self.host)
    fixed = floating.fixed_ip
    if not fixed:
        # NOTE(vish): ip was already associated
        return
    try:
        # gogo driver time
        self.l3driver.add_floating_ip(floating_address, fixed_address,
                                      interface, fixed['network'])
    except processutils.ProcessExecutionError as e:
        # Driver failed: roll back the DB association, then re-raise
        # (or raise NoFloatingIpInterface for a missing device).
        with excutils.save_and_reraise_exception():
            try:
                objects.FloatingIP.disassociate(
                    context, floating_address)
            except Exception:
                # Best-effort cleanup; the original error still wins.
                LOG.warning(
                    _LW('Failed to disassociated floating '
                        'address: %s'), floating_address)
                pass
            if "Cannot find device" in six.text_type(e):
                try:
                    LOG.error(_LE('Interface %s not found'), interface)
                except Exception:
                    pass
                # Replaces the ProcessExecutionError being re-raised.
                raise exception.NoFloatingIpInterface(
                    interface=interface)
    payload = dict(project_id=context.project_id,
                   instance_id=instance_uuid,
                   floating_ip=floating_address)
    self.notifier.info(context,
                       'network.floating_ip.associate', payload)
def _report_state(self, service):
    """Update the state of this service in the datastore."""
    try:
        service.service_ref.report_count += 1
        service.service_ref.save()

        # TODO(termie): make this pattern be more elegant.
        # model_disconnected tracks whether the previous report failed,
        # so we log exactly one "recovered" message after an outage.
        if getattr(service, 'model_disconnected', False):
            service.model_disconnected = False
            LOG.info(
                _LI('Recovered from being unable to report status.'))
    except messaging.MessagingTimeout:
        # NOTE(johngarbutt) during upgrade we will see messaging timeouts
        # as compute-conductor is restarted, so only log this error once.
        if not getattr(service, 'model_disconnected', False):
            service.model_disconnected = True
            LOG.warn(_LW('Lost connection to compute-conductor '
                         'for reporting service status.'))
    except Exception:
        # NOTE(rpodolyaka): we'd like to avoid catching of all possible
        # exceptions here, but otherwise it would become possible for
        # the state reporting thread to stop abruptly, and thus leave
        # the service unusable until it's restarted.
        LOG.exception(
            _LE('Unexpected error while reporting service status'))
        # trigger the recovery log message, if this error goes away
        service.model_disconnected = True
def __init__(self, message=None, **kwargs):
    """Build the exception message from the class msg_fmt and kwargs.

    When no explicit message is given, msg_fmt is %-interpolated with
    kwargs. A formatting failure is logged and either re-raised (when
    CONF.fatal_exception_format_errors is set) or degraded to the bare
    msg_fmt template so the core message still gets out.
    """
    self.kwargs = kwargs

    if 'code' not in self.kwargs:
        try:
            # Expose the class-level error code through kwargs so it
            # can appear in formatted messages.
            self.kwargs['code'] = self.code
        except AttributeError:
            pass

    if not message:
        try:
            message = self.msg_fmt % kwargs
        except Exception:
            exc_info = sys.exc_info()
            # kwargs doesn't match a variable in the message
            # log the issue and the kwargs
            LOG.exception(_LE('Exception in string format operation'))
            for name, value in six.iteritems(kwargs):
                LOG.error("%s: %s" % (name, value))    # noqa
            if CONF.fatal_exception_format_errors:
                six.reraise(*exc_info)
            else:
                # at least get the core message out if something happened
                message = self.msg_fmt

    self.message = message
    super(JacketException, self).__init__(message)
def _poll_shelved_instances(self, context):
    """Offload SHELVED instances whose offload window has expired."""
    offload_time = CONF.shelved_offload_time
    if offload_time <= 0:
        # Automatic offloading is disabled.
        return

    filters = {'vm_state': vm_states.SHELVED,
               'task_state': None,
               'host': self.host}
    shelved = objects.InstanceList.get_by_filters(
        context, filters=filters, expected_attrs=['system_metadata'],
        use_slave=True)

    # Collect the expired instances first, then offload them.
    to_gc = [
        inst for inst in shelved
        if timeutils.is_older_than(
            timeutils.parse_strtime(inst.system_metadata['shelved_at']),
            offload_time)]

    for instance in to_gc:
        try:
            instance.task_state = task_states.SHELVING_OFFLOADING
            instance.save(expected_task_state=(None,))
            self.shelve_offload_instance(context, instance,
                                         clean_shutdown=False)
        except Exception:
            # One bad instance must not abort the periodic task.
            LOG.exception(
                _LE('Periodic task failed to offload instance.'),
                instance=instance)