コード例 #1
0
class HyperVLiveMigrateData(LiveMigrateData):
    """Hyper-V driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added is_shared_instance_path
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    VERSION = '1.3'

    # Whether the instance directory is on storage shared between hosts.
    fields = {'is_shared_instance_path': fields.BooleanField()}

    def obj_make_compatible(self, primitive, target_version):
        """Strip from ``primitive`` any field newer than ``target_version``."""
        super(HyperVLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 3) and 'wait_for_vif_plugged' in primitive:
            del primitive['wait_for_vif_plugged']
        if target_version < (1, 2):
            if 'old_vol_attachment_ids' in primitive:
                del primitive['old_vol_attachment_ids']
        if target_version < (1, 1):
            if 'is_shared_instance_path' in primitive:
                del primitive['is_shared_instance_path']

    def to_legacy_dict(self, pre_migration_result=False):
        """Convert this object to the legacy dict format.

        :param pre_migration_result: forwarded to the base class, which
            adds the 'pre_live_migration_result' key when True.
        :returns: a legacy-format dict.
        """
        # Bug fix: previously the pre_migration_result argument was accepted
        # but silently dropped, so the base class never emitted the
        # 'pre_live_migration_result' key; forward it.
        legacy = super(HyperVLiveMigrateData, self).to_legacy_dict(
            pre_migration_result=pre_migration_result)
        if self.obj_attr_is_set('is_shared_instance_path'):
            legacy['is_shared_instance_path'] = self.is_shared_instance_path

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy-format dict."""
        super(HyperVLiveMigrateData, self).from_legacy_dict(legacy)
        if 'is_shared_instance_path' in legacy:
            self.is_shared_instance_path = legacy['is_shared_instance_path']
コード例 #2
0
class InstancePCIRequest(base.NovaObject, base.NovaObjectDictCompat):
    """A single PCI device request attached to an instance."""
    # Version 1.0: Initial version
    # Version 1.1: Add request_id
    # Version 1.2: Add PCI NUMA affinity policy
    VERSION = '1.2'

    fields = {
        'count': fields.IntegerField(),
        'spec': fields.ListOfDictOfNullableStringsField(),
        'alias_name': fields.StringField(nullable=True),
        # Note(moshele): is_new is deprecated and should be removed
        # on major version bump
        'is_new': fields.BooleanField(default=False),
        'request_id': fields.UUIDField(nullable=True),
        'numa_policy': fields.PCINUMAAffinityPolicyField(nullable=True),
    }

    def obj_load_attr(self, attr):
        """Lazy-load hook: any attribute that was never set reads as None."""
        setattr(self, attr, None)

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        super(InstancePCIRequest,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # (version the field appeared in, field name), newest first.
        added_in = (((1, 2), 'numa_policy'),
                    ((1, 1), 'request_id'))
        for version, name in added_in:
            if target_version < version and name in primitive:
                del primitive[name]
コード例 #3
0
class XenapiLiveMigrateData(LiveMigrateData):
    """XenAPI driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added vif_uuid_map
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {
        'block_migration': fields.BooleanField(nullable=True),
        'destination_sr_ref': fields.StringField(nullable=True),
        'migrate_send_data': fields.DictOfStringsField(nullable=True),
        'sr_uuid_map': fields.DictOfStringsField(),
        'kernel_file': fields.StringField(),
        'ramdisk_file': fields.StringField(),
        'vif_uuid_map': fields.DictOfStringsField(),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        super(XenapiLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # (version the field appeared in, field name), newest first.
        added_in = (((1, 4), 'vifs'),
                    ((1, 3), 'wait_for_vif_plugged'),
                    ((1, 2), 'old_vol_attachment_ids'),
                    ((1, 1), 'vif_uuid_map'))
        for version, name in added_in:
            if target_version < version and name in primitive:
                del primitive[name]
コード例 #4
0
class InstancePCIRequest(base.NovaObject, base.NovaObjectDictCompat):
    """A single PCI device request attached to an instance."""
    # Version 1.0: Initial version
    # Version 1.1: Add request_id
    VERSION = '1.1'

    fields = {
        'count': fields.IntegerField(),
        'spec': fields.ListOfDictOfNullableStringsField(),
        'alias_name': fields.StringField(nullable=True),
        # A stashed request related to a resize, not current
        'is_new': fields.BooleanField(default=False),
        'request_id': fields.UUIDField(nullable=True),
    }

    def obj_load_attr(self, attr):
        """Lazy-load hook: any attribute that was never set reads as None."""
        setattr(self, attr, None)

    # NOTE(danms): The dict that this object replaces uses a key of 'new'
    # so we translate it here to our more appropriately-named 'is_new'.
    # This is not something that affects the object version, so we could
    # remove this later when all dependent code is fixed.
    @property
    def new(self):
        return self.is_new

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        # Bug fix: the parent implementation was never invoked, unlike every
        # other obj_make_compatible in this file; call it first so base-class
        # downgrade logic also runs.
        super(InstancePCIRequest,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1) and 'request_id' in primitive:
            del primitive['request_id']
コード例 #5
0
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Render the fields that are set as a legacy-format dict."""
        legacy = {}
        for attr in ('is_volume_backed', 'migration'):
            if self.obj_attr_is_set(attr):
                legacy[attr] = getattr(self, attr)
        if pre_migration_result:
            # Placeholder sub-dict; subclasses fill it in.
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate base fields from a legacy dict, ignoring unknown keys."""
        for attr in ('is_volume_backed', 'migration'):
            if attr in legacy:
                setattr(self, attr, legacy[attr])

    @classmethod
    def detect_implementation(cls, legacy_dict):
        """Choose the driver-specific subclass matching a legacy dict."""
        # Either libvirt-only key selects the libvirt implementation.
        if ('instance_relative_path' in legacy_dict
                or 'image_type' in legacy_dict):
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
コード例 #6
0
 def setUp(self):
     """Configure BooleanField coercion/primitive fixtures for base tests."""
     super(TestBoolean, self).setUp()
     self.field = fields.BooleanField()
     # (input, expected coerced value) pairs; everything coerces, so there
     # are no bad values.
     pairs = [(True, True), (False, False), (1, True),
              ('foo', True), (0, False), ('', False)]
     self.coerce_good_values = pairs
     self.coerce_bad_values = []
     # Only the two canonical booleans round-trip through primitives.
     self.to_primitive_values = pairs[:2]
     self.from_primitive_values = pairs[:2]
コード例 #7
0
class XenapiLiveMigrateData(LiveMigrateData):
    """XenAPI driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added vif_uuid_map
    VERSION = '1.1'

    fields = {
        'block_migration': fields.BooleanField(nullable=True),
        'destination_sr_ref': fields.StringField(nullable=True),
        'migrate_send_data': fields.DictOfStringsField(nullable=True),
        'sr_uuid_map': fields.DictOfStringsField(),
        'kernel_file': fields.StringField(),
        'ramdisk_file': fields.StringField(),
        'vif_uuid_map': fields.DictOfStringsField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Convert this object to the legacy dict format.

        :param pre_migration_result: if True, include the
            'pre_live_migration_result' sub-dict carrying the SR and VIF
            UUID maps (empty dicts when unset).
        :returns: a legacy-format dict.
        """
        legacy = super(XenapiLiveMigrateData, self).to_legacy_dict()
        if self.obj_attr_is_set('block_migration'):
            legacy['block_migration'] = self.block_migration
        # NOTE(review): destination_sr_ref is read unconditionally here;
        # this assumes it is always set whenever migrate_send_data is set --
        # confirm against callers.
        if self.obj_attr_is_set('migrate_send_data'):
            legacy['migrate_data'] = {
                'migrate_send_data': self.migrate_send_data,
                'destination_sr_ref': self.destination_sr_ref,
            }
        # 'x in self' presumably maps to attribute-is-set via the object's
        # dict-compat containment -- TODO confirm; falls back to {} when the
        # map is unset.
        live_result = {
            'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map or {}),
            'vif_uuid_map': ('vif_uuid_map' in self and self.vif_uuid_map
                             or {}),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy-format dict."""
        super(XenapiLiveMigrateData, self).from_legacy_dict(legacy)
        if 'block_migration' in legacy:
            self.block_migration = legacy['block_migration']
        else:
            # Default to a non-block migration when the dict does not say.
            self.block_migration = False
        if 'migrate_data' in legacy:
            self.migrate_send_data = \
                legacy['migrate_data']['migrate_send_data']
            self.destination_sr_ref = \
                legacy['migrate_data']['destination_sr_ref']
        if 'pre_live_migration_result' in legacy:
            self.sr_uuid_map = \
                legacy['pre_live_migration_result']['sr_uuid_map']
            # vif_uuid_map may be absent in dicts from older senders.
            self.vif_uuid_map = \
                legacy['pre_live_migration_result'].get('vif_uuid_map', {})

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields that did not exist at ``target_version``."""
        super(XenapiLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1):
            if 'vif_uuid_map' in primitive:
                del primitive['vif_uuid_map']
コード例 #8
0
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # Maps volume_id -> old attachment_id so the original attachments
        # can be restored if the migration is rolled back.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # Set in pre_live_migration on the destination host from the
        # [compute]/live_migration_wait_for_vif_plug option; deliberately
        # has no field default because the option's default may change.
        'wait_for_vif_plugged': fields.BooleanField()
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Render the fields that are set as a legacy-format dict."""
        legacy = {}
        for attr in ('is_volume_backed', 'migration'):
            if self.obj_attr_is_set(attr):
                legacy[attr] = getattr(self, attr)
        if pre_migration_result:
            # Placeholder sub-dict; subclasses fill it in.
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate base fields from a legacy dict, ignoring unknown keys."""
        for attr in ('is_volume_backed', 'migration'):
            if attr in legacy:
                setattr(self, attr, legacy[attr])

    @classmethod
    def detect_implementation(cls, legacy_dict):
        """Choose the driver-specific subclass matching a legacy dict."""
        # Either libvirt-only key selects the libvirt implementation.
        if ('instance_relative_path' in legacy_dict
                or 'image_type' in legacy_dict):
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
コード例 #9
0
ファイル: base.py プロジェクト: mrda/nova
class NovaPersistentObject(object):
    """Mixin class for Persistent objects.
    This adds the fields that we use in common for all persistent objects.
    """
    fields = {
        # Standard DB bookkeeping timestamps.
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        # Soft-delete marker; records default to not deleted.
        'deleted': fields.BooleanField(default=False),
    }
コード例 #10
0
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids field.
    # Version 1.2: Added wait_for_vif_plugged
    # Version 1.3: Added vifs field.
    VERSION = '1.3'

    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # Maps volume_id -> old attachment_id so the original attachments
        # can be restored on migration rollback.
        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # Set in pre_live_migration on the destination host from the
        # [compute]/live_migration_wait_for_vif_plug option; deliberately
        # has no field default because the option's default may change.
        'wait_for_vif_plugged': fields.BooleanField(),
        'vifs': fields.ListOfObjectsField('VIFMigrateData'),
    }

    @staticmethod
    def create_skeleton_migrate_vifs(vifs):
        """Create migrate vifs for live migration.

        :param vifs: a list of VIFs.
        :return: list of VIFMigrateData objects, one per input VIF, carrying
                 only the port id and the source VIF.
        """
        return [VIFMigrateData(port_id=vif['id'], source_vif=vif)
                for vif in vifs]
コード例 #11
0
ファイル: migrate_data.py プロジェクト: SeanMooney/nova
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added old_vol_attachment_ids field.
    # Version 1.2: Added wait_for_vif_plugged
    # Version 1.3: Added vifs field.
    VERSION = '1.3'

    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # old_vol_attachment_ids is a dict used to store the old attachment_ids
        # for each volume so they can be restored on a migration rollback. The
        # key is the volume_id, and the value is the attachment_id.
        # TODO(mdbooth): This field was made redundant by change Ibe9215c0. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
        # wait_for_vif_plugged is set in pre_live_migration on the destination
        # compute host based on the [compute]/live_migration_wait_for_vif_plug
        # config option value; a default value is not set here since the
        # default for the config option may change in the future
        'wait_for_vif_plugged': fields.BooleanField(),
        # Per-VIF migration data for the instance's network interfaces.
        'vifs': fields.ListOfObjectsField('VIFMigrateData'),
    }
コード例 #12
0
class BlockDevicePayload(base.NotificationPayloadBase):
    """Notification payload describing one volume-backed block device."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    # Maps payload field -> (source object name, attribute) consumed by
    # populate_schema().
    SCHEMA = {
        'device_name': ('bdm', 'device_name'),
        'boot_index': ('bdm', 'boot_index'),
        'delete_on_termination': ('bdm', 'delete_on_termination'),
        'volume_id': ('bdm', 'volume_id'),
        'tag': ('bdm', 'tag')
    }

    fields = {
        'device_name': fields.StringField(nullable=True),
        'boot_index': fields.IntegerField(nullable=True),
        'delete_on_termination': fields.BooleanField(default=False),
        'volume_id': fields.UUIDField(),
        'tag': fields.StringField(nullable=True)
    }

    def __init__(self, bdm):
        """Build the payload by copying SCHEMA fields from the bdm."""
        super(BlockDevicePayload, self).__init__()
        self.populate_schema(bdm=bdm)

    @classmethod
    def from_instance(cls, instance):
        """Returns a list of BlockDevicePayload objects based on the passed
        bdms.
        """
        # Feature-flagged: None signals bdms are excluded from notifications.
        if not CONF.notifications.bdms_in_notifications:
            return None

        instance_bdms = instance.get_bdms()
        if instance_bdms is None:
            return []
        return cls.from_bdms(instance_bdms)

    @classmethod
    def from_bdms(cls, bdms):
        """Returns a list of BlockDevicePayload objects based on the passed
        BlockDeviceMappingList.
        """
        # Only volume-backed mappings are reported.
        return [cls(bdm) for bdm in bdms if bdm.volume_id is not None]
コード例 #13
0
class InstancePCIRequest(base.NovaObject,
                         base.NovaObjectDictCompat):
    """A request for PCI devices attached to an instance."""
    # Version 1.0: Initial version
    # Version 1.1: Added request_id field
    # Version 1.2: Added numa_policy field
    # Version 1.3: Added requester_id field
    # Version 1.4: Added 'socket' to numa_policy field
    VERSION = '1.4'

    # Possible sources for a PCI request:
    # FLAVOR_ALIAS : Request originated from a flavor alias.
    # NEUTRON_PORT : Request originated from a neutron port.
    FLAVOR_ALIAS = 0
    NEUTRON_PORT = 1

    fields = {
        'count': fields.IntegerField(),
        'spec': fields.ListOfDictOfNullableStringsField(),
        'alias_name': fields.StringField(nullable=True),
        # Note(moshele): is_new is deprecated and should be removed
        # on major version bump
        'is_new': fields.BooleanField(default=False),
        'request_id': fields.UUIDField(nullable=True),
        'requester_id': fields.StringField(nullable=True),
        'numa_policy': fields.PCINUMAAffinityPolicyField(nullable=True),
    }

    @property
    def source(self):
        """Origin of this request: flavor alias or neutron SR-IOV port.

        Neutron SR-IOV port requests are recognisable because they carry
        no alias_name.
        """
        if self.alias_name is None:
            return InstancePCIRequest.NEUTRON_PORT
        return InstancePCIRequest.FLAVOR_ALIAS

    def obj_load_attr(self, attr):
        """Lazy-load hook: any attribute that was never set reads as None."""
        setattr(self, attr, None)

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        super(InstancePCIRequest, self).obj_make_compatible(primitive,
                                                            target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # (version the field appeared in, field name), newest first.
        added_in = (((1, 3), 'requester_id'),
                    ((1, 2), 'numa_policy'),
                    ((1, 1), 'request_id'))
        for version, name in added_in:
            if target_version < version and name in primitive:
                del primitive[name]
コード例 #14
0
class NetworkInterfaceMetadata(DeviceMetadata):
    """Metadata about a network interface exposed to the guest."""
    # Version 1.0: Initial version
    # Version 1.1: Add vlan field
    # Version 1.2: Add vf_trusted field
    VERSION = '1.2'

    fields = {
        'mac': fields.MACAddressField(),
        'vlan': fields.IntegerField(),
        'vf_trusted': fields.BooleanField(default=False),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        # Bug fix: the parent implementation was never invoked, unlike the
        # other obj_make_compatible implementations in this file; call it
        # first so DeviceMetadata-level downgrade logic also runs.
        super(NetworkInterfaceMetadata,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1) and 'vlan' in primitive:
            del primitive['vlan']
        if target_version < (1, 2) and 'vf_trusted' in primitive:
            del primitive['vf_trusted']
コード例 #15
0
class XenapiLiveMigrateData(LiveMigrateData):
    """XenAPI driver-specific live migration data."""
    VERSION = '1.0'

    fields = {
        'block_migration': fields.BooleanField(nullable=True),
        'destination_sr_ref': fields.StringField(nullable=True),
        'migrate_send_data': fields.DictOfStringsField(nullable=True),
        'sr_uuid_map': fields.DictOfStringsField(),
        'kernel_file': fields.StringField(),
        'ramdisk_file': fields.StringField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Convert this object to the legacy dict format.

        :param pre_migration_result: if True, include the
            'pre_live_migration_result' sub-dict carrying the SR UUID map
            (an empty dict when unset).
        :returns: a legacy-format dict.
        """
        legacy = super(XenapiLiveMigrateData, self).to_legacy_dict()
        if self.obj_attr_is_set('block_migration'):
            legacy['block_migration'] = self.block_migration
        # NOTE(review): destination_sr_ref is read unconditionally here;
        # this assumes it is always set whenever migrate_send_data is set --
        # confirm against callers.
        if self.obj_attr_is_set('migrate_send_data'):
            legacy['migrate_data'] = {
                'migrate_send_data': self.migrate_send_data,
                'destination_sr_ref': self.destination_sr_ref,
            }
        # 'x in self' presumably maps to attribute-is-set via the object's
        # dict-compat containment -- TODO confirm; falls back to {} when
        # the map is unset.
        live_result = {
            'sr_uuid_map': ('sr_uuid_map' in self and self.sr_uuid_map
                            or {}),
        }
        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from a legacy-format dict."""
        super(XenapiLiveMigrateData, self).from_legacy_dict(legacy)
        if 'block_migration' in legacy:
            self.block_migration = legacy['block_migration']
        else:
            # Default to a non-block migration when the dict does not say.
            self.block_migration = False
        if 'migrate_data' in legacy:
            self.migrate_send_data = \
                legacy['migrate_data']['migrate_send_data']
            self.destination_sr_ref = \
                legacy['migrate_data']['destination_sr_ref']
        if 'pre_live_migration_result' in legacy:
            self.sr_uuid_map = \
                legacy['pre_live_migration_result']['sr_uuid_map']
コード例 #16
0
class Licence(base.NovaObject, base.NovaObjectDictCompat):
    """Versioned object wrapping the single licence DB record (id 1)."""

    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'starttime': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'system_uuid': fields.StringField(nullable=True),
        'encrypted_license': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(),
        'used': fields.IntegerField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, licence, db_licence):
        """Copy every known field from the DB row onto the object."""
        for field in licence.fields:
            licence[field] = db_licence[field]
        licence._context = context
        licence.obj_reset_changes()
        return licence

    @base.remotable_classmethod
    def get_by_licence_id(cls, context, session):
        """Load the licence record (always id 1); returns None if absent."""
        db_licence = db.get_licence(context, 1, session=session)
        if db_licence:
            return cls._from_db_object(context, cls(), db_licence)

    @base.remotable_classmethod
    def session(cls, context):
        """Create a new SQLAlchemy session bound to the DB engine."""
        alchemy = importutils.import_module('sqlalchemy.orm')
        Session = alchemy.sessionmaker()
        engine = db.get_engine(context)
        Session.configure(bind=engine)
        return Session()

    @base.remotable_classmethod
    def save(cls, context, session, **kwargs):
        """Update licence record 1; returns True on success, else None."""
        db_licence = db.update_licence(context, 1, session=session, **kwargs)
        if db_licence:
            return True
コード例 #17
0
class CellMappingPayload(base.NotificationPayloadBase):
    """Notification payload describing a cell mapping."""
    # Version 1.0: Initial version
    # Version 2.0: Remove transport_url and database_connection fields.
    VERSION = '2.0'

    # Maps payload field -> (source object name, attribute) consumed by
    # populate_schema().
    SCHEMA = {
        'uuid': ('cell', 'uuid'),
        'name': ('cell', 'name'),
        'disabled': ('cell', 'disabled'),
    }

    fields = {
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'disabled': fields.BooleanField(default=False),
    }

    def __init__(self, cell):
        """Build the payload by copying SCHEMA fields from the cell."""
        super(CellMappingPayload, self).__init__()
        self.populate_schema(cell=cell)
コード例 #18
0
ファイル: migrate_data.py プロジェクト: panguan737/nova
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
        # Maps volume_id -> old attachment_id so the original attachments
        # can be restored on migration rollback.
        # TODO(mdbooth): This field was made redundant by change I0390c9ff. We
        # should eventually remove it.
        'old_vol_attachment_ids': fields.DictOfStringsField(),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Render the fields that are set as a legacy-format dict."""
        legacy = {}
        for attr in ('is_volume_backed', 'migration'):
            if self.obj_attr_is_set(attr):
                legacy[attr] = getattr(self, attr)
        if pre_migration_result:
            # Placeholder sub-dict; subclasses fill it in.
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate base fields from a legacy dict, ignoring unknown keys."""
        for attr in ('is_volume_backed', 'migration'):
            if attr in legacy:
                setattr(self, attr, legacy[attr])

    @classmethod
    def detect_implementation(cls, legacy_dict):
        """Choose the driver-specific subclass matching a legacy dict."""
        # Either libvirt-only key selects the libvirt implementation.
        if ('instance_relative_path' in legacy_dict
                or 'image_type' in legacy_dict):
            obj = LibvirtLiveMigrateData()
        elif 'migrate_data' in legacy_dict:
            obj = XenapiLiveMigrateData()
        else:
            obj = LiveMigrateData()
        obj.from_legacy_dict(legacy_dict)
        return obj
コード例 #19
0
class LiveMigrateData(obj_base.NovaObject):
    """Base object for driver-specific live migration data."""
    fields = {
        'is_volume_backed': fields.BooleanField(),
        'migration': fields.ObjectField('Migration'),
    }

    def to_legacy_dict(self, pre_migration_result=False):
        """Render the fields that are set as a legacy-format dict."""
        legacy = {}
        for attr in ('is_volume_backed', 'migration'):
            if self.obj_attr_is_set(attr):
                legacy[attr] = getattr(self, attr)
        if pre_migration_result:
            # Placeholder sub-dict; subclasses fill it in.
            legacy['pre_live_migration_result'] = {}

        return legacy

    def from_legacy_dict(self, legacy):
        """Populate base fields from a legacy dict, ignoring unknown keys."""
        for attr in ('is_volume_backed', 'migration'):
            if attr in legacy:
                setattr(self, attr, legacy[attr])
コード例 #20
0
ファイル: request_spec.py プロジェクト: youqiang95/nova
class RequestGroup(base.NovaObject):
    """Versioned object based on the unversioned
    nova.api.openstack.placement.lib.RequestGroup object.
    """
    VERSION = '1.0'

    fields = {
        'use_same_provider': fields.BooleanField(default=True),
        # NOTE(review): the {}, set() and [] defaults below are shared
        # mutable objects; presumably obj_set_defaults() copies them per
        # instance -- confirm against oslo.versionedobjects to rule out
        # cross-instance sharing.
        'resources': fields.DictOfIntegersField(default={}),
        'required_traits': fields.SetOfStringsField(default=set()),
        'forbidden_traits': fields.SetOfStringsField(default=set()),
        # The aggregates field has a form of
        #     [[aggregate_UUID1],
        #      [aggregate_UUID2, aggregate_UUID3]]
        # meaning that the request should be fulfilled from an RP that is a
        # member of the aggregate aggregate_UUID1 and member of the aggregate
        # aggregate_UUID2 or aggregate_UUID3 .
        'aggregates': fields.ListOfListsOfStringsField(default=[]),
    }

    def __init__(self, context=None, **kwargs):
        """Initialize and immediately apply the field defaults above."""
        super(RequestGroup, self).__init__(context=context, **kwargs)
        self.obj_set_defaults()
コード例 #21
0
ファイル: migrate_data.py プロジェクト: SeanMooney/nova
class HyperVLiveMigrateData(LiveMigrateData):
    """Hyper-V driver-specific live migration data."""
    # Version 1.0: Initial version
    # Version 1.1: Added is_shared_instance_path
    # Version 1.2: Added old_vol_attachment_ids
    # Version 1.3: Added wait_for_vif_plugged
    # Version 1.4: Inherited vifs from LiveMigrateData
    VERSION = '1.4'

    fields = {'is_shared_instance_path': fields.BooleanField()}

    def obj_make_compatible(self, primitive, target_version):
        """Remove fields unknown to ``target_version`` from the primitive."""
        super(HyperVLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        # (version the field appeared in, field name), newest first.
        added_in = (((1, 4), 'vifs'),
                    ((1, 3), 'wait_for_vif_plugged'),
                    ((1, 2), 'old_vol_attachment_ids'),
                    ((1, 1), 'is_shared_instance_path'))
        for version, name in added_in:
            if target_version < version and name in primitive:
                del primitive[name]
コード例 #22
0
class Firewall(obj_base.NovaPersistentObject, obj_base.NovaObject,
               obj_base.NovaObjectDictCompat):
    """Versioned object wrapping a firewall DB record."""
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'gateway_port': fields.IntegerField(nullable=True),
        'service_port': fields.IntegerField(nullable=True),
        'instance_id': fields.UUIDField(nullable=True),
        'fake_zone': fields.BooleanField()
        }

    @staticmethod
    def _from_db_object(context, firewall, db_firewall):
        """Copy every known field from the DB row onto the object."""
        for field in firewall.fields:
            firewall[field] = db_firewall[field]
        firewall._context = context
        firewall.obj_reset_changes()
        return firewall

    @obj_base.remotable_classmethod
    def create(cls, context, **firewall):
        """Insert a new firewall row and return it as an object."""
        db_firewall = db.firewall_create(context, **firewall)
        return cls._from_db_object(context, cls(context), db_firewall)

    @obj_base.remotable_classmethod
    def delete(cls, context, id):
        """Delete the firewall row with the given id."""
        db.firewall_delete(context, id)

    @obj_base.remotable_classmethod
    def get(cls, context, hostname=None, gateway_port=None):
        """Look up a firewall row by hostname and/or gateway port."""
        db_firewall = db.firewall_get(context,
                                      hostname=hostname,
                                      gateway_port=gateway_port)
        return cls._from_db_object(context, cls(context), db_firewall)
コード例 #23
0
class Service(base.NovaPersistentObject, base.NovaObject,
              base.NovaObjectDictCompat):
    """Versioned object for a row of the 'services' table.

    Tracks liveness (report_count, last_seen_up), operator status
    (disabled, forced_down) and the RPC service version of a nova
    service, and emits ServiceStatus versioned notifications on
    create, status update and delete.
    """

    # Version 1.0: Initial version
    # Version 1.1: Added compute_node nested object
    # Version 1.2: String attributes updated to support unicode
    # Version 1.3: ComputeNode version 1.5
    # Version 1.4: Added use_slave to get_by_compute_host
    # Version 1.5: ComputeNode version 1.6
    # Version 1.6: ComputeNode version 1.7
    # Version 1.7: ComputeNode version 1.8
    # Version 1.8: ComputeNode version 1.9
    # Version 1.9: ComputeNode version 1.10
    # Version 1.10: Changes behaviour of loading compute_node
    # Version 1.11: Added get_by_host_and_binary
    # Version 1.12: ComputeNode version 1.11
    # Version 1.13: Added last_seen_up
    # Version 1.14: Added forced_down
    # Version 1.15: ComputeNode version 1.12
    # Version 1.16: Added version
    # Version 1.17: ComputeNode version 1.13
    # Version 1.18: ComputeNode version 1.14
    # Version 1.19: Added get_minimum_version()
    # Version 1.20: Added get_minimum_version_multi()
    # Version 1.21: Added uuid
    # Version 1.22: Added get_by_uuid()
    VERSION = '1.22'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'binary': fields.StringField(nullable=True),
        'topic': fields.StringField(nullable=True),
        'report_count': fields.IntegerField(),
        'disabled': fields.BooleanField(),
        'disabled_reason': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'compute_node': fields.ObjectField('ComputeNode'),
        'last_seen_up': fields.DateTimeField(nullable=True),
        'forced_down': fields.BooleanField(),
        'version': fields.IntegerField(),
    }

    # Cache of minimum service versions keyed by binary name; populated
    # by get_minimum_version_multi() when caching is enabled.
    _MIN_VERSION_CACHE = {}
    # Caching is opt-in; see enable_min_version_cache().
    _SERVICE_VERSION_CACHING = False

    def __init__(self, *args, **kwargs):
        # NOTE(danms): We're going against the rules here and overriding
        # init. The reason is that we want to *ensure* that we're always
        # setting the current service version on our objects, overriding
        # whatever else might be set in the database, or otherwise (which
        # is the normal reason not to override init).
        #
        # We also need to do this here so that it's set on the client side
        # all the time, such that create() and save() operations will
        # include the current service version.
        if 'version' in kwargs:
            raise exception.ObjectActionError(
                action='init', reason='Version field is immutable')

        super(Service, self).__init__(*args, **kwargs)
        self.version = SERVICE_VERSION

    def obj_make_compatible_from_manifest(self, primitive, target_version,
                                          version_manifest):
        """Strip fields that consumers older than *target_version*
        do not understand, and backfill compute_node for pre-1.10.
        """
        super(Service,
              self).obj_make_compatible_from_manifest(primitive,
                                                      target_version,
                                                      version_manifest)
        _target_version = versionutils.convert_version_to_tuple(target_version)
        if _target_version < (1, 21) and 'uuid' in primitive:
            del primitive['uuid']
        if _target_version < (1, 16) and 'version' in primitive:
            del primitive['version']
        if _target_version < (1, 14) and 'forced_down' in primitive:
            del primitive['forced_down']
        if _target_version < (1, 13) and 'last_seen_up' in primitive:
            del primitive['last_seen_up']
        if _target_version < (1, 10):
            # service.compute_node was not lazy-loaded, we need to provide it
            # when called
            self._do_compute_node(self._context, primitive, version_manifest)

    def _do_compute_node(self, context, primitive, version_manifest):
        """Backfill primitive['compute_node'] for pre-1.10 consumers.

        Best-effort: any lookup failure leaves the primitive untouched.
        """
        try:
            target_version = version_manifest['ComputeNode']
            # NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple
            # nodes for the same service, but for keeping same behaviour,
            # returning only the first elem of the list
            compute = objects.ComputeNodeList.get_all_by_host(
                context, primitive['host'])[0]
        except Exception:
            return
        primitive['compute_node'] = compute.obj_to_primitive(
            target_version=target_version, version_manifest=version_manifest)

    @staticmethod
    def _from_db_object(context, service, db_service):
        """Hydrate a Service object from a DB row.

        Generates (and persists) a uuid for legacy rows that lack one.
        """
        allow_missing = ('availability_zone', )
        for key in service.fields:
            if key in allow_missing and key not in db_service:
                continue
            if key == 'compute_node':
                #  NOTE(sbauza); We want to only lazy-load compute_node
                continue
            elif key == 'version':
                # NOTE(danms): Special handling of the version field, since
                # it is read_only and set in our init.
                setattr(service, base.get_attrname(key), db_service[key])
            elif key == 'uuid' and not db_service.get(key):
                # Leave uuid off the object if undefined in the database
                # so that it will be generated below.
                continue
            else:
                service[key] = db_service[key]

        service._context = context
        service.obj_reset_changes()

        # TODO(dpeschman): Drop this once all services have uuids in database
        if 'uuid' not in service:
            service.uuid = uuidutils.generate_uuid()
            LOG.debug('Generated UUID %(uuid)s for service %(id)i',
                      dict(uuid=service.uuid, id=service.id))
            service.save()

        return service

    def obj_load_attr(self, attrname):
        """Lazy-load compute_node, the only lazy-loadable attribute.

        :raises: OrphanedObjectError if no context is attached;
                 ObjectActionError for any other attribute;
                 ServiceNotFound for non nova-compute services.
        """
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'id': self.id,
        })
        if attrname != 'compute_node':
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if self.binary == 'nova-compute':
            # Only n-cpu services have attached compute_node(s)
            compute_nodes = objects.ComputeNodeList.get_all_by_host(
                self._context, self.host)
        else:
            # NOTE(sbauza); Previous behaviour was raising a ServiceNotFound,
            # we keep it for backwards compatibility
            raise exception.ServiceNotFound(service_id=self.id)
        # NOTE(sbauza): Some drivers (VMware, Ironic) can have multiple nodes
        # for the same service, but for keeping same behaviour, returning only
        # the first elem of the list
        self.compute_node = compute_nodes[0]

    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Return the Service with the given database id."""
        db_service = db.service_get(context, service_id)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_uuid(cls, context, service_uuid):
        """Return the Service with the given uuid."""
        db_service = db.service_get_by_uuid(context, service_uuid)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_host_and_topic(cls, context, host, topic):
        """Return the Service on *host* serving *topic*."""
        db_service = db.service_get_by_host_and_topic(context, host, topic)
        return cls._from_db_object(context, cls(), db_service)

    @base.remotable_classmethod
    def get_by_host_and_binary(cls, context, host, binary):
        """Return the Service on *host* running *binary*, or None."""
        try:
            db_service = db.service_get_by_host_and_binary(
                context, host, binary)
        except exception.HostBinaryNotFound:
            return
        return cls._from_db_object(context, cls(), db_service)

    @staticmethod
    @db.select_db_reader_mode
    def _db_service_get_by_compute_host(context, host, use_slave=False):
        # use_slave is consumed by the select_db_reader_mode decorator.
        return db.service_get_by_compute_host(context, host)

    @base.remotable_classmethod
    def get_by_compute_host(cls, context, host, use_slave=False):
        """Return the nova-compute Service on *host*."""
        db_service = cls._db_service_get_by_compute_host(context,
                                                         host,
                                                         use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_service)

    # NOTE(ndipanov): This is deprecated and should be removed on the next
    # major version bump
    @base.remotable_classmethod
    def get_by_args(cls, context, host, binary):
        """Deprecated alias for get_by_host_and_binary (no None return)."""
        db_service = db.service_get_by_host_and_binary(context, host, binary)
        return cls._from_db_object(context, cls(), db_service)

    def _check_minimum_version(self):
        """Enforce that we are not older than the minimum version.

        This is a loose check to avoid creating or updating our service
        record if we would do so with a version that is older than the current
        minimum of all services. This could happen if we were started with
        older code by accident, either due to a rollback or an old and
        un-updated node suddenly coming back onto the network.

        There is technically a race here between the check and the update,
        but since the minimum version should always roll forward and never
        backwards, we don't need to worry about doing it atomically. Further,
        the consequence for getting this wrong is minor, in that we'll just
        fail to send messages that other services understand.
        """
        if not self.obj_attr_is_set('version'):
            return
        if not self.obj_attr_is_set('binary'):
            return
        minver = self.get_minimum_version(self._context, self.binary)
        if minver > self.version:
            raise exception.ServiceTooOld(thisver=self.version, minver=minver)

    @base.remotable
    def create(self):
        """Insert the service record, generating a uuid when unset,
        and emit a CREATE notification.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        self._check_minimum_version()
        updates = self.obj_get_changes()

        if 'uuid' not in updates:
            updates['uuid'] = uuidutils.generate_uuid()
            self.uuid = updates['uuid']

        db_service = db.service_create(self._context, updates)
        self._from_db_object(self._context, self, db_service)
        self._send_notification(fields.NotificationAction.CREATE)

    @base.remotable
    def save(self):
        """Persist changed fields and notify on status-related changes."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        self._check_minimum_version()
        db_service = db.service_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_service)

        self._send_status_update_notification(updates)

    def _send_status_update_notification(self, updates):
        """Emit an UPDATE notification only for status-related changes."""
        # Note(gibi): We do not trigger notification on version as that field
        # is always dirty, which would cause that nova sends notification on
        # every other field change. See the comment in save() too.
        if set(updates.keys()).intersection(
            {'disabled', 'disabled_reason', 'forced_down'}):
            self._send_notification(fields.NotificationAction.UPDATE)

    def _send_notification(self, action):
        """Emit a versioned ServiceStatus notification for *action*."""
        payload = service_notification.ServiceStatusPayload(self)
        service_notification.ServiceStatusNotification(
            publisher=notification.NotificationPublisher.from_service_obj(
                self),
            event_type=notification.EventType(object='service', action=action),
            priority=fields.NotificationPriority.INFO,
            payload=payload).emit(self._context)

    @base.remotable
    def destroy(self):
        """Delete the service record and emit a DELETE notification."""
        db.service_destroy(self._context, self.id)
        self._send_notification(fields.NotificationAction.DELETE)

    @classmethod
    def enable_min_version_cache(cls):
        """Turn on minimum-version caching, starting from a clean cache."""
        cls.clear_min_version_cache()
        cls._SERVICE_VERSION_CACHING = True

    @classmethod
    def clear_min_version_cache(cls):
        """Drop all cached minimum versions."""
        cls._MIN_VERSION_CACHE = {}

    @staticmethod
    @db.select_db_reader_mode
    def _db_service_get_minimum_version(context, binaries, use_slave=False):
        # use_slave is consumed by the select_db_reader_mode decorator.
        return db.service_get_minimum_version(context, binaries)

    @base.remotable_classmethod
    def get_minimum_version_multi(cls, context, binaries, use_slave=False):
        """Return the smallest service version across all *binaries*.

        Binaries must be 'nova-' prefixed. Results may be served from
        the class-level cache when caching is enabled and every binary
        is already cached; missing DB versions count as 0.
        """
        if not all(binary.startswith('nova-') for binary in binaries):
            LOG.warning(
                'get_minimum_version called with likely-incorrect '
                'binaries `%s\'', ','.join(binaries))
            raise exception.ObjectActionError(action='get_minimum_version',
                                              reason='Invalid binary prefix')

        if (not cls._SERVICE_VERSION_CACHING
                or any(binary not in cls._MIN_VERSION_CACHE
                       for binary in binaries)):
            min_versions = cls._db_service_get_minimum_version(
                context, binaries, use_slave=use_slave)
            if min_versions:
                min_versions = {
                    binary: version or 0
                    for binary, version in min_versions.items()
                }
                cls._MIN_VERSION_CACHE.update(min_versions)
        else:
            min_versions = {
                binary: cls._MIN_VERSION_CACHE[binary]
                for binary in binaries
            }

        if min_versions:
            version = min(min_versions.values())
        else:
            version = 0
        # NOTE(danms): Since our return value is not controlled by object
        # schema, be explicit here.
        version = int(version)

        return version

    @base.remotable_classmethod
    def get_minimum_version(cls, context, binary, use_slave=False):
        """Return the minimum deployed version of a single binary."""
        return cls.get_minimum_version_multi(context, [binary],
                                             use_slave=use_slave)
Code example #24
0
File: flavor.py  Project: zhouronghua/nova
class Flavor(base.NovaPersistentObject, base.NovaObject,
             base.NovaObjectDictCompat):
    """Versioned object for an instance flavor.

    Reads and writes primarily target the API database, falling back to
    the legacy main database for flavors not yet migrated (the in_api
    property decides which side a given flavor lives on).
    """

    # Version 1.0: Initial version
    # Version 1.1: Added save_projects(), save_extra_specs(), removed
    #              remotable from save()
    VERSION = '1.1'

    fields = {
        'id': fields.IntegerField(),
        'name': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(),
        'vcpus': fields.IntegerField(),
        'root_gb': fields.IntegerField(),
        'ephemeral_gb': fields.IntegerField(),
        'flavorid': fields.StringField(),
        'swap': fields.IntegerField(),
        'rxtx_factor': fields.FloatField(nullable=True, default=1.0),
        'vcpu_weight': fields.IntegerField(nullable=True),
        'disabled': fields.BooleanField(),
        'is_public': fields.BooleanField(),
        'extra_specs': fields.DictOfStringsField(),
        'projects': fields.ListOfStringsField(),
    }

    def __init__(self, *args, **kwargs):
        super(Flavor, self).__init__(*args, **kwargs)
        # Snapshots of extra_specs/projects as last persisted, used by
        # obj_what_changed()/save() to compute adds and deletes.
        self._orig_extra_specs = {}
        self._orig_projects = []
        self._in_api = False

    @property
    def in_api(self):
        """True if this flavor lives in the API database (cached)."""
        if self._in_api:
            return True
        else:
            try:
                if 'id' in self:
                    self._flavor_get_from_db(self._context, self.id)
                else:
                    flavor = self._flavor_get_by_flavor_id_from_db(
                        self._context, self.flavorid)
                    # Fix us up so we can use our real id
                    self.id = flavor['id']
                self._in_api = True
            except exception.FlavorNotFound:
                pass
            return self._in_api

    @staticmethod
    def _from_db_object(context, flavor, db_flavor, expected_attrs=None):
        """Hydrate a Flavor from a DB row.

        Only attributes named in *expected_attrs* (from OPTIONAL_FIELDS,
        a module-level constant) are populated eagerly; others are left
        for lazy loading. Integer fields default to 0 when NULL.
        """
        if expected_attrs is None:
            expected_attrs = []
        flavor._context = context
        for name, field in flavor.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            if name in DEPRECATED_FIELDS and name not in db_flavor:
                continue
            value = db_flavor[name]
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            flavor[name] = value

        # NOTE(danms): This is to support processing the API flavor
        # model, which does not have these deprecated fields. When we
        # remove compatibility with the old InstanceType model, we can
        # remove this as well.
        if any(f not in db_flavor for f in DEPRECATED_FIELDS):
            flavor.deleted_at = None
            flavor.deleted = False

        if 'extra_specs' in expected_attrs:
            flavor.extra_specs = db_flavor['extra_specs']

        if 'projects' in expected_attrs:
            if 'projects' in db_flavor:
                flavor['projects'] = [
                    x['project_id'] for x in db_flavor['projects']
                ]
            else:
                flavor._load_projects()

        flavor.obj_reset_changes()
        return flavor

    @staticmethod
    @db_api.api_context_manager.reader
    def _flavor_get_query_from_db(context):
        """Base API-DB query; non-admins see only public or granted flavors."""
        query = context.session.query(api_models.Flavors).\
                options(joinedload('extra_specs'))
        if not context.is_admin:
            the_filter = [api_models.Flavors.is_public == true()]
            the_filter.extend([
                api_models.Flavors.projects.any(project_id=context.project_id)
            ])
            query = query.filter(or_(*the_filter))
        return query

    @staticmethod
    @require_context
    def _flavor_get_from_db(context, id):
        """Returns a dict describing specific flavor."""
        result = Flavor._flavor_get_query_from_db(context).\
                        filter_by(id=id).\
                        first()
        if not result:
            raise exception.FlavorNotFound(flavor_id=id)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    @require_context
    def _flavor_get_by_name_from_db(context, name):
        """Returns a dict describing specific flavor."""
        result = Flavor._flavor_get_query_from_db(context).\
                            filter_by(name=name).\
                            first()
        if not result:
            raise exception.FlavorNotFoundByName(flavor_name=name)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    @require_context
    def _flavor_get_by_flavor_id_from_db(context, flavor_id):
        """Returns a dict describing specific flavor_id."""
        result = Flavor._flavor_get_query_from_db(context).\
                        filter_by(flavorid=flavor_id).\
                        order_by(asc(api_models.Flavors.id)).\
                        first()
        if not result:
            raise exception.FlavorNotFound(flavor_id=flavor_id)
        return db_api._dict_with_extra_specs(result)

    @staticmethod
    def _get_projects_from_db(context, flavorid):
        # Thin indirection over the module-level helper for test stubbing.
        return _get_projects_from_db(context, flavorid)

    @base.remotable
    def _load_projects(self):
        """Populate self.projects from the API DB, falling back to main DB."""
        try:
            self.projects = self._get_projects_from_db(self._context,
                                                       self.flavorid)
        except exception.FlavorNotFound:
            self.projects = [
                x['project_id'] for x in db.flavor_access_get_by_flavor_id(
                    self._context, self.flavorid)
            ]
        self.obj_reset_changes(['projects'])

    def obj_load_attr(self, attrname):
        """Lazy-load an attribute; only 'projects' is supported."""
        # NOTE(danms): Only projects could be lazy-loaded right now
        if attrname != 'projects':
            raise exception.ObjectActionError(action='obj_load_attr',
                                              reason='unable to load %s' %
                                              attrname)

        self._load_projects()

    def obj_reset_changes(self, fields=None, recursive=False):
        """Reset change tracking, refreshing the extra_specs/projects
        snapshots used for add/delete computation.
        """
        super(Flavor, self).obj_reset_changes(fields=fields,
                                              recursive=recursive)
        if fields is None or 'extra_specs' in fields:
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if fields is None or 'projects' in fields:
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])

    def obj_what_changed(self):
        """Include extra_specs/projects deltas detected via the snapshots."""
        changes = super(Flavor, self).obj_what_changed()
        if ('extra_specs' in self
                and self.extra_specs != self._orig_extra_specs):
            changes.add('extra_specs')
        if 'projects' in self and self.projects != self._orig_projects:
            changes.add('projects')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Deserialize, keeping the change-tracking snapshots consistent."""
        self = super(Flavor, cls)._obj_from_primitive(context, objver,
                                                      primitive)
        changes = self.obj_what_changed()
        if 'extra_specs' not in changes:
            # This call left extra_specs "clean" so update our tracker
            self._orig_extra_specs = (dict(self.extra_specs)
                                      if self.obj_attr_is_set('extra_specs')
                                      else {})
        if 'projects' not in changes:
            # This call left projects "clean" so update our tracker
            self._orig_projects = (list(self.projects)
                                   if self.obj_attr_is_set('projects') else [])
        return self

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        """Fetch by database id, API DB first then main DB."""
        try:
            db_flavor = cls._flavor_get_from_db(context, id)
        except exception.FlavorNotFound:
            db_flavor = db.flavor_get(context, id)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Fetch by display name, API DB first then main DB."""
        try:
            db_flavor = cls._flavor_get_by_name_from_db(context, name)
        except exception.FlavorNotFoundByName:
            db_flavor = db.flavor_get_by_name(context, name)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @base.remotable_classmethod
    def get_by_flavor_id(cls, context, flavor_id, read_deleted=None):
        """Fetch by user-facing flavorid, API DB first then main DB."""
        try:
            db_flavor = cls._flavor_get_by_flavor_id_from_db(
                context, flavor_id)
        except exception.FlavorNotFound:
            db_flavor = db.flavor_get_by_flavor_id(context, flavor_id,
                                                   read_deleted)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_flavor,
                                   expected_attrs=['extra_specs'])

    @staticmethod
    def _flavor_add_project(context, flavor_id, project_id):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_add_project(context, flavor_id, project_id)

    @staticmethod
    def _flavor_del_project(context, flavor_id, project_id):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_del_project(context, flavor_id, project_id)

    def _add_access(self, project_id):
        # API-DB flavors key access by id; legacy flavors by flavorid.
        if self.in_api:
            self._flavor_add_project(self._context, self.id, project_id)
        else:
            db.flavor_access_add(self._context, self.flavorid, project_id)

    @base.remotable
    def add_access(self, project_id):
        """Grant *project_id* access to this flavor and notify."""
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='add_access',
                                              reason='projects modified')
        self._add_access(project_id)
        self._load_projects()
        self._send_notification(fields.NotificationAction.UPDATE)

    def _remove_access(self, project_id):
        # API-DB flavors key access by id; legacy flavors by flavorid.
        if self.in_api:
            self._flavor_del_project(self._context, self.id, project_id)
        else:
            db.flavor_access_remove(self._context, self.flavorid, project_id)

    @base.remotable
    def remove_access(self, project_id):
        """Revoke *project_id*'s access to this flavor and notify."""
        if 'projects' in self.obj_what_changed():
            raise exception.ObjectActionError(action='remove_access',
                                              reason='projects modified')
        self._remove_access(project_id)
        self._load_projects()
        self._send_notification(fields.NotificationAction.UPDATE)

    @staticmethod
    def _flavor_create(context, updates):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_create(context, updates)

    @staticmethod
    def _ensure_migrated(context):
        # Thin indirection over the module-level helper for test stubbing.
        return _ensure_migrated(context)

    @base.remotable
    def create(self):
        """Create the flavor in the API database and notify.

        :raises: ObjectActionError if already created or if unmigrated
                 flavors remain in the main database.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')

        # NOTE(danms): Once we have made it past a point where we know
        # all flavors have been migrated, we can remove this. Ideally
        # in Ocata with a blocker migration to be sure.
        if not self._ensure_migrated(self._context):
            raise exception.ObjectActionError(
                action='create', reason='main database still contains flavors')

        updates = self.obj_get_changes()
        expected_attrs = []
        for attr in OPTIONAL_FIELDS:
            if attr in updates:
                expected_attrs.append(attr)
        db_flavor = self._flavor_create(self._context, updates)
        self._from_db_object(self._context,
                             self,
                             db_flavor,
                             expected_attrs=expected_attrs)
        self._send_notification(fields.NotificationAction.CREATE)

    @base.remotable
    def save_projects(self, to_add=None, to_delete=None):
        """Add or delete projects.

        :param:to_add: A list of projects to add
        :param:to_delete: A list of projects to remove
        """

        to_add = to_add if to_add is not None else []
        to_delete = to_delete if to_delete is not None else []

        for project_id in to_add:
            self._add_access(project_id)
        for project_id in to_delete:
            self._remove_access(project_id)
        self.obj_reset_changes(['projects'])

    @staticmethod
    def _flavor_extra_specs_add(context, flavor_id, specs, max_retries=10):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_extra_specs_add(context, flavor_id, specs, max_retries)

    @staticmethod
    def _flavor_extra_specs_del(context, flavor_id, key):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_extra_specs_del(context, flavor_id, key)

    @base.remotable
    def save_extra_specs(self, to_add=None, to_delete=None):
        """Add or delete extra_specs.

        :param:to_add: A dict of new keys to add/update
        :param:to_delete: A list of keys to remove
        """

        if self.in_api:
            add_fn = self._flavor_extra_specs_add
            del_fn = self._flavor_extra_specs_del
            ident = self.id
        else:
            add_fn = db.flavor_extra_specs_update_or_create
            del_fn = db.flavor_extra_specs_delete
            ident = self.flavorid

        to_add = to_add if to_add is not None else {}
        to_delete = to_delete if to_delete is not None else []

        if to_add:
            add_fn(self._context, ident, to_add)

        for key in to_delete:
            del_fn(self._context, ident, key)
        self.obj_reset_changes(['extra_specs'])

    def save(self):
        """Persist extra_specs/projects changes; all other fields are
        read-only once created.

        :raises: ObjectActionError if any non-spec/project field changed.
        """
        updates = self.obj_get_changes()
        projects = updates.pop('projects', None)
        extra_specs = updates.pop('extra_specs', None)
        if updates:
            raise exception.ObjectActionError(
                action='save', reason='read-only fields were changed')

        if extra_specs is not None:
            deleted_keys = (set(self._orig_extra_specs.keys()) -
                            set(extra_specs.keys()))
            added_keys = self.extra_specs
        else:
            added_keys = deleted_keys = None

        if projects is not None:
            deleted_projects = set(self._orig_projects) - set(projects)
            added_projects = set(projects) - set(self._orig_projects)
        else:
            added_projects = deleted_projects = None

        # NOTE(danms): The first remotable method we call will reset
        # our record of the original values for projects and extra_specs.
        # Thus, we collect the added/deleted lists for both above and
        # /then/ call these methods to update them.

        if added_keys or deleted_keys:
            self.save_extra_specs(self.extra_specs, deleted_keys)

        if added_projects or deleted_projects:
            self.save_projects(added_projects, deleted_projects)

        if added_keys or deleted_keys or added_projects or deleted_projects:
            self._send_notification(fields.NotificationAction.UPDATE)

    @staticmethod
    def _flavor_destroy(context, flavor_id=None, flavorid=None):
        # Thin indirection over the module-level helper for test stubbing.
        return _flavor_destroy(context, flavor_id=flavor_id, flavorid=flavorid)

    @base.remotable
    def destroy(self):
        """Delete the flavor (API DB first, main DB fallback) and notify."""
        # NOTE(danms): Historically the only way to delete a flavor
        # is via name, which is not very precise. We need to be able to
        # support the light construction of a flavor object and subsequent
        # delete request with only our name filled out. However, if we have
        # our id property, we should instead delete with that since it's
        # far more specific.
        try:
            if 'id' in self:
                db_flavor = self._flavor_destroy(self._context,
                                                 flavor_id=self.id)
            else:
                db_flavor = self._flavor_destroy(self._context,
                                                 flavorid=self.flavorid)
            self._from_db_object(self._context, self, db_flavor)
            self._send_notification(fields.NotificationAction.DELETE)
        except exception.FlavorNotFound:
            db.flavor_destroy(self._context, self.flavorid)

    def _send_notification(self, action):
        """Emit a versioned Flavor notification for *action*."""
        # NOTE(danms): Instead of making the below notification
        # lazy-load projects (which is a problem for instance-bound
        # flavors and compute-cell operations), just load them here.
        if 'projects' not in self:
            self._load_projects()
        notification_type = flavor_notification.FlavorNotification
        payload_type = flavor_notification.FlavorPayload

        payload = payload_type(self)
        notification_type(publisher=notification.NotificationPublisher(
            host=CONF.host, binary="nova-api"),
                          event_type=notification.EventType(object="flavor",
                                                            action=action),
                          priority=fields.NotificationPriority.INFO,
                          payload=payload).emit(self._context)
Code example #25
0
class Instance(base.NovaPersistentObject, base.NovaObject):
    """Versioned object wrapping a nova instance database record.

    Beyond the base object's change tracking, this class snapshots the
    ``metadata`` and ``system_metadata`` dicts (which callers mutate in
    place, invisibly to the base tracking) and reports them as changed in
    obj_what_changed() when they drift from the snapshot.
    """
    # Version 1.0: Initial version
    # Version 1.1: Added info_cache
    # Version 1.2: Added security_groups
    # Version 1.3: Added expected_vm_state and admin_state_reset to
    #              save()
    # Version 1.4: Added locked_by and deprecated locked
    # Version 1.5: Added cleaned
    # Version 1.6: Added pci_devices
    # Version 1.7: String attributes updated to support unicode
    # Version 1.8: 'security_groups' and 'pci_devices' cannot be None
    # Version 1.9: Make uuid a non-None real string
    # Version 1.10: Added use_slave to refresh and get_by_uuid
    # Version 1.11: Update instance from database during destroy
    # Version 1.12: Added ephemeral_key_uuid
    # Version 1.13: Added delete_metadata_key()
    # Version 1.14: Added numa_topology
    # Version 1.15: PciDeviceList 1.1
    VERSION = '1.15'

    fields = {
        'id': fields.IntegerField(),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'image_ref': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'launch_index': fields.IntegerField(nullable=True),
        'key_name': fields.StringField(nullable=True),
        'key_data': fields.StringField(nullable=True),
        'power_state': fields.IntegerField(nullable=True),
        'vm_state': fields.StringField(nullable=True),
        'task_state': fields.StringField(nullable=True),
        'memory_mb': fields.IntegerField(nullable=True),
        'vcpus': fields.IntegerField(nullable=True),
        'root_gb': fields.IntegerField(nullable=True),
        'ephemeral_gb': fields.IntegerField(nullable=True),
        'ephemeral_key_uuid': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'instance_type_id': fields.IntegerField(nullable=True),
        'user_data': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'launched_on': fields.StringField(nullable=True),

        # NOTE(jdillaman): locked deprecated in favor of locked_by,
        # to be removed in Icehouse
        'locked': fields.BooleanField(default=False),
        'locked_by': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'vm_mode': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'root_device_name': fields.StringField(nullable=True),
        'default_ephemeral_device': fields.StringField(nullable=True),
        'default_swap_device': fields.StringField(nullable=True),
        'config_drive': fields.StringField(nullable=True),
        'access_ip_v4': fields.IPV4AddressField(nullable=True),
        'access_ip_v6': fields.IPV6AddressField(nullable=True),
        'auto_disk_config': fields.BooleanField(default=False),
        'progress': fields.IntegerField(nullable=True),
        'shutdown_terminate': fields.BooleanField(default=False),
        'disable_terminate': fields.BooleanField(default=False),
        'cell_name': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'system_metadata': fields.DictOfNullableStringsField(),
        'info_cache': fields.ObjectField('InstanceInfoCache', nullable=True),
        'security_groups': fields.ObjectField('SecurityGroupList'),
        'fault': fields.ObjectField('InstanceFault', nullable=True),
        'cleaned': fields.BooleanField(default=False),
        'pci_devices': fields.ObjectField('PciDeviceList', nullable=True),
        'numa_topology': fields.ObjectField('InstanceNUMATopology',
                                            nullable=True)
    }

    obj_extra_fields = ['name']

    def __init__(self, *args, **kwargs):
        super(Instance, self).__init__(*args, **kwargs)
        # Snapshot the metadata dicts so in-place mutations are detectable.
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        """Re-snapshot the metadata/system_metadata change baselines.

        :param fields: optional collection restricting which of the two
                       tracked dicts are re-snapshotted; None means both.
        """
        if fields is None or 'system_metadata' in fields:
            self._orig_system_metadata = (dict(self.system_metadata)
                                          if 'system_metadata' in self else {})
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_reset_changes(self, fields=None):
        """Reset change tracking, including the metadata dict snapshots."""
        super(Instance, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def obj_what_changed(self):
        """Report changed fields, adding in-place metadata dict edits."""
        changes = super(Instance, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if 'system_metadata' in self and (self.system_metadata !=
                                          self._orig_system_metadata):
            changes.add('system_metadata')
        return changes

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        """Hydrate from a primitive, then rebuild the metadata snapshots."""
        self = super(Instance,
                     cls)._obj_from_primitive(context, objver, primitive)
        self._reset_metadata_tracking()
        return self

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade ``primitive`` in place for an older consumer version."""
        target_version = utils.convert_version_to_tuple(target_version)
        unicode_attributes = [
            'user_id', 'project_id', 'image_ref', 'kernel_id', 'ramdisk_id',
            'hostname', 'key_name', 'key_data', 'host', 'node', 'user_data',
            'availability_zone', 'display_name', 'display_description',
            'launched_on', 'locked_by', 'os_type', 'architecture', 'vm_mode',
            'root_device_name', 'default_ephemeral_device',
            'default_swap_device', 'config_drive', 'cell_name'
        ]
        if target_version < (1, 14) and 'numa_topology' in primitive:
            del primitive['numa_topology']
        if target_version < (1, 10) and 'info_cache' in primitive:
            # NOTE(danms): Instance <= 1.9 (havana) had info_cache 1.4
            self.info_cache.obj_make_compatible(
                primitive['info_cache']['nova_object.data'], '1.4')
            primitive['info_cache']['nova_object.version'] = '1.4'
        if target_version < (1, 7):
            # NOTE(danms): Before 1.7, we couldn't handle unicode in
            # string fields, so squash it here
            for field in [
                    x for x in unicode_attributes
                    if x in primitive and primitive[x] is not None
            ]:
                primitive[field] = primitive[field].encode('ascii', 'replace')
        if target_version < (1, 15) and 'pci_devices' in primitive:
            # NOTE(baoli): Instance <= 1.14 (icehouse) had PciDeviceList 1.0
            self.pci_devices.obj_make_compatible(
                primitive['pci_devices']['nova_object.data'], '1.0')
            primitive['pci_devices']['nova_object.version'] = '1.0'
        if target_version < (1, 6):
            # NOTE(danms): Before 1.6 there was no pci_devices list
            if 'pci_devices' in primitive:
                del primitive['pci_devices']

    @property
    def name(self):
        """Render the instance name from CONF.instance_name_template.

        Falls back to dict-style templates (e.g. ``"uuid-%(uuid)s"``) when
        the template does not accept a bare integer id, and ultimately to
        the uuid if the template references an unset key.
        """
        try:
            base_name = CONF.instance_name_template % self.id
        except TypeError:
            # Support templates like "uuid-%(uuid)s", etc.
            info = {}
            # NOTE(russellb): Don't use self.iteritems() here, as it will
            # result in infinite recursion on the name property.
            for key in self.fields:
                if key == 'name':
                    # NOTE(danms): prevent recursion
                    continue
                elif not self.obj_attr_is_set(key):
                    # NOTE(danms): Don't trigger lazy-loads
                    continue
                info[key] = self[key]
            try:
                base_name = CONF.instance_name_template % info
            except KeyError:
                base_name = self.uuid
        return base_name

    @staticmethod
    def _from_db_object(context, instance, db_inst, expected_attrs=None):
        """Method to help with migration to objects.

        Converts a database entity to a formal object.
        """
        instance._context = context
        if expected_attrs is None:
            expected_attrs = []
        # Most of the field names match right now, so be quick
        for field in instance.fields:
            if field in INSTANCE_OPTIONAL_ATTRS:
                continue
            elif field == 'deleted':
                # DB soft-delete convention: deleted column equals the row id
                # when the row is deleted.
                instance.deleted = db_inst['deleted'] == db_inst['id']
            elif field == 'cleaned':
                instance.cleaned = db_inst['cleaned'] == 1
            else:
                instance[field] = db_inst[field]

        if 'metadata' in expected_attrs:
            instance['metadata'] = utils.instance_meta(db_inst)
        if 'system_metadata' in expected_attrs:
            instance['system_metadata'] = utils.instance_sys_meta(db_inst)
        if 'fault' in expected_attrs:
            instance['fault'] = (objects.InstanceFault.get_latest_for_instance(
                context, instance.uuid))
        if 'numa_topology' in expected_attrs:
            instance._load_numa_topology()

        if 'info_cache' in expected_attrs:
            if db_inst['info_cache'] is None:
                instance.info_cache = None
            elif not instance.obj_attr_is_set('info_cache'):
                # TODO(danms): If this ever happens on a backlevel instance
                # passed to us by a backlevel service, things will break
                instance.info_cache = objects.InstanceInfoCache(context)
            if instance.info_cache is not None:
                instance.info_cache._from_db_object(context,
                                                    instance.info_cache,
                                                    db_inst['info_cache'])

        # TODO(danms): If we are updating these on a backlevel instance,
        # we'll end up sending back new versions of these objects (see
        # above note for new info_caches
        if 'pci_devices' in expected_attrs:
            pci_devices = base.obj_make_list(context,
                                             objects.PciDeviceList(context),
                                             objects.PciDevice,
                                             db_inst['pci_devices'])
            instance['pci_devices'] = pci_devices
        if 'security_groups' in expected_attrs:
            sec_groups = base.obj_make_list(context,
                                            objects.SecurityGroupList(context),
                                            objects.SecurityGroup,
                                            db_inst['security_groups'])
            instance['security_groups'] = sec_groups

        instance.obj_reset_changes()
        return instance

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, expected_attrs=None, use_slave=False):
        """Fetch an instance by uuid, joining the requested optional attrs."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get_by_uuid(context,
                                          uuid,
                                          columns_to_join=columns_to_join,
                                          use_slave=use_slave)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable_classmethod
    def get_by_id(cls, context, inst_id, expected_attrs=None):
        """Fetch an instance by integer id, joining requested attrs."""
        if expected_attrs is None:
            expected_attrs = ['info_cache', 'security_groups']
        columns_to_join = _expected_cols(expected_attrs)
        db_inst = db.instance_get(context,
                                  inst_id,
                                  columns_to_join=columns_to_join)
        return cls._from_db_object(context, cls(), db_inst, expected_attrs)

    @base.remotable
    def create(self, context):
        """Create the DB record for this instance.

        :raises: ObjectActionError if the object was already created.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.obj_get_changes()
        expected_attrs = [
            attr for attr in INSTANCE_DEFAULT_FIELDS if attr in updates
        ]
        if 'security_groups' in updates:
            # The DB layer takes group names, not objects.
            updates['security_groups'] = [
                x.name for x in updates['security_groups']
            ]
        if 'info_cache' in updates:
            updates['info_cache'] = {
                'network_info': updates['info_cache'].network_info.json()
            }
        # numa_topology is persisted separately, keyed by the new uuid.
        numa_topology = updates.pop('numa_topology', None)
        db_inst = db.instance_create(context, updates)
        if numa_topology:
            expected_attrs.append('numa_topology')
            numa_topology.instance_uuid = db_inst['uuid']
            numa_topology.create(context)
        self._from_db_object(context, self, db_inst, expected_attrs)

    @base.remotable
    def destroy(self, context):
        """Destroy the DB record, then refresh self from the deleted row.

        :raises: ObjectActionError if already destroyed, if uuid is unset,
                 or if the host changed underneath us.
        """
        if not self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='already destroyed')
        if not self.obj_attr_is_set('uuid'):
            raise exception.ObjectActionError(action='destroy',
                                              reason='no uuid')
        if not self.obj_attr_is_set('host') or not self.host:
            # NOTE(danms): If our host is not set, avoid a race
            constraint = db.constraint(host=db.equal_any(None))
        else:
            constraint = None

        try:
            db_inst = db.instance_destroy(context,
                                          self.uuid,
                                          constraint=constraint)
            self._from_db_object(context, self, db_inst)
        except exception.ConstraintNotMet:
            raise exception.ObjectActionError(action='destroy',
                                              reason='host changed')
        # Unset id so a second destroy() hits the 'already destroyed' guard.
        delattr(self, base.get_attrname('id'))

    def _save_info_cache(self, context):
        # Persist the nested info_cache object, if present.
        if self.info_cache:
            self.info_cache.save(context)

    def _save_security_groups(self, context):
        # Persist each nested security group, then clear their change state.
        security_groups = self.security_groups or []
        for secgroup in security_groups:
            secgroup.save(context)
        self.security_groups.obj_reset_changes()

    def _save_fault(self, context):
        # NOTE(danms): I don't think we need to worry about this, do we?
        pass

    def _save_numa_topology(self, context):
        # Persist or delete the per-instance NUMA topology record.
        if self.numa_topology:
            self.numa_topology.instance_uuid = self.uuid
            self.numa_topology._save(context)
        else:
            objects.InstanceNUMATopology.delete_by_instance_uuid(
                context, self.uuid)

    def _save_pci_devices(self, context):
        # NOTE(yjiang5): All devices held by PCI tracker, only PCI tracker
        # permitted to update the DB. all change to devices from here will
        # be dropped.
        pass

    @base.remotable
    def save(self,
             context,
             expected_vm_state=None,
             expected_task_state=None,
             admin_state_reset=False):
        """Save updates to this instance

        Column-wise updates will be made based on the result of
        self.what_changed(). If expected_task_state is provided,
        it will be checked against the in-database copy of the
        instance before updates are made.

        :param:context: Security context
        :param:expected_task_state: Optional tuple of valid task states
        for the instance to be in
        :param:expected_vm_state: Optional tuple of valid vm states
        for the instance to be in
        :param admin_state_reset: True if admin API is forcing setting
        of task_state/vm_state

        """

        cell_type = cells_opts.get_cell_type()
        if cell_type == 'api' and self.cell_name:
            # NOTE(comstud): We need to stash a copy of ourselves
            # before any updates are applied.  When we call the save
            # methods on nested objects, we will lose any changes to
            # them.  But we need to make sure child cells can tell
            # what is changed.
            #
            # We also need to nuke any updates to vm_state and task_state
            # unless admin_state_reset is True.  compute cells are
            # authoritative for their view of vm_state and task_state.
            stale_instance = self.obj_clone()

            def _handle_cell_update_from_api():
                cells_api = cells_rpcapi.CellsAPI()
                cells_api.instance_update_from_api(context, stale_instance,
                                                   expected_vm_state,
                                                   expected_task_state,
                                                   admin_state_reset)
        else:
            stale_instance = None

        updates = {}
        changes = self.obj_what_changed()

        # Nested objects get their own _save_<field> handler; plain fields
        # that changed are collected for a single DB update below.
        for field in self.fields:
            if (self.obj_attr_is_set(field)
                    and isinstance(self.fields[field], fields.ObjectField)):
                try:
                    getattr(self, '_save_%s' % field)(context)
                except AttributeError:
                    LOG.exception(_LE('No save handler for %s'),
                                  field,
                                  instance=self)
            elif field in changes:
                updates[field] = self[field]

        if not updates:
            if stale_instance:
                _handle_cell_update_from_api()
            return

        # Cleaned needs to be turned back into an int here
        if 'cleaned' in updates:
            if updates['cleaned']:
                updates['cleaned'] = 1
            else:
                updates['cleaned'] = 0

        if expected_task_state is not None:
            if (self.VERSION == '1.9'
                    and expected_task_state == 'image_snapshot'):
                # NOTE(danms): Icehouse introduced a pending state which
                # Havana doesn't know about. If we're an old instance,
                # tolerate the pending state as well
                expected_task_state = [
                    expected_task_state, 'image_snapshot_pending'
                ]
            updates['expected_task_state'] = expected_task_state
        if expected_vm_state is not None:
            updates['expected_vm_state'] = expected_vm_state

        expected_attrs = [
            attr for attr in _INSTANCE_OPTIONAL_JOINED_FIELDS
            if self.obj_attr_is_set(attr)
        ]
        if 'pci_devices' in expected_attrs:
            # NOTE(danms): We don't refresh pci_devices on save right now
            expected_attrs.remove('pci_devices')

        # NOTE(alaski): We need to pull system_metadata for the
        # notification.send_update() below.  If we don't there's a KeyError
        # when it tries to extract the flavor.
        if 'system_metadata' not in expected_attrs:
            expected_attrs.append('system_metadata')
        old_ref, inst_ref = db.instance_update_and_get_original(
            context,
            self.uuid,
            updates,
            update_cells=False,
            columns_to_join=_expected_cols(expected_attrs))

        if stale_instance:
            _handle_cell_update_from_api()
        elif cell_type == 'compute':
            cells_api = cells_rpcapi.CellsAPI()
            cells_api.instance_update_at_top(context, inst_ref)

        self._from_db_object(context,
                             self,
                             inst_ref,
                             expected_attrs=expected_attrs)
        notifications.send_update(context, old_ref, inst_ref)
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context, use_slave=False):
        """Re-read this instance from the DB, preserving loaded attrs."""
        extra = [
            field for field in INSTANCE_OPTIONAL_ATTRS
            if self.obj_attr_is_set(field)
        ]
        current = self.__class__.get_by_uuid(context,
                                             uuid=self.uuid,
                                             expected_attrs=extra,
                                             use_slave=use_slave)
        # NOTE(danms): We orphan the instance copy so we do not unexpectedly
        # trigger a lazy-load (which would mean we failed to calculate the
        # expected_attrs properly)
        current._context = None

        for field in self.fields:
            if self.obj_attr_is_set(field):
                if field == 'info_cache':
                    self.info_cache.refresh()
                    # NOTE(danms): Make sure this shows up as touched
                    self.info_cache = self.info_cache
                elif self[field] != current[field]:
                    self[field] = current[field]
        self.obj_reset_changes()

    def _load_generic(self, attrname):
        """Lazy-load ``attrname`` via a fresh get_by_uuid round trip."""
        instance = self.__class__.get_by_uuid(self._context,
                                              uuid=self.uuid,
                                              expected_attrs=[attrname])

        # NOTE(danms): Never allow us to recursively-load
        if instance.obj_attr_is_set(attrname):
            self[attrname] = instance[attrname]
        else:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='loading %s requires recursion' % attrname)

    def _load_fault(self):
        # Dedicated loader: faults have their own efficient DB query.
        self.fault = objects.InstanceFault.get_latest_for_instance(
            self._context, self.uuid)

    def _load_numa_topology(self):
        # Missing topology is a normal condition, not an error.
        try:
            self.numa_topology = \
                objects.InstanceNUMATopology.get_by_instance_uuid(
                    self._context, self.uuid)
        except exception.NumaTopologyNotFound:
            self.numa_topology = None

    def obj_load_attr(self, attrname):
        """Lazy-load an optional attribute on first access.

        :raises: ObjectActionError if ``attrname`` is not lazy-loadable.
        :raises: OrphanedObjectError if the object has no context.
        """
        if attrname not in INSTANCE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading `%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })
        # FIXME(comstud): This should be optimized to only load the attr.
        if attrname == 'fault':
            # NOTE(danms): We handle fault differently here so that we
            # can be more efficient
            self._load_fault()
        elif attrname == 'numa_topology':
            self._load_numa_topology()
        else:
            self._load_generic(attrname)
        self.obj_reset_changes([attrname])

    def get_flavor(self, namespace=None):
        """Build a Flavor object from flavor info in system_metadata.

        :param namespace: optional prefix (e.g. 'old'/'new') selecting
                          which stashed flavor to extract.
        """
        prefix = ('%s_' % namespace) if namespace is not None else ''

        db_flavor = flavors.extract_flavor(self, prefix)
        flavor = objects.Flavor(self._context)
        for key in flavors.system_metadata_flavor_props:
            flavor[key] = db_flavor[key]
        return flavor

    def set_flavor(self, flavor, namespace=None):
        """Stash flavor info into system_metadata and save immediately."""
        prefix = ('%s_' % namespace) if namespace is not None else ''

        self.system_metadata = flavors.save_flavor_info(
            self.system_metadata, flavor, prefix)
        self.save()

    def delete_flavor(self, namespace):
        """Remove namespaced flavor info from system_metadata and save."""
        self.system_metadata = flavors.delete_flavor_info(
            self.system_metadata, "%s_" % namespace)
        self.save()

    @base.remotable
    def delete_metadata_key(self, context, key):
        """Optimized metadata delete method.

        This provides a more efficient way to delete a single metadata
        key, instead of just calling instance.save(). This should be called
        with the key still present in self.metadata, which it will update
        after completion.
        """
        db.instance_metadata_delete(context, self.uuid, key)
        md_was_changed = 'metadata' in self.obj_what_changed()
        del self.metadata[key]
        self._orig_metadata.pop(key, None)
        instance_dict = base.obj_to_primitive(self)
        notifications.send_update(context, instance_dict, instance_dict)
        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
コード例 #26
0
ファイル: fixed_ip.py プロジェクト: vincentpanqi/nova
class FixedIP(obj_base.NovaPersistentObject, obj_base.NovaObject):
    """Versioned object wrapping a fixed IP address database record."""
    # Version 1.0: Initial version
    # Version 1.1: Added virtual_interface field
    # Version 1.2: Instance version 1.14
    # Version 1.3: Instance 1.15
    # Version 1.4: Added default_route field
    VERSION = '1.4'

    fields = {
        'id': fields.IntegerField(),
        'address': fields.IPV4AndV6AddressField(),
        'network_id': fields.IntegerField(nullable=True),
        'virtual_interface_id': fields.IntegerField(nullable=True),
        'instance_uuid': fields.UUIDField(nullable=True),
        'allocated': fields.BooleanField(),
        'leased': fields.BooleanField(),
        'reserved': fields.BooleanField(),
        'host': fields.StringField(nullable=True),
        'default_route': fields.BooleanField(),
        'instance': fields.ObjectField('Instance', nullable=True),
        'network': fields.ObjectField('Network', nullable=True),
        'virtual_interface': fields.ObjectField('VirtualInterface',
                                                nullable=True),
        }

    def obj_make_compatible(self, primitive, target_version):
        """Downgrade ``primitive`` in place for an older consumer.

        For targets older than 1.2 both nested-instance branches run in
        sequence, ending with the instance backleveled to 1.13.
        """
        target_version = utils.convert_version_to_tuple(target_version)
        if target_version < (1, 4) and 'default_route' in primitive:
            del primitive['default_route']
        if target_version < (1, 3) and 'instance' in primitive:
            self.instance.obj_make_compatible(
                    primitive['instance']['nova_object.data'], '1.14')
            primitive['instance']['nova_object.version'] = '1.14'
        if target_version < (1, 2) and 'instance' in primitive:
            self.instance.obj_make_compatible(
                    primitive['instance']['nova_object.data'], '1.13')
            primitive['instance']['nova_object.version'] = '1.13'

    @property
    def floating_ips(self):
        # Floating IPs associated with this fixed IP; note this performs a
        # DB lookup on every access.
        return objects.FloatingIPList.get_by_fixed_ip_id(self._context,
                                                         self.id)

    @staticmethod
    def _from_db_object(context, fixedip, db_fixedip, expected_attrs=None):
        """Populate a FixedIP object from a DB row.

        :param expected_attrs: which optional relations ('instance',
                               'network') to hydrate from the joined row.
        """
        if expected_attrs is None:
            expected_attrs = []
        for field in fixedip.fields:
            if field in ('virtual_interface', 'default_route'):
                # NOTE(danms): These fields are only set when doing a
                # FixedIPList.get_by_network() because it's a relatively
                # special-case thing, so skip them here
                continue
            if field not in FIXED_IP_OPTIONAL_ATTRS:
                fixedip[field] = db_fixedip[field]
        # NOTE(danms): Instance could be deleted, and thus None
        if 'instance' in expected_attrs:
            fixedip.instance = objects.Instance._from_db_object(
                context,
                objects.Instance(context),
                db_fixedip['instance']) if db_fixedip['instance'] else None
        if 'network' in expected_attrs:
            fixedip.network = objects.Network._from_db_object(
                context, objects.Network(context), db_fixedip['network'])
        fixedip._context = context
        fixedip.obj_reset_changes()
        return fixedip

    @obj_base.remotable_classmethod
    def get_by_id(cls, context, id, expected_attrs=None):
        """Fetch a fixed IP by primary key, optionally joining network."""
        if expected_attrs is None:
            expected_attrs = []
        get_network = 'network' in expected_attrs
        db_fixedip = db.fixed_ip_get(context, id, get_network=get_network)
        return cls._from_db_object(context, cls(context), db_fixedip,
                                   expected_attrs)

    @obj_base.remotable_classmethod
    def get_by_address(cls, context, address, expected_attrs=None):
        """Fetch a fixed IP by its address, joining requested relations."""
        if expected_attrs is None:
            expected_attrs = []
        db_fixedip = db.fixed_ip_get_by_address(context, str(address),
                                                columns_to_join=expected_attrs)
        return cls._from_db_object(context, cls(context), db_fixedip,
                                   expected_attrs)

    @obj_base.remotable_classmethod
    def get_by_floating_address(cls, context, address):
        """Fetch the fixed IP behind a floating address, or None."""
        db_fixedip = db.fixed_ip_get_by_floating_address(context, str(address))
        if db_fixedip is not None:
            return cls._from_db_object(context, cls(context), db_fixedip)

    @obj_base.remotable_classmethod
    def get_by_network_and_host(cls, context, network_id, host):
        """Fetch the fixed IP for a given network/host pair."""
        db_fixedip = db.fixed_ip_get_by_network_host(context, network_id, host)
        return cls._from_db_object(context, cls(context), db_fixedip)

    @obj_base.remotable_classmethod
    def associate(cls, context, address, instance_uuid, network_id=None,
                  reserved=False):
        """Associate a specific address with an instance."""
        db_fixedip = db.fixed_ip_associate(context, address, instance_uuid,
                                           network_id=network_id,
                                           reserved=reserved)
        return cls._from_db_object(context, cls(context), db_fixedip)

    @obj_base.remotable_classmethod
    def associate_pool(cls, context, network_id, instance_uuid=None,
                      host=None):
        """Associate a free address from the network's pool."""
        db_fixedip = db.fixed_ip_associate_pool(context, network_id,
                                                instance_uuid=instance_uuid,
                                                host=host)
        return cls._from_db_object(context, cls(context), db_fixedip)

    @obj_base.remotable_classmethod
    def disassociate_by_address(cls, context, address):
        """Detach whatever instance holds the given address."""
        db.fixed_ip_disassociate(context, address)

    @obj_base.remotable_classmethod
    def _disassociate_all_by_timeout(cls, context, host, time_str):
        # Private remotable form taking the time as an ISO string;
        # presumably so the remoted call only carries primitives -- see
        # the public wrapper below.
        time = timeutils.parse_isotime(time_str)
        return db.fixed_ip_disassociate_all_by_timeout(context, host, time)

    @classmethod
    def disassociate_all_by_timeout(cls, context, host, time):
        """Disassociate stale fixed IPs on ``host`` older than ``time``."""
        return cls._disassociate_all_by_timeout(context, host,
                                                timeutils.isotime(time))

    @obj_base.remotable
    def create(self, context):
        """Create the DB record for this fixed IP.

        :raises: ObjectActionError if 'id' is already set.
        """
        updates = self.obj_get_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        if 'address' in updates:
            # The DB layer stores the address as a plain string.
            updates['address'] = str(updates['address'])
        db_fixedip = db.fixed_ip_create(context, updates)
        self._from_db_object(context, self, db_fixedip)

    @obj_base.remotable
    def save(self, context):
        """Persist changed fields.

        :raises: ObjectActionError if the immutable 'address' was changed.
        """
        updates = self.obj_get_changes()
        if 'address' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='address is not mutable')
        db.fixed_ip_update(context, str(self.address), updates)
        self.obj_reset_changes()

    @obj_base.remotable
    def disassociate(self, context):
        """Detach this fixed IP from its instance (DB and local state)."""
        db.fixed_ip_disassociate(context, str(self.address))
        self.instance_uuid = None
        self.instance = None
        self.obj_reset_changes(['instance_uuid', 'instance'])
コード例 #27
0
ファイル: cell_mapping.py プロジェクト: sapcc/nova
class CellMapping(base.NovaTimestampObject, base.NovaObject):
    """Connection details (database and message queue) for a single cell."""
    # Version 1.0: Initial version
    # Version 1.1: Added disabled field
    VERSION = '1.1'

    # Reserved UUID for "cell0", the special cell that stores instances
    # which failed scheduling.
    CELL0_UUID = '00000000-0000-0000-0000-000000000000'

    fields = {
        'id': fields.IntegerField(read_only=True),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'transport_url': fields.StringField(),
        'database_connection': fields.StringField(),
        'disabled': fields.BooleanField(default=False),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields unknown to older target versions from *primitive*."""
        super(CellMapping, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 1):
            # 'disabled' was added in version 1.1.
            if 'disabled' in primitive:
                del primitive['disabled']

    @property
    def identity(self):
        """Human-readable identifier: "uuid(name)" if named, else the uuid."""
        if 'name' in self and self.name:
            return '%s(%s)' % (self.uuid, self.name)
        else:
            return self.uuid

    @staticmethod
    def _format_url(url, default):
        """Fill the str.format template *url* with pieces of *default*.

        *url* may contain placeholders such as {username}, {hostname},
        {port}, etc., which are substituted from the parsed *default* URL.
        """
        default_url = urlparse.urlparse(default)

        subs = {
            'username': default_url.username,
            'password': default_url.password,
            'hostname': default_url.hostname,
            'port': default_url.port,
            'scheme': default_url.scheme,
            'query': default_url.query,
            'fragment': default_url.fragment,
            'path': default_url.path.lstrip('/'),
        }

        # NOTE(danms): oslo.messaging has an extended format for the URL
        # which we need to support:
        #   scheme://user:pass@host:port[,user1:pass@host1:port, ...]/path
        # Encode these values, if they exist, as indexed keys like
        # username1, password1, hostname1, port1.
        if ',' in default_url.netloc:
            netlocs = default_url.netloc.split(',')
            index = 0
            for netloc in netlocs:
                index += 1
                these = _parse_netloc(netloc)
                for key in these:
                    subs['%s%i' % (key, index)] = these[key]

        return url.format(**subs)

    @staticmethod
    def format_db_url(url):
        """Expand a templated database_connection via [database]/connection.

        Falls back to returning *url* unchanged if no default connection is
        configured or the expansion fails.
        """
        if CONF.database.connection is None:
            if '{' in url:
                LOG.error('Cell mapping database_connection is a template, '
                          'but [database]/connection is not set')
            return url
        try:
            return CellMapping._format_url(url, CONF.database.connection)
        except Exception:
            # Best effort: log and return the raw URL rather than fail.
            LOG.exception('Failed to parse [database]/connection to '
                          'format cell mapping')
            return url

    @staticmethod
    def format_mq_url(url):
        """Expand a templated transport_url via [DEFAULT]/transport_url.

        Falls back to returning *url* unchanged if no default transport is
        configured or the expansion fails.
        """
        if CONF.transport_url is None:
            if '{' in url:
                LOG.error('Cell mapping transport_url is a template, but '
                          '[DEFAULT]/transport_url is not set')
            return url
        try:
            return CellMapping._format_url(url, CONF.transport_url)
        except Exception:
            # Best effort: log and return the raw URL rather than fail.
            LOG.exception('Failed to parse [DEFAULT]/transport_url to '
                          'format cell mapping')
            return url

    @staticmethod
    def _from_db_object(context, cell_mapping, db_cell_mapping):
        """Copy DB row values onto the object, expanding URL templates."""
        for key in cell_mapping.fields:
            val = db_cell_mapping[key]
            if key == 'database_connection':
                val = cell_mapping.format_db_url(val)
            elif key == 'transport_url':
                val = cell_mapping.format_mq_url(val)
            setattr(cell_mapping, key, val)
        cell_mapping.obj_reset_changes()
        cell_mapping._context = context
        return cell_mapping

    @staticmethod
    @db_api.api_context_manager.reader
    def _get_by_uuid_from_db(context, uuid):
        """Fetch the row for *uuid*; raise CellMappingNotFound if absent."""
        db_mapping = context.session.query(api_models.CellMapping).filter_by(
                uuid=uuid).first()
        if not db_mapping:
            raise exception.CellMappingNotFound(uuid=uuid)

        return db_mapping

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Return the CellMapping with the given uuid."""
        db_mapping = cls._get_by_uuid_from_db(context, uuid)

        return cls._from_db_object(context, cls(), db_mapping)

    @staticmethod
    @db_api.api_context_manager.writer
    def _create_in_db(context, updates):
        """Insert a new cell mapping row built from *updates*."""
        db_mapping = api_models.CellMapping()
        db_mapping.update(updates)
        db_mapping.save(context.session)
        return db_mapping

    @base.remotable
    def create(self):
        """Persist this mapping and refresh the object from the new row."""
        db_mapping = self._create_in_db(self._context, self.obj_get_changes())
        self._from_db_object(self._context, self, db_mapping)

    @staticmethod
    @db_api.api_context_manager.writer
    def _save_in_db(context, uuid, updates):
        """Apply *updates* to the row identified by *uuid*."""
        db_mapping = context.session.query(
                api_models.CellMapping).filter_by(uuid=uuid).first()
        if not db_mapping:
            raise exception.CellMappingNotFound(uuid=uuid)

        db_mapping.update(updates)
        context.session.add(db_mapping)
        return db_mapping

    @base.remotable
    def save(self):
        """Write changed fields to the database and reset change tracking."""
        changes = self.obj_get_changes()
        db_mapping = self._save_in_db(self._context, self.uuid, changes)
        self._from_db_object(self._context, self, db_mapping)
        self.obj_reset_changes()

    @staticmethod
    @db_api.api_context_manager.writer
    def _destroy_in_db(context, uuid):
        """Delete the row for *uuid*; raise CellMappingNotFound if missing."""
        result = context.session.query(api_models.CellMapping).filter_by(
                uuid=uuid).delete()
        if not result:
            raise exception.CellMappingNotFound(uuid=uuid)

    @base.remotable
    def destroy(self):
        """Delete this mapping from the database."""
        self._destroy_in_db(self._context, self.uuid)

    def is_cell0(self):
        """Return True if this mapping is the reserved cell0 mapping."""
        return self.obj_attr_is_set('uuid') and self.uuid == self.CELL0_UUID
コード例 #28
0
ファイル: migrate_data.py プロジェクト: zhouyang1222/nova
class LibvirtLiveMigrateData(LiveMigrateData):
    """Live migration parameters exchanged by the libvirt driver."""
    # Version 1.0: Initial version
    # Version 1.1: Added target_connect_addr
    # Version 1.2: Added 'serial_listen_ports' to allow live migration with
    #              serial console.
    VERSION = '1.2'

    fields = {
        'filename': fields.StringField(),
        # FIXME: image_type should be enum?
        'image_type': fields.StringField(),
        'block_migration': fields.BooleanField(),
        'disk_over_commit': fields.BooleanField(),
        'disk_available_mb': fields.IntegerField(nullable=True),
        'is_shared_instance_path': fields.BooleanField(),
        'is_shared_block_storage': fields.BooleanField(),
        'instance_relative_path': fields.StringField(),
        'graphics_listen_addr_vnc': fields.IPAddressField(nullable=True),
        'graphics_listen_addr_spice': fields.IPAddressField(nullable=True),
        'serial_listen_addr': fields.StringField(nullable=True),
        'serial_listen_ports': fields.ListOfIntegersField(),
        'bdms': fields.ListOfObjectsField('LibvirtLiveMigrateBDMInfo'),
        'target_connect_addr': fields.StringField(nullable=True),
    }

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields that older target versions do not know about."""
        super(LibvirtLiveMigrateData,
              self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            # 'serial_listen_ports' was added in 1.2.
            if 'serial_listen_ports' in primitive:
                del primitive['serial_listen_ports']
        if target_version < (1, 1) and 'target_connect_addr' in primitive:
            del primitive['target_connect_addr']

    def _bdms_to_legacy(self, legacy):
        """Add a legacy 'volume' dict built from self.bdms to *legacy*."""
        if not self.obj_attr_is_set('bdms'):
            return
        legacy['volume'] = {}
        for bdmi in self.bdms:
            legacy['volume'][bdmi.serial] = {
                'disk_info': bdmi.as_disk_info(),
                'connection_info': bdmi.connection_info
            }

    def _bdms_from_legacy(self, legacy_pre_result):
        """Rebuild self.bdms from a legacy pre-migration result dict."""
        self.bdms = []
        volume = legacy_pre_result.get('volume', {})
        for serial in volume:
            vol = volume[serial]
            bdmi = objects.LibvirtLiveMigrateBDMInfo(serial=serial)
            bdmi.connection_info = vol['connection_info']
            bdmi.bus = vol['disk_info']['bus']
            bdmi.dev = vol['disk_info']['dev']
            bdmi.type = vol['disk_info']['type']
            # The optional keys live inside the nested 'disk_info' dict
            # (see _bdms_to_legacy), not in the volume entry itself, so
            # membership must be tested there; the previous checks against
            # 'vol' could never match and silently dropped these values.
            if 'format' in vol['disk_info']:
                bdmi.format = vol['disk_info']['format']
            if 'boot_index' in vol['disk_info']:
                bdmi.boot_index = int(vol['disk_info']['boot_index'])
            self.bdms.append(bdmi)

    def to_legacy_dict(self, pre_migration_result=False):
        """Return this object in the legacy dict format.

        :param pre_migration_result: if True, also embed the
            'pre_live_migration_result' sub-dict (listen addresses and
            volume/BDM info) expected by older compute nodes.
        """
        # Lazy %-style args: formatting is skipped when DEBUG is disabled.
        LOG.debug('Converting to legacy: %s', self)
        legacy = super(LibvirtLiveMigrateData, self).to_legacy_dict()
        keys = (set(self.fields.keys()) - set(LiveMigrateData.fields.keys()) -
                {'bdms'})
        legacy.update(
            {k: getattr(self, k)
             for k in keys if self.obj_attr_is_set(k)})

        graphics_vnc = legacy.pop('graphics_listen_addr_vnc', None)
        graphics_spice = legacy.pop('graphics_listen_addr_spice', None)
        transport_target = legacy.pop('target_connect_addr', None)
        live_result = {
            'graphics_listen_addrs': {
                # IPAddress objects must be stringified; preserve None.
                'vnc': graphics_vnc and str(graphics_vnc),
                'spice': graphics_spice and str(graphics_spice),
            },
            'serial_listen_addr': legacy.pop('serial_listen_addr', None),
            'target_connect_addr': transport_target,
        }

        if pre_migration_result:
            legacy['pre_live_migration_result'] = live_result
            self._bdms_to_legacy(live_result)

        LOG.debug('Legacy result: %s', legacy)
        return legacy

    def from_legacy_dict(self, legacy):
        """Populate this object from the legacy dict format."""
        LOG.debug('Converting legacy dict to obj: %s', legacy)
        super(LibvirtLiveMigrateData, self).from_legacy_dict(legacy)
        keys = set(self.fields.keys()) - set(LiveMigrateData.fields.keys())
        for k in keys - {'bdms'}:
            if k in legacy:
                setattr(self, k, legacy[k])
        if 'pre_live_migration_result' in legacy:
            pre_result = legacy['pre_live_migration_result']
            self.graphics_listen_addr_vnc = \
                pre_result['graphics_listen_addrs'].get('vnc')
            self.graphics_listen_addr_spice = \
                pre_result['graphics_listen_addrs'].get('spice')
            self.target_connect_addr = pre_result.get('target_connect_addr')
            if 'serial_listen_addr' in pre_result:
                self.serial_listen_addr = pre_result['serial_listen_addr']
            self._bdms_from_legacy(pre_result)
        LOG.debug('Converted object: %s', self)

    def is_on_shared_storage(self):
        """Return True if either block storage or instance path is shared."""
        return self.is_shared_block_storage or self.is_shared_instance_path
コード例 #29
0
class Network(obj_base.NovaPersistentObject, obj_base.NovaObject):
    """nova-network representation of a managed IPv4/IPv6 network."""
    # Version 1.0: Initial version
    # Version 1.1: Added in_use_on_host()
    # Version 1.2: Added mtu, dhcp_server, enable_dhcp, share_address
    VERSION = '1.2'

    fields = {
        'id': fields.IntegerField(),
        'label': fields.StringField(),
        'injected': fields.BooleanField(),
        'cidr': fields.IPV4NetworkField(nullable=True),
        'cidr_v6': fields.IPV6NetworkField(nullable=True),
        'multi_host': fields.BooleanField(),
        'netmask': fields.IPV4AddressField(nullable=True),
        'gateway': fields.IPV4AddressField(nullable=True),
        'broadcast': fields.IPV4AddressField(nullable=True),
        'netmask_v6': fields.IPV6AddressField(nullable=True),
        'gateway_v6': fields.IPV6AddressField(nullable=True),
        'bridge': fields.StringField(nullable=True),
        'bridge_interface': fields.StringField(nullable=True),
        'dns1': fields.IPAddressField(nullable=True),
        'dns2': fields.IPAddressField(nullable=True),
        'vlan': fields.IntegerField(nullable=True),
        'vpn_public_address': fields.IPAddressField(nullable=True),
        'vpn_public_port': fields.IntegerField(nullable=True),
        'vpn_private_address': fields.IPAddressField(nullable=True),
        'dhcp_start': fields.IPV4AddressField(nullable=True),
        'rxtx_base': fields.IntegerField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'priority': fields.IntegerField(nullable=True),
        'host': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(),
        'mtu': fields.IntegerField(nullable=True),
        'dhcp_server': fields.IPAddressField(nullable=True),
        'enable_dhcp': fields.BooleanField(),
        'share_address': fields.BooleanField(),
    }

    @staticmethod
    def _convert_legacy_ipv6_netmask(netmask):
        """Handle netmask_v6 possibilities from the database.

        Historically, this was stored as just an integral CIDR prefix,
        but in the future it should be stored as an actual netmask.
        Be tolerant of either here.

        :raises ValueError: if the value is neither an integral prefix
            nor a parseable netmask.
        """
        try:
            prefix = int(netmask)
            return netaddr.IPNetwork('1::/%i' % prefix).netmask
        except ValueError:
            pass

        try:
            return netaddr.IPNetwork(netmask).netmask
        except netaddr.AddrFormatError:
            raise ValueError('IPv6 netmask "%s" must be a netmask '
                             'or integral prefix' % netmask)

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields added in 1.2 when targeting older versions."""
        target_version = tuple(int(x) for x in target_version.split('.'))
        if target_version < (1, 2):
            if 'mtu' in primitive:
                del primitive['mtu']
            if 'enable_dhcp' in primitive:
                del primitive['enable_dhcp']
            if 'dhcp_server' in primitive:
                del primitive['dhcp_server']
            if 'share_address' in primitive:
                del primitive['share_address']

    @staticmethod
    def _from_db_object(context, network, db_network):
        """Populate *network* from a DB row, applying config-driven defaults."""
        for field in network.fields:
            db_value = db_network[field]
            # NOTE: these comparisons used 'is' against string literals,
            # which relies on CPython string interning and is not a
            # guaranteed equality test ('==' is the correct operator).
            if field == 'netmask_v6' and db_value is not None:
                db_value = network._convert_legacy_ipv6_netmask(db_value)
            if field == 'mtu' and db_value is None:
                db_value = CONF.network_device_mtu
            if field == 'dhcp_server' and db_value is None:
                # Default the DHCP server address to the network gateway.
                db_value = db_network['gateway']
            if field == 'share_address' and CONF.share_dhcp_address:
                db_value = CONF.share_dhcp_address

            network[field] = db_value
        network._context = context
        network.obj_reset_changes()
        return network

    @obj_base.remotable_classmethod
    def get_by_id(cls, context, network_id, project_only='allow_none'):
        """Return the Network with the given database id."""
        db_network = db.network_get(context,
                                    network_id,
                                    project_only=project_only)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_uuid(cls, context, network_uuid):
        """Return the Network with the given uuid."""
        db_network = db.network_get_by_uuid(context, network_uuid)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def get_by_cidr(cls, context, cidr):
        """Return the Network matching the given CIDR."""
        db_network = db.network_get_by_cidr(context, cidr)
        return cls._from_db_object(context, cls(), db_network)

    @obj_base.remotable_classmethod
    def associate(cls, context, project_id, network_id=None, force=False):
        """Associate a network with a project."""
        db.network_associate(context,
                             project_id,
                             network_id=network_id,
                             force=force)

    @obj_base.remotable_classmethod
    def disassociate(cls, context, network_id, host=False, project=False):
        """Disassociate a network from its host and/or project."""
        db.network_disassociate(context, network_id, host, project)

    @obj_base.remotable_classmethod
    def in_use_on_host(cls, context, network_id, host):
        """Return whether the network has fixed IPs in use on *host*."""
        return db.network_in_use_on_host(context, network_id, host)

    def _get_primitive_changes(self):
        """Return obj_get_changes() with IP addresses stringified for the DB."""
        changes = {}
        for key, value in self.obj_get_changes().items():
            if isinstance(value, netaddr.IPAddress):
                changes[key] = str(value)
            else:
                changes[key] = value
        return changes

    @obj_base.remotable
    def create(self, context):
        """Insert this Network as a new database row."""
        updates = self._get_primitive_changes()
        if 'id' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        db_network = db.network_create_safe(context, updates)
        self._from_db_object(context, self, db_network)

    @obj_base.remotable
    def destroy(self, context):
        """Soft-delete this network from the database."""
        db.network_delete_safe(context, self.id)
        self.deleted = True
        self.obj_reset_changes(['deleted'])

    @obj_base.remotable
    def save(self, context):
        """Persist changed fields, handling host and netmask_v6 specially."""
        updates = self._get_primitive_changes()
        if 'netmask_v6' in updates:
            # NOTE(danms): For some reason, historical code stores the
            # IPv6 netmask as just the CIDR mask length, so convert that
            # back here before saving for now.
            updates['netmask_v6'] = netaddr.IPNetwork(
                updates['netmask_v6']).netmask
        set_host = 'host' in updates
        if set_host:
            # Host changes go through the dedicated DB API.
            db.network_set_host(context, self.id, updates.pop('host'))
        if updates:
            db_network = db.network_update(context, self.id, updates)
        elif set_host:
            # Only the host changed; re-read the row to refresh the object.
            db_network = db.network_get(context, self.id)
        else:
            db_network = None
        if db_network is not None:
            self._from_db_object(context, self, db_network)
コード例 #30
0
ファイル: instance.py プロジェクト: bopopescu/NOVA-7
class InstancePayload(base.NotificationPayloadBase):
    """Notification payload exposing the externally visible instance fields."""
    # Maps payload field name -> (schema object name, source attribute).
    # populate_schema() uses this to copy values off the instance.
    SCHEMA = {
        'uuid': ('instance', 'uuid'),
        'user_id': ('instance', 'user_id'),
        'tenant_id': ('instance', 'project_id'),
        'reservation_id': ('instance', 'reservation_id'),
        'display_name': ('instance', 'display_name'),
        'display_description': ('instance', 'display_description'),
        'host_name': ('instance', 'hostname'),
        'host': ('instance', 'host'),
        'node': ('instance', 'node'),
        'os_type': ('instance', 'os_type'),
        'architecture': ('instance', 'architecture'),
        'availability_zone': ('instance', 'availability_zone'),
        'image_uuid': ('instance', 'image_ref'),
        'kernel_id': ('instance', 'kernel_id'),
        'ramdisk_id': ('instance', 'ramdisk_id'),
        'created_at': ('instance', 'created_at'),
        'launched_at': ('instance', 'launched_at'),
        'terminated_at': ('instance', 'terminated_at'),
        'deleted_at': ('instance', 'deleted_at'),
        'state': ('instance', 'vm_state'),
        'power_state': ('instance', 'power_state'),
        'task_state': ('instance', 'task_state'),
        'progress': ('instance', 'progress'),
        'metadata': ('instance', 'metadata'),
        'locked': ('instance', 'locked'),
    }
    # Version 1.0: Initial version
    # Version 1.1: add locked and display_description field
    VERSION = '1.1'
    fields = {
        'uuid': fields.UUIDField(),
        'user_id': fields.StringField(nullable=True),
        'tenant_id': fields.StringField(nullable=True),
        'reservation_id': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'host_name': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'node': fields.StringField(nullable=True),
        'os_type': fields.StringField(nullable=True),
        'architecture': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'flavor': fields.ObjectField('FlavorPayload'),
        'image_uuid': fields.StringField(nullable=True),
        'kernel_id': fields.StringField(nullable=True),
        'ramdisk_id': fields.StringField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'deleted_at': fields.DateTimeField(nullable=True),
        'state': fields.InstanceStateField(nullable=True),
        'power_state': fields.InstancePowerStateField(nullable=True),
        'task_state': fields.InstanceTaskStateField(nullable=True),
        'progress': fields.IntegerField(nullable=True),
        'ip_addresses': fields.ListOfObjectsField('IpPayload'),
        'metadata': fields.DictOfStringsField(),
        'locked': fields.BooleanField(),
    }

    def __init__(self, instance, **kwargs):
        """Build the payload by copying SCHEMA fields from *instance*."""
        super(InstancePayload, self).__init__(**kwargs)
        self.populate_schema(instance=instance)