Example #1
class VolumeProperties(base.CinderObject, base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    # TODO(dulek): We add this class to initially move volume_properties to
    # ovo, but it should be removed as soon as possible. Most of the data
    # here is already in request_spec and its volume. The outstanding pieces
    # are reservations and qos_specs: the first may be moved to request_spec
    # and the second added as a relationship on the volume_type field, after
    # which the whole volume_properties (and resource_properties) in
    # request_spec won't be needed.

    fields = {
        'attach_status': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'multiattach': fields.BooleanField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'qos_specs': fields.DictOfStringsField(nullable=True),
        'replication_status': fields.StringField(nullable=True),
        'reservations': fields.ListOfStringsField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'source_replicaid': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'status': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
    }
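
For reference, here is a minimal standalone sketch (not taken from Cinder; the Demo class and its single field are made up) of how a DictOfStringsField behaves once a class is registered with oslo.versionedobjects: dict values are coerced to strings on assignment, and nullable=True additionally permits None.

# Minimal sketch, assuming oslo.versionedobjects is installed.
# The Demo class is hypothetical; it only illustrates field coercion.
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import fields as ovo_fields


@ovo_base.VersionedObjectRegistry.register
class Demo(ovo_base.VersionedObject):
    VERSION = '1.0'
    fields = {
        'metadata': ovo_fields.DictOfStringsField(nullable=True),
    }


demo = Demo()
demo.metadata = {'size': 10}   # values are coerced to strings by the field
print(demo.metadata)           # {'size': '10'}
demo.metadata = None           # accepted because nullable=True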
Example #2
class Receiver(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin receiver object."""

    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(),
        'type': fields.StringField(),
        'cluster_id': fields.UUIDField(),
        'actor': fields.DictOfStringsField(nullable=True),
        'action': fields.StringField(),
        'params': fields.DictOfStringsField(nullable=True),
        'channel': fields.DictOfStringsField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'user': fields.StringField(),
        'project': fields.StringField(),
        'domain': fields.StringField(),
    }

    @staticmethod
    def _from_db_object(context, receiver, db_obj):
        if db_obj is None:
            return None

        for field in receiver.fields:
            receiver[field] = db_obj[field]

        receiver._context = context
        receiver.obj_reset_changes()

        return receiver

    @classmethod
    def create(cls, context, values):
        obj = db_api.receiver_create(context, values)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, receiver_id, **kwargs):
        obj = db_api.receiver_get(context, receiver_id, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_by_name(cls, context, name, **kwargs):
        obj = db_api.receiver_get_by_name(context, name, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_by_short_id(cls, context, short_id, **kwargs):
        obj = db_api.receiver_get_by_short_id(context, short_id, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_all(cls, context, **kwargs):
        return db_api.receiver_get_all(context, **kwargs)

    @classmethod
    def delete(cls, context, receiver_id):
        db_api.receiver_delete(context, receiver_id)
Example #3
class Agent(base.NeutronDbObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = agent_model.Agent

    fields = {
        'id': obj_fields.UUIDField(),
        'agent_type': obj_fields.StringField(),
        'binary': obj_fields.StringField(),
        'topic': obj_fields.StringField(),
        'host': obj_fields.StringField(),
        'availability_zone': obj_fields.StringField(nullable=True),
        'admin_state_up': obj_fields.BooleanField(default=True),
        'started_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'created_at': obj_fields.DateTimeField(tzinfo_aware=False),
        'heartbeat_timestamp': obj_fields.DateTimeField(tzinfo_aware=False),
        'description': obj_fields.StringField(nullable=True),
        'configurations': obj_fields.DictOfStringsField(),
        'resource_versions': obj_fields.DictOfStringsField(nullable=True),
        'load': obj_fields.IntegerField(default=0),
    }

    @classmethod
    def modify_fields_to_db(cls, fields):
        result = super(Agent, cls).modify_fields_to_db(fields)
        if 'configurations' in result:
            result['configurations'] = (cls.filter_to_json_str(
                result['configurations']))
        if 'resource_versions' in result:
            if result['resource_versions']:
                result['resource_versions'] = (cls.filter_to_json_str(
                    result['resource_versions']))
            if not fields['resource_versions']:
                result['resource_versions'] = None
        return result

    @classmethod
    def modify_fields_from_db(cls, db_obj):
        fields = super(Agent, cls).modify_fields_from_db(db_obj)
        if 'configurations' in fields:
            if fields['configurations']:
                fields['configurations'] = jsonutils.loads(
                    fields['configurations'])
            if not fields['configurations']:
                fields['configurations'] = {}
        if 'resource_versions' in fields:
            if fields['resource_versions']:
                fields['resource_versions'] = jsonutils.loads(
                    fields['resource_versions'])
            if not fields['resource_versions']:
                fields['resource_versions'] = None
        return fields

    @property
    def is_active(self):
        return not utils.is_agent_down(self.heartbeat_timestamp)
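
Agent keeps configurations and resource_versions as JSON text in the database column while exposing them as dicts through DictOfStringsField. Below is a rough sketch of the round-trip that modify_fields_to_db / modify_fields_from_db implement, using oslo.serialization directly rather than the Neutron helpers; the sample values are invented.

# Hedged sketch of the dict <-> JSON-string round-trip behind
# modify_fields_to_db / modify_fields_from_db; the sample values are made up.
from oslo_serialization import jsonutils

configurations = {'tunnel_types': 'vxlan', 'l2_population': 'True'}
db_value = jsonutils.dumps(configurations)        # what the DB column stores
assert jsonutils.loads(db_value) == configurations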
Example #4
class NodeFilter(base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'filter_type': ovo_fields.StringField(nullable=False),
        'node_names': ovo_fields.ListOfStringsField(nullable=True),
        'node_tags': ovo_fields.ListOfStringsField(nullable=True),
        'node_labels': ovo_fields.DictOfStringsField(nullable=True),
        'rack_names': ovo_fields.ListOfStringsField(nullable=True),
        'rack_labels': ovo_fields.DictOfStringsField(nullable=True),
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
Example #5
class BaGPipePortHops(base.NeutronObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'port_id': common_types.UUIDField(),
        'ingress_hops': obj_fields.ListOfObjectsField('BaGPipeChainHop',
                                                      nullable=True),
        'egress_hops': obj_fields.ListOfObjectsField('BaGPipeChainHop',
                                                     nullable=True),
        'service_function_parameters': obj_fields.DictOfStringsField(
            nullable=True),
    }

    synthetic_fields = {'ingress_hops', 'egress_hops'}

    @classmethod
    def get_object(cls, context, **kwargs):
        port_id = kwargs['port_id']
        ingress_hops = (BaGPipeChainHop.get_chain_hops_for_port_by_side(
            context, port_id, constants.INGRESS))
        egress_hops = (BaGPipeChainHop.get_chain_hops_for_port_by_side(
            context, port_id, constants.EGRESS))
        return cls(port_id=port_id,
                   ingress_hops=ingress_hops,
                   egress_hops=egress_hops)

    @classmethod
    def get_objects(cls, context, **kwargs):
        raise NotImplementedError()
Example #6
class Trigger(base.KarborPersistentObject, base.KarborObject,
              base.KarborObjectDictCompat, base.KarborComparableObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(),
        'project_id': fields.UUIDField(),
        'type': fields.StringField(),
        'properties': fields.DictOfStringsField(),
    }

    @staticmethod
    def _from_db_object(context, trigger, db_trigger):
        special_fields = set(['properties'])

        normal_fields = set(trigger.fields) - special_fields
        for name in normal_fields:
            trigger[name] = db_trigger.get(name)

        properties = db_trigger['properties']
        if properties:
            trigger['properties'] = jsonutils.loads(properties)

        trigger._context = context
        trigger.obj_reset_changes()
        return trigger

    @staticmethod
    def _convert_properties_to_db_format(updates):
        properties = updates.pop('properties', None)
        if properties is not None:
            updates['properties'] = jsonutils.dumps(properties)

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_trigger = db.trigger_get(context, id)
        if db_trigger:
            return cls._from_db_object(context, cls(), db_trigger)

    @base.remotable
    def create(self):
        updates = self.karbor_obj_get_changes()
        self._convert_properties_to_db_format(updates)
        db_trigger = db.trigger_create(self._context, updates)
        self._from_db_object(self._context, self, db_trigger)

    @base.remotable
    def save(self):
        updates = self.karbor_obj_get_changes()
        if updates and self.id:
            self._convert_properties_to_db_format(updates)
            db.trigger_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        if self.id:
            db.trigger_delete(self._context, self.id)
Example #7
class Event(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin event object."""

    fields = {
        'id': fields.UUIDField(),
        'timestamp': fields.DateTimeField(),
        'obj_id': fields.UUIDField(),
        'obj_name': fields.StringField(),
        'obj_type': fields.StringField(),
        'cluster_id': fields.UUIDField(nullable=True),
        'level': fields.StringField(),
        'user': fields.StringField(),
        'project': fields.StringField(),
        'action': fields.StringField(nullable=True),
        'status': fields.StringField(),
        'status_reason': fields.StringField(),
        'metadata': fields.DictOfStringsField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, event, db_obj):
        if db_obj is None:
            return None
        for field in event.fields:
            if field == 'metadata':
                event['metadata'] = db_obj['meta_data']
            else:
                event[field] = db_obj[field]

        event._context = context
        event.obj_reset_changes()

        return event

    @classmethod
    def create(cls, context, values):
        obj = db_api.event_create(context, values)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, event_id, **kwargs):
        return db_api.event_get(context, event_id, **kwargs)

    @classmethod
    def get_by_short_id(cls, context, short_id, **kwargs):
        return db_api.event_get_by_short_id(context, short_id, **kwargs)

    @classmethod
    def get_all(cls, context, **kwargs):
        return db_api.event_get_all(context, **kwargs)

    @classmethod
    def count_by_cluster(cls, context, cluster_id, **kwargs):
        return db_api.event_count_by_cluster(context, cluster_id, **kwargs)

    @classmethod
    def get_all_by_cluster(cls, context, cluster_id, **kwargs):
        objs = db_api.event_get_all_by_cluster(context, cluster_id, **kwargs)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]
Example #8
    def setUp(self):
        super(TestDictOfStrings, self).setUp()
        self.field = fields.DictOfStringsField()
        self.coerce_good_values = [({'foo': 'bar'}, {'foo': 'bar'}),
                                   ({'foo': 1}, {'foo': '1'})]
        self.coerce_bad_values = [{1: 'bar'}, {'foo': None}, 'foo']
        self.to_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
        self.from_primitive_values = [({'foo': 'bar'}, {'foo': 'bar'})]
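
The good and bad values registered in this test fixture map directly onto the field's coerce behaviour. A small standalone sketch of the same checks outside the test class:

# Standalone sketch of the coercion exercised by the test fixture above.
from oslo_versionedobjects import fields

field = fields.DictOfStringsField()
print(field.coerce(None, 'attr', {'foo': 1}))   # -> {'foo': '1'}
try:
    field.coerce(None, 'attr', {1: 'bar'})      # non-string keys are rejected
except (TypeError, ValueError):
    print('non-string keys are rejected')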
Example #9
class Credential(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin credential object."""

    fields = {
        'user': fields.StringField(),
        'project': fields.StringField(),
        'cred': fields.DictOfStringsField(),
        'data': fields.DictOfStringsField(),
    }

    @staticmethod
    def _from_db_object(context, credential, db_obj):
        if db_obj is None:
            return None
        for field in credential.fields:
            credential[field] = db_obj[field]

        credential._context = context
        credential.obj_reset_changes()

        return credential

    @classmethod
    def create(cls, context, values):
        obj = db_api.cred_create(context, values)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, user, project):
        obj = db_api.cred_get(context, user, project)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def update(cls, context, user, project, values):
        obj = db_api.cred_update(context, user, project, values)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def delete(cls, context, user, project):
        return db_api.cred_delete(context, user, project)

    @classmethod
    def update_or_create(cls, context, values):
        obj = db_api.cred_create_update(context, values)
        return cls._from_db_object(context, cls(), obj)
Example #10
class ClusterPolicy(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin cluster-policy binding object."""

    fields = {
        'id': fields.UUIDField(),
        'cluster_id': fields.UUIDField(),
        'policy_id': fields.UUIDField(),
        'cluster': fields.ObjectField('Cluster'),
        'policy': fields.ObjectField('Policy'),
        'enabled': fields.BooleanField(),
        'priority': fields.IntegerField(),
        'data': fields.DictOfStringsField(),
        'last_op': fields.DateTimeField(),
    }

    @staticmethod
    def _from_db_object(context, binding, db_obj):
        if db_obj is None:
            return None
        for field in binding.fields:
            binding[field] = db_obj[field]

        binding._context = context
        binding.obj_reset_changes()

        return binding

    @classmethod
    def create(cls, context, cluster_id, policy_id, values):
        obj = db_api.cluster_policy_attach(context, cluster_id, policy_id,
                                           values)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, cluster_id, policy_id):
        obj = db_api.cluster_policy_get(context, cluster_id, policy_id)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_by_type(cls, context, cluster_id, policy_type, filters=None):
        objs = db_api.cluster_policy_get_by_type(context, cluster_id,
                                                 policy_type, filters=filters)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def get_all(cls, context, cluster_id, **kwargs):
        objs = db_api.cluster_policy_get_all(context, cluster_id, **kwargs)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def update(cls, context, cluster_id, policy_id, values):
        db_api.cluster_policy_update(context, cluster_id, policy_id, values)

    @classmethod
    def delete(cls, context, cluster_id, policy_id):
        db_api.cluster_policy_detach(context, cluster_id, policy_id)
Example #11
class Restore(base.SmaugPersistentObject, base.SmaugObject,
              base.SmaugObjectDictCompat, base.SmaugComparableObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'project_id': fields.UUIDField(),
        'provider_id': fields.UUIDField(),
        'checkpoint_id': fields.UUIDField(),
        'restore_target': fields.StringField(nullable=True),
        'parameters': fields.DictOfStringsField(nullable=True),
        'status': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, restore, db_restore):
        for name, field in restore.fields.items():
            value = db_restore.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            elif isinstance(field, fields.DateTimeField):
                value = value or None
            if name == "parameters" and value is not None:
                value = jsonutils.loads(value)
            restore[name] = value

        restore._context = context
        restore.obj_reset_changes()
        return restore

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.smaug_obj_get_changes()

        parameters = updates.pop('parameters', None)
        if parameters is not None:
            updates['parameters'] = jsonutils.dumps(parameters)

        db_restore = db.restore_create(self._context, updates)
        self._from_db_object(self._context, self, db_restore)

    @base.remotable
    def save(self):
        updates = self.smaug_obj_get_changes()
        if updates:
            db.restore_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.restore_destroy(self._context, self.id)
Example #12
class PortBinding(PortBindingBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ml2_models.PortBinding

    fields = {
        'port_id': common_types.UUIDField(),
        'host': obj_fields.StringField(),
        'profile': obj_fields.StringField(),
        'vif_type': obj_fields.StringField(),
        'vif_details': obj_fields.DictOfStringsField(nullable=True),
        'vnic_type': obj_fields.StringField(),
    }

    primary_keys = ['port_id']
Example #13
class DistributedPortBinding(PortBindingBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    db_model = ml2_models.DistributedPortBinding

    fields = {
        'port_id': common_types.UUIDField(),
        'host': obj_fields.StringField(),
        'profile': obj_fields.StringField(),
        'vif_type': obj_fields.StringField(),
        'vif_details': obj_fields.DictOfStringsField(nullable=True),
        'vnic_type': obj_fields.StringField(),
        # NOTE(ihrachys): Fields below are specific to this type of binding. In
        # the future, we could think of converging different types of bindings
        # into a single field
        'status': obj_fields.StringField(),
        'router_id': obj_fields.StringField(nullable=True),
    }

    primary_keys = ['host', 'port_id']
Example #14
class Repository(base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'name': ovo_fields.StringField(),
        'url': ovo_fields.StringField(),
        'repo_type': ovo_fields.StringField(),
        'gpgkey': ovo_fields.StringField(nullable=True),
        'distributions': ovo_fields.ListOfStringsField(nullable=True),
        'subrepos': ovo_fields.ListOfStringsField(nullable=True),
        'components': ovo_fields.ListOfStringsField(nullable=True),
        'arches': ovo_fields.ListOfStringsField(default=['amd64']),
        'options': ovo_fields.DictOfStringsField(nullable=True)
    }

    STANDARD_COMPONENTS = {
        'apt': {'main', 'restricted', 'universe', 'multiverse'},
    }

    STANDARD_SUBREPOS = {
        'apt': {'security', 'updates', 'backports'},
    }

    def __init__(self, **kwargs):
        super(Repository, self).__init__(**kwargs)

    # Repository keyed by tag
    def get_id(self):
        return self.name

    def get_disabled_components(self):
        enabled = set(self.components or [])
        std = self.STANDARD_COMPONENTS.get(self.repo_type, ())
        return std - enabled

    def get_disabled_subrepos(self):
        enabled = set(self.subrepos or [])
        std = self.STANDARD_SUBREPOS.get(self.repo_type, ())
        return std - enabled
Example #15
class Rack(base.DrydockPersistentObject, base.DrydockObject):

    VERSION = '1.0'

    fields = {
        'name': obj_fields.StringField(nullable=False),
        'site': obj_fields.StringField(nullable=False),
        'source': hd_fields.ModelSourceField(nullable=False),
        'tor_switches': obj_fields.ObjectField('TorSwitchList',
                                               nullable=False),
        'location': obj_fields.DictOfStringsField(nullable=False),
        'local_networks': obj_fields.ListOfStringsField(nullable=True),
    }

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def get_id(self):
        return self.get_name()

    def get_name(self):
        return self.name
Example #16
class HealthRegistry(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin health registry object."""

    fields = {
        'id': fields.UUIDField(),
        'cluster_id': fields.UUIDField(),
        'check_type': fields.StringField(),
        'interval': fields.IntegerField(nullable=True),
        'params': fields.DictOfStringsField(),
        'engine_id': fields.UUIDField(),
    }

    @staticmethod
    def _from_db_object(context, registry, db_obj):
        for field in registry.fields:
            registry[field] = db_obj[field]

        registry._context = context
        registry.obj_reset_changes()

        return registry

    @classmethod
    def create(cls, context, cluster_id, check_type, interval, params,
               engine_id):
        obj = db_api.registry_create(context, cluster_id, check_type, interval,
                                     params, engine_id)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def claim(cls, context, engine_id):
        objs = db_api.registry_claim(context, engine_id)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def delete(cls, context, cluster_id):
        db_api.registry_delete(context, cluster_id)
Example #17
class Container(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    # Version 1.1: Add container_id column
    # Version 1.2: Add memory column
    # Version 1.3: Add task_state column
    # Version 1.4: Add cpu, workdir, ports, hostname and labels columns
    # Version 1.5: Add meta column
    # Version 1.6: Add addresses column
    # Version 1.7: Add host column
    # Version 1.8: Add restart_policy
    # Version 1.9: Add status_detail column
    # Version 1.10: Add tty, stdin_open
    # Version 1.11: Add image_driver
    # Version 1.12: Add 'Created' to ContainerStatus
    # Version 1.13: Add more task states for container
    # Version 1.14: Add method 'list_by_host'
    # Version 1.15: Combine tty and stdin_open
    # Version 1.16: Add websocket_url and token
    # Version 1.17: Add security_groups
    # Version 1.18: Add auto_remove
    # Version 1.19: Add runtime column
    # Version 1.20: Change runtime to String type
    # Version 1.21: Add pci_device attribute
    # Version 1.22: Add 'Deleting' to ContainerStatus
    # Version 1.23: Add the missing 'pci_devices' attribute
    # Version 1.24: Add the storage_opt attribute
    # Version 1.25: Change TaskStateField definition
    # Version 1.26: Add auto_heal
    # Version 1.27: Make auto_heal field nullable
    # Version 1.28: Add 'Dead' to ContainerStatus
    # Version 1.29: Add 'Restarting' to ContainerStatus
    # Version 1.30: Add capsule_id attribute
    # Version 1.31: Add 'started_at' attribute
    # Version 1.32: Add 'exec_instances' attribute
    VERSION = '1.33'

    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.ListOfStringsField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'meta': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
        'runtime': fields.StringField(nullable=True),
        'pci_devices': fields.ListOfObjectsField('PciDevice', nullable=True),
        'disk': fields.IntegerField(nullable=True),
        'auto_heal': fields.BooleanField(nullable=True),
        'capsule_id': fields.IntegerField(nullable=True),
        'started_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'exec_instances': fields.ListOfObjectsField('ExecInstance',
                                                    nullable=True),
    }

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            if field in ['pci_devices', 'exec_instances']:
                continue
            setattr(container, field, db_container[field])

        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            Container._from_db_object(cls(context), obj) for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, uuid)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, name)
        container = Container._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters to apply when listing containers; the filter
                        name can be 'name', 'image', 'project_id', 'user_id'
                        or 'memory'. For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(context,
                                              limit=limit,
                                              marker=marker,
                                              sort_key=sort_key,
                                              sort_dir=sort_dir,
                                              filters=filters)
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(context, filters={'host': host})
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_capsule_id(cls, context, capsule_id):
        """Return a list of Container objects by capsule_id.

        :param context: Security context.
        :param capsule_id: The id of a capsule.
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(
            context, filters={'capsule_id': capsule_id})
        return Container._from_db_object_list(db_containers, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)

        """
        values = self.obj_get_changes()
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        dbapi.update_container(context, self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and \
               getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))

    def get_sandbox_id(self):
        if self.meta:
            return self.meta.get('sandbox_id', None)
        else:
            return None

    def set_sandbox_id(self, sandbox_id):
        if self.meta is None:
            self.meta = {'sandbox_id': sandbox_id}
        else:
            self.meta['sandbox_id'] = sandbox_id
            self._changed_fields.add('meta')

    def obj_load_attr(self, attrname):
        if attrname not in CONTAINER_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'pci_devices':
            self._load_pci_devices()

        if attrname == 'exec_instances':
            self._load_exec_instances()

        self.obj_reset_changes([attrname])

    def _load_pci_devices(self):
        self.pci_devices = pci_device.PciDevice.list_by_container_uuid(
            self._context, self.uuid)

    def _load_exec_instances(self):
        self.exec_instances = exec_inst.ExecInstance.list_by_container_id(
            self._context, self.id)
Example #18
class Backup(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Add new field num_dependent_backups and extra fields
    #              is_incremental and has_dependent_backups.
    # Version 1.2: Add new field snapshot_id and data_timestamp.
    # Version 1.3: Changed 'status' field to use BackupStatusField
    # Version 1.4: Add restore_volume_id
    # Version 1.5: Add metadata
    # Version 1.6: Add encryption_key_id
    # Version 1.7: Add parent
    VERSION = '1.7'

    OPTIONAL_FIELDS = ('metadata', 'parent')

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.StringField(),
        'project_id': fields.StringField(),
        'volume_id': fields.UUIDField(),
        'host': fields.StringField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'container': fields.StringField(nullable=True),
        'parent_id': fields.StringField(nullable=True),
        'parent': fields.ObjectField('Backup', nullable=True),
        'status': c_fields.BackupStatusField(nullable=True),
        'fail_reason': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        # NOTE(dulek): Metadata field is used to store any strings by backup
        # drivers, that's why it can't be DictOfStringsField.
        'service_metadata': fields.StringField(nullable=True),
        'service': fields.StringField(nullable=True),
        'object_count': fields.IntegerField(nullable=True),
        'temp_volume_id': fields.StringField(nullable=True),
        'temp_snapshot_id': fields.StringField(nullable=True),
        'num_dependent_backups': fields.IntegerField(nullable=True),
        'snapshot_id': fields.StringField(nullable=True),
        'data_timestamp': fields.DateTimeField(nullable=True),
        'restore_volume_id': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'encryption_key_id': fields.StringField(nullable=True),
    }

    obj_extra_fields = ['name', 'is_incremental', 'has_dependent_backups']

    def __init__(self, *args, **kwargs):
        super(Backup, self).__init__(*args, **kwargs)
        self._orig_metadata = {}

        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if self.obj_attr_is_set('metadata') else {})

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        return 'metadata',

    @property
    def name(self):
        return CONF.backup_name_template % self.id

    @property
    def is_incremental(self):
        return bool(self.parent_id)

    @property
    def has_dependent_backups(self):
        return bool(self.num_dependent_backups)

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        added_fields = (((1, 7), ('parent', )), )

        super(Backup, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)

    @classmethod
    def _from_db_object(cls, context, backup, db_backup, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in backup.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_backup.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            backup[name] = value

        if 'metadata' in expected_attrs:
            metadata = db_backup.get('backup_metadata')
            if metadata is None:
                raise exception.MetadataAbsent()
            backup.metadata = {item['key']: item['value'] for item in metadata}

        backup._context = context
        backup.obj_reset_changes()
        return backup

    def obj_reset_changes(self, fields=None):
        super(Backup, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())
        if attrname == 'parent':
            if self.parent_id:
                self.parent = self.get_by_id(self._context, self.parent_id)
            else:
                self.parent = None
        self.obj_reset_changes(fields=[attrname])

    def obj_what_changed(self):
        changes = super(Backup, self).obj_what_changed()
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')

        return changes

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        updates = self.cinder_obj_get_changes()

        db_backup = db.backup_create(self._context, updates)
        self._from_db_object(self._context, self, db_backup)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                metadata = updates.pop('metadata', None)
                self.metadata = db.backup_metadata_update(
                    self._context, self.id, metadata, True)
            updates.pop('parent', None)
            db.backup_update(self._context, self.id, updates)

        self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.backup_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    @staticmethod
    def decode_record(backup_url):
        """Deserialize backup metadata from string into a dictionary.

        :raises InvalidInput:
        """
        try:
            return jsonutils.loads(base64.decode_as_text(backup_url))
        except TypeError:
            msg = _("Can't decode backup record.")
        except ValueError:
            msg = _("Can't parse backup record.")
        raise exception.InvalidInput(reason=msg)

    def encode_record(self, **kwargs):
        """Serialize backup object, with optional extra info, into a string."""
        # We don't want to export extra fields and we want to force lazy
        # loading, so we can't use dict(self) or self.obj_to_primitive
        record = {
            name: field.to_primitive(self, name, getattr(self, name))
            for name, field in self.fields.items() if name != 'parent'
        }
        # We must update kwargs instead of record to ensure we don't overwrite
        # "real" data from the backup
        kwargs.update(record)
        retval = jsonutils.dump_as_bytes(kwargs)
        return base64.encode_as_text(retval)
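
encode_record and decode_record are symmetric: a JSON payload wrapped in base64 text. A hedged sketch of that round-trip with the same oslo.serialization helpers (the sample record is invented):

# Hedged sketch of the encode/decode symmetry used by Backup above;
# the record contents are made up.
from oslo_serialization import base64, jsonutils

record = {'id': 'fake-id', 'display_name': 'nightly'}
backup_url = base64.encode_as_text(jsonutils.dump_as_bytes(record))
assert jsonutils.loads(base64.decode_as_text(backup_url)) == record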
Example #19
class Snapshot(base.CinderPersistentObject, base.CinderObject,
               base.CinderObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'id': fields.UUIDField(),
        'user_id': fields.UUIDField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'volume_id': fields.UUIDField(),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'status': fields.StringField(nullable=True),
        'progress': fields.StringField(nullable=True),
        'volume_size': fields.IntegerField(),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_id': fields.UUIDField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'provider_auth': fields.StringField(nullable=True),
        'volume': fields.ObjectField('Volume', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'volume_name']

    @property
    def name(self):
        return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        return self.volume.name

    def __init__(self, *args, **kwargs):
        super(Snapshot, self).__init__(*args, **kwargs)
        self._orig_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Snapshot, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})

    def obj_what_changed(self):
        changes = super(Snapshot, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Snapshot, self).obj_make_compatible(primitive, target_version)
        target_version = utils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, snapshot, db_snapshot, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in snapshot.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_snapshot.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            snapshot[name] = value

        if 'volume' in expected_attrs:
            volume = objects.Volume(context)
            volume._from_db_object(context, volume, db_snapshot['volume'])
            snapshot.volume = volume
        if 'metadata' in expected_attrs:
            metadata = db_snapshot.get('snapshot_metadata')
            if metadata is None:
                raise exception.MetadataAbsent()
            snapshot.metadata = {
                item['key']: item['value']
                for item in metadata
            }
        snapshot._context = context
        snapshot.obj_reset_changes()
        return snapshot

    @base.remotable_classmethod
    def get_by_id(cls, context, id):
        db_snapshot = db.snapshot_get(context, id)
        return cls._from_db_object(context,
                                   cls(context),
                                   db_snapshot,
                                   expected_attrs=['metadata'])

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'volume' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volume assigned'))

        db_snapshot = db.snapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_snapshot)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))

            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.snapshot_metadata_update(
                    self._context, self.id, metadata, True)

            db.snapshot_update(self._context, self.id, updates)

        self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        db.snapshot_destroy(self._context, self.id)

    def obj_load_attr(self, attrname):
        if attrname not in OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume':
            self.volume = objects.Volume.get_by_id(self._context,
                                                   self.volume_id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, context, key):
        db.snapshot_metadata_delete(context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    @base.remotable_classmethod
    def snapshot_data_get_for_project(cls,
                                      context,
                                      project_id,
                                      volume_type_id=None):
        return db.snapshot_data_get_for_project(context, project_id,
                                                volume_type_id)
Example #20
class Node(senlin_base.SenlinObject, base.VersionedObjectDictCompat):
    """Senlin node object."""

    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(),
        'profile_id': fields.UUIDField(),
        'cluster_id': fields.UUIDField(),
        'physical_id': fields.UUIDField(),
        'index': fields.IntegerField(),
        'role': fields.StringField(nullable=True),
        'init_at': fields.DateTimeField(nullable=True),
        'created_at': fields.DateTimeField(nullable=True),
        'updated_at': fields.DateTimeField(nullable=True),
        'status': fields.StringField(),
        'status_reason': fields.StringField(),
        'metadata': fields.DictOfStringsField(),
        'data': fields.DictOfStringsField(),
        'user': fields.StringField(),
        'project': fields.StringField(),
        'domain': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, node, db_obj):
        if db_obj is None:
            return None

        for field in node.fields:
            if field == 'metadata':
                node['metadata'] = db_obj['meta_data']
            else:
                node[field] = db_obj[field]

        node._context = context
        node.obj_reset_changes()

        return node

    @classmethod
    def create(cls, context, values):
        obj = db_api.node_create(context, values)
        return cls._from_db_object(context, cls(context), obj)

    @classmethod
    def get(cls, context, node_id, **kwargs):
        obj = db_api.node_get(context, node_id, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_by_name(cls, context, name, **kwargs):
        obj = db_api.node_get_by_name(context, name, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_by_short_id(cls, context, short_id, **kwargs):
        obj = db_api.node_get_by_short_id(context, short_id, **kwargs)
        return cls._from_db_object(context, cls(), obj)

    @classmethod
    def get_all(cls, context, **kwargs):
        objs = db_api.node_get_all(context, **kwargs)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def get_all_by_cluster(cls, context, cluster_id, **kwargs):
        objs = db_api.node_get_all_by_cluster(context, cluster_id, **kwargs)
        return [cls._from_db_object(context, cls(), obj) for obj in objs]

    @classmethod
    def count_by_cluster(cls, context, cluster_id, **kwargs):
        return db_api.node_count_by_cluster(context, cluster_id, **kwargs)

    @classmethod
    def update(cls, context, obj_id, values):
        db_api.node_update(context, obj_id, values)

    @classmethod
    def migrate(cls, context, obj_id, to_cluster, timestamp, role=None):
        return db_api.node_migrate(context,
                                   obj_id,
                                   to_cluster,
                                   timestamp,
                                   role=role)

    @classmethod
    def delete(cls, context, obj_id):
        db_api.node_delete(context, obj_id)
Example #21
class Pod(base.MagnumPersistentObject, base.MagnumObject,
          base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Remove unused Pod object API 'list_by_bay_uuid'
    VERSION = '1.1'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'desc': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'bay_uuid': fields.StringField(nullable=True),
        'images': fields.ListOfStringsField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'status': fields.StringField(nullable=True),
        'manifest_url': fields.StringField(nullable=True),
        'manifest': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
    }

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid, bay_uuid, k8s_api):
        """Find a pod based on pod uuid and the uuid for a bay.

        :param context: Security context
        :param uuid: the uuid of a pod.
        :param bay_uuid: the UUID of the Bay
        :param k8s_api: k8s API object

        :returns: a :class:`Pod` object.
        """
        try:
            resp = k8s_api.list_namespaced_pod(namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)

        if resp is None:
            raise exception.PodListNotFound(bay_uuid=bay_uuid)

        pod = {}
        for pod_entry in resp.items:
            if pod_entry.metadata.uid == uuid:
                pod['uuid'] = pod_entry.metadata.uid
                pod['name'] = pod_entry.metadata.name
                pod['project_id'] = context.project_id
                pod['user_id'] = context.user_id
                pod['bay_uuid'] = bay_uuid
                pod['images'] = [c.image for c in pod_entry.spec.containers]
                if not pod_entry.metadata.labels:
                    pod['labels'] = {}
                else:
                    pod['labels'] = ast.literal_eval(pod_entry.metadata.labels)
                pod['status'] = pod_entry.status.phase
                pod['host'] = pod_entry.spec.node_name

                pod_obj = Pod(context, **pod)
                return pod_obj
        raise exception.PodNotFound(pod=uuid)

    @base.remotable_classmethod
    def get_by_name(cls, context, name, bay_uuid, k8s_api):
        """Find a pod based on pod name and the uuid for a bay.

        :param context: Security context
        :param name: the name of a pod.
        :param bay_uuid: the UUID of the Bay
        :param k8s_api: k8s API object

        :returns: a :class:`Pod` object.
        """
        try:
            resp = k8s_api.read_namespaced_pod(name=name, namespace='default')
        except rest.ApiException as err:
            raise exception.KubernetesAPIFailed(err=err)

        if resp is None:
            raise exception.PodNotFound(pod=name)

        pod = {}
        pod['uuid'] = resp.metadata.uid
        pod['name'] = resp.metadata.name
        pod['project_id'] = context.project_id
        pod['user_id'] = context.user_id
        pod['bay_uuid'] = bay_uuid
        pod['images'] = [c.image for c in resp.spec.containers]
        if not resp.metadata.labels:
            pod['labels'] = {}
        else:
            pod['labels'] = ast.literal_eval(resp.metadata.labels)
        pod['status'] = resp.status.phase
        pod['host'] = resp.spec.node_name

        pod_obj = Pod(context, **pod)
        return pod_obj
Example #22
class VolumeType(base.CinderPersistentObject, base.CinderObject,
                 base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    VERSION = '1.0'

    DEFAULT_EXPECTED_ATTR = ('extra_specs', 'projects')

    fields = {
        'id': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'description': fields.StringField(nullable=True),
        'is_public': fields.BooleanField(default=True),
        'projects': fields.ListOfStringsField(nullable=True),
        'extra_specs': fields.DictOfStringsField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, type, db_type, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in type.fields.items():
            if name in OPTIONAL_FIELDS:
                continue
            value = db_type[name]
            if isinstance(field, fields.IntegerField):
                value = value or 0
            type[name] = value

        # Get data from db_type object that was queried by joined query
        # from DB
        if 'extra_specs' in expected_attrs:
            type.extra_specs = {}
            specs = db_type.get('extra_specs')
            if specs and isinstance(specs, list):
                type.extra_specs = {
                    item['key']: item['value']
                    for item in specs
                }
            elif specs and isinstance(specs, dict):
                type.extra_specs = specs
        if 'projects' in expected_attrs:
            type.projects = db_type.get('projects', [])

        type._context = context
        type.obj_reset_changes()
        return type

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        db_volume_type = volume_types.create(self._context, self.name,
                                             self.extra_specs, self.is_public,
                                             self.projects, self.description)
        self._from_db_object(self._context, self, db_volume_type)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            volume_types.update(self._context, self.id, self.name,
                                self.description)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            volume_types.destroy(self._context, self.id)
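A minimal create/save/destroy sketch, assuming an admin-capable request context ctxt; the field values are illustrative and the keyword-argument constructor comes from the versioned-object base class:

vtype = VolumeType(context=ctxt, name='gold', description='SSD backend',
                   is_public=True, projects=[], extra_specs={'tier': 'fast'})
vtype.create()                    # delegates to volume_types.create()
vtype.description = 'faster SSD backend'
vtype.save()                      # only name/description reach volume_types.update()
vtype.destroy()                   # runs volume_types.destroy() as admin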
Exemple #23
0
class Capsule(base.ZunPersistentObject, base.ZunObject):
    # Version 1.0: Initial version
    # Version 1.1: Add host to capsule
    # Version 1.2: Change the properties of meta_labels
    # Version 1.3: Add 'Deleting' to ContainerStatus
    # Version 1.4: Add addresses and volumes_info
    # Version 1.5: Change the properties of restart_policy
    # Version 1.6: Change the type of status
    VERSION = '1.6'

    fields = {
        'capsule_version': fields.StringField(nullable=True),
        'kind': fields.StringField(nullable=True),
        'restart_policy': fields.StringField(nullable=True),
        'host_selector': fields.StringField(nullable=True),
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'status': z_fields.CapsuleStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'memory': fields.StringField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),

        # Holds the human-readable messages, keyed as 'time': 'message'.
        # Kept commented out until zun notification support is finished:
        # 'message': fields.DictOfStringsField(nullable=True),
        'spec': z_fields.JsonField(nullable=True),
        'meta_name': fields.StringField(nullable=True),
        'meta_labels': fields.DictOfStringsField(nullable=True),
        'containers': fields.ListOfObjectsField('Container', nullable=True),
        # The list of containers uuids inside the capsule
        'containers_uuids': fields.ListOfStringsField(nullable=True),
        'host': fields.StringField(nullable=True),

        # volumes_info records the volume and container attached
        # relationship:
        # {'<volume-uuid1>': ['<container-uuid1>', '<container-uuid2>'],
        # '<volume-uuid2>': ['<container-uuid2>', '<container-uuid3>']},
        # A container can attach one or more volumes; attaching a single
        # volume to multiple containers will be supported in the future.
        'volumes_info': z_fields.JsonField(nullable=True),
    }

    @staticmethod
    def _from_db_object(capsule, db_capsule):
        """Converts a database entity to a formal object."""
        for field in capsule.fields:
            if field in CAPSULE_OPTIONAL_ATTRS:
                continue
            setattr(capsule, field, db_capsule[field])
        capsule.obj_reset_changes()
        return capsule

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            Capsule._from_db_object(cls(context), obj) for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a capsule based on uuid and return a :class:`Capsule` object.

        :param uuid: the uuid of a capsule.
        :param context: Security context
        :returns: a :class:`Capsule` object.
        """
        db_capsule = dbapi.get_capsule_by_uuid(context, uuid)
        capsule = Capsule._from_db_object(cls(context), db_capsule)
        return capsule

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a capsule based on name and return a :class:`Capsule` object.

        :param name: the meta_name of a capsule.
        :param context: Security context
        :returns: a :class:`Capsule` object.
        """
        db_capsule = dbapi.get_capsule_by_meta_name(context, name)
        capsule = Capsule._from_db_object(cls(context), db_capsule)
        return capsule

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of Capsule objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters to apply when listing capsules; the filter
                        name can be 'name', 'image', 'project_id', 'user_id'
                        or 'memory'. For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Capsule` objects.

        """
        db_capsules = dbapi.list_capsules(context,
                                          limit=limit,
                                          marker=marker,
                                          sort_key=sort_key,
                                          sort_dir=sort_dir,
                                          filters=filters)
        return Capsule._from_db_object_list(db_capsules, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Capsule(context)

        """
        values = self.obj_get_changes()
        if 'containers' in values:
            raise exception.ObjectActionError(action='create',
                                              reason='containers assigned')

        db_capsule = dbapi.create_capsule(context, values)
        self._from_db_object(self, db_capsule)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Capsule(context)
        """
        dbapi.destroy_capsule(context, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Capsule.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Capsule(context)
        """
        updates = self.obj_get_changes()
        if 'containers' in updates:
            raise exception.ObjectActionError(action='save',
                                              reason='containers changed')
        dbapi.update_capsule(context, self.uuid, updates)

        self.obj_reset_changes()

    def as_dict(self):
        capsule_dict = super(Capsule, self).as_dict()
        capsule_dict['containers'] = [c.as_dict() for c in self.containers]
        return capsule_dict

    def obj_load_attr(self, attrname):
        if attrname not in CAPSULE_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason='attribute %s not lazy-loadable' % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        if attrname == 'containers':
            self.containers = container.Container.list_by_capsule_id(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])
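A hedged listing sketch, assuming a request context ctxt; the filter keys follow the docstring of list() above, and 'containers' is lazy-loaded through obj_load_attr() on first access:

capsules = Capsule.list(ctxt, limit=10, sort_key='uuid', sort_dir='asc',
                        filters={'project_id': ctxt.project_id})
for capsule in capsules:
    # Accessing .containers triggers Container.list_by_capsule_id() once.
    container_names = [c.name for c in capsule.containers]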
Exemple #24
0
class ContainerBase(base.ZunPersistentObject, base.ZunObject):

    fields = {
        'id': fields.IntegerField(),
        'container_id': fields.StringField(nullable=True),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'image': fields.StringField(nullable=True),
        'cpu': fields.FloatField(nullable=True),
        'cpu_policy': fields.StringField(nullable=True),
        'cpuset': fields.ObjectField("Cpuset", nullable=True),
        'memory': fields.StringField(nullable=True),
        'command': fields.ListOfStringsField(nullable=True),
        'status': z_fields.ContainerStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'task_state': z_fields.TaskStateField(nullable=True),
        'environment': fields.DictOfStringsField(nullable=True),
        'workdir': fields.StringField(nullable=True),
        'auto_remove': fields.BooleanField(nullable=True),
        'ports': z_fields.ListOfIntegersField(nullable=True),
        'hostname': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'addresses': z_fields.JsonField(nullable=True),
        'image_pull_policy': fields.StringField(nullable=True),
        'host': fields.StringField(nullable=True),
        'restart_policy': fields.DictOfStringsField(nullable=True),
        'status_detail': fields.StringField(nullable=True),
        'interactive': fields.BooleanField(nullable=True),
        'tty': fields.BooleanField(nullable=True),
        'image_driver': fields.StringField(nullable=True),
        'websocket_url': fields.StringField(nullable=True),
        'websocket_token': fields.StringField(nullable=True),
        'security_groups': fields.ListOfStringsField(nullable=True),
        'runtime': fields.StringField(nullable=True),
        'pci_devices': fields.ListOfObjectsField('PciDevice', nullable=True),
        'disk': fields.IntegerField(nullable=True),
        'auto_heal': fields.BooleanField(nullable=True),
        'started_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
        'exposed_ports': z_fields.JsonField(nullable=True),
        'exec_instances': fields.ListOfObjectsField('ExecInstance',
                                                    nullable=True),
        'privileged': fields.BooleanField(nullable=True),
        'healthcheck': z_fields.JsonField(nullable=True),
        'registry_id': fields.IntegerField(nullable=True),
        'registry': fields.ObjectField("Registry", nullable=True),
        'annotations': z_fields.JsonField(nullable=True),
        'cni_metadata': z_fields.JsonField(nullable=True),
        'entrypoint': fields.ListOfStringsField(nullable=True),
    }

    # should be redefined in subclasses
    container_type = None

    @staticmethod
    def _from_db_object(container, db_container):
        """Converts a database entity to a formal object."""
        for field in container.fields:
            if field in [
                    'pci_devices', 'exec_instances', 'registry', 'containers',
                    'init_containers'
            ]:
                continue
            if field == 'cpuset':
                container.cpuset = Cpuset._from_dict(db_container['cpuset'])
                continue
            setattr(container, field, db_container[field])

        container.obj_reset_changes()
        return container

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [cls._from_db_object(cls(context), obj) for obj in db_objects]

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a container based on uuid and return a :class:`Container` object.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_uuid(context, cls.container_type,
                                                   uuid)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a container based on name and return a Container object.

        :param name: the logical name of a container.
        :param context: Security context
        :returns: a :class:`Container` object.
        """
        db_container = dbapi.get_container_by_name(context, cls.container_type,
                                                   name)
        container = cls._from_db_object(cls(context), db_container)
        return container

    @staticmethod
    def get_container_any_type(context, uuid):
        """Find a container of any type based on uuid.

        :param uuid: the uuid of a container.
        :param context: Security context
        :returns: a :class:`ContainerBase` object.
        """
        db_container = dbapi.get_container_by_uuid(context, consts.TYPE_ANY,
                                                   uuid)
        type = db_container['container_type']
        if type == consts.TYPE_CONTAINER:
            container_cls = Container
        elif type == consts.TYPE_CAPSULE:
            container_cls = Capsule
        elif type == consts.TYPE_CAPSULE_CONTAINER:
            container_cls = CapsuleContainer
        elif type == consts.TYPE_CAPSULE_INIT_CONTAINER:
            container_cls = CapsuleInitContainer
        else:
            raise exception.ZunException(_('Unknown container type: %s') % type)

        obj = container_cls(context)
        container = container_cls._from_db_object(obj, db_container)
        return container

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of Container objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filters to apply when listing containers; the filter
                        name can be 'name', 'image', 'project_id', 'user_id'
                        or 'memory'. For example, filters={'image': 'nginx'}
        :returns: a list of :class:`Container` objects.

        """
        db_containers = dbapi.list_containers(context,
                                              cls.container_type,
                                              limit=limit,
                                              marker=marker,
                                              sort_key=sort_key,
                                              sort_dir=sort_dir,
                                              filters=filters)
        return cls._from_db_object_list(db_containers, cls, context)

    @base.remotable_classmethod
    def list_by_host(cls, context, host):
        """Return a list of Container objects by host.

        :param context: Security context.
        :param host: A compute host.
        :returns: a list of :class:`Container` object.

        """
        db_containers = dbapi.list_containers(context,
                                              cls.container_type,
                                              filters={'host': host})
        return cls._from_db_object_list(db_containers, cls, context)

    @base.remotable
    def create(self, context):
        """Create a Container record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)

        """
        values = self.obj_get_changes()
        cpuset_obj = values.pop('cpuset', None)
        if cpuset_obj is not None:
            values['cpuset'] = cpuset_obj._to_dict()
        annotations = values.pop('annotations', None)
        if annotations is not None:
            values['annotations'] = self.fields['annotations'].to_primitive(
                self, 'annotations', self.annotations)
        cni_metadata = values.pop('cni_metadata', None)
        if cni_metadata is not None:
            values['cni_metadata'] = self.fields['cni_metadata'].to_primitive(
                self, 'cni_metadata', self.cni_metadata)
        values['container_type'] = self.container_type
        db_container = dbapi.create_container(context, values)
        self._from_db_object(self, db_container)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Container from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        dbapi.destroy_container(context, self.container_type, self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Container.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        updates = self.obj_get_changes()
        cpuset_obj = updates.pop('cpuset', None)
        if cpuset_obj is not None:
            updates['cpuset'] = cpuset_obj._to_dict()
        annotations = updates.pop('annotations', None)
        if annotations is not None:
            updates['annotations'] = self.fields['annotations'].to_primitive(
                self, 'annotations', self.annotations)
        cni_metadata = updates.pop('cni_metadata', None)
        if cni_metadata is not None:
            updates['cni_metadata'] = self.fields['cni_metadata'].to_primitive(
                self, 'cni_metadata', self.cni_metadata)
        dbapi.update_container(context, self.container_type, self.uuid,
                               updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Container.

        Loads a container with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded container column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Container(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and \
               getattr(self, field) != getattr(current, field):
                setattr(self, field, getattr(current, field))

    def obj_load_attr(self, attrname):
        if attrname not in CONTAINER_OPTIONAL_ATTRS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)

        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        LOG.debug("Lazy-loading '%(attr)s' on %(name)s uuid %(uuid)s", {
            'attr': attrname,
            'name': self.obj_name(),
            'uuid': self.uuid,
        })

        # NOTE(danms): We handle some fields differently here so that we
        # can be more efficient
        if attrname == 'pci_devices':
            self._load_pci_devices()

        if attrname == 'exec_instances':
            self._load_exec_instances()

        if attrname == 'registry':
            self._load_registry()

        self.obj_reset_changes([attrname])

    def _load_pci_devices(self):
        self.pci_devices = pci_device.PciDevice.list_by_container_uuid(
            self._context, self.uuid)

    def _load_exec_instances(self):
        self.exec_instances = exec_inst.ExecInstance.list_by_container_id(
            self._context, self.id)

    def _load_registry(self):
        self.registry = None
        if self.registry_id:
            self.registry = registry.Registry.get_by_id(
                self._context, self.registry_id)

    @base.remotable_classmethod
    def get_count(cls, context, project_id, flag):
        """Get the counts of Container objects in the database.

        :param context: The request context for database access.
        :param project_id: The project_id to count across.
        :param flag: The name of the resource to count, one of the following
                     options:
                     - containers: the number of containers owned by the
                       project.
                     - memory: the sum of the containers' memory.
                     - cpu: the sum of the containers' CPU.
                     - disk: the sum of the containers' disk size.
        """
        usage = dbapi.count_usage(context, cls.container_type, project_id,
                                  flag)[0] or 0.0
        return usage
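A small accounting sketch, assuming a request context ctxt and a concrete subclass such as Container (one of the types referenced in get_container_any_type, with container_type set); the host name 'compute-node-1' is illustrative and the flag names follow the get_count() docstring:

# List containers scheduled to one compute host ('compute-node-1' is made up).
running_on_host = Container.list_by_host(ctxt, 'compute-node-1')
# Per-project usage counters, as used for quota-style accounting.
num_containers = Container.get_count(ctxt, ctxt.project_id, 'containers')
memory_used = Container.get_count(ctxt, ctxt.project_id, 'memory')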
Exemple #25
0
class Volume(cleanable.CinderCleanableObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject,
             base.ClusteredObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    # Version 1.4: Added cluster fields
    # Version 1.5: Added group
    # Version 1.6: This object is now cleanable (adds rows to workers table)
    # Version 1.7: Added service_uuid
    # Version 1.8: Added shared_targets
    VERSION = '1.8'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots', 'cluster', 'group')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'snapshot_id': fields.UUIDField(nullable=True),

        'cluster_name': fields.StringField(nullable=True),
        'cluster': fields.ObjectField('Cluster', nullable=True,
                                      read_only=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': c_fields.VolumeAttachStatusField(nullable=True),
        'migration_status': fields.StringField(nullable=True),

        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'provider_id': fields.StringField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),

        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),

        'consistencygroup_id': fields.UUIDField(nullable=True),
        'group_id': fields.UUIDField(nullable=True),

        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),

        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),

        'previous_status': fields.StringField(nullable=True),

        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
        'group': fields.ObjectField('Group', nullable=True),
        'service_uuid': fields.StringField(nullable=True),
        'shared_targets': fields.BooleanField(default=True, nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
        if context.is_admin:
            expected_attrs.append('admin_metadata')

        return expected_attrs

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                        primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self
                                         else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self
                                          else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self and
                self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self and
                self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make a Volume representation compatible with a target version."""
        added_fields = (((1, 4), ('cluster', 'cluster_name')),
                        ((1, 5), ('group', 'group_id')),
                        ((1, 7), ('service_uuid',)))

        # Convert all related objects
        super(Volume, self).obj_make_compatible(primitive, target_version)

        target_version = versionutils.convert_version_to_tuple(target_version)
        for version, remove_fields in added_fields:
            if target_version < version:
                for obj_field in remove_fields:
                    primitive.pop(obj_field, None)

    @classmethod
    def _from_db_object(cls, context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {item['key']: item['value']
                                     for item in metadata}
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {item['key']: item['value']
                                      for item in metadata}
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context, objects.VolumeType(), db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment,
                db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if volume.consistencygroup_id and 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context,
                                             consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot,
                db_volume['snapshots'])
            volume.snapshots = snapshots
        if 'cluster' in expected_attrs:
            db_cluster = db_volume.get('cluster')
            # If this volume doesn't belong to a cluster, the cluster field in
            # the ORM instance will have a value of None.
            if db_cluster:
                volume.cluster = objects.Cluster(context)
                objects.Cluster._from_db_object(context, volume.cluster,
                                                db_cluster)
            else:
                volume.cluster = None
        if volume.group_id and 'group' in expected_attrs:
            group = objects.Group(context)
            group._from_db_object(context,
                                  group,
                                  db_volume['group'])
            volume.group = group

        volume._context = context
        volume.obj_reset_changes()
        return volume

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('snapshots assigned'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('cluster assigned'))
        if 'group' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('group assigned'))
        if ('volume_type_id' not in updates or
                updates['volume_type_id'] is None):
            updates['volume_type_id'] = (
                volume_types.get_default_volume_type()['id'])

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    def destroy(self):
        with self.obj_as_admin():
            updated_values = db.volume_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raise VolumeTypeNotFound exception.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id) if self.volume_type_id
                else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            if self.consistencygroup_id is None:
                self.consistencygroup = None
            else:
                consistencygroup = objects.ConsistencyGroup.get_by_id(
                    self._context, self.consistencygroup_id)
                self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)
        elif attrname == 'cluster':
            # If this volume doesn't belong to a cluster (cluster_name is
            # empty), then cluster field will be None.
            if self.cluster_name:
                self.cluster = objects.Cluster.get_by_id(
                    self._context, name=self.cluster_name)
            else:
                self.cluster = None
        elif attrname == 'group':
            if self.group_id is None:
                self.group = None
            else:
                group = objects.Group.get_by_id(
                    self._context, self.group_id)
                self.group = group

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata',
                 'volume_type'} | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set.  We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume.  If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can skip copying
            # volume_type_id and volume_type, which are not needed for
            # volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        self.save()
        dest_volume.save()
        return dest_volume

    def get_latest_snapshot(self):
        """Get volume's latest snapshot"""
        snapshot_db = db.snapshot_get_latest_for_volume(self._context, self.id)
        snapshot = objects.Snapshot(self._context)
        return snapshot._from_db_object(self._context,
                                        snapshot, snapshot_db)

    @staticmethod
    def _is_cleanable(status, obj_version):
        # Before 1.6 we didn't have the workers table, so cleanup wasn't
        # supported.
        if obj_version and obj_version < 1.6:
            return False
        return status in ('creating', 'deleting', 'uploading', 'downloading')

    def begin_attach(self, attach_mode):
        attachment = objects.VolumeAttachment(
            context=self._context,
            attach_status=c_fields.VolumeAttachStatus.ATTACHING,
            volume_id=self.id)
        attachment.create()
        with self.obj_as_admin():
            self.admin_metadata['attached_mode'] = attach_mode
            self.save()
        return attachment

    def finish_detach(self, attachment_id):
        with self.obj_as_admin():
            volume_updates, attachment_updates = (
                db.volume_detached(self._context, self.id, attachment_id))
            db.volume_admin_metadata_delete(self._context, self.id,
                                            'attached_mode')
            self.admin_metadata.pop('attached_mode', None)
        # Remove attachment in volume only when this field is loaded.
        if attachment_updates and self.obj_attr_is_set('volume_attachment'):
            for i, attachment in enumerate(self.volume_attachment):
                if attachment.id == attachment_id:
                    del self.volume_attachment.objects[i]
                    break

        self.update(volume_updates)
        self.obj_reset_changes(
            list(volume_updates.keys()) +
            ['volume_attachment', 'admin_metadata'])

    def is_replicated(self):
        return self.volume_type and self.volume_type.is_replicated()

    def is_multiattach(self):
        return self.volume_type and self.volume_type.is_multiattach()
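A sketch of the metadata change tracking, assuming `volume` is a Volume instance that already has its 'metadata' attribute loaded (e.g. built through _from_db_object with expected_attrs=['metadata']); in-place edits are detected because _reset_metadata_tracking() keeps a copy of the original dict:

volume.metadata['backup'] = 'weekly'           # mutate the dict in place
assert 'metadata' in volume.obj_what_changed() # picked up by the override above
volume.save()  # save() pops 'metadata' and calls db.volume_metadata_update()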
Exemple #26
0
class Service(base.MagnumPersistentObject, base.MagnumObject,
              base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    VERSION = '1.0'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.StringField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'bay_uuid': fields.StringField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'selector': fields.DictOfStringsField(nullable=True),
        'ip': fields.StringField(nullable=True),
        'ports': magnum_fields.ListOfDictsField(nullable=True),
        'manifest_url': fields.StringField(nullable=True),
        'manifest': fields.StringField(nullable=True),
    }

    @staticmethod
    def _from_db_object(service, db_service):
        """Converts a database entity to a formal object."""
        for field in service.fields:
            # ignore manifest_url as it was only used to create the service
            if field == 'manifest_url':
                continue
            if field == 'manifest':
                continue
            service[field] = db_service[field]

        service.obj_reset_changes()
        return service

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            Service._from_db_object(cls(context), obj) for obj in db_objects
        ]

    @base.remotable_classmethod
    def get_by_id(cls, context, service_id):
        """Find a service based on its integer id and return a Service object.

        :param service_id: the id of a service.
        :returns: a :class:`Service` object.
        """
        db_service = cls.dbapi.get_service_by_id(context, service_id)
        service = Service._from_db_object(cls(context), db_service)
        return service

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a service based on uuid and return a :class:`Service` object.

        :param uuid: the uuid of a service.
        :param context: Security context
        :returns: a :class:`Service` object.
        """
        db_service = cls.dbapi.get_service_by_uuid(context, uuid)
        service = Service._from_db_object(cls(context), db_service)
        return service

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a service based on service name and
        return a :class:`Service` object.

        :param name: the name of a service.
        :param context: Security context
        :returns: a :class:`Service` object.
        """
        db_service = cls.dbapi.get_service_by_name(context, name)
        service = Service._from_db_object(cls(context), db_service)
        return service

    @base.remotable_classmethod
    def list_by_bay_uuid(cls, context, bay_uuid):
        """Return a list of :class:`Service` objects associated with a given bay.

        :param bay_uuid: the uuid of a bay.
        :param context: Security context
        :returns: a list of :class:`Service` objects.
        """
        db_services = cls.dbapi.get_services_by_bay_uuid(context, bay_uuid)
        return Service._from_db_object_list(db_services, cls, context)

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None):
        """Return a list of Service objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :returns: a list of :class:`Service` objects.

        """
        db_services = cls.dbapi.get_service_list(context,
                                                 limit=limit,
                                                 marker=marker,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir)
        return Service._from_db_object_list(db_services, cls, context)

    @base.remotable
    def create(self, context=None):
        """Create a Service record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Service(context)

        """
        values = self.obj_get_changes()
        db_service = self.dbapi.create_service(values)
        self._from_db_object(self, db_service)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Service from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Service(context)
        """
        self.dbapi.destroy_service(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Service.

        Updates will be made column by column based on the result
        of self.what_changed().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Service(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_service(self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Service.

        Loads a service with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded service column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Service(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if field == 'manifest_url':
                continue
            if field == 'manifest':
                continue
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]
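A minimal lifecycle sketch, assuming a request context ctxt and an existing bay UUID; the field values are illustrative:

service = Service(ctxt, name='frontend-svc', bay_uuid=bay_uuid,
                  labels={'app': 'web'})
service.create()   # persists obj_get_changes() via dbapi.create_service()
service.ip = '10.0.0.5'
service.save()     # column-by-column update of the changed fields
service.refresh()  # re-reads from the DB, skipping the manifest fields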
Exemple #27
0
class Cluster(base.MagnumPersistentObject, base.MagnumObject,
              base.MagnumObjectDictCompat):
    # Version 1.0: Initial version
    # Version 1.1: Added 'bay_create_timeout' field
    # Version 1.2: Add 'registry_trust_id' field
    # Version 1.3: Added 'baymodel' field
    # Version 1.4: Added more types of status to bay's status field
    # Version 1.5: Rename 'registry_trust_id' to 'trust_id'
    #              Add 'trustee_user_name', 'trustee_password',
    #              'trustee_user_id' field
    # Version 1.6: Add rollback support for Bay
    # Version 1.7: Added 'coe_version'  and 'container_version' fields
    # Version 1.8: Rename 'baymodel' to 'cluster_template'
    # Version 1.9: Rename table name from 'bay' to 'cluster'
    #              Rename 'baymodel_id' to 'cluster_template_id'
    #              Rename 'bay_create_timeout' to 'create_timeout'
    # Version 1.10: Added 'keypair' field
    # Version 1.11: Added 'RESUME_FAILED' in status field
    # Version 1.12: Added 'get_stats' method
    # Version 1.13: Added get_count_all method
    # Version 1.14: Added 'docker_volume_size' field
    # Version 1.15: Added 'labels' field
    # Version 1.16: Added 'master_flavor_id' field
    # Version 1.17: Added 'flavor_id' field
    # Version 1.18: Added 'health_status' and 'health_status_reason' field
    # Version 1.19: Added nodegroups, default_ng_worker, default_ng_master
    # Version 1.20: Fields node_count, master_count, node_addresses,
    #               master_addresses are now properties.

    VERSION = '1.20'

    dbapi = dbapi.get_instance()

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(nullable=True),
        'name': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'cluster_template_id': fields.StringField(nullable=True),
        'keypair': fields.StringField(nullable=True),
        'docker_volume_size': fields.IntegerField(nullable=True),
        'labels': fields.DictOfStringsField(nullable=True),
        'master_flavor_id': fields.StringField(nullable=True),
        'flavor_id': fields.StringField(nullable=True),
        'stack_id': fields.StringField(nullable=True),
        'status': m_fields.ClusterStatusField(nullable=True),
        'status_reason': fields.StringField(nullable=True),
        'health_status': m_fields.ClusterHealthStatusField(nullable=True),
        'health_status_reason': fields.DictOfStringsField(nullable=True),
        'create_timeout': fields.IntegerField(nullable=True),
        'api_address': fields.StringField(nullable=True),
        'discovery_url': fields.StringField(nullable=True),
        'ca_cert_ref': fields.StringField(nullable=True),
        'magnum_cert_ref': fields.StringField(nullable=True),
        'cluster_template': fields.ObjectField('ClusterTemplate'),
        'trust_id': fields.StringField(nullable=True),
        'trustee_username': fields.StringField(nullable=True),
        'trustee_password': fields.StringField(nullable=True),
        'trustee_user_id': fields.StringField(nullable=True),
        'coe_version': fields.StringField(nullable=True),
        'container_version': fields.StringField(nullable=True)
    }

    @staticmethod
    def _from_db_object(cluster, db_cluster):
        """Converts a database entity to a formal object."""
        for field in cluster.fields:
            if field != 'cluster_template':
                cluster[field] = db_cluster[field]

        # Note(eliqiao): The following line needs to be placed outside the
        # loop because there is a dependency from cluster_template to
        # cluster_template_id. The cluster_template_id must be populated
        # first in the loop before it can be used to find the cluster_template.
        cluster['cluster_template'] = ClusterTemplate.get_by_uuid(
            cluster._context, cluster.cluster_template_id)

        cluster.obj_reset_changes()
        return cluster

    @property
    def nodegroups(self):
        # Returns all nodegroups that belong to the cluster.
        return NodeGroup.list(self._context, self.uuid)

    @property
    def default_ng_worker(self):
        # Assume that every cluster will have only one default
        # non-master nodegroup. We don't want to limit the roles,
        # so any nodegroup that does not have the master role is
        # treated as a worker/minion nodegroup.
        filters = {'is_default': True}
        default_ngs = NodeGroup.list(self._context, self.uuid, filters=filters)
        return [n for n in default_ngs if n.role != 'master'][0]

    @property
    def default_ng_master(self):
        # Assume that every cluster will have only one default
        # master nodegroup.
        filters = {'role': 'master', 'is_default': True}
        return NodeGroup.list(self._context, self.uuid, filters=filters)[0]

    @property
    def node_count(self):
        return sum(n.node_count for n in self.nodegroups if n.role != 'master')

    @property
    def master_count(self):
        return sum(n.node_count for n in self.nodegroups if n.role == 'master')

    @property
    def node_addresses(self):
        node_addresses = []
        for ng in self.nodegroups:
            if ng.role != 'master':
                node_addresses += ng.node_addresses
        return node_addresses

    @property
    def master_addresses(self):
        master_addresses = []
        for ng in self.nodegroups:
            if ng.role == 'master':
                master_addresses += ng.node_addresses
        return master_addresses

    @staticmethod
    def _from_db_object_list(db_objects, cls, context):
        """Converts a list of database entities to a list of formal objects."""
        return [
            Cluster._from_db_object(cls(context), obj) for obj in db_objects
        ]

    @base.remotable_classmethod
    def get(cls, context, cluster_id):
        """Find a cluster based on its id or uuid and return a Cluster object.

        :param cluster_id: the id *or* uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        if strutils.is_int_like(cluster_id):
            return cls.get_by_id(context, cluster_id)
        elif uuidutils.is_uuid_like(cluster_id):
            return cls.get_by_uuid(context, cluster_id)
        else:
            raise exception.InvalidIdentity(identity=cluster_id)

    @base.remotable_classmethod
    def get_by_id(cls, context, cluster_id):
        """Find a cluster based on its integer id and return a Cluster object.

        :param cluster_id: the id of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_id(context, cluster_id)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_by_uuid(cls, context, uuid):
        """Find a cluster based on uuid and return a :class:`Cluster` object.

        :param uuid: the uuid of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_uuid(context, uuid)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def get_count_all(cls, context, filters=None):
        """Get count of matching clusters.

        :param context: The security context
        :param filters: filter dict; can include 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id',
                        'status' (should be a status list) and 'master_count'.
        :returns: Count of matching clusters.
        """
        return cls.dbapi.get_cluster_count_all(context, filters=filters)

    @base.remotable_classmethod
    def get_by_name(cls, context, name):
        """Find a cluster based on name and return a Cluster object.

        :param name: the logical name of a cluster.
        :param context: Security context
        :returns: a :class:`Cluster` object.
        """
        db_cluster = cls.dbapi.get_cluster_by_name(context, name)
        cluster = Cluster._from_db_object(cls(context), db_cluster)
        return cluster

    @base.remotable_classmethod
    def list(cls,
             context,
             limit=None,
             marker=None,
             sort_key=None,
             sort_dir=None,
             filters=None):
        """Return a list of Cluster objects.

        :param context: Security context.
        :param limit: maximum number of resources to return in a single result.
        :param marker: pagination marker for large data sets.
        :param sort_key: column to sort results by.
        :param sort_dir: direction to sort. "asc" or "desc".
        :param filters: filter dict; can include 'cluster_template_id',
                        'name', 'node_count', 'stack_id', 'api_address',
                        'node_addresses', 'project_id', 'user_id',
                        'status' (should be a status list) and 'master_count'.
        :returns: a list of :class:`Cluster` objects.

        """
        db_clusters = cls.dbapi.get_cluster_list(context,
                                                 limit=limit,
                                                 marker=marker,
                                                 sort_key=sort_key,
                                                 sort_dir=sort_dir,
                                                 filters=filters)
        return Cluster._from_db_object_list(db_clusters, cls, context)

    @base.remotable_classmethod
    def get_stats(cls, context, project_id=None):
        """Return a list of Cluster objects.

        :param context: Security context.
        :param project_id: project id
        """
        return cls.dbapi.get_cluster_stats(project_id)

    @base.remotable
    def create(self, context=None):
        """Create a Cluster record in the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)

        """
        values = self.obj_get_changes()
        db_cluster = self.dbapi.create_cluster(values)
        self._from_db_object(self, db_cluster)

    @base.remotable
    def destroy(self, context=None):
        """Delete the Cluster from the DB.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        self.dbapi.destroy_cluster(self.uuid)
        self.obj_reset_changes()

    @base.remotable
    def save(self, context=None):
        """Save updates to this Cluster.

        Updates will be made column by column based on the result
        of self.obj_get_changes().

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        updates = self.obj_get_changes()
        self.dbapi.update_cluster(self.uuid, updates)

        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context=None):
        """Loads updates for this Cluster.

        Loads a Cluster with the same uuid from the database and
        checks for updated attributes. Updates are applied from
        the loaded Cluster column by column, if there are any updates.

        :param context: Security context. NOTE: This should only
                        be used internally by the indirection_api.
                        Unfortunately, RPC requires context as the first
                        argument, even though we don't use it.
                        A context should be set when instantiating the
                        object, e.g.: Cluster(context)
        """
        current = self.__class__.get_by_uuid(self._context, uuid=self.uuid)
        for field in self.fields:
            if self.obj_attr_is_set(field) and self[field] != current[field]:
                self[field] = current[field]

    def as_dict(self):
        dict_ = super(Cluster, self).as_dict()
        # Update the dict with the attributes coming from
        # the cluster's nodegroups.
        dict_.update({
            'node_count': self.node_count,
            'master_count': self.master_count,
            'node_addresses': self.node_addresses,
            'master_addresses': self.master_addresses
        })
        return dict_
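
Cluster.get() above dispatches to get_by_id() or get_by_uuid() depending on whether the identifier looks like an integer or a UUID, using strutils.is_int_like and uuidutils.is_uuid_like from oslo.utils. Below is a rough, self-contained sketch of that dispatch; the two helpers only approximate the oslo.utils behaviour, and get_cluster plus the lookup dicts are made up for illustration.

import uuid


def is_int_like(value):
    # Rough stand-in for oslo.utils strutils.is_int_like().
    try:
        return str(int(value)) == str(value)
    except (TypeError, ValueError):
        return False


def is_uuid_like(value):
    # Rough stand-in for oslo.utils uuidutils.is_uuid_like().
    try:
        return str(uuid.UUID(value)).lower() == str(value).lower()
    except (TypeError, ValueError, AttributeError):
        return False


# Hypothetical lookup tables standing in for dbapi.get_cluster_by_id/_by_uuid.
CLUSTERS_BY_ID = {42: 'k8s-prod'}
CLUSTERS_BY_UUID = {'5a8d1f0e-3c7b-4d2a-9f6e-1b2c3d4e5f60': 'k8s-prod'}


def get_cluster(cluster_id):
    if is_int_like(cluster_id):
        return CLUSTERS_BY_ID[int(cluster_id)]
    elif is_uuid_like(cluster_id):
        return CLUSTERS_BY_UUID[cluster_id]
    raise ValueError('invalid identity: %s' % cluster_id)


print(get_cluster('42'))                                    # by integer id
print(get_cluster('5a8d1f0e-3c7b-4d2a-9f6e-1b2c3d4e5f60'))  # by uuid
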
Exemple #28
class Snapshot(cleanable.CinderCleanableObject, base.CinderObject,
               base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Changed 'status' field to use SnapshotStatusField
    # Version 1.2: This object is now cleanable (adds rows to workers table)
    VERSION = '1.2'

    # NOTE(thangp): OPTIONAL_FIELDS are fields that would be lazy-loaded. They
    # are typically the relationships in the sqlalchemy object.
    OPTIONAL_FIELDS = ('volume', 'metadata', 'cgsnapshot', 'group_snapshot')

    fields = {
        'id': fields.UUIDField(),

        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'volume_id': fields.UUIDField(nullable=True),
        'cgsnapshot_id': fields.UUIDField(nullable=True),
        'group_snapshot_id': fields.UUIDField(nullable=True),
        'status': c_fields.SnapshotStatusField(nullable=True),
        'progress': fields.StringField(nullable=True),
        'volume_size': fields.IntegerField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'encryption_key_id': fields.UUIDField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),

        'provider_location': fields.StringField(nullable=True),
        'provider_id': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(),
        'provider_auth': fields.StringField(nullable=True),

        'volume': fields.ObjectField('Volume', nullable=True),
        'cgsnapshot': fields.ObjectField('CGSnapshot', nullable=True),
        'group_snapshot': fields.ObjectField('GroupSnapshot', nullable=True),
    }

    @property
    def service_topic_queue(self):
        return self.volume.service_topic_queue

    @classmethod
    def _get_expected_attrs(cls, context, *args, **kwargs):
        return 'metadata',

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'volume_name']

    @property
    def name(self):
        return CONF.snapshot_name_template % self.id

    @property
    def volume_name(self):
        return self.volume.name

    def __init__(self, *args, **kwargs):
        super(Snapshot, self).__init__(*args, **kwargs)
        self._orig_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Snapshot, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if self.obj_attr_is_set('metadata') else {})

    def obj_what_changed(self):
        changes = super(Snapshot, self).obj_what_changed()
        if hasattr(self, 'metadata') and self.metadata != self._orig_metadata:
            changes.add('metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Snapshot, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)

    @classmethod
    def _from_db_object(cls, context, snapshot, db_snapshot,
                        expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in snapshot.fields.items():
            if name in cls.OPTIONAL_FIELDS:
                continue
            value = db_snapshot.get(name)
            if isinstance(field, fields.IntegerField):
                value = value if value is not None else 0
            setattr(snapshot, name, value)

        if 'volume' in expected_attrs:
            volume = objects.Volume(context)
            volume._from_db_object(context, volume, db_snapshot['volume'])
            snapshot.volume = volume
        if 'cgsnapshot' in expected_attrs:
            cgsnapshot = objects.CGSnapshot(context)
            cgsnapshot._from_db_object(context, cgsnapshot,
                                       db_snapshot['cgsnapshot'])
            snapshot.cgsnapshot = cgsnapshot
        if 'group_snapshot' in expected_attrs:
            group_snapshot = objects.GroupSnapshot(context)
            group_snapshot._from_db_object(context, group_snapshot,
                                           db_snapshot['group_snapshot'])
            snapshot.group_snapshot = group_snapshot

        if 'metadata' in expected_attrs:
            metadata = db_snapshot.get('snapshot_metadata')
            if metadata is None:
                raise exception.MetadataAbsent()
            snapshot.metadata = {item['key']: item['value']
                                 for item in metadata}
        snapshot._context = context
        snapshot.obj_reset_changes()
        return snapshot

    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'volume' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('volume assigned'))
        if 'cgsnapshot' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('cgsnapshot assigned'))
        if 'cluster' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('cluster assigned'))
        if 'group_snapshot' in updates:
            raise exception.ObjectActionError(
                action='create',
                reason=_('group_snapshot assigned'))

        db_snapshot = db.snapshot_create(self._context, updates)
        self._from_db_object(self._context, self, db_snapshot)

    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'volume' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('volume changed'))
            if 'cgsnapshot' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cgsnapshot changed'))
            if 'group_snapshot' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group_snapshot changed'))

            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))

            if 'metadata' in updates:
                # Metadata items that are not specified in
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.snapshot_metadata_update(self._context,
                                                            self.id, metadata,
                                                            True)

            db.snapshot_update(self._context, self.id, updates)

        self.obj_reset_changes()

    def destroy(self):
        updated_values = db.snapshot_destroy(self._context, self.id)
        self.update(updated_values)
        self.obj_reset_changes(updated_values.keys())

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'volume':
            self.volume = objects.Volume.get_by_id(self._context,
                                                   self.volume_id)

        if attrname == 'cgsnapshot':
            self.cgsnapshot = objects.CGSnapshot.get_by_id(self._context,
                                                           self.cgsnapshot_id)

        if attrname == 'group_snapshot':
            self.group_snapshot = objects.GroupSnapshot.get_by_id(
                self._context,
                self.group_snapshot_id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, context, key):
        db.snapshot_metadata_delete(context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    @classmethod
    def snapshot_data_get_for_project(cls, context, project_id,
                                      volume_type_id=None):
        return db.snapshot_data_get_for_project(context, project_id,
                                                volume_type_id)

    @staticmethod
    def _is_cleanable(status, obj_version):
        # Before 1.2 we didn't have the workers table, so cleanup wasn't
        # supported.
        if obj_version and obj_version < 1.2:
            return False
        return status == 'creating'
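
Snapshot tracks metadata changes by keeping a private copy (_orig_metadata) and comparing against it in obj_what_changed(), because editing the dict in place never goes through attribute assignment and would otherwise be invisible to the change tracker. A minimal stand-alone sketch of that idea follows; TrackedMeta is an invented name, not a Cinder class.

# Minimal sketch of dict change tracking via a saved original copy.
# TrackedMeta is hypothetical; Cinder does this inside Snapshot and Volume.
class TrackedMeta(object):
    def __init__(self, metadata=None):
        self.metadata = dict(metadata or {})
        self._reset_metadata_tracking()

    def _reset_metadata_tracking(self):
        # Remember the current contents so later in-place edits are visible.
        self._orig_metadata = dict(self.metadata)

    def obj_what_changed(self):
        changes = set()
        if self.metadata != self._orig_metadata:
            changes.add('metadata')
        return changes


obj = TrackedMeta({'tier': 'gold'})
print(obj.obj_what_changed())      # set() -- nothing changed yet
obj.metadata['tier'] = 'silver'    # in-place edit, no attribute assignment
print(obj.obj_what_changed())      # {'metadata'}
obj._reset_metadata_tracking()
print(obj.obj_what_changed())      # set() again after the reset
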
Exemple #29
class Volume(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    # Version 1.3: Added finish_volume_migration()
    VERSION = '1.3'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.StringField(nullable=True),
        'project_id': fields.StringField(nullable=True),

        'snapshot_id': fields.UUIDField(nullable=True),

        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(nullable=True),
        'availability_zone': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'attach_status': fields.StringField(nullable=True),
        'migration_status': fields.StringField(nullable=True),

        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),

        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),

        'provider_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),

        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),

        'consistencygroup_id': fields.UUIDField(nullable=True),

        'deleted': fields.BooleanField(default=False, nullable=True),
        'bootable': fields.BooleanField(default=False, nullable=True),
        'multiattach': fields.BooleanField(default=False, nullable=True),

        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),

        'previous_status': fields.StringField(nullable=True),

        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id', 'volume_metadata',
                        'volume_admin_metadata', 'volume_glance_metadata']

    @classmethod
    def _get_expected_attrs(cls, context):
        expected_attrs = ['metadata', 'volume_type', 'volume_type.extra_specs']
        if context.is_admin:
            expected_attrs.append('admin_metadata')

        return expected_attrs

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    # TODO(dulek): Three properties below are for compatibility with dict
    # representation of volume. The format there is different (list of
    # SQLAlchemy models) so we need a conversion. Anyway - these should be
    # removed when we stop this class from deriving from DictObjectCompat.
    @property
    def volume_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.metadata.items()]
        return md

    @volume_metadata.setter
    def volume_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.metadata = md

    @property
    def volume_admin_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.admin_metadata.items()]
        return md

    @volume_admin_metadata.setter
    def volume_admin_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.admin_metadata = md

    @property
    def volume_glance_metadata(self):
        md = [MetadataObject(k, v) for k, v in self.glance_metadata.items()]
        return md

    @volume_glance_metadata.setter
    def volume_glance_metadata(self, value):
        md = {d['key']: d['value'] for d in value}
        self.glance_metadata = md

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        self._orig_metadata = {}
        self._orig_admin_metadata = {}
        self._orig_glance_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    @classmethod
    def _obj_from_primitive(cls, context, objver, primitive):
        obj = super(Volume, Volume)._obj_from_primitive(context, objver,
                                                        primitive)
        obj._reset_metadata_tracking()
        return obj

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self
                                         else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self
                                          else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self and
                self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self and
                self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in Volume.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            metadata = db_volume.get('volume_metadata', [])
            volume.metadata = {item['key']: item['value'] for item in metadata}
        if 'admin_metadata' in expected_attrs:
            metadata = db_volume.get('volume_admin_metadata', [])
            volume.admin_metadata = {item['key']: item['value']
                                     for item in metadata}
        if 'glance_metadata' in expected_attrs:
            metadata = db_volume.get('volume_glance_metadata', [])
            volume.glance_metadata = {item['key']: item['value']
                                      for item in metadata}
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                vt_expected_attrs = []
                if 'volume_type.extra_specs' in expected_attrs:
                    vt_expected_attrs.append('extra_specs')
                volume.volume_type = objects.VolumeType._from_db_object(
                    context, objects.VolumeType(), db_volume_type,
                    expected_attrs=vt_expected_attrs)
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment,
                db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context,
                                             consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(
                context, objects.SnapshotList(context),
                objects.Snapshot,
                db_volume['snapshots'])
            volume.snapshots = snapshots

        volume._context = context
        volume.obj_reset_changes()
        return volume

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('snapshots assigned'))

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.volume_destroy(self._context, self.id)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            try:
                # NOTE(dulek): We're using alias here to have conversion from
                # list to dict done there.
                self.volume_glance_metadata = db.volume_glance_metadata_get(
                    self._context, self.id)
            except exception.GlanceMetadataNotFound:
                # NOTE(dulek): DB API raises when volume has no
                # glance_metadata. Silencing this because at this level no
                # metadata is a completely valid result.
                self.glance_metadata = {}
        elif attrname == 'volume_type':
            # If the volume doesn't have volume_type, VolumeType.get_by_id
            # would trigger a db call which raises a VolumeTypeNotFound
            # exception.
            self.volume_type = (objects.VolumeType.get_by_id(
                self._context, self.volume_type_id) if self.volume_type_id
                else None)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)
            self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])

    def finish_volume_migration(self, dest_volume):
        # We swap fields between source (i.e. self) and destination at the
        # end of migration because we want to keep the original volume id
        # in the DB but now pointing to the migrated volume.
        skip = ({'id', 'provider_location', 'glance_metadata',
                 'volume_type'} | set(self.obj_extra_fields))
        for key in set(dest_volume.fields.keys()) - skip:
            # Only swap attributes that are already set.  We do not want to
            # unexpectedly trigger a lazy-load.
            if not dest_volume.obj_attr_is_set(key):
                continue

            value = getattr(dest_volume, key)
            value_to_dst = getattr(self, key)

            # Destination must have a _name_id since the id no longer matches
            # the volume.  If it doesn't have a _name_id we set one.
            if key == '_name_id':
                if not dest_volume._name_id:
                    setattr(dest_volume, key, self.id)
                continue
            elif key == 'migration_status':
                value = None
                value_to_dst = 'deleting'
            elif key == 'display_description':
                value_to_dst = 'migration src for ' + self.id
            elif key == 'status':
                value_to_dst = 'deleting'
            # Because dest_volume will be deleted soon, we can skip
            # copying volume_type_id and volume_type, which are not
            # needed for volume deletion.
            elif key == 'volume_type_id':
                # Initialize volume_type of source volume using
                # new volume_type_id.
                self.update({'volume_type_id': value})
                continue

            setattr(self, key, value)
            setattr(dest_volume, key, value_to_dst)

        dest_volume.save()
        return dest_volume
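
finish_volume_migration() above swaps most field values between the source volume (self) and the destination so the original volume id ends up pointing at the migrated data, while a few keys such as id, _name_id and status get special handling. Below is a much simplified sketch of that swap using plain dicts instead of Cinder objects; the function and the field values are only illustrative.

# Simplified illustration of the field swap done in finish_volume_migration().
# Plain dicts stand in for Volume objects; only a few fields are shown.
def finish_migration(src, dest, skip=('id',)):
    for key in set(dest) - set(skip):
        src_value, dest_value = src[key], dest[key]
        if key == 'status':
            # The source takes over the destination's state; the destination
            # is about to be deleted.
            src[key], dest[key] = dest_value, 'deleting'
        else:
            src[key], dest[key] = dest_value, src_value
    return dest


src = {'id': 'vol-1', 'host': 'backend-a', 'status': 'migrating'}
dest = {'id': 'vol-2', 'host': 'backend-b', 'status': 'available'}
finish_migration(src, dest)
print(src)   # id stays 'vol-1' but host is now 'backend-b', status 'available'
print(dest)  # id 'vol-2', host 'backend-a', status 'deleting'
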
Exemple #30
class Volume(base.CinderPersistentObject, base.CinderObject,
             base.CinderObjectDictCompat, base.CinderComparableObject):
    # Version 1.0: Initial version
    # Version 1.1: Added metadata, admin_metadata, volume_attachment, and
    #              volume_type
    # Version 1.2: Added glance_metadata, consistencygroup and snapshots
    VERSION = '1.2'

    OPTIONAL_FIELDS = ('metadata', 'admin_metadata', 'glance_metadata',
                       'volume_type', 'volume_attachment', 'consistencygroup',
                       'snapshots')

    DEFAULT_EXPECTED_ATTR = ('admin_metadata', 'metadata')

    fields = {
        'id': fields.UUIDField(),
        '_name_id': fields.UUIDField(nullable=True),
        'ec2_id': fields.UUIDField(nullable=True),
        'user_id': fields.UUIDField(nullable=True),
        'project_id': fields.UUIDField(nullable=True),
        'snapshot_id': fields.UUIDField(nullable=True),
        'host': fields.StringField(nullable=True),
        'size': fields.IntegerField(),
        'availability_zone': fields.StringField(),
        'status': fields.StringField(),
        'attach_status': fields.StringField(),
        'migration_status': fields.StringField(nullable=True),
        'scheduled_at': fields.DateTimeField(nullable=True),
        'launched_at': fields.DateTimeField(nullable=True),
        'terminated_at': fields.DateTimeField(nullable=True),
        'display_name': fields.StringField(nullable=True),
        'display_description': fields.StringField(nullable=True),
        'provider_id': fields.UUIDField(nullable=True),
        'provider_location': fields.StringField(nullable=True),
        'provider_auth': fields.StringField(nullable=True),
        'provider_geometry': fields.StringField(nullable=True),
        'volume_type_id': fields.UUIDField(nullable=True),
        'source_volid': fields.UUIDField(nullable=True),
        'encryption_key_id': fields.UUIDField(nullable=True),
        'consistencygroup_id': fields.UUIDField(nullable=True),
        'deleted': fields.BooleanField(default=False),
        'bootable': fields.BooleanField(default=False),
        'multiattach': fields.BooleanField(default=False),
        'replication_status': fields.StringField(nullable=True),
        'replication_extended_status': fields.StringField(nullable=True),
        'replication_driver_data': fields.StringField(nullable=True),
        'previous_status': fields.StringField(nullable=True),
        'metadata': fields.DictOfStringsField(nullable=True),
        'admin_metadata': fields.DictOfStringsField(nullable=True),
        'glance_metadata': fields.DictOfStringsField(nullable=True),
        'volume_type': fields.ObjectField('VolumeType', nullable=True),
        'volume_attachment': fields.ObjectField('VolumeAttachmentList',
                                                nullable=True),
        'consistencygroup': fields.ObjectField('ConsistencyGroup',
                                               nullable=True),
        'snapshots': fields.ObjectField('SnapshotList', nullable=True),
    }

    # NOTE(thangp): obj_extra_fields is used to hold properties that are not
    # usually part of the model
    obj_extra_fields = ['name', 'name_id']

    @property
    def name_id(self):
        return self.id if not self._name_id else self._name_id

    @name_id.setter
    def name_id(self, value):
        self._name_id = value

    @property
    def name(self):
        return CONF.volume_name_template % self.name_id

    def __init__(self, *args, **kwargs):
        super(Volume, self).__init__(*args, **kwargs)
        self._orig_metadata = {}
        self._orig_admin_metadata = {}
        self._orig_glance_metadata = {}

        self._reset_metadata_tracking()

    def obj_reset_changes(self, fields=None):
        super(Volume, self).obj_reset_changes(fields)
        self._reset_metadata_tracking(fields=fields)

    def _reset_metadata_tracking(self, fields=None):
        if fields is None or 'metadata' in fields:
            self._orig_metadata = (dict(self.metadata)
                                   if 'metadata' in self else {})
        if fields is None or 'admin_metadata' in fields:
            self._orig_admin_metadata = (dict(self.admin_metadata)
                                         if 'admin_metadata' in self else {})
        if fields is None or 'glance_metadata' in fields:
            self._orig_glance_metadata = (dict(self.glance_metadata)
                                          if 'glance_metadata' in self else {})

    def obj_what_changed(self):
        changes = super(Volume, self).obj_what_changed()
        if 'metadata' in self and self.metadata != self._orig_metadata:
            changes.add('metadata')
        if ('admin_metadata' in self
                and self.admin_metadata != self._orig_admin_metadata):
            changes.add('admin_metadata')
        if ('glance_metadata' in self
                and self.glance_metadata != self._orig_glance_metadata):
            changes.add('glance_metadata')

        return changes

    def obj_make_compatible(self, primitive, target_version):
        """Make an object representation compatible with a target version."""
        super(Volume, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)

    @staticmethod
    def _from_db_object(context, volume, db_volume, expected_attrs=None):
        if expected_attrs is None:
            expected_attrs = []
        for name, field in volume.fields.items():
            if name in Volume.OPTIONAL_FIELDS:
                continue
            value = db_volume.get(name)
            if isinstance(field, fields.IntegerField):
                value = value or 0
            volume[name] = value

        # Get data from db_volume object that was queried by joined query
        # from DB
        if 'metadata' in expected_attrs:
            volume.metadata = {}
            metadata = db_volume.get('volume_metadata', [])
            if metadata:
                volume.metadata = {
                    item['key']: item['value']
                    for item in metadata
                }
        if 'admin_metadata' in expected_attrs:
            volume.admin_metadata = {}
            metadata = db_volume.get('volume_admin_metadata', [])
            if metadata:
                volume.admin_metadata = {
                    item['key']: item['value']
                    for item in metadata
                }
        if 'glance_metadata' in expected_attrs:
            volume.glance_metadata = {}
            metadata = db_volume.get('volume_glance_metadata', [])
            if metadata:
                volume.glance_metadata = {
                    item['key']: item['value']
                    for item in metadata
                }
        if 'volume_type' in expected_attrs:
            db_volume_type = db_volume.get('volume_type')
            if db_volume_type:
                volume.volume_type = objects.VolumeType._from_db_object(
                    context,
                    objects.VolumeType(),
                    db_volume_type,
                    expected_attrs='extra_specs')
        if 'volume_attachment' in expected_attrs:
            attachments = base.obj_make_list(
                context, objects.VolumeAttachmentList(context),
                objects.VolumeAttachment, db_volume.get('volume_attachment'))
            volume.volume_attachment = attachments
        if 'consistencygroup' in expected_attrs:
            consistencygroup = objects.ConsistencyGroup(context)
            consistencygroup._from_db_object(context, consistencygroup,
                                             db_volume['consistencygroup'])
            volume.consistencygroup = consistencygroup
        if 'snapshots' in expected_attrs:
            snapshots = base.obj_make_list(context,
                                           objects.SnapshotList(context),
                                           objects.Snapshot,
                                           db_volume['snapshots'])
            volume.snapshots = snapshots

        volume._context = context
        volume.obj_reset_changes()
        return volume

    @base.remotable
    def create(self):
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason=_('already created'))
        updates = self.cinder_obj_get_changes()

        if 'consistencygroup' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('consistencygroup assigned'))
        if 'glance_metadata' in updates:
            raise exception.ObjectActionError(
                action='create', reason=_('glance_metadata assigned'))
        if 'snapshots' in updates:
            raise exception.ObjectActionError(action='create',
                                              reason=_('snapshots assigned'))

        db_volume = db.volume_create(self._context, updates)
        self._from_db_object(self._context, self, db_volume)

    @base.remotable
    def save(self):
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()

    @base.remotable
    def destroy(self):
        with self.obj_as_admin():
            db.volume_destroy(self._context, self.id)

    def obj_load_attr(self, attrname):
        if attrname not in self.OPTIONAL_FIELDS:
            raise exception.ObjectActionError(
                action='obj_load_attr',
                reason=_('attribute %s not lazy-loadable') % attrname)
        if not self._context:
            raise exception.OrphanedObjectError(method='obj_load_attr',
                                                objtype=self.obj_name())

        if attrname == 'metadata':
            self.metadata = db.volume_metadata_get(self._context, self.id)
        elif attrname == 'admin_metadata':
            self.admin_metadata = {}
            if self._context.is_admin:
                self.admin_metadata = db.volume_admin_metadata_get(
                    self._context, self.id)
        elif attrname == 'glance_metadata':
            self.glance_metadata = db.volume_glance_metadata_get(
                self._context, self.id)
        elif attrname == 'volume_type':
            self.volume_type = objects.VolumeType.get_by_id(
                self._context, self.volume_type_id)
        elif attrname == 'volume_attachment':
            attachments = objects.VolumeAttachmentList.get_all_by_volume_id(
                self._context, self.id)
            self.volume_attachment = attachments
        elif attrname == 'consistencygroup':
            consistencygroup = objects.ConsistencyGroup.get_by_id(
                self._context, self.consistencygroup_id)
            self.consistencygroup = consistencygroup
        elif attrname == 'snapshots':
            self.snapshots = objects.SnapshotList.get_all_for_volume(
                self._context, self.id)

        self.obj_reset_changes(fields=[attrname])

    def delete_metadata_key(self, key):
        db.volume_metadata_delete(self._context, self.id, key)
        md_was_changed = 'metadata' in self.obj_what_changed()

        del self.metadata[key]
        self._orig_metadata.pop(key, None)

        if not md_was_changed:
            self.obj_reset_changes(['metadata'])
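
The obj_load_attr() hooks above are what make the fields listed in OPTIONAL_FIELDS lazy-loadable: _from_db_object() skips them, and they are fetched from the database on first access. Below is a minimal stand-alone sketch of that behaviour using __getattr__; LazyVolume and fetch_metadata are invented for illustration, and oslo.versionedobjects wires the real hook up differently.

# Minimal sketch of lazy-loading optional fields on first attribute access.
# LazyVolume and fetch_metadata are hypothetical, not Cinder code.
OPTIONAL_FIELDS = ('metadata', 'snapshots')


def fetch_metadata(volume_id):
    # Stand-in for db.volume_metadata_get(context, volume_id).
    print('loading metadata for %s from the database' % volume_id)
    return {'attached_mode': 'rw'}


class LazyVolume(object):
    def __init__(self, volume_id):
        self.id = volume_id

    def __getattr__(self, attrname):
        # Only reached when normal lookup fails, i.e. field not yet loaded.
        if attrname not in OPTIONAL_FIELDS:
            raise AttributeError(attrname)
        value = fetch_metadata(self.id) if attrname == 'metadata' else []
        setattr(self, attrname, value)   # cache so the DB is hit only once
        return value


vol = LazyVolume('vol-1')
print(vol.metadata)   # triggers the "database" call
print(vol.metadata)   # second access returns the cached value
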