Example #1
 def load_foreign_relations(object_type):
     """
     Returns a mapping of all relations pointing towards a given hybrid object type.
     The resulting mapping is stored in volatile storage so subsequent calls can fetch it faster.
     """
     relation_key = 'ovs_relations_{0}'.format(object_type.__name__.lower())
     volatile = VolatileFactory.get_client()
     relation_info = volatile.get(relation_key)
     if relation_info is None:
         Toolbox.log_cache_hit('relations', False)
         relation_info = {}
         hybrid_structure = HybridRunner.get_hybrids()
         for class_descriptor in hybrid_structure.values():  # Extended objects
             cls = Descriptor().load(class_descriptor).get_object()
             for relation in cls._relations:
                 if relation.foreign_type is None:
                     remote_class = cls
                 else:
                     identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                         remote_class = Descriptor().load(hybrid_structure[identifier]).get_object()
                     else:
                         remote_class = relation.foreign_type
                 itemname = remote_class.__name__
                 if itemname == object_type.__name__:
                     relation_info[relation.foreign_key] = {'class': Descriptor(cls).descriptor,
                                                            'key': relation.name,
                                                            'list': not relation.onetoone}
         volatile.set(relation_key, relation_info)
     else:
         Toolbox.log_cache_hit('relations', True)
     return relation_info
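
The cache-or-compute flow above (miss on the volatile store, build the mapping, store it back) can be illustrated in isolation. A minimal, runnable sketch; the dict-backed stand-in below is illustrative only and not part of the OVS volatile client API:

    class FakeVolatileClient(object):
        """In-memory stand-in for the client returned by VolatileFactory.get_client()."""
        def __init__(self):
            self._store = {}

        def get(self, key):
            return self._store.get(key)

        def set(self, key, value):
            self._store[key] = value

    def cached_lookup(volatile, key, compute):
        """Return the cached value for key, computing and caching it on a miss."""
        value = volatile.get(key)
        if value is None:           # cache miss: compute once and store for later callers
            value = compute()
            volatile.set(key, value)
        return value

    volatile = FakeVolatileClient()
    relations = cached_lookup(volatile, 'ovs_relations_demo', lambda: {'demo_key': {'list': True}})
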
Example #2
 def __new__(cls, *args, **kwargs):
     """
     Constructs the instance, redirecting to the registered extended hybrid class when one exists
     """
     hybrid_structure = HybridRunner.get_hybrids()
     identifier = Descriptor(cls).descriptor['identifier']
     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
         new_class = Descriptor().load(hybrid_structure[identifier]).get_object()
         return super(cls, new_class).__new__(new_class, *args, **kwargs)
     return super(DataObject, cls).__new__(cls)
Example #3
 def __new__(cls, *args, **kwargs):
     """
     Constructs the instance, redirecting to the registered extended hybrid class when one exists
     """
     hybrid_structure = HybridRunner.get_hybrids()
     identifier = Descriptor(cls).descriptor['identifier']
     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
         new_class = Descriptor().load(hybrid_structure[identifier]).get_object()
         return super(cls, new_class).__new__(new_class, *args)
     return super(DataObject, cls).__new__(cls)
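
Both __new__ variants above implement the same idea: when the hybrid structure maps a class identifier to a different (extended) descriptor, instantiating the base class yields an instance of the extended class instead. A simplified, self-contained sketch of that redirection; the registry and class names are illustrative and not part of the OVS API:

    _extended_registry = {}  # base class -> extended subclass, stands in for HybridRunner.get_hybrids()

    class Base(object):
        def __new__(cls, *args, **kwargs):
            target = _extended_registry.get(cls, cls)   # redirect if an extended class is registered
            return super(Base, target).__new__(target)

    class Extended(Base):
        pass

    _extended_registry[Base] = Extended
    assert type(Base()) is Extended   # instantiating Base transparently yields Extended
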
Example #4
 def load_foreign_relations(object_type):
     """
     Returns a mapping of all relations pointing towards a given hybrid object type.
     The resulting mapping is stored in volatile storage so subsequent calls can fetch it faster.
     """
     relation_key = "ovs_relations_{0}".format(object_type.__name__.lower())
     if relation_key in RelationMapper.cache:
         return RelationMapper.cache[relation_key]
     volatile = VolatileFactory.get_client()
     relation_info = volatile.get(relation_key)
     if relation_info is not None:
         RelationMapper.cache[relation_key] = relation_info
         return relation_info
     relation_info = {}
     hybrid_structure = HybridRunner.get_hybrids()
     for class_descriptor in hybrid_structure.values():  # Extended objects
         cls = Descriptor().load(class_descriptor).get_object()
         for relation in cls._relations:
             if relation.foreign_type is None:
                 remote_class = cls
             else:
                 identifier = Descriptor(relation.foreign_type).descriptor["identifier"]
                 if identifier in hybrid_structure and identifier != hybrid_structure[identifier]["identifier"]:
                     remote_class = Descriptor().load(hybrid_structure[identifier]).get_object()
                 else:
                     remote_class = relation.foreign_type
             itemname = remote_class.__name__
             if itemname == object_type.__name__:
                 relation_info[relation.foreign_key] = {
                     "class": Descriptor(cls).descriptor,
                     "key": relation.name,
                     "list": not relation.onetoone,
                 }
     RelationMapper.cache[relation_key] = relation_info
     volatile.set(relation_key, relation_info)
     return relation_info
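
This variant adds a process-local dict (RelationMapper.cache) in front of the volatile store, so repeated lookups within one process skip the round trip entirely. The same two-tier idea in isolation, with illustrative names and a plain dict as the shared store:

    class TwoTierCache(object):
        """Process-local dict in front of a slower shared store (illustrative sketch)."""
        def __init__(self, shared):
            self.local = {}
            self.shared = shared  # dict-like shared store

        def get_or_compute(self, key, compute):
            if key in self.local:            # first tier: same-process hit
                return self.local[key]
            value = self.shared.get(key)     # second tier: shared volatile store
            if value is None:
                value = compute()            # miss everywhere: compute once
                self.shared[key] = value
            self.local[key] = value
            return value

    cache = TwoTierCache(shared={})
    print(cache.get_or_compute('ovs_relations_demo', lambda: {'demo_key': {'list': True}}))
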
Example #5
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            # Initial version:
            # * Set the version to THIS RELEASE version

            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                                  string.digits +
                                                                  '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                    for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [
                (admin_group, [read_role, write_role, manage_role]),
                (viewers_group, [read_role])
            ]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add service types
            for service_type_info in [ServiceType.SERVICE_TYPES.MD_SERVER, ServiceType.SERVICE_TYPES.ALBA_PROXY, ServiceType.SERVICE_TYPES.ARAKOON]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < OVSMigrator.THIS_VERSION:
            # Migrate unique constraints
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            client = PersistentFactory.get_client()
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
                uniques = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([k for k in client.prefix(unique_key.format(prop.name))]) == 0:
                        uniques.append(prop.name)
                if len(uniques) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key in client.prefix(prefix):
                        data = client.get(key)
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(unique_key.format(property_name), hashlib.sha1(str(data[property_name])).hexdigest())
                            client.set(ukey, key)

            # Complete rework of the way we detect devices to assign roles or use as ASD
            # Allow loop-, raid-, nvme-, ??-devices and logical volumes as ASD (https://github.com/openvstorage/framework/issues/792)
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException
            from ovs.lib.disk import DiskController

            for storagerouter in StorageRouterList.get_storagerouters():
                try:
                    client = SSHClient(storagerouter, username='******')
                except UnableToConnectException:
                    raise

                # Retrieve all symlinks for all devices
                # Example of name_alias_mapping:
                # {'/dev/md0': ['/dev/disk/by-id/md-uuid-ad2de634:26d97253:5eda0a23:96986b76', '/dev/disk/by-id/md-name-OVS-1:0'],
                #  '/dev/sda': ['/dev/disk/by-path/pci-0000:03:00.0-sas-0x5000c295fe2ff771-lun-0'],
                #  '/dev/sda1': ['/dev/disk/by-uuid/e3e0bc62-4edc-4c6b-a6ce-1f39e8f27e41', '/dev/disk/by-path/pci-0000:03:00.0-sas-0x5000c295fe2ff771-lun-0-part1']}
                name_alias_mapping = {}
                alias_name_mapping = {}
                for path_type in client.dir_list(directory='/dev/disk'):
                    if path_type in ['by-uuid', 'by-partuuid']:  # UUIDs can change after creating a filesystem on a partition
                        continue
                    directory = '/dev/disk/{0}'.format(path_type)
                    for symlink in client.dir_list(directory=directory):
                        symlink_path = '{0}/{1}'.format(directory, symlink)
                        link = client.file_read_link(symlink_path)
                        if link not in name_alias_mapping:
                            name_alias_mapping[link] = []
                        name_alias_mapping[link].append(symlink_path)
                        alias_name_mapping[symlink_path] = link

                for disk in storagerouter.disks:
                    if disk.aliases is None:
                        # noinspection PyProtectedMember
                        device_path = '/dev/{0}'.format(disk.name)
                        disk.aliases = name_alias_mapping.get(device_path, [device_path])
                        disk.save()
                    for partition in disk.partitions:
                        if partition.aliases is None:
                            # noinspection PyProtectedMember
                            partition_device = alias_name_mapping.get(partition._data.get('path'))
                            if partition_device is None:
                                partition.aliases = []
                                partition.save()
                                continue
                            partition.aliases = name_alias_mapping.get(partition_device, [])
                            partition.save()

                DiskController.sync_with_reality(storagerouter_guid=storagerouter.guid)

            # Only support ALBA backend type
            from ovs.dal.lists.backendtypelist import BackendTypeList
            for backend_type in BackendTypeList.get_backend_types():
                if backend_type.code != 'alba':
                    backend_type.delete()

            # Reformat the vpool.metadata information
            from ovs.dal.lists.vpoollist import VPoolList
            for vpool in VPoolList.get_vpools():
                new_metadata = {}
                for metadata_key, value in vpool.metadata.items():
                    new_info = {}
                    storagerouter_guids = [key for key in vpool.metadata.keys() if not key.startswith('backend')]
                    if isinstance(value, dict):
                        read_cache = value.get('backend_info', {}).get('fragment_cache_on_read', True)
                        write_cache = value.get('backend_info', {}).get('fragment_cache_on_write', False)
                        new_info['backend_info'] = {'alba_backend_guid': value.get('backend_guid'),
                                                    'backend_guid': None,
                                                    'frag_size': value.get('backend_info', {}).get('frag_size'),
                                                    'name': value.get('name'),
                                                    'policies': value.get('backend_info', {}).get('policies'),
                                                    'preset': value.get('preset'),
                                                    'sco_size': value.get('backend_info', {}).get('sco_size'),
                                                    'total_size': value.get('backend_info', {}).get('total_size')}
                        new_info['arakoon_config'] = value.get('arakoon_config')
                        new_info['connection_info'] = {'host': value.get('connection', {}).get('host', ''),
                                                       'port': value.get('connection', {}).get('port', ''),
                                                       'local': value.get('connection', {}).get('local', ''),
                                                       'client_id': value.get('connection', {}).get('client_id', ''),
                                                       'client_secret': value.get('connection', {}).get('client_secret', '')}
                        if metadata_key == 'backend':
                            new_info['caching_info'] = dict((sr_guid, {'fragment_cache_on_read': read_cache, 'fragment_cache_on_write': write_cache}) for sr_guid in storagerouter_guids)
                    if metadata_key in storagerouter_guids:
                        metadata_key = 'backend_aa_{0}'.format(metadata_key)
                    new_metadata[metadata_key] = new_info
                vpool.metadata = new_metadata
                vpool.save()

            # Removal of READ role
            from ovs.dal.lists.diskpartitionlist import DiskPartitionList
            for partition in DiskPartitionList.get_partitions():
                if 'READ' in partition.roles:
                    partition.roles.remove('READ')
                    partition.save()

        return OVSMigrator.THIS_VERSION
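
The control flow of migrate() is version-gated: a previous_version of 0 means a fresh install and triggers the initial model creation, anything below the target version runs the incremental fixes, and the target version is always returned so it can be persisted. A generalised, runnable sketch of that pattern; THIS_VERSION, the bootstrap callable and the step registry below are illustrative (the real method keeps all incremental work in a single branch):

    THIS_VERSION = 6  # illustrative target version, stands in for OVSMigrator.THIS_VERSION

    def run_migration(previous_version, bootstrap, steps):
        """Bootstrap fresh installs, replay newer steps on older installs, return the target version."""
        if previous_version == 0:
            bootstrap()
        else:
            for version, step in sorted(steps.items()):
                if previous_version < version <= THIS_VERSION:
                    step()
        return THIS_VERSION

    print(run_migration(0, lambda: None, {}))                                   # fresh install -> 6
    print(run_migration(4, lambda: None, {5: lambda: None, 6: lambda: None}))   # replays steps 5 and 6 -> 6
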
Example #6
 def test_objectproperties(self):
     """
     Validates the correctness of all hybrid objects:
     * They should contain all required properties
     * Properties should have the correct type
     * All dynamic properties should be implemented
     """
     # Some stuff here to dynamically test all hybrid properties
     hybrid_structure = HybridRunner.get_hybrids()
     print ''
     print 'Validating hybrids...'
     for class_descriptor in hybrid_structure.values():
         cls = Descriptor().load(class_descriptor).get_object()
         print '* {0}'.format(cls.__name__)
         relation_info = RelationMapper.load_foreign_relations(cls)
         remote_properties_n = []
         remote_properties_1 = []
         if relation_info is not None:
             for key, info in relation_info.iteritems():
                 if info['list'] is True:
                     remote_properties_n.append(key)
                 else:
                     remote_properties_1.append(key)
         # Make sure certain attributes are correctly set
         self.assertIsInstance(cls._properties, list, '_properties required: {0}'.format(cls.__name__))
         self.assertIsInstance(cls._relations, list, '_relations required: {0}'.format(cls.__name__))
         self.assertIsInstance(cls._dynamics, list, '_dynamics required: {0}'.format(cls.__name__))
         # Check types
         allowed_types = [int, float, long, str, bool, list, dict]
         for prop in cls._properties:
             is_allowed_type = prop.property_type in allowed_types \
                 or isinstance(prop.property_type, list)
             self.assertTrue(is_allowed_type,
                             '_properties types in {0} should be one of {1}'.format(
                                 cls.__name__, str(allowed_types)
                             ))
         for dynamic in cls._dynamics:
             is_allowed_type = dynamic.return_type in allowed_types \
                 or isinstance(dynamic.return_type, list)
             self.assertTrue(is_allowed_type,
                             '_dynamics types in {0} should be one of {1}'.format(
                                 cls.__name__, str(allowed_types)
                             ))
         instance = cls()
         for prop in cls._properties:
             self.assertEqual(getattr(instance, prop.name), prop.default,
                              'Default property set correctly')
         # Make sure the type can be instantiated
         self.assertIsNotNone(instance.guid)
         properties = []
         for item in dir(instance):
             if hasattr(cls, item) and isinstance(getattr(cls, item), property):
                 properties.append(item)
         # All dynamic properties should be implemented
         missing_props = []
         for dynamic in instance._dynamics:
             if dynamic.name not in properties:
                 missing_props.append(dynamic.name)
         self.assertEqual(len(missing_props), 0,
                          'Missing dynamic properties in {0}: {1}'.format(cls.__name__, missing_props))
         # And all properties should be defined in either the properties, relations or dynamics metadata
         missing_metadata = []
         for found_prop in properties:
             found = found_prop in [prop.name for prop in cls._properties] \
                 or found_prop in [relation.name for relation in cls._relations] \
                 or found_prop in ['{0}_guid'.format(relation.name) for relation in cls._relations] \
                 or found_prop in [dynamic.name for dynamic in cls._dynamics] \
                 or found_prop in remote_properties_n \
                 or found_prop in remote_properties_1 \
                 or found_prop in ['{0}_guids'.format(key) for key in remote_properties_n] \
                 or found_prop in ['{0}_guid'.format(key) for key in remote_properties_1] \
                 or found_prop == 'guid'
             if not found:
                 missing_metadata.append(found_prop)
         self.assertEqual(len(missing_metadata), 0,
                          'Missing metadata for properties in {0}: {1}'.format(cls.__name__, missing_metadata))
         instance.delete()
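
The metadata check near the end relies on a small introspection trick: every implemented property of the class is discovered through dir() and isinstance(getattr(cls, name), property), and that set is compared against the declared metadata lists. A condensed, runnable sketch of the same technique with an illustrative class:

    class Demo(object):
        _properties = ['size']  # declared metadata (illustrative)

        @property
        def size(self):
            return 42

        @property
        def undeclared(self):
            return 0

    declared = set(Demo._properties)
    implemented = set(name for name in dir(Demo) if isinstance(getattr(Demo, name), property))
    print(implemented - declared)  # -> the 'undeclared' property: implemented but missing from the metadata
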
Example #7
    def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False, _hook=None):
        """
        Loads an object with a given guid. If no guid is given, a new object
        is generated with a new guid.
        * guid: The guid indicating which object should be loaded
        * datastore_wins: Optional boolean indicating save conflict resolution behaviour.
        ** True: when saving, externally modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: if changed fields were also changed externally, an error will be raised
        """

        # Initialize super class
        super(DataObject, self).__init__()

        # Initialize internal fields
        self._frozen = False
        self._datastore_wins = datastore_wins
        self._guid = None    # Guid identifier of the object
        self._original = {}  # Original data copy
        self._metadata = {}  # Some metadata, mainly used for unit testing
        self._data = {}      # Internal data storage
        self._objects = {}   # Internal objects storage

        # Initialize public fields
        self.dirty = False
        self.volatile = volatile

        # Worker fields/objects
        self._classname = self.__class__.__name__.lower()

        # Rebuild _relation types
        hybrid_structure = HybridRunner.get_hybrids()
        for relation in self._relations:
            if relation.foreign_type is not None:
                identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                    relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()

        # Init guid
        self._new = False
        if guid is None:
            self._guid = str(uuid.uuid4())
            self._new = True
        else:
            self._guid = str(guid)

        # Build base keys
        self._key = '{0}_{1}_{2}'.format(DataObject.NAMESPACE, self._classname, self._guid)

        # Worker mutexes
        self._mutex_version = volatile_mutex('ovs_dataversion_{0}_{1}'.format(self._classname, self._guid))

        # Load data from cache or persistent backend where appropriate
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._metadata['cache'] = None
        if self._new:
            self._data = {}
        else:
            if data is not None:
                self._data = copy.deepcopy(data)
                self._metadata['cache'] = None
            else:
                self._data = self._volatile.get(self._key)
                if self._data is None:
                    self._metadata['cache'] = False
                    try:
                        self._data = self._persistent.get(self._key)
                    except KeyNotFoundException:
                        raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                            self.__class__.__name__, self._guid
                        ))
                else:
                    self._metadata['cache'] = True

        # Set default values on new fields
        for prop in self._properties:
            if prop.name not in self._data:
                self._data[prop.name] = prop.default
            self._add_property(prop)

        # Load relations
        for relation in self._relations:
            if relation.name not in self._data:
                if relation.foreign_type is None:
                    cls = self.__class__
                else:
                    cls = relation.foreign_type
                self._data[relation.name] = Descriptor(cls).descriptor
            self._add_relation_property(relation)

        # Add wrapped properties
        for dynamic in self._dynamics:
            self._add_dynamic_property(dynamic)

        # Load foreign keys
        relations = RelationMapper.load_foreign_relations(self.__class__)
        if relations is not None:
            for key, info in relations.iteritems():
                self._objects[key] = {'info': info,
                                      'data': None}
                self._add_list_property(key, info['list'])

        if _hook is not None and hasattr(_hook, '__call__'):
            _hook()

        if not self._new:
            # Re-cache the object, if required
            if self._metadata['cache'] is False:
                # The data wasn't loaded from the cache, so caching is required now
                try:
                    self._mutex_version.acquire(30)
                    this_version = self._data['_version']
                    store_version = self._persistent.get(self._key)['_version']
                    if this_version == store_version:
                        self._volatile.set(self._key, self._data)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
                finally:
                    self._mutex_version.release()

        # Freeze property creation
        self._frozen = True

        # Optionally, initialize some fields
        if data is not None:
            for prop in self._properties:
                if prop.name in data:
                    setattr(self, prop.name, data[prop.name])

        # Store original data
        self._original = copy.deepcopy(self._data)
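
The load path at the end of __init__ is a read-through cache with a guarded refill: try the volatile store first, fall back to the persistent store, and only re-cache when the persistent version has not moved in the meantime. A condensed, runnable sketch; the plain dicts and sample record are illustrative, and the real code additionally serialises the version check with a volatile mutex:

    volatile = {}
    persistent = {'ovs_data_demo_1': {'_version': 3, 'name': 'demo'}}

    def load(key):
        data = volatile.get(key)
        if data is not None:
            return data, True                              # served from the cache
        data = persistent[key]                             # raises KeyError when the object is gone
        if data['_version'] == persistent[key]['_version']:
            volatile[key] = data                           # no concurrent save bumped the version: safe to re-cache
        return data, False

    print(load('ovs_data_demo_1')[1])  # False: first load comes from the persistent store
    print(load('ovs_data_demo_1')[1])  # True: second load is served from the volatile cache
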
Example #8
    def get(self):
        """
        Returns the OpenAPI (Swagger 2.0) specification
        """
        version = settings.VERSION[-1]
        data = {'swagger': '2.0',
                'info': {'title': 'Open vStorage',
                         'description': 'The Open vStorage API.',
                         'version': str(version)},
                'basePath': '/api',
                'schemes': ['https'],
                'consumes': ['application/json'],
                'produces': ['application/json; version={0}'.format(version)],
                'paths': {'/': {'get': {'summary': 'Retrieve API metadata',
                                        'operationId': 'api',
                                        'responses': {'200': {'description': 'API metadata',
                                                              'schema': {'type': 'object',
                                                                         'title': 'APIMetadata',
                                                                         'properties': {'authenticated': {'type': 'boolean',
                                                                                                          'description': 'Indicates whether the client is authenticated.'},
                                                                                        'authentication_state': {'type': 'string',
                                                                                                                 'description': 'Provides more information on the "authenticated" state of a client.',
                                                                                                                 'enum': ['unauthenticated',
                                                                                                                          'invalid_authorization_type',
                                                                                                                          'invalid_token',
                                                                                                                          'token_expired',
                                                                                                                          'inactive_user',
                                                                                                                          'authenticated',
                                                                                                                          'unexpected_exception']},
                                                                                        'authentication_metadata': {'type': 'object',
                                                                                                                    'title': 'AuthenticationMetadata',
                                                                                                                    'description': 'Contains information on the usage of an optional 3rd party OAuth2.0 authentication service.',
                                                                                                                    'properties': {'ip': {'type': 'string',
                                                                                                                                          'description': 'The IP address of the current node.'},
                                                                                                                                   'mode': {'type': 'string',
                                                                                                                                            'description': 'Indicates whether the "local" or a "remote" authentication endpoint should be used.',
                                                                                                                                            'enum': ['local',
                                                                                                                                                     'remote']},
                                                                                                                                   'authorize_uri': {'type': 'string',
                                                                                                                                                     'description': 'The URI to which the user has to be redirected to authenticate.'},
                                                                                                                                   'client_id': {'type': 'string',
                                                                                                                                                 'description': 'The client identifier to be used when authenticating.'},
                                                                                                                                   'scope': {'type': 'string',
                                                                                                                                             'description': 'The scope that has to be requested to the authentication endpoint.'}},
                                                                                                                    'required': []},
                                                                                        'username': {'type': 'string',
                                                                                                     'description': 'The username of the client or null if not available.'},
                                                                                        'userguid': {'type': 'string',
                                                                                                     'description': 'The GUID (primary key) of the client\'s user or null if not available.'},
                                                                                        'roles': {'type': 'array',
                                                                                                  'description': 'An array of the scopes that were granted to the client.',
                                                                                                  'items': {'type': 'string'}},
                                                                                        'identification': {'type': 'object',
                                                                                                           'title': 'APIIdentification',
                                                                                                           'description': 'Contains identification information about the API/environment.',
                                                                                                           'properties': {'cluster_id': {'type': 'string',
                                                                                                                                         'description': 'Environment identification string.'}},
                                                                                                           'required': ['cluster_id']},
                                                                                        'storagerouter_ips': {'type': 'array',
                                                                                                              'description': 'An array containing the IP addresses of all StorageRouters in the environment.',
                                                                                                              'items': {'type': 'string'}},
                                                                                        'versions': {'type': 'array',
                                                                                                     'description': 'An array of all versions that this instance of the API supports.',
                                                                                                     'items': {'type': 'integer'}},
                                                                                        'plugins': {}},
                                                                         'required': ['authenticated',
                                                                                      'authentication_state',
                                                                                      'authentication_metadata',
                                                                                      'username',
                                                                                      'userguid',
                                                                                      'roles',
                                                                                      'identification',
                                                                                      'storagerouter_ips',
                                                                                      'versions',
                                                                                      'plugins']}}}}}},
                'definitions': {'APIError': {'type': 'object',
                                             'properties': {'error': {'type': 'string',
                                                                      'description': 'An error code'},
                                                            'error_description': {'type': 'string',
                                                                                  'description': 'Descriptive error message'}},
                                             'required': ['error', 'error_description']}},
                'securityDefinitions': {'oauth2': {'type': 'oauth2',
                                                   'flow': 'password',
                                                   'tokenUrl': 'oauth2/token',
                                                   'scopes': {'read': 'Read access',
                                                              'write': 'Write access',
                                                              'manage': 'Management access'}}},
                'security': [{'oauth2': ['read', 'write', 'manage']}]}

        # Plugin information
        plugins = {}
        for backend_type in BackendTypeList.get_backend_types():
            if backend_type.has_plugin is True:
                if backend_type.code not in plugins:
                    plugins[backend_type.code] = []
                plugins[backend_type.code] += ['backend', 'gui']
        generic_plugins = Configuration.get('/ovs/framework/plugins/installed|generic')
        for plugin_name in generic_plugins:
            if plugin_name not in plugins:
                plugins[plugin_name] = []
            plugins[plugin_name] += ['gui']

        data['paths']['/']['get']['responses']['200']['schema']['properties']['plugins'] = {
            'type': 'object',
            'title': 'PluginMetadata',
            'description': 'Contains information about plugins active in the system. Each property represents a plugin and the areas where it provides functionality.',
            'properties': {plugin: {'type': 'array',
                                    'description': 'An array of all areas in which the plugin provides functionality.',
                                    'items': {'type': 'string'}} for (plugin, info) in plugins.iteritems()},
            'required': []
        }

        # API paths
        def load_parameters(_fun):
            # Parameters by @load decorators
            parameter_info = []
            mandatory_args = _fun.ovs_metadata['load']['mandatory']
            optional_args = _fun.ovs_metadata['load']['optional']
            object_type = _fun.ovs_metadata['load']['object_type']
            entries = ['version', 'request', 'local_storagerouter', 'pk', 'contents']
            if object_type is not None:
                object_arg = object_type.__name__.lower()
                if object_arg in mandatory_args or object_arg in optional_args:
                    parameter_info.append({'name': 'guid',
                                           'in': 'path',
                                            'description': 'Identifier of the object on which the call is applied.',
                                           'required': True,
                                           'type': 'string'})
                entries.append(object_arg)
            for entry in entries:
                if entry in mandatory_args:
                    mandatory_args.remove(entry)
                if entry in optional_args:
                    optional_args.remove(entry)
            docs = _fun.__doc__
            doc_info = {}
            if docs is not None:
                for match in re.finditer(':(param|type) (.*?): (.*)', docs, re.MULTILINE):
                    entries = match.groups()
                    if entries[1] not in doc_info:
                        doc_info[entries[1]] = {}
                    doc_info[entries[1]][entries[0]] = entries[2]
            for argument in mandatory_args + optional_args:
                info = {'name': argument,
                        'in': 'query',
                        'required': argument in mandatory_args,
                        'type': 'string'}
                if argument in doc_info:
                    description = doc_info[argument].get('param')
                    if description:
                        info['description'] = description
                    type_info = doc_info[argument].get('type')
                    if type_info:
                        if type_info in ['int', 'long']:
                            info['type'] = 'integer'
                        elif type_info in ['float']:
                            info['type'] = 'number'
                        elif type_info in ['bool']:
                            info['type'] = 'boolean'
                        elif type_info in ['str', 'basestring', 'unicode']:
                            info['type'] = 'string'
                        elif type_info in ['dict']:
                            info['type'] = 'object'
                parameter_info.append(info)
            # Parameters by @returns_* decorators
            return_info = _fun.ovs_metadata.get('returns', None)
            if return_info is not None:
                # Extra parameters
                params = return_info['parameters']
                fields = []
                if 'contents' in params or 'sorting' in params:
                    _cls = return_info['object_type']
                    fields = [prop.name for prop in _cls._properties] + \
                             ['{0}_guid'.format(rel.name) for rel in _cls._relations] + \
                             [dynamic.name for dynamic in _cls._dynamics]
                    relation_info = RelationMapper.load_foreign_relations(_cls)
                    if relation_info is not None:
                        fields += [('{0}_guid' if rel_info['list'] is False else '{0}_guids').format(key)
                                   for key, rel_info in relation_info.iteritems()]
                    fields = fields + ['-{0}'.format(field) for field in fields]
                for parameter in params:
                    if parameter == 'contents':
                        parameter_info.append({'name': 'contents',
                                               'in': 'query',
                                               'description': 'Specify the returned contents.',
                                               'required': True,
                                               'collectionFormat': 'csv',
                                               'type': 'array',
                                               'enum': ['_dynamics', '_relations', 'guid'] + fields,
                                               'items': {'type': 'string'}})
                    elif parameter == 'paging':
                        parameter_info.append({'name': 'page',
                                               'in': 'query',
                                               'description': 'Specifies the page to be returned.',
                                               'required': False,
                                               'type': 'integer'})
                        parameter_info.append({'name': 'page_size',
                                               'in': 'query',
                                               'description': 'Specifies the size of a page. Supported values: 10, 25, 50 and 100. Requires "page" to be set.',
                                               'required': False,
                                               'type': 'integer'})
                    elif parameter == 'sorting':
                        parameter_info.append({'name': 'sort',
                                               'in': 'query',
                                               'description': 'Specifies the sorting of the list.',
                                               'required': False,
                                               'default': params[parameter],
                                               'enum': ['guid', '-guid'] + fields,
                                               'type': 'array',
                                               'items': {'type': 'string'}})
            return parameter_info

        def load_response(_fun):
            response_code = '200'
            response_schema = None
            return_info = _fun.ovs_metadata.get('returns', None)
            if return_info is not None:
                return_type, _return_code = return_info['returns']
                if _return_code is not None:
                    response_code = _return_code
                if return_type == 'object':
                    _cls = return_info['object_type']
                    response_schema = {'$ref': '#/definitions/{0}'.format(_cls.__name__)}
                elif return_type == 'list':
                    _cls = return_info['object_type']
                    class_schema = {'$ref': '#/definitions/{0}'.format(_cls.__name__)}
                    fields = [prop.name for prop in _cls._properties] + \
                             ['{0}_guid'.format(rel.name) for rel in _cls._relations] + \
                             [dynamic.name for dynamic in _cls._dynamics]
                    relation_info = RelationMapper.load_foreign_relations(_cls)
                    if relation_info is not None:
                        fields += [('{0}_guid' if rel_info['list'] is False else '{0}_guids').format(key)
                                   for key, rel_info in relation_info.iteritems()]
                    fields = fields + ['-{0}'.format(field) for field in fields]
                    response_schema = {'type': 'object',
                                       'title': 'DataList',
                                       'properties': {'_contents': {'type': 'array',
                                                                    'description': 'Requested contents.',
                                                                    'items': {'type': 'string'},
                                                                    'required': True,
                                                                    'collectionFormat': 'csv',
                                                                    'enum': ['_dynamics', '_relations', 'guid'] + fields},
                                                      '_paging': {'type': 'object',
                                                                  'title': 'PagingMetadata',
                                                                  'properties': {'total_items': {'type': 'integer',
                                                                                                 'description': 'Total items available.'},
                                                                                 'max_page': {'type': 'integer',
                                                                                              'description': 'Last page available.'},
                                                                                 'end_number': {'type': 'integer',
                                                                                                'description': '1-based index of the last item in the current page.'},
                                                                                 'current_page': {'type': 'integer',
                                                                                                  'description': 'Current page number.'},
                                                                                 'page_size': {'type': 'integer',
                                                                                               'description': 'Number of items in the current page.'},
                                                                                 'start_number': {'type': 'integer',
                                                                                                  'description': '1-based index of the first item in the current page'}},
                                                                  'required': ['total_items', 'max_page', 'end_number', 'current_page', 'page_size', 'start_number']},
                                                      '_sorting': {'type': 'array',
                                                                   'description': 'Applied sorting',
                                                                   'items': {'type': 'string'},
                                                                   'required': True,
                                                                   'collectionFormat': 'csv',
                                                                   'enum': ['-guid', 'guid'] + fields},
                                                      'data': {'type': 'array',
                                                               'description': 'List of serialized {0}s.'.format(_cls.__name__),
                                                               'required': True,
                                                               'items': class_schema}},
                                       'required': ['_contents', '_paging', '_sorting', 'data']}
                else:
                    docs = _fun.__doc__
                    doc_info = {}
                    if docs is not None:
                        for match in re.finditer(':(return|rtype): (.*)', docs, re.MULTILINE):
                            entries = match.groups()
                            doc_info[entries[0]] = entries[1]
                    if return_type == 'task':
                        task_return = ''
                        if 'return' in doc_info:
                            task_return = ' The task returns: {0}'.format(doc_info['return'])
                        response_schema = {'type': 'string',
                                           'description': 'A task identifier.{0}'.format(task_return)}
                    elif return_type is None:
                        response_schema = {'type': 'string'}
                        if 'return' in doc_info:
                            response_schema['description'] = doc_info['return']
                        if 'rtype' in doc_info:
                            type_info = doc_info['rtype']
                            if type_info in ['int', 'long']:
                                response_schema['type'] = 'integer'
                            elif type_info in ['float']:
                                response_schema['type'] = 'number'
                            elif type_info in ['bool']:
                                response_schema['type'] = 'boolean'
                            elif type_info in ['str', 'basestring', 'unicode']:
                                response_schema['type'] = 'string'
                            elif type_info in ['dict']:
                                response_schema['type'] = 'object'
                            elif type_info in ['None']:
                                response_schema = None
                                response_code = '204'
            return response_code, response_schema

        paths = data['paths']
        path = '/'.join([os.path.dirname(__file__), 'backend', 'views'])
        for filename in os.listdir(path):
            if os.path.isfile('/'.join([path, filename])) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, '/'.join([path, filename]))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) \
                            and member[1].__module__ == name \
                            and 'ViewSet' in [base.__name__ for base in member[1].__bases__]:
                        cls = member[1]
                        if hasattr(cls, 'skip_spec') and cls.skip_spec is True:
                            continue
                        base_calls = {'list': ['get', '/{0}/'],
                                      'retrieve': ['get', '/{0}/{{guid}}/'],
                                      'create': ['post', '/{0}/'],
                                      'destroy': ['delete', '/{0}/{{guid}}/'],
                                      'partial_update': ['patch', '/{0}/{{guid}}/']}
                        for call, route_data in base_calls.iteritems():
                            if hasattr(cls, call):
                                fun = getattr(cls, call)
                                docstring = fun.__doc__.strip().split('\n')[0]
                                parameters = load_parameters(fun)
                                return_code, schema = load_response(fun)
                                route = {route_data[0]: {'summary': docstring,
                                                         'operationId': '{0}.{1}'.format(member[1].prefix, call),
                                                         'responses': {return_code: {'description': docstring},
                                                                       'default': {'description': 'Error payload',
                                                                                   'schema': {'$ref': '#/definitions/APIError'}}},
                                                         'parameters': parameters}}
                                if schema is not None:
                                    route[route_data[0]]['responses'][return_code]['schema'] = schema
                                current_path = route_data[1].format(member[1].prefix)
                                if current_path not in paths:
                                    paths[current_path] = {}
                                paths[current_path].update(route)
                        funs = [fun[1] for fun in inspect.getmembers(cls, predicate=inspect.ismethod)
                                if fun[0] not in base_calls.keys()]
                        for fun in funs:
                            if hasattr(fun, 'bind_to_methods'):
                                routes = {}
                                docstring = fun.__doc__.strip().split('\n')[0]
                                parameters = load_parameters(fun)
                                return_code, schema = load_response(fun)
                                name = fun.__name__
                                for verb in fun.bind_to_methods:
                                    routes[verb] = {'summary': docstring,
                                                    'operationId': '{0}.{1}_{2}'.format(member[1].prefix, verb, name),
                                                    'responses': {return_code: {'description': docstring},
                                                                  'default': {'description': 'Error payload',
                                                                              'schema': {'$ref': '#/definitions/APIError'}}},
                                                    'parameters': parameters}
                                    if schema is not None:
                                        routes[verb]['responses'][return_code]['schema'] = schema
                                paths['/{0}/{{guid}}/{1}/'.format(member[1].prefix, name)] = routes

        # DataObject / hybrids
        def build_property(prop):
            _docstring = prop.docstring or prop.name
            _docstring = _docstring.replace('None', 'null').replace('True', 'true').replace('False', 'false')
            info = {'description': _docstring}
            if prop.default is not None:
                info['default'] = prop.default
            if prop.property_type == int:
                info['type'] = 'integer'
            elif prop.property_type == float:
                info['type'] = 'number'
            elif prop.property_type == long:
                info['type'] = 'integer'
            elif prop.property_type == str:
                info['type'] = 'string'
            elif prop.property_type == bool:
                info['type'] = 'boolean'
            elif prop.property_type == list:
                info['type'] = 'array'
            elif prop.property_type == dict:
                info['type'] = 'object'
            elif prop.property_type == set:
                info['type'] = 'array'
            elif isinstance(prop.property_type, list):  # enumerator
                info['type'] = 'string'
                info['enum'] = prop.property_type
            return info

        def build_relation(_cls, relation):
            itemtype = relation.foreign_type.__name__ if relation.foreign_type is not None else _cls.__name__
            _docstring = '{1} instance identifier{3}. One-to-{0} relation with {1}.{2}.'.format(
                'one' if relation.onetoone is True else 'many',
                itemtype,
                ('{0}_guid' if relation.onetoone is True else '{0}_guids').format(relation.foreign_key),
                '' if relation.mandatory is True else ', null if relation is not set'
            )
            info = {'description': _docstring,
                    'type': 'string'}
            return '{0}_guid'.format(relation.name), info

        def build_dynamic(_cls, dynamic):
            _docstring = dynamic.name
            if hasattr(_cls, '_{0}'.format(dynamic.name)):
                docs = getattr(_cls, '_{0}'.format(dynamic.name)).__doc__
                if docs is not None:
                    _docstring = docs.strip().split('\n')[0]
                    _docstring = _docstring.replace('None', 'null').replace('True', 'true').replace('False', 'false')
            _docstring = '{0} (dynamic property, cache timeout: {1}s)'.format(_docstring, dynamic.timeout)
            info = {'description': _docstring,
                    'readOnly': True}
            if dynamic.return_type == int:
                info['type'] = 'integer'
            elif dynamic.return_type == float:
                info['type'] = 'number'
            elif dynamic.return_type == long:
                info['type'] = 'integer'
            elif dynamic.return_type == str:
                info['type'] = 'string'
            elif dynamic.return_type == bool:
                info['type'] = 'boolean'
            elif dynamic.return_type == list:
                info['type'] = 'array'
            elif dynamic.return_type == dict:
                info['type'] = 'object'
            elif dynamic.return_type == set:
                info['type'] = 'array'
            elif isinstance(dynamic.return_type, list):  # enumerator
                info['type'] = 'string'
                info['enum'] = dynamic.return_type
            return info

        def build_remote_relation(relation):
            key, relation_info = relation
            remote_cls = Descriptor().load(relation_info['class']).get_object()
            _docstring = '{1} instance identifier{3}. One-to-{0} relation with {1}.{2}.'.format(
                'one' if relation_info['list'] is False else 'many',
                remote_cls.__name__,
                '{0}_guid'.format(relation_info['key']),
                '' if relation_info['list'] is False else 's'
            )
            info = {'description': _docstring,
                    'readOnly': True}
            if relation_info['list'] is True:
                info['type'] = 'array'
                info['items'] = {'type': 'string'}
                _name = '{0}_guids'.format(key)
            else:
                info['type'] = 'string'
                _name = '{0}_guid'.format(key)
            return _name, info

        def get_properties(_cls):
            properties = {}
            properties.update({prop.name: build_property(prop) for prop in _cls._properties})
            properties.update(dict(build_relation(_cls, relation) for relation in _cls._relations))
            properties.update({dynamic.name: build_dynamic(_cls, dynamic) for dynamic in _cls._dynamics})
            relation_info = RelationMapper.load_foreign_relations(_cls)
            if relation_info is not None:
                properties.update(dict(build_remote_relation(relation) for relation in relation_info.iteritems()))
            return properties

        def get_required_properties(_cls):
            required = []
            for prop in _cls._properties:
                if prop.mandatory is True:
                    required.append(prop.name)
            for relation in _cls._relations:
                if relation.mandatory is True:
                    required.append('{0}_guid'.format(relation.name))
            return required

        definitions = data['definitions']
        definitions['DataObject'] = {'type': 'object',
                                     'title': 'DataObject',
                                     'description': 'Root object inherited by all hybrid objects. Shall not be used directly.',
                                     'properties': {'guid': {'type': 'string',
                                                             'description': 'Identifier of the object.'}},
                                     'required': ['guid']}
        hybrid_structure = HybridRunner.get_hybrids()
        for class_descriptor in hybrid_structure.values():
            cls = Descriptor().load(class_descriptor).get_object()
            definitions[cls.__name__] = {'description': cls.__doc__.strip().split('\n')[0],
                                         'allOf': [{'$ref': '#/definitions/DataObject'},
                                                   {'type': 'object',
                                                    'properties': get_properties(cls),
                                                    'required': get_required_properties(cls)}]}

        return data
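
For illustration, here is a hand-written sketch of the shape of the data the generator above produces, using a hypothetical 'vpools' ViewSet and a trimmed-down VPool definition; none of the concrete prefixes, docstrings or schemas are taken from a real deployment:

    # Hand-written sketch; all values are illustrative only.
    example_spec = {
        'paths': {'/vpools/': {'get': {'summary': 'Overview of available vPools',
                                       'operationId': 'vpools.list',
                                       'responses': {'200': {'description': 'Overview of available vPools',
                                                             'schema': {'type': 'array',
                                                                        'items': {'$ref': '#/definitions/VPool'}}},
                                                     'default': {'description': 'Error payload',
                                                                 'schema': {'$ref': '#/definitions/APIError'}}},
                                       'parameters': []}}},
        'definitions': {'VPool': {'description': 'A vPool',
                                  'allOf': [{'$ref': '#/definitions/DataObject'},
                                            {'type': 'object',
                                             'properties': {'name': {'description': 'Name of the vPool',
                                                                     'type': 'string'}},
                                             'required': ['name']}]}}}
    print sorted(example_spec['paths']['/vpools/']['get']['responses'].keys())  # ['200', 'default']
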
Example #10
0
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required.
        If previous_version is, for example, 0 and this script is at
        version 3, it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                                  string.digits +
                                                                  '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                    for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [
                (admin_group, [read_role, write_role, manage_role]),
                (viewers_group, [read_role])
            ]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                      ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [ServiceType.SERVICE_TYPES.MD_SERVER, ServiceType.SERVICE_TYPES.ALBA_PROXY, ServiceType.SERVICE_TYPES.ARAKOON]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry] and 'hybrids' in data[entry]['source']:
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'], data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_md'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.DB, StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_temp'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.SCRUB, StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink({sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                                                                'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                                     'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                                     'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                                     'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key], list)
                                entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems():
                                                number = 0
                                                migrated = False
                                                for sd_partition in storagedriver.partitions:
                                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                        if sd_partition.partition_guid == partition.guid:
                                                            number = max(sd_partition.number, number)
                                                if migrated is False:
                                                    sd_partition = StorageDriverPartition()
                                                    sd_partition.role = role
                                                    sd_partition.sub_role = subrole
                                                    sd_partition.partition = partition
                                                    sd_partition.storagedriver = storagedriver
                                                    sd_partition.size = None
                                                    sd_partition.number = number + 1
                                                    sd_partition.save()
                                                    if folder:
                                                        source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                                    else:
                                                        source = entry
                                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                                    path = sd_partition.path.rsplit('/', 1)[0]
                                                    if path:
                                                        client.dir_create(path)
                                                        client.dir_chown(path, 'ovs', 'ovs')
                                                    client.symlink({sd_partition.path: source})
                                                    migrated_objects[source] = sd_partition
                                            if is_list:
                                                storagedriver._data[key].remove(entry)
                                                if len(storagedriver._data[key]) == 0:
                                                    del storagedriver._data[key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
                    config = StorageDriverConfiguration('storagedriver', storagedriver.vpool_guid, storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()
                    for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join([root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter, username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk instead of using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
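            # Reverse-index key layout: remote class name, remote object guid, foreign key name, own object guid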
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {'type': DataList.where_operator.AND,
                                             'items': []})
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        if relation.foreign_type is None:
                            rcls = cls
                            rclsname = rcls.__name__.lower()
                        else:
                            rcls = relation.foreign_type
                            rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()
            except:
                pass
            from ovs.dal.lists.vdisklist import VDiskList
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                      'cluster_multiplier': vdisk.info['cluster_multiplier']}
                    vdisk.save()
                except:
                    pass

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
                vpool.metadata = {'backend': vpool.metadata}
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
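
The structure above follows a simple version-gating pattern: each block only runs when coming from an older version and then bumps working_version, so the steps are applied in order and a partially migrated store can be resumed. A minimal, self-contained sketch of that pattern (step names are made up, nothing below touches the OVS data model):

    def migrate_example(previous_version):
        executed_steps = []
        working_version = previous_version
        if working_version < 1:
            executed_steps.append('create defaults')      # e.g. groups, users, roles
            working_version = 1
        if working_version < 2:
            executed_steps.append('rewrite descriptors')  # e.g. a new on-disk format
            working_version = 2
        return working_version, executed_steps

    print migrate_example(0)  # (2, ['create defaults', 'rewrite descriptors'])
    print migrate_example(1)  # (2, ['rewrite descriptors'])
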
Example #11
0
    def _execute_query(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached

        Definitions:
        * <query>: Should be a dictionary:
                   {'type' : DataList.where_operator.XYZ,
                    'items': <items>}
        * <filter>: A tuple defining a single expression:
                    (<field>, DataList.operator.XYZ, <value> [, <ignore_case>])
                    The field is any property you would also find on the given object. Properties of
                    related objects can be addressed by dotting as deep as required.
        * <items>: A list of one or more <query> or <filter> items. This means the query structure is recursive and
                   complex queries are possible
        """
        from ovs.dal.dataobject import DataObject
        hybrid_structure = HybridRunner.get_hybrids()
        query_object_id = Descriptor(self._object_type).descriptor['identifier']
        if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
            self._object_type = Descriptor().load(hybrid_structure[query_object_id]).get_object()
        object_type_name = self._object_type.__name__.lower()
        prefix = '{0}_{1}_'.format(DataObject.NAMESPACE, object_type_name)

        if self._guids is not None:
            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index, guid in enumerate(self._guids):
                self._data[guid] = {'data': entries[index],
                                    'guid': guid}
            self._executed = True
            return

        cached_data = self._volatile.get(self._key)
        if cached_data is None:
            self.from_cache = False

            query_type = self._query['type']
            query_items = self._query['items']

            invalidations = {object_type_name: ['__all']}
            DataList._build_invalidations(invalidations, self._object_type, query_items)
            transaction = self._persistent.begin_transaction()
            for class_name, fields in invalidations.iteritems():
                key = '{0}_{1}|{{0}}|{{1}}'.format(DataList.CACHELINK, class_name)
                for field in fields:
                    self._persistent.set(key.format(self._key, field), 0, transaction=transaction)
            self._persistent.apply_transaction(transaction)

            self._guids = []
            self._data = {}
            self._objects = {}
            elements = 0
            for key, data in self._persistent.prefix_entries(prefix):
                elements += 1
                try:
                    guid = key.replace(prefix, '')
                    result, instance = self._filter({'data': data, 'guid': guid}, query_items, query_type)
                    if result is True:
                        self._guids.append(guid)
                        self._data[guid] = {'data': data, 'guid': guid}
                        if not isinstance(instance, dict):
                            self._objects[guid] = instance
                except ObjectNotFoundException:
                    pass

            if 'post_query' in DataList.test_hooks:
                DataList.test_hooks['post_query'](self)

            if self._key is not None and elements > 0 and self._can_cache:
                self._volatile.set(self._key, self._guids, 300 + randint(0, 300))  # Cache between 5 and 10 minutes
                # Check whether the cache was invalidated and should be removed again
                for class_name in invalidations:
                    key = '{0}_{1}|{{0}}|'.format(DataList.CACHELINK, class_name)
                    if len(list(self._persistent.prefix(key.format(self._key)))) == 0:
                        self._volatile.delete(self._key)
                        break
            self._executed = True
        else:
            self.from_cache = True
            self._guids = cached_data

            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index in xrange(len(self._guids)):
                guid = self._guids[index]
                self._data[guid] = {'data': entries[index],
                                    'guid': guid}
            self._executed = True
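
To make the <query>/<filter> structure from the docstring concrete, a small illustrative example follows; the operator members are written as plain strings here because the actual DataList.where_operator and DataList.operator values are not shown in this fragment:

    example_query = {'type': 'AND',                                    # DataList.where_operator.AND
                     'items': [('size', 'GT', 1024),                   # <filter>: (<field>, operator, <value>)
                               {'type': 'OR',                          # nested <query>: the structure is recursive
                                'items': [('name', 'EQUALS', 'vdisk1'),
                                          ('vpool.name', 'EQUALS', 'vpool1', False)]}]}  # dotted field + ignore_case
    print example_query['items'][1]['items'][1][0]  # 'vpool.name'
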
Example #12
0
 def test_objectproperties(self):
     """
     Validates the correctness of all hybrid objects:
     * They should contain all required properties
     * Properties should have the correct type
     * All dynamic properties should be implemented
     """
     # Some stuff here to dynamically test all hybrid properties
     hybrid_structure = HybridRunner.get_hybrids()
     print ''
     print 'Validating hybrids...'
     for class_descriptor in hybrid_structure.values():
         cls = Descriptor().load(class_descriptor).get_object()
         print '* {0}'.format(cls.__name__)
         relation_info = RelationMapper.load_foreign_relations(cls)
         remote_properties_n = []
         remote_properties_1 = []
         if relation_info is not None:
             for key, info in relation_info.iteritems():
                 if info['list'] is True:
                     remote_properties_n.append(key)
                 else:
                     remote_properties_1.append(key)
         # Make sure certain attributes are correctly set
         self.assertIsInstance(
             cls._properties, list,
             '_properties required: {0}'.format(cls.__name__))
         self.assertIsInstance(
             cls._relations, list,
             '_relations required: {0}'.format(cls.__name__))
         self.assertIsInstance(
             cls._dynamics, list,
             '_dynamics required: {0}'.format(cls.__name__))
         # Check types
         allowed_types = [int, float, str, bool, list, dict]
         for prop in cls._properties:
             is_allowed_type = prop.property_type in allowed_types \
                 or isinstance(prop.property_type, list)
             self.assertTrue(
                 is_allowed_type,
                 '_properties types in {0} should be one of {1}'.format(
                     cls.__name__, str(allowed_types)))
         for dynamic in cls._dynamics:
             is_allowed_type = dynamic.return_type in allowed_types \
                 or isinstance(dynamic.return_type, list)
             self.assertTrue(
                 is_allowed_type,
                 '_dynamics types in {0} should be one of {1}'.format(
                     cls.__name__, str(allowed_types)))
         instance = cls()
         for prop in cls._properties:
             self.assertEqual(getattr(instance, prop.name), prop.default,
                              'Default property set correctly')
         # Make sure the type can be instantiated
         self.assertIsNotNone(instance.guid)
         properties = []
         for item in dir(instance):
             if hasattr(cls, item) and isinstance(getattr(cls, item),
                                                  property):
                 properties.append(item)
         # All expiries should be implemented
         missing_props = []
         for dynamic in instance._dynamics:
             if dynamic.name not in properties:
                 missing_props.append(dynamic.name)
             else:  # ... and should work
                 _ = getattr(instance, dynamic.name)
         self.assertEqual(
             len(missing_props), 0,
             'Missing dynamic properties in {0}: {1}'.format(
                 cls.__name__, missing_props))
          # And all properties should be either in the blueprint, relations or expiry
         missing_metadata = []
         for found_prop in properties:
             found = found_prop in [prop.name for prop in cls._properties] \
                 or found_prop in [relation.name for relation in cls._relations] \
                 or found_prop in ['{0}_guid'.format(relation.name) for relation in cls._relations] \
                 or found_prop in [dynamic.name for dynamic in cls._dynamics] \
                 or found_prop in remote_properties_n \
                 or found_prop in remote_properties_1 \
                 or found_prop in ['{0}_guids'.format(key) for key in remote_properties_n] \
                 or found_prop in ['{0}_guid'.format(key) for key in remote_properties_1] \
                 or found_prop == 'guid'
             if not found:
                 missing_metadata.append(found_prop)
         self.assertEqual(
             len(missing_metadata), 0,
             'Missing metadata for properties in {0}: {1}'.format(
                 cls.__name__, missing_metadata))
         instance.delete()
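
The metadata check above boils down to a naming convention. As an illustration (the relation below is hypothetical), a one-to-many relation 'vpool' defined on VDisk with foreign key 'vdisks' is expected to surface the following attributes:

    relation_name, foreign_key = 'vpool', 'vdisks'            # hypothetical VDisk -> VPool relation
    local_attributes = [relation_name,                        # the related object itself
                        '{0}_guid'.format(relation_name)]     # its guid counterpart
    remote_attributes = [foreign_key,                         # back-reference on the remote class
                         '{0}_guids'.format(foreign_key)]     # list of guids (one-to-many)
    print local_attributes + remote_attributes                # ['vpool', 'vpool_guid', 'vdisks', 'vdisks_guids']
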
Example #13
0
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required.
        If previous_version is, for example, 0 and this script is at
        version 3, it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(
                random.choice(string.ascii_letters + string.digits +
                              '|_=+*#@!/-[]{}<>.?,\'";:~') for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [(admin_group, [read_role, write_role, manage_role]),
                       (viewers_group, [read_role])]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'),
                                      ('Amazon', 'amazon_s3'),
                                      ('Swift', 'swift_s3'),
                                      ('Local', 'local'),
                                      ('Distributed', 'distributed'),
                                      ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.MD_SERVER,
                    ServiceType.SERVICE_TYPES.ALBA_PROXY,
                    ServiceType.SERVICE_TYPES.ARAKOON
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry] \
                            and 'hybrids' in data[entry]['source']:
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(
                                filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'],
                                                 data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_md'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.DB,
                            StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_temp'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.SCRUB,
                            StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink({sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                                                                'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                                     'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                                     'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                                     'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
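                            # Illustrative: each legacy mountpoint key above maps to a DiskPartition role plus
                            # the sub-role folder templates to create, e.g. 'mountpoint_md' is modelled as DB
                            # partitions with 'metadata_<vpool>' and 'tlogs_<vpool>' sub-role directories.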
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key], list)
                                entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems():
                                                number = 0
                                                migrated = False
                                                for sd_partition in storagedriver.partitions:
                                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                        if sd_partition.partition_guid == partition.guid:
                                                            number = max(sd_partition.number, number)
                                                if migrated is False:
                                                    sd_partition = StorageDriverPartition()
                                                    sd_partition.role = role
                                                    sd_partition.sub_role = subrole
                                                    sd_partition.partition = partition
                                                    sd_partition.storagedriver = storagedriver
                                                    sd_partition.size = None
                                                    sd_partition.number = number + 1
                                                    sd_partition.save()
                                                    if folder:
                                                        source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                                    else:
                                                        source = entry
                                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                                    path = sd_partition.path.rsplit('/', 1)[0]
                                                    if path:
                                                        client.dir_create(path)
                                                        client.dir_chown(path, 'ovs', 'ovs')
                                                    client.symlink({sd_partition.path: source})
                                                    migrated_objects[source] = sd_partition
                                            if is_list:
                                                storagedriver._data[key].remove(entry)
                                                if len(storagedriver._data[key]) == 0:
                                                    del storagedriver._data[key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
                    config = StorageDriverConfiguration('storagedriver', storagedriver.vpool_guid, storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()
                    for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size)
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
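                    # RDMA detection sketch: walk /sys/class/infiniband/<device>/ports/<port>/state and
                    # mark the StorageRouter as RDMA capable as soon as one port reports an ACTIVE state
                    # (such a state file typically reads something like '4: ACTIVE').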
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join([root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter, username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
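                        # Clearing the sub-role changes the dynamic 'folder'/'path' properties, so the scrub
                        # location below is either symlinked to the old MDS-specific directory or created anew.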
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk instead of using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
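            # Illustrative (hypothetical guids): a reverse index key is built as
            #     'ovs_reverseindex_<remote class>_<remote guid>|<foreign key>|<own guid>'
            # e.g. 'ovs_reverseindex_vpool_<vpool guid>|vdisks|<vdisk guid>' for a vDisk pointing to its vPool.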
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {
                    'type': DataList.where_operator.AND,
                    'items': []
                })
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        if relation.foreign_type is None:
                            rcls = cls
                            rclsname = rcls.__name__.lower()
                        else:
                            rcls = relation.foreign_type
                            rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(
                                rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()
            except:
                pass
            from ovs.dal.lists.vdisklist import VDiskList
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {
                        'lba_size': vdisk.info['lba_size'],
                        'cluster_multiplier': vdisk.info['cluster_multiplier']
                    }
                    vdisk.save()
                except:
                    pass

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
                vpool.metadata = {'backend': vpool.metadata}
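                # Illustrative: the existing metadata dict is nested under a 'backend' key; the old 'metadata'
                # entry (the Arakoon config) is renamed to 'arakoon_config' and fragment cache defaults
                # (read on, write off) are added to 'backend_info' below.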
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
Example #14
0
    def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False, _hook=None):
        """
        Loads an object with a given guid. If no guid is given, a new object
        is generated with a new guid.
        * guid: The guid indicating which object should be loaded
        * datastore_wins: Optional boolean indicating save conflict resolution behaviour.
        ** True: when saving, external modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: in case changed fields were also changed externally, an error will be raised
        """

        # Initialize super class
        super(DataObject, self).__init__()

        # Initialize internal fields
        self._frozen = False
        self._datastore_wins = datastore_wins
        self._guid = None             # Guid identifier of the object
        self._original = {}           # Original data copy
        self._metadata = {}           # Some metadata, mainly used for unit testing
        self._data = {}               # Internal data storage
        self._objects = {}            # Internal objects storage

        # Initialize public fields
        self.dirty = False
        self.volatile = volatile

        # Worker fields/objects
        self._classname = self.__class__.__name__.lower()
        self._namespace = 'ovs_data'   # Namespace of the object
        self._mutex_listcache = VolatileMutex('listcache_{0}'.format(self._classname))
        self._mutex_reverseindex = VolatileMutex('reverseindex')

        # Rebuild _relation types
        hybrid_structure = HybridRunner.get_hybrids()
        for relation in self._relations:
            if relation.foreign_type is not None:
                identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                    relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()

        # Init guid
        self._new = False
        if guid is None:
            self._guid = str(uuid.uuid4())
            self._new = True
        else:
            guid = str(guid).lower()
            if re.match('^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$', guid) is not None:
                self._guid = str(guid)
            else:
                raise ValueError('The given guid is invalid: {0}'.format(guid))

        # Build base keys
        self._key = '{0}_{1}_{2}'.format(self._namespace, self._classname, self._guid)
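        # Illustrative (hypothetical guid): for a VDisk this yields a key like 'ovs_data_vdisk_<guid>'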

        # Worker mutexes
        self._mutex_version = VolatileMutex('ovs_dataversion_{0}_{1}'.format(self._classname, self._guid))

        # Load data from cache or persistent backend where appropriate
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._metadata['cache'] = None
        if self._new:
            self._data = {}
        else:
            self._data = self._volatile.get(self._key)
            if self._data is None:
                Toolbox.log_cache_hit('object_load', False)
                self._metadata['cache'] = False
                try:
                    self._data = self._persistent.get(self._key)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
            else:
                Toolbox.log_cache_hit('object_load', True)
                self._metadata['cache'] = True

        # Set default values on new fields
        for prop in self._properties:
            if prop.name not in self._data:
                self._data[prop.name] = prop.default
            self._add_property(prop)

        # Load relations
        for relation in self._relations:
            if relation.name not in self._data:
                if relation.foreign_type is None:
                    cls = self.__class__
                else:
                    cls = relation.foreign_type
                self._data[relation.name] = Descriptor(cls).descriptor
            self._add_relation_property(relation)

        # Add wrapped properties
        for dynamic in self._dynamics:
            self._add_dynamic_property(dynamic)

        # Load foreign keys
        relations = RelationMapper.load_foreign_relations(self.__class__)
        if relations is not None:
            for key, info in relations.iteritems():
                self._objects[key] = {'info': info,
                                      'data': None}
                self._add_list_property(key, info['list'])

        # Store original data
        self._original = copy.deepcopy(self._data)

        if _hook is not None and hasattr(_hook, '__call__'):
            _hook()

        if not self._new:
            # Re-cache the object, if required
            if self._metadata['cache'] is False:
                # The data wasn't loaded from the cache, so caching is required now
                try:
                    self._mutex_version.acquire(30)
                    this_version = self._data['_version']
                    store_version = self._persistent.get(self._key)['_version']
                    if this_version == store_version:
                        self._volatile.set(self._key, self._data)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
                finally:
                    self._mutex_version.release()

        # Freeze property creation
        self._frozen = True

        # Optionally, initialize some fields
        if data is not None:
            for prop in self._properties:
                if prop.name in data:
                    setattr(self, prop.name, data[prop.name])
Example #15
0
    def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False):
        """
        Loads an object with a given guid. If no guid is given, a new object
        is generated with a new guid.
        * guid: The guid indicating which object should be loaded
        * datastore_wins: Optional boolean indicating save conflict resolution behaviour.
        ** True: when saving, external modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: in case changed fields were also changed externally, an error will be raised
        """

        # Initialize super class
        super(DataObject, self).__init__()

        # Initialize internal fields
        self._frozen = False
        self._datastore_wins = datastore_wins
        self._guid = None             # Guid identifier of the object
        self._original = {}           # Original data copy
        self._metadata = {}           # Some metadata, mainly used for unit testing
        self._data = {}               # Internal data storage
        self._objects = {}            # Internal objects storage

        # Initialize public fields
        self.dirty = False
        self.volatile = volatile

        # Worker fields/objects
        self._name = self.__class__.__name__.lower()
        self._namespace = 'ovs_data'   # Namespace of the object
        self._mutex_listcache = VolatileMutex('listcache_{0}'.format(self._name))
        self._mutex_reverseindex = VolatileMutex('reverseindex')

        # Rebuild _relation types
        hybrid_structure = HybridRunner.get_hybrids()
        for relation in self._relations:
            if relation.foreign_type is not None:
                identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                    relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()

        # Init guid
        self._new = False
        if guid is None:
            self._guid = str(uuid.uuid4())
            self._new = True
        else:
            guid = str(guid).lower()
            if re.match('^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$', guid) is not None:
                self._guid = str(guid)
            else:
                raise ValueError('The given guid is invalid: {0}'.format(guid))

        # Build base keys
        self._key = '{0}_{1}_{2}'.format(self._namespace, self._name, self._guid)

        # Version mutex
        self._mutex_version = VolatileMutex('ovs_dataversion_{0}_{1}'.format(self._name, self._guid))

        # Load data from cache or persistent backend where appropriate
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._metadata['cache'] = None
        if self._new:
            self._data = {}
        else:
            self._data = self._volatile.get(self._key)
            if self._data is None:
                Toolbox.log_cache_hit('object_load', False)
                self._metadata['cache'] = False
                try:
                    self._data = self._persistent.get(self._key)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
            else:
                Toolbox.log_cache_hit('object_load', True)
                self._metadata['cache'] = True

        # Set default values on new fields
        for prop in self._properties:
            if prop.name not in self._data:
                self._data[prop.name] = prop.default
            self._add_property(prop)

        # Load relations
        for relation in self._relations:
            if relation.name not in self._data:
                if relation.foreign_type is None:
                    cls = self.__class__
                else:
                    cls = relation.foreign_type
                self._data[relation.name] = Descriptor(cls).descriptor
            self._add_relation_property(relation)

        # Add wrapped properties
        for dynamic in self._dynamics:
            self._add_dynamic_property(dynamic)

        # Load foreign keys
        relations = RelationMapper.load_foreign_relations(self.__class__)
        if relations is not None:
            for key, info in relations.iteritems():
                self._objects[key] = {'info': info,
                                      'data': None}
                self._add_list_property(key, info['list'])

        # Store original data
        self._original = copy.deepcopy(self._data)

        if not self._new:
            # Re-cache the object
            self._volatile.set(self._key, self._data)

        # Freeze property creation
        self._frozen = True

        # Optionally, initialize some fields
        if data is not None:
            for field, value in data.iteritems():
                setattr(self, field, value)
Example #16
0
    def _load(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached
        """
        self.data = self._volatile.get(self._key) if self._key is not None else None
        if self.data is None:
            # The query should be a dictionary:
            #     {'object': Disk,  # Object on which the query should be executed
            #      'data'  : DataList.select.XYZ,  # The requested result
            #      'query' : <query>}  # The actual query
            # Where <query> is a query(group) dictionary:
            #     {'type' : DataList.where_operator.ABC,  # Whether the items should be AND/OR
            #      'items': <items>}  # The items in the group
            # Where the <items> is any combination of one or more <filter> or <query>
            # A <filter> tuple example:
            #     (<field>, DataList.operator.GHI, <value>)  # For example EQUALS
            # The field is any property you would also find on the given object. In case of
            # properties, you can dot as far as you like. Since <items> can nest query groups,
            # AND and OR can be combined in any possible combination
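            # Illustrative example query (hypothetical field and value):
            #     {'object': Disk,
            #      'data'  : DataList.select.GUIDS,
            #      'query' : {'type' : DataList.where_operator.AND,
            #                 'items': [('name', DataList.operator.EQUALS, 'disk1')]}}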

            Toolbox.log_cache_hit('datalist', False)
            hybrid_structure = HybridRunner.get_hybrids()

            items = self._query['query']['items']
            query_type = self._query['query']['type']
            query_data = self._query['data']
            query_object = self._query['object']
            query_object_id = Descriptor(query_object).descriptor['identifier']
            if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
                query_object = Descriptor().load(hybrid_structure[query_object_id]).get_object()

            invalidations = {query_object.__name__.lower(): ['__all']}
            DataList._build_invalidations(invalidations, query_object, items)

            for class_name in invalidations:
                key = '{0}_{1}'.format(DataList.cachelink, class_name)
                mutex = VolatileMutex('listcache_{0}'.format(class_name))
                try:
                    mutex.acquire(60)
                    cache_list = Toolbox.try_get(key, {})
                    current_fields = cache_list.get(self._key, [])
                    current_fields = list(set(current_fields + ['__all'] + invalidations[class_name]))
                    cache_list[self._key] = current_fields
                    self._volatile.set(key, cache_list)
                    self._persistent.set(key, cache_list)
                finally:
                    mutex.release()

            self.from_cache = False
            namespace = query_object()._namespace
            name = query_object.__name__.lower()
            guids = DataList.get_pks(namespace, name)

            if query_data == DataList.select.COUNT:
                self.data = 0
            else:
                self.data = []

            for guid in guids:
                try:
                    instance = query_object(guid)
                    if query_type == DataList.where_operator.AND:
                        include = self._exec_and(instance, items)
                    elif query_type == DataList.where_operator.OR:
                        include = self._exec_or(instance, items)
                    else:
                        raise NotImplementedError('The given operator is not yet implemented.')
                    if include:
                        if query_data == DataList.select.COUNT:
                            self.data += 1
                        elif query_data == DataList.select.GUIDS:
                            self.data.append(guid)
                        else:
                            raise NotImplementedError('The given selector type is not implemented')
                except ObjectNotFoundException:
                    pass

            if 'post_query' in DataList.test_hooks:
                DataList.test_hooks['post_query'](self)

            if self._key is not None and len(guids) > 0 and self._can_cache:
                invalidated = False
                for class_name in invalidations:
                    key = '{0}_{1}'.format(DataList.cachelink, class_name)
                    cache_list = Toolbox.try_get(key, {})
                    if self._key not in cache_list:
                        invalidated = True
                # If the key under which the list should be saved was already invalidated since the invalidations
                # were saved, the returned list is most likely outdated. This is OK for this result, but the list
                # won't get cached
                if invalidated is False:
                    self._volatile.set(self._key, self.data, 300 + randint(0, 300))  # Cache between 5 and 10 minutes
        else:
            Toolbox.log_cache_hit('datalist', True)
            self.from_cache = True
        return self
Example #17
0
    def _execute_query(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached
        Definitions:
        * <query>: Should be a dictionary:
                   {'type' : DataList.where_operator.XYZ,
                    'items': <items>}
        * <filter>: A tuple defining a single expression:
                    (<field>, DataList.operator.XYZ, <value> [, <ignore_case>])
                    The field is any property you would also find on the given object. In case of
                    properties, you can dot as far as you like.
        * <items>: A list of one or more <query> or <filter> items. This means the query structure is recursive and
                   complex queries are possible
        """
        from ovs.dal.dataobject import DataObject

        hybrid_structure = HybridRunner.get_hybrids()
        query_object_id = Descriptor(self._object_type).descriptor['identifier']
        if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
            self._object_type = Descriptor().load(hybrid_structure[query_object_id]).get_object()
        object_type_name = self._object_type.__name__.lower()
        prefix = '{0}_{1}_'.format(DataObject.NAMESPACE, object_type_name)
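        # Illustrative: for VDisk objects this prefix is roughly 'ovs_data_vdisk_', so every persistent
        # key underneath it corresponds to one stored object of the queried type.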

        if self._guids is not None:
            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            entries = list(self._persistent.get_multi(keys, must_exist=False))

            self._data = {}
            self._objects = {}
            for index, guid in enumerate(self._guids[:]):
                if entries[index] is None:
                    self._guids.remove(guid)
                else:
                    self._data[guid] = {'data': entries[index],
                                        'guid': guid}
            self._executed = True
            return

        cached_data = self._volatile.get(self._key)
        if cached_data is None:
            self.from_cache = False

            query_type = self._query['type']
            query_items = self._query['items']

            start_references = {object_type_name: ['__all']}
            # Provide the arguments explicitly for thread safety; the state could change if the query were modified from a different thread
            class_references = self._get_referenced_fields(start_references, self._object_type, query_items)
            transaction = self._persistent.begin_transaction()
            for class_name, fields in class_references.iteritems():
                for field in fields:
                    key = self.generate_persistent_cache_key(class_name, field, self._key)
                    self._persistent.set(key, 0, transaction=transaction)
            self._persistent.apply_transaction(transaction)

            self._guids = []
            self._data = {}
            self._objects = {}
            elements = 0
            for key, data in self._data_generator(prefix, query_items, query_type):
                elements += 1
                try:
                    guid = key.replace(prefix, '')
                    result, instance = self._filter({'data': data, 'guid': guid}, query_items, query_type)
                    if result is True:
                        self._guids.append(guid)
                        self._data[guid] = {'data': data, 'guid': guid}
                        if not isinstance(instance, dict):
                            self._objects[guid] = instance
                except ObjectNotFoundException:
                    pass

            if 'post_query' in DataList._test_hooks:
                DataList._test_hooks['post_query'](self)

            if self._key is not None and elements > 0 and self._can_cache:
                self._volatile.set(self._key, self._guids, 300 + randint(0, 300))  # Cache between 5 and 10 minutes
                # Check whether the cache was invalidated and should be removed again
                if self.cache_invalidated(class_references):
                    # Pointers were removed. Remove the cached data
                    self.remove_cached_data()
        else:
            self.from_cache = True
            self._guids = cached_data

            # noinspection PyTypeChecker
            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            entries = list(self._persistent.get_multi(keys, must_exist=False))

            self._data = {}
            self._objects = {}
            for index, guid in enumerate(self._guids[:]):
                if entries[index] is None:
                    self._guids.remove(guid)
                else:
                    self._data[guid] = {'data': entries[index],
                                        'guid': guid}
        self._executed = True
Example #18
0
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            # Initial version:
            # * Add any basic configuration or model entries

            # Add backends
            for backend_type_info in [('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [ServiceType.SERVICE_TYPES.NS_MGR, ServiceType.SERVICE_TYPES.ALBA_MGR]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < ALBAMigrator.THIS_VERSION:

            # Migrate unique constraints
            import hashlib
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            client = PersistentFactory.get_client()
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
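                # Illustrative: unique constraints are modelled as keys of the form
                #     'ovs_unique_<classname>_<property>_<sha1 of value>'
                # whose value is the key of the object owning that property value.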
                uniques = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([k for k in client.prefix(unique_key.format(prop.name))]) == 0:
                        uniques.append(prop.name)
                if len(uniques) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key in client.prefix(prefix):
                        data = client.get(key)
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(unique_key.format(property_name), hashlib.sha1(str(data[property_name])).hexdigest())
                            client.set(ukey, key)

            # Changes on AlbaNodes & AlbaDisks
            from ovs.dal.lists.albanodelist import AlbaNodeList
            storagerouter_guids = []
            for alba_node in AlbaNodeList.get_albanodes():
                # StorageRouter - AlbaNode 1-to-many relation changes to 1-to-1
                if alba_node.storagerouter_guid is not None:
                    if alba_node.storagerouter_guid in storagerouter_guids:
                        alba_node.storagerouter = None
                        alba_node.save()
                    else:
                        storagerouter_guids.append(alba_node.storagerouter_guid)
                # Complete rework of the way we detect devices to assign roles or use as ASD
                # Allow loop-, raid-, nvme-, ??-devices and logical volumes as ASD
                # More info: https://github.com/openvstorage/framework/issues/792
                for alba_disk in alba_node.disks:
                    if alba_disk.aliases is not None:
                        continue
                    if 'name' in alba_disk._data:
                        alba_disk.aliases = ['/dev/disk/by-id/{0}'.format(alba_disk._data['name'])]
                        alba_disk.save()

        return ALBAMigrator.THIS_VERSION
Example #19
0
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            from ovs.dal.hybrids.servicetype import ServiceType
            # Initial version:
            # * Add any basic configuration or model entries

            # Add backends
            for backend_type_info in [('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.NS_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_S3_TRANSACTION
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < DALMigrator.THIS_VERSION:
            import hashlib
            from ovs.dal.exceptions import ObjectNotFoundException
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.hybrids.albaabmcluster import ABMCluster
            from ovs.dal.hybrids.albaosd import AlbaOSD
            from ovs.dal.hybrids.albansmcluster import NSMCluster
            from ovs.dal.hybrids.j_abmservice import ABMService
            from ovs.dal.hybrids.j_nsmservice import NSMService
            from ovs.dal.hybrids.service import Service
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.albabackendlist import AlbaBackendList
            from ovs.dal.lists.albanodelist import AlbaNodeList
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.db.arakooninstaller import ArakoonClusterConfig, ArakoonInstaller
            from ovs.extensions.generic.configuration import Configuration, NotFoundException
            from ovs_extensions.generic.toolbox import ExtensionsToolbox
            from ovs.extensions.plugins.albacli import AlbaCLI
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            # Migrate unique constraints & indexes
            client = PersistentFactory.get_client()
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
                index_prefix = 'ovs_index_{0}|{{0}}|'.format(classname)
                index_key = 'ovs_index_{0}|{{0}}|{{1}}'.format(classname)
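                # Illustrative: unique keys keep the 'ovs_unique_...' layout, while secondary indexes use
                #     'ovs_index_<classname>|<property>|<sha1 of value>'
                # whose value is the list of object keys sharing that property value.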
                uniques = []
                indexes = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([k for k in client.prefix(unique_key.format(prop.name))]) == 0:
                        uniques.append(prop.name)
                    if prop.indexed is True and len([k for k in client.prefix(index_prefix.format(prop.name))]) == 0:
                        indexes.append(prop.name)
                if len(uniques) > 0 or len(indexes) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key, data in client.prefix_entries(prefix):
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(unique_key.format(property_name), hashlib.sha1(str(data[property_name])).hexdigest())
                            client.set(ukey, key)
                        for property_name in indexes:
                            if property_name not in data:
                                continue  # This is the case when there's a new indexed property added.
                            ikey = index_key.format(property_name, hashlib.sha1(str(data[property_name])).hexdigest())
                            index = list(client.get_multi([ikey], must_exist=False))[0]
                            transaction = client.begin_transaction()
                            if index is None:
                                client.assert_value(ikey, None, transaction=transaction)
                                client.set(ikey, [key], transaction=transaction)
                            elif key not in index:
                                client.assert_value(ikey, index[:], transaction=transaction)
                                client.set(ikey, index + [key], transaction=transaction)
                            client.apply_transaction(transaction)

            #############################################
            # Introduction of ABMCluster and NSMCluster #
            #############################################
            # Verify presence of unchanged ALBA Backends
            alba_backends = AlbaBackendList.get_albabackends()
            changes_required = False
            for alba_backend in alba_backends:
                if alba_backend.abm_cluster is None or len(alba_backend.nsm_clusters) == 0:
                    changes_required = True
                    break

            if changes_required:
                # Retrieve ABM and NSM clusters
                abm_cluster_info = []
                nsm_cluster_info = []
                for cluster_name in Configuration.list('/ovs/arakoon'):
                    try:
                        metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=cluster_name)
                        if metadata['cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.ABM:
                            abm_cluster_info.append(metadata)
                        elif metadata['cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.NSM:
                            nsm_cluster_info.append(metadata)
                    except NotFoundException:
                        continue

                # Retrieve NSM Arakoon cluster information
                cluster_arakoon_map = {}
                for cluster_info in abm_cluster_info + nsm_cluster_info:
                    cluster_name = cluster_info['cluster_name']
                    arakoon_config = ArakoonClusterConfig(cluster_id=cluster_name)
                    cluster_arakoon_map[cluster_name] = arakoon_config.export_dict()

                storagerouter_map = dict((storagerouter.machine_id, storagerouter) for storagerouter in StorageRouterList.get_storagerouters())
                alba_backend_id_map = dict((alba_backend.alba_id, alba_backend) for alba_backend in alba_backends)
                for cluster_info in abm_cluster_info:
                    internal = cluster_info['internal']
                    cluster_name = cluster_info['cluster_name']
                    config_location = Configuration.get_configuration_path(key=ArakoonClusterConfig.CONFIG_KEY.format(cluster_name))
                    try:
                        alba_id = AlbaCLI.run(command='get-alba-id', config=config_location, named_params={'attempts': 3})['id']
                        nsm_hosts = AlbaCLI.run(command='list-nsm-hosts', config=config_location, named_params={'attempts': 3})
                    except RuntimeError:
                        continue

                    alba_backend = alba_backend_id_map.get(alba_id)
                    if alba_backend is None:  # ALBA Backend with ID not found in model
                        continue
                    if alba_backend.abm_cluster is not None and len(alba_backend.nsm_clusters) > 0:  # Clusters already exist
                        continue

                    # Create ABM Cluster
                    if alba_backend.abm_cluster is None:
                        abm_cluster = ABMCluster()
                        abm_cluster.name = cluster_name
                        abm_cluster.alba_backend = alba_backend
                        abm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(cluster_name)
                        abm_cluster.save()
                    else:
                        abm_cluster = alba_backend.abm_cluster

                    # Create ABM Services
                    abm_arakoon_config = cluster_arakoon_map[cluster_name]
                    abm_arakoon_config.pop('global')
                    arakoon_nodes = abm_arakoon_config.keys()
                    if internal is False:
                        services_to_create = 1
                    else:
                        if set(arakoon_nodes).difference(set(storagerouter_map.keys())):
                            continue
                        services_to_create = len(arakoon_nodes)
                    for index in range(services_to_create):
                        service = Service()
                        service.name = 'arakoon-{0}-abm'.format(
                            alba_backend.name)
                        service.type = ServiceTypeList.get_by_name(
                            ServiceType.SERVICE_TYPES.ALBA_MGR)
                        if internal is True:
                            arakoon_node_config = abm_arakoon_config[arakoon_nodes[index]]
                            service.ports = [
                                arakoon_node_config['client_port'],
                                arakoon_node_config['messaging_port']
                            ]
                            service.storagerouter = storagerouter_map[arakoon_nodes[index]]
                        else:
                            service.ports = []
                            service.storagerouter = None
                        service.save()

                        abm_service = ABMService()
                        abm_service.service = service
                        abm_service.abm_cluster = abm_cluster
                        abm_service.save()

                    # Create NSM Clusters
                    for cluster_index, nsm_host in enumerate(
                            sorted(nsm_hosts, key=lambda host: ExtensionsToolbox.advanced_sort(host['cluster_id'], '_'))):
                        nsm_cluster_name = nsm_host['cluster_id']
                        nsm_arakoon_config = cluster_arakoon_map.get(nsm_cluster_name)
                        if nsm_arakoon_config is None:
                            continue

                        number = cluster_index if internal is False else int(nsm_cluster_name.split('_')[-1])
                        nsm_cluster = NSMCluster()
                        nsm_cluster.name = nsm_cluster_name
                        nsm_cluster.number = number
                        nsm_cluster.alba_backend = alba_backend
                        nsm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(
                            nsm_cluster_name)
                        nsm_cluster.save()

                        # Create NSM Services
                        nsm_arakoon_config.pop('global')
                        arakoon_nodes = nsm_arakoon_config.keys()
                        if internal is False:
                            services_to_create = 1
                        else:
                            if set(arakoon_nodes).difference(set(storagerouter_map.keys())):
                                continue
                            services_to_create = len(arakoon_nodes)
                        for service_index in range(services_to_create):
                            service = Service()
                            service.name = 'arakoon-{0}-nsm_{1}'.format(
                                alba_backend.name, number)
                            service.type = ServiceTypeList.get_by_name(
                                ServiceType.SERVICE_TYPES.NS_MGR)
                            if internal is True:
                                arakoon_node_config = nsm_arakoon_config[arakoon_nodes[service_index]]
                                service.ports = [
                                    arakoon_node_config['client_port'],
                                    arakoon_node_config['messaging_port']
                                ]
                                service.storagerouter = storagerouter_map[arakoon_nodes[service_index]]
                            else:
                                service.ports = []
                                service.storagerouter = None
                            service.save()

                            nsm_service = NSMService()
                            nsm_service.service = service
                            nsm_service.nsm_cluster = nsm_cluster
                            nsm_service.save()

            # Clean up all junction services no longer linked to an ALBA Backend
            all_nsm_services = [
                service.nsm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.NS_MGR).services
                if service.nsm_service.nsm_cluster is None
            ]
            all_abm_services = [
                service.abm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.ALBA_MGR).services
                if service.abm_service.abm_cluster is None
            ]
            for abm_service in all_abm_services:
                abm_service.delete()
                abm_service.service.delete()
            for nsm_service in all_nsm_services:
                nsm_service.delete()
                nsm_service.service.delete()

            ################################
            # Introduction of Active Drive #
            ################################
            # Update slot_id and Alba Node relation for all OSDs
            client = PersistentFactory.get_client()
            disk_osd_map = {}
            for key, data in client.prefix_entries('ovs_data_albaosd_'):
                alba_disk_guid = data.get('alba_disk', {}).get('guid')
                if alba_disk_guid is not None:
                    if alba_disk_guid not in disk_osd_map:
                        disk_osd_map[alba_disk_guid] = []
                    disk_osd_map[alba_disk_guid].append(
                        key.replace('ovs_data_albaosd_', ''))
                try:
                    value = client.get(key)
                    value.pop('alba_disk', None)
                    client.set(key=key, value=value)
                except Exception:
                    pass  # We don't mind leftover AlbaDisk information in _data, but it's cleaner not to keep it

            alba_guid_node_map = dict(
                (an.guid, an) for an in AlbaNodeList.get_albanodes())
            for key, data in client.prefix_entries('ovs_data_albadisk_'):
                alba_disk_guid = key.replace('ovs_data_albadisk_', '')
                alba_node_guid = data.get('alba_node', {}).get('guid')
                if alba_disk_guid in disk_osd_map and alba_node_guid in alba_guid_node_map and len(data.get('aliases', [])) > 0:
                    slot_id = data['aliases'][0].split('/')[-1]
                    for osd_guid in disk_osd_map[alba_disk_guid]:
                        try:
                            osd = AlbaOSD(osd_guid)
                        except ObjectNotFoundException:
                            continue
                        osd.slot_id = slot_id
                        osd.alba_node = alba_guid_node_map[alba_node_guid]
                        osd.save()
                client.delete(key=key, must_exist=False)

            # Remove unique constraints for AlbaNode IP
            for key in client.prefix('ovs_unique_albanode_ip_'):
                client.delete(key=key, must_exist=False)

            # Remove relation for all Alba Disks
            for key in client.prefix('ovs_reverseindex_albadisk_'):
                client.delete(key=key, must_exist=False)

            # Remove the relation between AlbaNode and AlbaDisk
            for key in client.prefix('ovs_reverseindex_albanode_'):
                if '|disks|' in key:
                    client.delete(key=key, must_exist=False)

        return DALMigrator.THIS_VERSION
Example #20
0
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            # Initial version:
            # * Set the version to THIS RELEASE version
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(
                random.choice(string.ascii_letters + string.digits +
                              '|_=+*#@!/-[]{}<>.?,\'";:~') for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [(admin_group, [read_role, write_role, manage_role]),
                       (viewers_group, [read_role])]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.MD_SERVER,
                    ServiceType.SERVICE_TYPES.ALBA_PROXY,
                    ServiceType.SERVICE_TYPES.ARAKOON
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < DALMigrator.THIS_VERSION:
            from ovs.dal.datalist import DataList
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.lists.vpoollist import VPoolList
            from ovs.extensions.generic.configuration import Configuration
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            persistent_client = PersistentFactory.get_client()
            if working_version < 16:
                # The list caching keys were changed to class|field|list_id instead of class|list_id|field
                persistent_client.delete_prefix(
                    DataList.generate_persistent_cache_key())

            # Migrate unique constraints & indexes
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
                index_prefix = 'ovs_index_{0}|{{0}}|'.format(classname)
                index_key = 'ovs_index_{0}|{{0}}|{{1}}'.format(classname)
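                # Illustration derived from the format strings above (the class/property names are
                # hypothetical): for classname 'vdisk' and property 'name', entries end up under
                # 'ovs_unique_vdisk_name_<sha1-of-value>' and 'ovs_index_vdisk|name|<sha1-of-value>'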
                uniques = []
                indexes = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([k for k in persistent_client.prefix(unique_key.format(prop.name))]) == 0:
                        uniques.append(prop.name)
                    if prop.indexed is True and len([k for k in persistent_client.prefix(index_prefix.format(prop.name))]) == 0:
                        indexes.append(prop.name)
                if len(uniques) > 0 or len(indexes) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key, data in persistent_client.prefix_entries(prefix):
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(unique_key.format(property_name),
                                                   hashlib.sha1(str(data[property_name])).hexdigest())
                            persistent_client.set(ukey, key)
                        for property_name in indexes:
                            if property_name not in data:
                                continue  # This is the case when there's a new indexed property added.
                            ikey = index_key.format(property_name,
                                                    hashlib.sha1(str(data[property_name])).hexdigest())
                            index = list(persistent_client.get_multi([ikey], must_exist=False))[0]
                            transaction = persistent_client.begin_transaction()
                            if index is None:
                                persistent_client.assert_value(
                                    ikey, None, transaction=transaction)
                                persistent_client.set(ikey, [key],
                                                      transaction=transaction)
                            elif key not in index:
                                persistent_client.assert_value(
                                    ikey, index[:], transaction=transaction)
                                persistent_client.set(ikey,
                                                      index + [key],
                                                      transaction=transaction)
                            persistent_client.apply_transaction(transaction)

            # Clean up - removal of obsolete 'cfgdir'
            paths = Configuration.get(key='/ovs/framework/paths')
            if 'cfgdir' in paths:
                paths.pop('cfgdir')
                Configuration.set(key='/ovs/framework/paths', value=paths)

            # Rewrite indices 'alba_proxy' --> 'alba_proxies'
            changes = False
            transaction = persistent_client.begin_transaction()
            for old_key in persistent_client.prefix('ovs_reverseindex_storagedriver'):
                if '|alba_proxy|' in old_key:
                    changes = True
                    new_key = old_key.replace('|alba_proxy|', '|alba_proxies|')
                    persistent_client.set(key=new_key,
                                          value=0,
                                          transaction=transaction)
                    persistent_client.delete(key=old_key,
                                             transaction=transaction)
            if changes is True:
                persistent_client.apply_transaction(transaction=transaction)

            # Introduction of DTL role (Replaces DTL sub_role)
            for vpool in VPoolList.get_vpools():
                for storagedriver in vpool.storagedrivers:
                    for junction_partition_guid in storagedriver.partitions_guids:
                        junction_partition = StorageDriverPartition(
                            junction_partition_guid)
                        if junction_partition.role == DiskPartition.ROLES.WRITE and junction_partition.sub_role == 'DTL':
                            junction_partition.role = DiskPartition.ROLES.DTL
                            junction_partition.sub_role = None
                            junction_partition.save()
                            if DiskPartition.ROLES.DTL not in junction_partition.partition.roles:
                                junction_partition.partition.roles.append(
                                    DiskPartition.ROLES.DTL)
                                junction_partition.partition.save()

        return DALMigrator.THIS_VERSION
Example #21
0
    def _execute_query(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached

        Definitions:
        * <query>: Should be a dictionary:
                   {'type' : DataList.where_operator.XYZ,
                    'items': <items>}
        * <filter>: A tuple defining a single expression:
                    (<field>, DataList.operator.XYZ, <value> [, <ignore_case>])
                    The field is any property you would also find on the given object. In case of
                    properties, you can dot as far as you like.
        * <items>: A list of one or more <query> or <filter> items. This means the query structure is recursive and
                   complex queries are possible
        """
        from ovs.dal.dataobject import DataObject
        hybrid_structure = HybridRunner.get_hybrids()
        query_object_id = Descriptor(self._object_type).descriptor['identifier']
        if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
            self._object_type = Descriptor().load(hybrid_structure[query_object_id]).get_object()
        object_type_name = self._object_type.__name__.lower()
        prefix = '{0}_{1}_'.format(DataObject.NAMESPACE, object_type_name)
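        # Assuming DataObject.NAMESPACE matches the 'ovs_data' namespace used elsewhere in these
        # examples, the prefix would look like 'ovs_data_<object type>_', e.g. 'ovs_data_vdisk_'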

        if self._guids is not None:
            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index, guid in enumerate(self._guids):
                self._data[guid] = {'data': entries[index], 'guid': guid}
            self._executed = True
            return

        cached_data = self._volatile.get(self._key)
        if cached_data is None:
            self.from_cache = False

            query_type = self._query['type']
            query_items = self._query['items']

            invalidations = {object_type_name: ['__all']}
            DataList._build_invalidations(invalidations, self._object_type,
                                          query_items)
            transaction = self._persistent.begin_transaction()
            for class_name, fields in invalidations.iteritems():
                key = '{0}_{1}|{{0}}|{{1}}'.format(DataList.CACHELINK,
                                                   class_name)
                for field in fields:
                    self._persistent.set(key.format(self._key, field),
                                         0,
                                         transaction=transaction)
            self._persistent.apply_transaction(transaction)

            self._guids = []
            self._data = {}
            self._objects = {}
            elements = 0
            for key, data in self._persistent.prefix_entries(prefix):
                elements += 1
                try:
                    guid = key.replace(prefix, '')
                    result, instance = self._filter(
                        {
                            'data': data,
                            'guid': guid
                        }, query_items, query_type)
                    if result is True:
                        self._guids.append(guid)
                        self._data[guid] = {'data': data, 'guid': guid}
                        if not isinstance(instance, dict):
                            self._objects[guid] = instance
                except ObjectNotFoundException:
                    pass

            if 'post_query' in DataList.test_hooks:
                DataList.test_hooks['post_query'](self)

            if self._key is not None and elements > 0 and self._can_cache:
                self._volatile.set(
                    self._key, self._guids,
                    300 + randint(0, 300))  # Cache between 5 and 10 minutes
                # Check whether the cache was invalidated and should be removed again
                for class_name in invalidations:
                    key = '{0}_{1}|{{0}}|'.format(DataList.CACHELINK,
                                                  class_name)
                    if len(list(self._persistent.prefix(key.format(self._key)))) == 0:
                        self._volatile.delete(self._key)
                        break
            self._executed = True
        else:
            self.from_cache = True
            self._guids = cached_data

            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index in xrange(len(self._guids)):
                guid = self._guids[index]
                self._data[guid] = {'data': entries[index], 'guid': guid}
            self._executed = True
Example #22
0
    def _load(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached
        """
        self.data = self._volatile.get(
            self._key) if self._key is not None else None
        if self.data is None:
            # The query should be a dictionary:
            #     {'object': Disk,  # Object on which the query should be executed
            #      'data'  : DataList.select.XYZ,  # The requested result
            #      'query' : <query>}  # The actual query
            # Where <query> is a query(group) dictionary:
            #     {'type' : DataList.where_operator.ABC,  # Whether the items should be AND/OR
            #      'items': <items>}  # The items in the group
            # Where the <items> is any combination of one or more <filter> or <query>
            # A <filter> tuple example:
            #     (<field>, DataList.operator.GHI, <value>)  # For example EQUALS
            # The field is any property you would also find on the given object. In case of
            # properties, you can dot as far as you like. Since <items> may itself contain nested
            # <query> groups, AND and OR can be combined in any possible way
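            # Hedged, illustrative example only (the hybrid and field names are hypothetical):
            #     {'object': Disk,
            #      'data'  : DataList.select.GUIDS,
            #      'query' : {'type' : DataList.where_operator.AND,
            #                 'items': [('name', DataList.operator.EQUALS, 'disk1'),
            #                           ('storagerouter.name', DataList.operator.EQUALS, 'node1')]}}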

            Toolbox.log_cache_hit('datalist', False)
            hybrid_structure = HybridRunner.get_hybrids()

            items = self._query['query']['items']
            query_type = self._query['query']['type']
            query_data = self._query['data']
            query_object = self._query['object']
            query_object_id = Descriptor(query_object).descriptor['identifier']
            if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
                query_object = Descriptor().load(hybrid_structure[query_object_id]).get_object()

            invalidations = {query_object.__name__.lower(): ['__all']}
            DataList._build_invalidations(invalidations, query_object, items)

            for class_name in invalidations:
                key = '{0}_{1}'.format(DataList.cachelink, class_name)
                mutex = VolatileMutex('listcache_{0}'.format(class_name))
                try:
                    mutex.acquire(60)
                    cache_list = Toolbox.try_get(key, {})
                    current_fields = cache_list.get(self._key, [])
                    current_fields = list(set(current_fields + ['__all'] + invalidations[class_name]))
                    cache_list[self._key] = current_fields
                    self._volatile.set(key, cache_list)
                    self._persistent.set(key, cache_list)
                finally:
                    mutex.release()

            self.from_cache = False
            namespace = query_object()._namespace
            name = query_object.__name__.lower()
            guids = DataList.get_pks(namespace, name)

            if query_data == DataList.select.COUNT:
                self.data = 0
            else:
                self.data = []

            for guid in guids:
                try:
                    instance = query_object(guid)
                    if query_type == DataList.where_operator.AND:
                        include = self._exec_and(instance, items)
                    elif query_type == DataList.where_operator.OR:
                        include = self._exec_or(instance, items)
                    else:
                        raise NotImplementedError(
                            'The given operator is not yet implemented.')
                    if include:
                        if query_data == DataList.select.COUNT:
                            self.data += 1
                        elif query_data == DataList.select.GUIDS:
                            self.data.append(guid)
                        else:
                            raise NotImplementedError(
                                'The given selector type is not implemented')
                except ObjectNotFoundException:
                    pass

            if self._post_query_hook is not None:
                self._post_query_hook(self)

            if self._key is not None and len(guids) > 0 and self._can_cache:
                invalidated = False
                for class_name in invalidations:
                    key = '{0}_{1}'.format(DataList.cachelink, class_name)
                    cache_list = Toolbox.try_get(key, {})
                    if self._key not in cache_list:
                        invalidated = True
                # If the key under which the list should be saved was already invalidated since the invalidations
                # were saved, the returned list is most likely outdated. This is OK for this result, but the list
                # won't get cached
                if invalidated is False:
                    self._volatile.set(self._key, self.data, 300 + randint(0, 300))  # Cache between 5 and 10 minutes
        else:
            Toolbox.log_cache_hit('datalist', True)
            self.from_cache = True
        return self