Example #1
    def __init__(self, object_type, query, key=None):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute
        :param key: A key under which the result must be cached
        """
        super(DataList, self).__init__()

        if key is not None:
            self._key = '{0}_{1}'.format(DataList.NAMESPACE, key)
        else:
            identifier = copy.deepcopy(query)
            identifier['object'] = object_type.__name__
            self._key = '{0}_{1}'.format(
                DataList.NAMESPACE,
                hashlib.sha256(json.dumps(identifier)).hexdigest())

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._can_cache = True
        self._object_type = object_type
        self._data = {}
        self._objects = {}
        self._guids = None
        self._executed = False
        self._shallow_sort = True

        self.from_cache = None
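For reference, the cache key above is fully deterministic. The standalone sketch below mirrors the derivation; the helper name and the namespace value are illustrative assumptions, since DataList.NAMESPACE is not shown in this excerpt:

import copy
import hashlib
import json

NAMESPACE = 'datalist'  # assumed value; DataList.NAMESPACE is not shown above

def derive_cache_key(object_type, query, key=None):
    # Explicit keys are simply namespaced; otherwise a deterministic key is
    # hashed from the query combined with the object type's class name
    if key is not None:
        return '{0}_{1}'.format(NAMESPACE, key)
    identifier = copy.deepcopy(query)
    identifier['object'] = object_type.__name__
    return '{0}_{1}'.format(NAMESPACE, hashlib.sha256(json.dumps(identifier)).hexdigest())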
Example #2
    def migrate():
        """
        Executes all migrations. It keeps track of an internal "migration version" which
        always increases by one.
        """
        key = 'ovs_model_version'
        persistent = PersistentFactory.get_client()
        if persistent.exists(key):
            data = persistent.get(key)
        else:
            data = {}

        migrators = []
        path = '/'.join([os.path.dirname(__file__), 'migration'])
        for filename in os.listdir(path):
            if os.path.isfile('/'.join([path, filename])) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                mod = imp.load_source(name, '/'.join([path, filename]))
                for member in inspect.getmembers(mod, predicate=inspect.isclass):
                    if member[1].__module__ == name and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate, member[1].THIS_VERSION))

        for identifier, method, end_version in migrators:
            start_version = data.get(identifier, 0)
            if end_version > start_version:
                data[identifier] = method(start_version)

        persistent.set(key, data)
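For context, a module that this discovery loop would pick up might look as follows. This is a minimal sketch, assuming a .py file in the migration/ directory whose class directly subclasses object and exposes the identifier, THIS_VERSION and migrate members read above; the class name and version numbers are illustrative:

class DemoMigrator(object):
    """
    Hypothetical migrator; the loop above reads identifier, migrate and THIS_VERSION
    """
    identifier = 'demo'  # key under which this migrator's version is tracked
    THIS_VERSION = 2     # version this migrator brings the model to

    @staticmethod
    def migrate(previous_version):
        # Apply the steps between previous_version and THIS_VERSION,
        # then report the version that is now in place
        if previous_version < 1:
            pass  # initial model setup would go here
        if previous_version < 2:
            pass  # incremental changes introduced in version 2
        return DemoMigrator.THIS_VERSION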
Example #3
    def _set(key, value, raw):
        key = EtcdConfiguration._coalesce_dashes(key=key)
        data = value
        if raw is False:
            data = json.dumps(value)

        # Unittests
        if hasattr(unittest, 'running_tests') and getattr(unittest, 'running_tests') is True:
            stripped_key = key.strip('/')
            ut_data = EtcdConfiguration._unittest_data
            for part in stripped_key.split('/')[:-1]:
                if part not in ut_data:
                    ut_data[part] = {}
                ut_data = ut_data[part]

            ut_data[stripped_key.split('/')[-1]] = data
            return

        # Real implementation
        client = EtcdConfiguration._get_client()
        client.write(key, data)
        try:
            def _escape(*args, **kwargs):
                _ = args, kwargs
                raise RuntimeError()
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            client = PersistentFactory.get_client()
            signal.signal(signal.SIGALRM, _escape)
            # signal.alarm() only accepts whole seconds, so use setitimer() for the
            # intended 0.5 second timeout. This is a backup and should not slow down the system.
            signal.setitimer(signal.ITIMER_REAL, 0.5)
            client.set(key, value)
            signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel the timer
        except:
            pass
Example #4
 def get_pks(namespace, name):
     """
     This method will load the primary keys for a given namespace and name
     """
     persistent = PersistentFactory.get_client()
     prefix = '{0}_{1}_'.format(namespace, name)
     return set([key.replace(prefix, '') for key in persistent.prefix(prefix, max_elements=-1)])
Example #5
    def get_relation_set(remote_class, remote_key, own_class, own_key, own_guid):
        """
        This method will get a DataList for a relation.
        On a cache miss, the relation DataList will be rebuilt and, due to the nature of the full table scan, it will
        update all relations in the meantime.
        For the parameter information below, consider the following example: we called "my_vmachine.vdisks".
        :param remote_class: The class of the remote part of the relation (e.g. VDisk)
        :param remote_key: The key in the remote_class that points to us (e.g. vmachine)
        :param own_class: The class of the base object of the relation (e.g. VMachine)
        :param own_key: The key in this class pointing to the remote classes (e.g. vdisks)
        :param own_guid: The guid of this object instance (e.g. the guid of my_vmachine)
        """

        # Example:
        # * remote_class = VDisk
        # * remote_key = vmachine
        # * own_class = VMachine
        # * own_key = vdisks
        # Called to load the vMachine.vdisks list (resulting in a possible scan of vDisk objects)
        # * own_guid = this vMachine object's guid

        persistent = PersistentFactory.get_client()
        own_name = own_class.__name__.lower()
        datalist = DataList(remote_class, {}, '{0}_{1}_{2}'.format(own_name, own_guid, remote_key))

        reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|'.format(own_name, own_guid, own_key)
        datalist._guids = [guid.replace(reverse_key, '') for guid in persistent.prefix(reverse_key)]
        return datalist
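The reverse index consumed here apparently stores one key per relation, shaped as ovs_reverseindex_<own class>_<own guid>|<own key>|<remote guid>. A small sketch with illustrative guids:

my_vmachine_guid = '11111111-2222-3333-4444-555555555555'  # illustrative
vdisk_guid = 'aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee'        # illustrative
reverse_key = 'ovs_reverseindex_vmachine_{0}|vdisks|'.format(my_vmachine_guid)
stored_key = reverse_key + vdisk_guid
# The prefix scan plus replace() above reduces each stored key to the remote guid
assert stored_key.replace(reverse_key, '') == vdisk_guid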
Example #6
    def __init__(self, ensure_single_container, task):
        """
        Initialize an EnsureSingle container
        :param ensure_single_container: Ensure single arguments container
        :type ensure_single_container: EnsureSingleContainer
        :param task: Task instance
        :type task: celery.AsyncResult
        """
        self.ensure_single_container = ensure_single_container
        # Storage
        self.persistent_key = self.generate_key_for_task(
            ensure_single_container.task_name)
        self.persistent_client = PersistentFactory.get_client()
        self.task_id, self.async_task = self.get_task_id_and_async(task)

        # Logging
        self.logger = Logger('lib')

        # Runtime
        self.now = None
        self.thread_name = None
        self.unittest_mode = None
        self.message = None
        self.runtime_hooks = {}
        self.gather_run_time_info()
Example #7
    def _clean():
        volatile = VolatileFactory.get_client()
        persistent = PersistentFactory.get_client()

        # noinspection PyProtectedMember
        volatile._clean()
        # noinspection PyProtectedMember
        persistent._clean()
        # noinspection PyProtectedMember
        SSHClient._clean()
        # noinspection PyProtectedMember
        SystemdMock._clean()
        # noinspection PyProtectedMember
        MDSClient._clean()
        # noinspection PyProtectedMember
        Decorators._clean()
        # noinspection PyProtectedMember
        MockedSSHClient._clean()
        # noinspection PyProtectedMember
        StorageRouterClient._clean()

        Logger._logs = {}
        DataList._test_hooks = {}
        Toolbox._function_pointers = {}
        # Clean underlying persistent store
        Configuration.get_client()._clean()

        for file_name in glob.glob(
                ArakoonClusterConfig.CONFIG_FILE.format('unittest*')):
            os.remove(file_name)

        for full_path in glob.glob(DalHelper.UNITTEST_DIR.format('*')):
            shutil.rmtree(full_path)

        return volatile, persistent
Example #8
 def _get(key):
     if key == 'ovs.core.storage.persistent':
         return 'arakoon'
     c = PersistentFactory.get_client()
     if c.exists(key):
         return c.get(key)
     return None
Example #9
    def get_relation_set(remote_class, remote_key, own_class, own_key,
                         own_guid):
        """
        This method will get a DataList for a relation.
        On a cache miss, the relation DataList will be rebuilt and, due to the nature of the full table scan, it will
        update all relations in the meantime.
        For the parameter information below, consider the following example: we called "my_vmachine.vdisks".
        :param remote_class: The class of the remote part of the relation (e.g. VDisk)
        :param remote_key: The key in the remote_class that points to us (e.g. vmachine)
        :param own_class: The class of the base object of the relation (e.g. VMachine)
        :param own_key: The key in this class pointing to the remote classes (e.g. vdisks)
        :param own_guid: The guid of this object instance (e.g. the guid of my_vmachine)
        """

        # Example:
        # * remote_class = VDisk
        # * remote_key = vmachine
        # * own_class = VMachine
        # * own_key = vdisks
        # Called to load the vMachine.vdisks list (resulting in a possible scan of vDisk objects)
        # * own_guid = this vMachine object's guid

        persistent = PersistentFactory.get_client()
        own_name = own_class.__name__.lower()
        datalist = DataList(
            remote_class, {}, '{0}_{1}_{2}'.format(own_name, own_guid,
                                                   remote_key))

        reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|'.format(
            own_name, own_guid, own_key)
        datalist._guids = [
            guid.replace(reverse_key, '')
            for guid in persistent.prefix(reverse_key)
        ]
        return datalist
Example #10
    def __init__(self, object_type, query, key=None):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute
        :param key: A key under which the result must be cached
        """
        super(DataList, self).__init__()

        if key is not None:
            self._key = '{0}_{1}'.format(DataList.NAMESPACE, key)
        else:
            identifier = copy.deepcopy(query)
            identifier['object'] = object_type.__name__
            self._key = '{0}_{1}'.format(DataList.NAMESPACE, hashlib.sha256(json.dumps(identifier)).hexdigest())

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._can_cache = True
        self._object_type = object_type
        self._data = {}
        self._objects = {}
        self._guids = None
        self._executed = False
        self._shallow_sort = True

        self.from_cache = None
Example #11
def _clean_cache():
    ovs_logger.info('Executing celery "clear_cache" startup script...')
    from ovs.lib.helpers.decorators import ENSURE_SINGLE_KEY
    active = inspect().active()
    active_tasks = []
    if active is not None:
        for tasks in active.itervalues():
            active_tasks += [task['id'] for task in tasks]
    cache = PersistentFactory.get_client()
    for key in cache.prefix(ENSURE_SINGLE_KEY):
        try:
            with volatile_mutex(name=key, wait=5):
                entry = cache.get(key)
                values = entry.get('values', [])
                new_values = []
                for v in values:
                    task_id = v.get('task_id')
                    if task_id is not None and task_id in active_tasks:
                        new_values.append(v)
                if len(new_values) > 0:
                    entry['values'] = new_values
                    cache.set(key, entry)
                    ovs_logger.info('Updated key {0}'.format(key))
                else:
                    cache.delete(key)
                    ovs_logger.info('Deleted key {0}'.format(key))
        except KeyNotFoundException:
            pass
    ovs_logger.info('Executing celery "clear_cache" startup script... done')
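Judging from the loop above, each entry under ENSURE_SINGLE_KEY holds a 'values' list whose items carry a 'task_id'; a sketch of that assumed shape (task ids are illustrative):

entry = {
    'values': [
        {'task_id': 'id-of-a-still-active-task'},  # kept by the cleanup
        {'task_id': 'id-of-a-finished-task'},      # filtered out
    ]
}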
Example #12
 def get_pks(namespace, name):
     """
     This method will load the primary keys for a given namespace and name
     """
     persistent = PersistentFactory.get_client()
     prefix = '{0}_{1}_'.format(namespace, name)
     for key in persistent.prefix(prefix):
         yield key.replace(prefix, '')
Example #13
 def get_pks(namespace, name):
     """
     This method will load the primary keys for a given namespace and name
     """
     persistent = PersistentFactory.get_client()
     prefix = '{0}_{1}_'.format(namespace, name)
     for key in persistent.prefix(prefix):
         yield key.replace(prefix, '')
Example #14
def _clean_cache():

    ovs_logger.info('Executing celery "clear_cache" startup script...')
    persistent = PersistentFactory.get_client()

    persistent.apply_callback_transaction(_get_registration_update_transaction,
                                          max_retries=5)
    ovs_logger.info('Executing celery "clear_cache" startup script... done')
Example #15
 def setUpClass(cls):
     """
     Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
     This makes sure the unittests can be executed without those libraries installed
     """
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
Example #16
 def __init__(self):
     self._works = False
     self._getepoch = lambda: int(time.time())
     self._begintime = self._getepoch()
     self._waitlimit = 1800
     self._client = PersistentFactory.get_client('arakoon')
     self._key = 'ensureworks'
     self._valueguid = str(uuid.uuid4())
     self._value = '{0}{1}'
Example #17
 def tearDownClass(cls):
     """
     Clean up the unittest
     """
     fakesleep.monkey_restore()
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
Example #18
 def setUpClass(cls):
     """
     Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
     This makes sure the unittests can be executed without those libraries installed
     """
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
Example #19
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = VolatileMutex('celery_beat')
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     logger.debug('DS init')
Example #20
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = VolatileMutex('celery_beat')
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     logger.debug('DS init')
Example #21
    def tearDownClass(cls):
        """
        Tear down changes made during setUpClass
        """
        Configuration._unittest_data = {}

        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()

        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
Example #22
    def tearDownClass(cls):
        """
        Tear down changes made during setUpClass
        """
        Configuration._unittest_data = {}

        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()

        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
Example #23
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._logger = LogHandler.get('celery', name='celery beat')
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = volatile_mutex('celery_beat', 10)
     self._schedule_info = {}
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     self._logger.debug('DS init')
Example #24
 def __init__(self, *args, **kwargs):
     """
     Initializes the distributed scheduler
     """
     self._logger = LogHandler.get('celery', name='celery beat')
     self._persistent = PersistentFactory.get_client()
     self._namespace = 'ovs_celery_beat'
     self._mutex = volatile_mutex('celery_beat', 10)
     self._schedule_info = {}
     self._has_lock = False
     super(DistributedScheduler, self).__init__(*args, **kwargs)
     self._logger.debug('DS init')
Example #25
 def _set(key, value, raw):
     client = EtcdConfiguration._get_client()
     data = value
     if raw is False:
         data = json.dumps(value)
     client.write(key, data)
     try:
         from ovs.extensions.storage.persistentfactory import PersistentFactory
         client = PersistentFactory.get_client()
         client.set(key, value)
     except:
         pass
Example #26
 def _test_store(self, store_type, key=None, value=None):
     # type: (str, str, str) -> bool
     """
     Test specified store type
     :param store_type: name of the store type
      :type store_type: str
     :param key: key content to test
     :type key: str
     :param value: value to put
     :type value: str
     :return: boolean
     """
     # Volatile
     self.log_message('Testing {0} store...'.format(store_type))
     max_tries = 5
     tries = 0
     while tries < max_tries:
         if store_type == 'arakoon_voldrv':
             try:
                 cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|voldrv'))
                 configuration = Configuration.get('/ovs/arakoon/{0}/config'.format(cluster_name), raw=True)
                 client = PyrakoonStore(cluster=cluster_name, configuration=configuration)
                 client.nop()
                 break
             except Exception as message:
                 self.log_message('  Error during arakoon (voldrv) test: {0}'.format(message), 2)
         else:
             try:
                 if store_type == 'volatile':
                     VolatileFactory.store = None
                     volatile = VolatileFactory.get_client()
                     volatile.set(key, value)
                     if volatile.get(key) == value:
                         volatile.delete(key)
                         break
                     volatile.delete(key)
                 elif store_type == 'persistent':
                     persistent = PersistentFactory.get_client()
                     persistent.nop()
                     break
             except Exception as message:
                 self.log_message('  Error during {0} store test: {1}'.format(store_type, message), 3)
         key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))  # Get another key
         time.sleep(1)
         tries += 1
     if tries == max_tries:
         self.log_message('  {0} store not working correctly'.format(store_type), 2)
         return False
      self.log_message('  {0} store OK after {1} tries'.format(store_type, tries))
      return True
Example #27
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        Configuration.set('/ovs/framework/storagedriver|mds_tlogs', 100)
        Configuration.set('/ovs/framework/storagedriver|mds_maxload', 75)
        Configuration.set('/ovs/framework/storagedriver|mds_safety', 2)
Example #28
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        Configuration.set('/ovs/framework/arakoon_clusters|voldrv', 'voldrv')
        Configuration.set('/ovs/framework/hosts/1/ports', {'arakoon': [10000, 10100]})
        Configuration.set('/ovs/framework/rdma', False)
Example #29
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        Configuration.set('/ovs/framework/storagedriver|mds_tlogs', 100)
        Configuration.set('/ovs/framework/storagedriver|mds_maxload', 75)
        Configuration.set('/ovs/framework/storagedriver|mds_safety', 2)
Example #30
    def __init__(self):
        """
        Init
        """
        self.persistent = PersistentFactory.get_client()
        trap_host_port = "{}_config_trap_target".format(STORAGE_PREFIX)
        self.sender = None

        try:
            target_host, port = self.persistent.get(trap_host_port)
        except KeyNotFoundException:
            print('OVS SNMP Target not configured, cannot send TRAP')
        else:
            self.sender = SNMPTrapSender(target_host, port)
            # security from model
            self.sender.security_public()
Example #31
 def try_get(key, fallback):
     """
     Returns a value linked to a certain key from the volatile store.
      If not found in the volatile store, it will try to fetch it from the persistent
     store. If not found, it returns the fallback
     """
     volatile = VolatileFactory.get_client()
     data = volatile.get(key)
     if data is None:
         try:
             persistent = PersistentFactory.get_client()
             data = persistent.get(key)
         except:
             data = fallback
         volatile.set(key, data)
     return data
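Usage is a plain read-through lookup; a one-line sketch with an illustrative key:

data = try_get('ovs_some_setting', fallback=None)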
Example #32
 def try_get(key, fallback):
     """
     Returns a value linked to a certain key from the volatile store.
      If not found in the volatile store, it will try to fetch it from the persistent
     store. If not found, it returns the fallback
     """
     volatile = VolatileFactory.get_client()
     data = volatile.get(key)
     if data is None:
         try:
             persistent = PersistentFactory.get_client()
             data = persistent.get(key)
         except:
             data = fallback
         volatile.set(key, data)
     return data
Example #33
    def __init__(self):
        """
        Init
        """
        self.persistent = PersistentFactory.get_client()
        trap_host_port = "{}_config_trap_target".format(STORAGE_PREFIX)
        self.sender = None

        try:
            target_host, port = self.persistent.get(trap_host_port)
        except KeyNotFoundException:
            print('OVS SNMP Target not configured, cannot send TRAP')
        else:
            self.sender = SNMPTrapSender(target_host, port)
            # security from model
            self.sender.security_public()
Example #34
    def __init__(self, port):
        """
        Init
        """
        signal.signal(signal.SIGTERM, self.SIGTERM)

        from ovs.extensions.generic.system import System
        my_storagerouter = System.get_my_storagerouter()
        self.host = my_storagerouter.ip
        self.port = port

        self.persistent = PersistentFactory.get_client()
        self.users = self.get_users()
        # Load from model
        self.assigned_oids = {}
        self.instance_oid = 0
        # Book-keeping
        self.model_oids = set()
Example #35
 def _set(key, value, raw):
     client = EtcdConfiguration._get_client()
     data = value
     if raw is False:
         data = json.dumps(value)
     client.write(key, data)
     try:
         def _escape(*args, **kwargs):
             _ = args, kwargs
             raise RuntimeError()
         from ovs.extensions.storage.persistentfactory import PersistentFactory
         client = PersistentFactory.get_client()
         signal.signal(signal.SIGALRM, _escape)
          # signal.alarm() only accepts whole seconds, so use setitimer() for the
          # intended 0.5 second timeout. This is a backup and should not slow down the system.
          signal.setitimer(signal.ITIMER_REAL, 0.5)
          client.set(key, value)
          signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel the timer
     except:
         pass
Example #36
 def verify_arakoon(arakoon_cluster_name):
     """
      Tries a nop on an Arakoon cluster
     :param arakoon_cluster_name: name of the arakoon cluster
     :return: None
     """
     pyrakoon_client = PersistentFactory.get_client('pyrakoon')
     try:
         # determine if there is a healthy cluster
         pyrakoon_client.nop()
         result_handler.success('Arakoon {0} responded successfully.'.format(arakoon_cluster_name))
     except ArakoonNotFound as ex:
         result_handler.failure('Arakoon {0} seems to be down. Got {1}'.format(arakoon_cluster_name, str(ex)))
     except (ArakoonNoMaster, ArakoonNoMasterResult) as ex:
         result_handler.failure('Arakoon {0} cannot find a master. Got {1}'.format(arakoon_cluster_name, str(ex)))
     except TimeoutError:
         result_handler.warning('Arakoon {0} did not respond within {1}s'.format(arakoon_cluster_name, ArakoonHealthCheck.INTEGRITY_TIMEOUT))
     except Exception as ex:
         result_handler.exception('Arakoon {0} could not process a nop. Got {1}'.format(arakoon_cluster_name, str(ex)))
Example #37
    def __init__(self, object_type, query=None, key=None, guids=None):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute. Example: {'type': DataList.where_operator.AND, 'items': [('storagedriver_id', DataList.operator.EQUALS, storagedriver_id)]}
        When query is None, it will default to a query which will not do any filtering
        :type query: dict or NoneType
        :param key: A key under which the result must be cached
        :type key: str
        :param guids: List of guids to use as a base
        These guids should be guids of objects related to the object_type param. Guids for which no related object can be found will not be included in the result
        When guids is None, it will default to querying all items
        :type guids: list[basestring] or NoneType
        """
        # Validation
        self.validate_guids(guids)
        self.validate_query(query)

        # Defaults
        if query is None:
            query = {'type': DataList.where_operator.AND, 'items': []}

        super(DataList, self).__init__()

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._can_cache = True
        self._object_type = object_type
        self._data = {}
        self._objects = {}
        self._guids = None
        self._executed = False
        self._shallow_sort = True
        self._provided_guids = guids
        self._provided_keys = None  # Conversion of guids to keys, cached for faster lookup
        self._key = None
        self._provided_key = False  # Keep track whether a key was explicitly set
        self.from_cache = None
        self.from_index = 'none'

        self.set_key(key)
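Following the query format documented in the docstring above, a filtered list could be built like this. This is a sketch, assuming VDisk is a DAL object type with a queryable storagedriver_id property; the id value is illustrative:

storagedriver_id = 'example-storagedriver-id'  # illustrative
query = {'type': DataList.where_operator.AND,
         'items': [('storagedriver_id', DataList.operator.EQUALS, storagedriver_id)]}
vdisks = DataList(VDisk, query=query)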
Example #38
 def _get_pks(namespace, name):
     """
     Loads the primary key set information and pages, merges them to a single set
     and returns it
     """
     internal_key = 'ovs_primarykeys_{0}_{{0}}'.format(name)
     volatile = VolatileFactory.get_client()
     persistent = PersistentFactory.get_client()
     pointer = internal_key.format(0)
     keys = set()
     while pointer is not None:
         subset = volatile.get(pointer)
         if subset is None:
             prefix = '{0}_{1}_'.format(namespace, name)
             keys = set([key.replace(prefix, '') for key in persistent.prefix(prefix, max_elements=-1)])
             DataList._save_pks(name, keys)
             return keys
         keys = keys.union(subset[0])
         pointer = subset[1]
     return keys
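The volatile pages walked above form a linked list of (keys, next_pointer) tuples; a sketch of that assumed layout for a name of 'vdisk' (guids are illustrative):

pages = {
    'ovs_primarykeys_vdisk_0': (set(['guid-a', 'guid-b']), 'ovs_primarykeys_vdisk_1'),
    'ovs_primarykeys_vdisk_1': (set(['guid-c']), None),  # None terminates the walk
}
keys = set()
pointer = 'ovs_primarykeys_vdisk_0'
while pointer is not None:
    subset = pages[pointer]
    keys = keys.union(subset[0])
    pointer = subset[1]
assert keys == set(['guid-a', 'guid-b', 'guid-c'])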
Example #39
    def __init__(self,
                 app,
                 expires=None,
                 backend=None,
                 options=None,
                 url=None,
                 **kwargs):
        super(ArakoonResultBackend, self).__init__(app, **kwargs)
        if options is None:
            options = {}

        self.url = url
        self.options = dict(self.app.conf.CELERY_CACHE_BACKEND_OPTIONS,
                            **options)

        self.backend = url or backend or self.app.conf.CELERY_RESULT_BACKEND  # Will be 'arakoon'
        self.expires = self.prepare_expires(expires, type=int)
        self._encode_prefixes()  # re-encode the key prefixes

        self._client = PersistentFactory.get_client()
Example #40
    def migrate():
        """
        Executes all migrations. It keeps track of an internal "migration version" which
        always increases by one.
        """
        def execute(function, start, end):
            """
            Executes a single migration, syncing versions
            """
            version = function(start)
            if version > end:
                end = version
            return end

        key = 'ovs_model_version'
        persistent = PersistentFactory.get_client()
        if persistent.exists(key):
            data = persistent.get(key)
        else:
            data = {}

        migrators = []
        path = os.path.join(os.path.dirname(__file__), 'migration')
        for filename in os.listdir(path):
            if os.path.isfile(os.path.join(
                    path, filename)) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, os.path.join(path, filename))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) \
                            and member[1].__module__ == name \
                            and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append(
                            (member[1].identifier, member[1].migrate))
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            new_version = execute(method, base_version, 0)
            data[identifier] = new_version

        persistent.set(key, data)
Example #41
    def migrate():
        """
        Executes all migrations. It keeps track of an internal "migration version" which
        always increases by one.
        """

        def execute(function, start, end):
            """
            Executes a single migration, syncing versions
            """
            version = function(start)
            if version > end:
                end = version
            return end

        key = 'ovs_model_version'
        persistent = PersistentFactory.get_client()
        if persistent.exists(key):
            data = persistent.get(key)
        else:
            data = {}

        migrators = []
        path = '/'.join([os.path.dirname(__file__), 'migration'])
        for filename in os.listdir(path):
            if os.path.isfile('/'.join([path, filename])) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, '/'.join([path, filename]))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) \
                            and member[1].__module__ == name \
                            and 'object' in [base.__name__ for base in member[1].__bases__]:
                        migrators.append((member[1].identifier, member[1].migrate))
        for identifier, method in migrators:
            base_version = data[identifier] if identifier in data else 0
            new_version = execute(method, base_version, 0)
            data[identifier] = new_version

        persistent.set(key, data)
Example #42
    def __init__(self, query, key=None, load=True):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        """
        # Initialize super class
        super(DataList, self).__init__()

        if key is not None:
            self._key = key
        else:
            identifier = copy.deepcopy(query)
            identifier['object'] = identifier['object'].__name__
            self._key = hashlib.sha256(json.dumps(identifier)).hexdigest()
        self._key = '{0}_{1}'.format(DataList.namespace, self._key)
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self.data = None
        self.from_cache = False
        self._can_cache = True
        if load is True:
            self._load()
Example #43
    def _set(key, value, raw):
        key = EtcdConfiguration._coalesce_dashes(key=key)
        data = value
        if raw is False:
            data = json.dumps(value)

        # Unittests
        if hasattr(unittest, 'running_tests') and getattr(
                unittest, 'running_tests') is True:
            stripped_key = key.strip('/')
            ut_data = EtcdConfiguration._unittest_data
            for part in stripped_key.split('/')[:-1]:
                if part not in ut_data:
                    ut_data[part] = {}
                ut_data = ut_data[part]

            ut_data[stripped_key.split('/')[-1]] = data
            return

        # Real implementation
        client = EtcdConfiguration._get_client()
        client.write(key, data)
        try:

            def _escape(*args, **kwargs):
                _ = args, kwargs
                raise RuntimeError()

            from ovs.extensions.storage.persistentfactory import PersistentFactory
            client = PersistentFactory.get_client()
            signal.signal(signal.SIGALRM, _escape)
            # signal.alarm() only accepts whole seconds, so use setitimer() for the
            # intended 0.5 second timeout. This is a backup and should not slow down the system.
            signal.setitimer(signal.ITIMER_REAL, 0.5)
            client.set(key, value)
            signal.setitimer(signal.ITIMER_REAL, 0)  # Cancel the timer
        except:
            pass
Example #44
 def _get_pks(namespace, name):
     """
     Loads the primary key set information and pages, merges them to a single set
     and returns it
     """
     internal_key = 'ovs_primarykeys_{0}_{{0}}'.format(name)
     volatile = VolatileFactory.get_client()
     persistent = PersistentFactory.get_client()
     pointer = internal_key.format(0)
     keys = set()
     while pointer is not None:
         subset = volatile.get(pointer)
         if subset is None:
             prefix = '{0}_{1}_'.format(namespace, name)
             keys = set([
                 key.replace(prefix, '')
                 for key in persistent.prefix(prefix, max_elements=-1)
             ])
             DataList._save_pks(name, keys)
             return keys
         keys = keys.union(subset[0])
         pointer = subset[1]
     return keys
Example #45
    def __init__(self, vdisk_guid):
        # type: (str) -> None
        """
        Initializes a new MDSCatchUp
        An instance populates some caches. These caches are cleared once the instance is garbage collected.
        When running MDSCatchUp in bulk, add the instances to a list to speed up the process.
        :param vdisk_guid: Guid of the vDisk to catch up for
        :type vdisk_guid: str
        """
        self.id = str(uuid.uuid4())
        self.vdisk = VDisk(vdisk_guid)
        self.mds_key = self._CATCH_UP_VDISK_KEY.format(self.vdisk.guid)
        self.tlog_threshold = Configuration.get(
            'ovs/volumedriver/mds|tlogs_behind', default=100)
        self.volumedriver_service_name = 'ovs-volumedriver_{0}'.format(
            self.vdisk.vpool.name)
        self.mds_client_timeout = Configuration.get(
            'ovs/vpools/{0}/mds_config|mds_client_connection_timeout'.format(
                self.vdisk.vpool_guid),
            default=120)
        self.mds_clients = {}
        self.dry_run = False
        self.catch_up_threads = []
        self.errors = []

        self._service_manager = ServiceFactory.get_manager()
        self._persistent = PersistentFactory.get_client()
        self._log = 'MDS catchup {0} - vDisk {1} (volume id: {2})'.format(
            self.id, self.vdisk.guid, self.vdisk.volume_id)

        self._clients = self.build_clients()
        self._volumedriver_contexts = self.get_volumedriver_contexts()
        self._worker_contexts = self.get_worker_contexts()
        self._worker_context = self._worker_contexts[
            System.get_my_storagerouter()]
        self._relevant_contexts = self._get_all_relevant_contexts()  # All possible contexts (by mixing volumedriver ones with workers)
Example #46
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.factory = None
        PersistentFactory.get_client().clean()
        VolatileFactory.get_client().clean()

        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()
        admin_npg = User()
        admin_npg.username = '******'
        admin_npg.password = hashlib.sha256('admin_npg').hexdigest()
        admin_npg.is_active = True
        admin_npg.group = admin_group
        admin_npg.save()
        admin_na = User()
        admin_na.username = '******'
        admin_na.password = hashlib.sha256('admin_na').hexdigest()
        admin_na.is_active = False
        admin_na.group = admin_group
        admin_na.save()
        user = User()
        user.username = '******'
        user.password = hashlib.sha256('user').hexdigest()
        user.is_active = True
        user.group = viewers_group
        user.save()

        # Create internal OAuth 2 clients
        admin_client = Client()
        admin_client.ovs_type = 'INTERNAL'
        admin_client.grant_type = 'PASSWORD'
        admin_client.user = admin
        admin_client.save()
        admin_na_client = Client()
        admin_na_client.ovs_type = 'INTERNAL'
        admin_na_client.grant_type = 'PASSWORD'
        admin_na_client.user = admin_na
        admin_na_client.save()
        user_client = Client()
        user_client.ovs_type = 'INTERNAL'
        user_client.grant_type = 'PASSWORD'
        user_client.user = user
        user_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles
        mapping = [(admin_group, [read_role, write_role, manage_role]),
                   (viewers_group, [read_role])]
        for setting in mapping:
            for role in setting[1]:
                rolegroup = RoleGroup()
                rolegroup.group = setting[0]
                rolegroup.role = role
                rolegroup.save()
            for user in setting[0].users:
                for role in setting[1]:
                    for client in user.clients:
                        roleclient = RoleClient()
                        roleclient.client = client
                        roleclient.role = role
                        roleclient.save()

        storagerouter = StorageRouter()
        storagerouter.machine_id = 'storagerouter'
        storagerouter.ip = '127.0.0.1'
        storagerouter.machine_id = '1'
        storagerouter.rdma_capable = False
        storagerouter.name = 'storagerouter'
        storagerouter.save()

        from django.test import RequestFactory
        cls.factory = RequestFactory()

        fakesleep.monkey_patch()

        Configuration.set('/ovs/framework/plugins/installed', {
            'generic': [],
            'backends': []
        })
        Configuration.set('/ovs/framework/cluster_id', 'cluster_id')

        System._machine_id = {'none': '1'}
Example #47
@worker_process_init.connect
def worker_process_init_handler(args=None, kwargs=None, **kwds):
    """
    Hook for process init
    """
    _ = args, kwargs, kwds
    VolatileFactory.store = None
    PersistentFactory.store = None
    ArakoonConfiguration.client = None


@after_setup_task_logger.connect
@after_setup_logger.connect
def load_ovs_logger(**kwargs):
    """Load a logger."""
    if 'logger' in kwargs:
        kwargs['logger'] = LogHandler.get('celery', name='celery')


if __name__ == '__main__':
    import sys
    if len(sys.argv) == 2 and sys.argv[1] == 'clear_cache':
        from ovs.lib.helpers.decorators import ENSURE_SINGLE_KEY

        cache = PersistentFactory.get_client()
        for key in cache.prefix(ENSURE_SINGLE_KEY):
            try:
                cache.delete(key)
            except KeyNotFoundException:
                pass
Example #48
    def test_ensure_safety(self):
        """
        Validates whether the ensure_safety call works as expected
        """
        def _generate_mds_service_load_repr(_mds_service):
            """
            Generates a load representation for a given mds_service
            """
            masters, slaves = 0, 0
            for _junction in _mds_service.vdisks:
                if _junction.is_master:
                    masters += 1
                else:
                    slaves += 1
            capacity = _mds_service.capacity
            if capacity == -1:
                capacity = 'infinite'
            _load, _ = MDSServiceController.get_mds_load(_mds_service)
            if _load == float('inf'):
                _load = 'infinite'
            else:
                _load = round(_load, 2)
            return [_mds_service.service.storagerouter.ip, _mds_service.service.ports[0], masters, slaves, capacity, _load]

        def _check_reality(_configs, _loads, _vdisks, _mds_services, test=True, display=False):
            """
            Validates 'reality' with an expected config/load
            """
            reality_configs = []
            for _vdisk_id in _vdisks:
                reality_configs.append(_vdisks[_vdisk_id].info['metadata_backend_config'])
            if display is True:
                for c in reality_configs:
                    print c
            if test is True:
                self.assertListEqual(reality_configs, _configs)
            reality_loads = []
            for mds_id in _mds_services:
                reality_loads.append(_generate_mds_service_load_repr(_mds_services[mds_id]))
            if display is True:
                for l in reality_loads:
                    print l
            if test is True:
                self.assertListEqual(reality_loads, _loads)

        PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', 3)
        PersistentFactory.get_client().set('ovs.storagedriver.mds.maxload', 75)
        PersistentFactory.get_client().set('ovs.storagedriver.mds.tlogs', 100)
        vpools, storagerouters, storagedrivers, _, mds_services, service_type = self._build_service_structure(
            {'vpools': [1],
             'storagerouters': [1, 2, 3, 4],
             'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4)],  # (<id>, <vpool_id>, <sr_id>)
             'mds_services': [(1, 1), (2, 2), (3, 3), (4, 4)]}  # (<id>, <sd_id>)
        )
        vdisks = {}
        start_id = 1
        for mds_service in mds_services.itervalues():
            vdisks.update(self._create_vdisks_for_mds_service(2, start_id, mds_service=mds_service))
            start_id += 2

        # Validate the start configuration which is simple, each disk has only its default local master
        configs = [[{'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.2', 'port': 2}],
                   [{'ip': '10.0.0.2', 'port': 2}],
                   [{'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.4', 'port': 4}]]
        loads = [['10.0.0.1', 1, 2, 0, 10, 20.0],
                 ['10.0.0.2', 2, 2, 0, 10, 20.0],
                 ['10.0.0.3', 3, 2, 0, 10, 20.0],
                 ['10.0.0.4', 4, 2, 0, 10, 20.0]]
        _check_reality(configs, loads, vdisks, mds_services)

        # Validate first run. Each disk should now have sufficient nodes, since there are plenty of MDS services available
        configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.2', 'port': 2}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 2}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 2}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
                 ['10.0.0.2', 2, 2, 4, 10, 60.0],
                 ['10.0.0.3', 3, 2, 4, 10, 60.0],
                 ['10.0.0.4', 4, 2, 3, 10, 50.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Validate whether this extra (unnecessary) run doesn't change anything, preventing reconfiguring over and
        # over again
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Validating whether an overloaded node is correctly rebalanced
        mds_services[2].capacity = 2
        mds_services[2].save()
        configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
                 ['10.0.0.2', 2, 2, 0, 2, 100.0],
                 ['10.0.0.3', 3, 2, 5, 10, 70.0],
                 ['10.0.0.4', 4, 2, 5, 10, 70.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Validate whether the overloaded services are still handled. In this case, causing a re-order of the slaves as
        # ordered in the model
        configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
                 ['10.0.0.2', 2, 2, 0, 2, 100.0],
                 ['10.0.0.3', 3, 2, 5, 10, 70.0],
                 ['10.0.0.4', 4, 2, 5, 10, 70.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Again, validating whether a subsequent run doesn't give unexpected changes
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # An MDS service will be added (next to the overloaded service); this should cause the expected configs to be rebalanced
        s_id = '{0}-5'.format(storagerouters[2].name)
        service = Service()
        service.name = s_id
        service.storagerouter = storagerouters[2]
        service.ports = [5]
        service.type = service_type
        service.save()
        mds_service = MDSService()
        mds_service.service = service
        mds_service.number = 0
        mds_service.capacity = 10
        mds_service.vpool = vpools[1]
        mds_service.save()
        mds_services[5] = mds_service
        configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
                 ['10.0.0.2', 2, 2, 0, 2, 100.0],
                 ['10.0.0.3', 3, 2, 5, 10, 70.0],
                 ['10.0.0.4', 4, 2, 5, 10, 70.0],
                 ['10.0.0.2', 5, 0, 3, 10, 30.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # If the tlogs are not caught up, nothing should be changed
        for vdisk_id in [3, 4]:
            StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 1000
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # The next run, after tlogs are caught up, a master switch should be executed
        for vdisk_id in [3, 4]:
            StorageDriverClient.catch_up[vdisks[vdisk_id].volume_id] = 50
        configs = [[{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 2, 5, 10, 70.0],
                 ['10.0.0.2', 2, 1, 0, 2, 50.0],
                 ['10.0.0.3', 3, 2, 5, 10, 70.0],
                 ['10.0.0.4', 4, 2, 5, 10, 70.0],
                 ['10.0.0.2', 5, 1, 1, 10, 20.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Validate whether a volume migration makes the master follow
        StorageDriverClient.vrouter_id[vdisks[1].volume_id] = storagedrivers[3].storagedriver_id
        configs = [[{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.2', 'port': 5}],
                   [{'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 5}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.3', 'port': 3}],
                   [{'ip': '10.0.0.2', 'port': 2}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}, {'ip': '10.0.0.4', 'port': 4}],
                   [{'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.1', 'port': 1}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.2', 'port': 5}],
                   [{'ip': '10.0.0.4', 'port': 4}, {'ip': '10.0.0.3', 'port': 3}, {'ip': '10.0.0.1', 'port': 1}]]
        loads = [['10.0.0.1', 1, 1, 6, 10, 70.0],
                 ['10.0.0.2', 2, 1, 0, 2, 50.0],
                 ['10.0.0.3', 3, 3, 4, 10, 70.0],
                 ['10.0.0.4', 4, 2, 4, 10, 60.0],
                 ['10.0.0.2', 5, 1, 2, 10, 30.0]]
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)

        # Validates if a second run doesn't change anything
        for vdisk_id in sorted(vdisks.keys()):
            MDSServiceController.ensure_safety(vdisks[vdisk_id])
        _check_reality(configs, loads, vdisks, mds_services)
Example #49
 def test_storagedriver_config_set(self):
     """
     Validates whether storagedriver configuration is generated as expected
     """
     PersistentFactory.get_client().set('ovs.storagedriver.mds.safety', 3)
     vpools, storagerouters, storagedrivers, services, mds_services, _ = self._build_service_structure(
         {'vpools': [1, 2],
          'storagerouters': [1, 2, 3, 4, 5, 6],
          'storagedrivers': [(1, 1, 1), (2, 1, 2), (3, 1, 3), (4, 1, 4), (5, 2, 4), (6, 2, 5), (7, 2, 6)],  # (<id>, <vpool_id>, <sr_id>)
          'mds_services': [(1, 1), (2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 7)]}  # (<id>, <sd_id>)
     )
     vdisks = {}
     start_id = 1
     for mds_service in mds_services.itervalues():
         vdisks.update(self._create_vdisks_for_mds_service(10, start_id, mds_service=mds_service))
         start_id += 10
     mds_services[1].capacity = 11  # on 1, vpool 1
     mds_services[1].save()
     mds_services[2].capacity = 20  # on 1, vpool 1
     mds_services[2].save()
     mds_services[3].capacity = 12  # on 2, vpool 1
     mds_services[3].save()
     mds_services[4].capacity = 14  # on 3, vpool 1
     mds_services[4].save()
     mds_services[5].capacity = 16  # on 4, vpool 1
     mds_services[5].save()
     mds_services[6].capacity = 11  # on 4, vpool 2
     mds_services[6].save()
     mds_services[7].capacity = 13  # on 5, vpool 2
     mds_services[7].save()
     mds_services[8].capacity = 19  # on 6, vpool 2
     mds_services[8].save()
     mds_services[9].capacity = 15  # on 6, vpool 2
     mds_services[9].save()
     config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
     expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 2},
                                          {'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.3', 'port': 4}],
                 storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                          {'host': '10.0.0.1', 'port': 2},
                                          {'host': '10.0.0.4', 'port': 5}],
                 storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                          {'host': '10.0.0.1', 'port': 2},
                                          {'host': '10.0.0.4', 'port': 5}],
                 storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.1', 'port': 2},
                                          {'host': '10.0.0.3', 'port': 4}]}
     self.assertDictEqual(config, expected, 'Test 1. Got:\n{0}'.format(json.dumps(config, indent=2)))
     mds_services[2].capacity = 10  # on 1, vpool 1
     mds_services[2].save()
     config = MDSServiceController.get_mds_storagedriver_config_set(vpools[1])
     expected = {storagerouters[1].guid: [{'host': '10.0.0.1', 'port': 1},
                                          {'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.3', 'port': 4}],
                 storagerouters[2].guid: [{'host': '10.0.0.2', 'port': 3},
                                          {'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.3', 'port': 4}],
                 storagerouters[3].guid: [{'host': '10.0.0.3', 'port': 4},
                                          {'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.2', 'port': 3}],
                 storagerouters[4].guid: [{'host': '10.0.0.4', 'port': 5},
                                          {'host': '10.0.0.3', 'port': 4},
                                          {'host': '10.0.0.2', 'port': 3}]}
     self.assertDictEqual(config, expected, 'Test 2. Got:\n{0}'.format(json.dumps(config, indent=2)))
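
One property of both expected dictionaries is worth spelling out (an observation about this test data, not a documented guarantee): every StorageRouter's MDS list leads with a service hosted on that router's own IP, with remote services following for extra safety. A hedged check along those lines could read:

    # Sketch: verify the 'local MDS first' ordering seen in both expected dicts
    for index in [1, 2, 3, 4]:
        local_ip = '10.0.0.{0}'.format(index)
        self.assertEqual(config[storagerouters[index].guid][0]['host'], local_ip)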
    def _get(key):
        c = PersistentFactory.get_client()
        if c.exists(key):
            return c.get(key)
        return None
    def __init__(self):
        """
        Init method
        """
        self.persistent = PersistentFactory.get_client()
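
All of these snippets revolve around the same client object. A minimal sketch of the recurring pattern, using only calls that appear in the examples themselves (set, get, exists, delete):

    from ovs.extensions.storage.persistentfactory import PersistentFactory

    client = PersistentFactory.get_client()
    client.set('example_key', {'some': 'value'})  # structured values are accepted
    assert client.exists('example_key')
    print client.get('example_key')               # -> {'some': 'value'}
    client.delete('example_key')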
Exemple #52
0
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            from ovs.dal.hybrids.servicetype import ServiceType
            # Initial version:
            # * Add any basic configuration or model entries

            # Add backends
            for backend_type_info in [('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.NS_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_S3_TRANSACTION
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < DALMigrator.THIS_VERSION:
            import hashlib
            from ovs.dal.exceptions import ObjectNotFoundException
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.hybrids.albaabmcluster import ABMCluster
            from ovs.dal.hybrids.albaosd import AlbaOSD
            from ovs.dal.hybrids.albansmcluster import NSMCluster
            from ovs.dal.hybrids.j_abmservice import ABMService
            from ovs.dal.hybrids.j_nsmservice import NSMService
            from ovs.dal.hybrids.service import Service
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.albabackendlist import AlbaBackendList
            from ovs.dal.lists.albanodelist import AlbaNodeList
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.db.arakooninstaller import ArakoonClusterConfig, ArakoonInstaller
            from ovs.extensions.generic.configuration import Configuration, NotFoundException
            from ovs_extensions.generic.toolbox import ExtensionsToolbox
            from ovs.extensions.plugins.albacli import AlbaCLI
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            # Migrate unique constraints & indexes
            client = PersistentFactory.get_client()
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
                index_prefix = 'ovs_index_{0}|{{0}}|'.format(classname)
                index_key = 'ovs_index_{0}|{{0}}|{{1}}'.format(classname)
                uniques = []
                indexes = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([
                            k for k in client.prefix(
                                unique_key.format(prop.name))
                    ]) == 0:
                        uniques.append(prop.name)
                    if prop.indexed is True and len([
                            k for k in client.prefix(
                                index_prefix.format(prop.name))
                    ]) == 0:
                        indexes.append(prop.name)
                if len(uniques) > 0 or len(indexes) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key, data in client.prefix_entries(prefix):
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(
                                unique_key.format(property_name),
                                hashlib.sha1(str(
                                    data[property_name])).hexdigest())
                            client.set(ukey, key)
                        for property_name in indexes:
                            if property_name not in data:
                                continue  # This is the case when there's a new indexed property added.
                            ikey = index_key.format(
                                property_name,
                                hashlib.sha1(str(
                                    data[property_name])).hexdigest())
                            index = list(
                                client.get_multi([ikey], must_exist=False))[0]
                            transaction = client.begin_transaction()
                            if index is None:
                                client.assert_value(ikey,
                                                    None,
                                                    transaction=transaction)
                                client.set(ikey, [key],
                                           transaction=transaction)
                            elif key not in index:
                                client.assert_value(ikey,
                                                    index[:],
                                                    transaction=transaction)
                                client.set(ikey,
                                           index + [key],
                                           transaction=transaction)
                            client.apply_transaction(transaction)
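                            # The assert_value/set pairs above form an optimistic
                            # update: the transaction only applies when the index
                            # still holds the value that was read, so concurrent
                            # writers cannot overwrite each other's changes.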

            #############################################
            # Introduction of ABMCluster and NSMCluster #
            #############################################
            # Verify presence of unchanged ALBA Backends
            alba_backends = AlbaBackendList.get_albabackends()
            changes_required = False
            for alba_backend in alba_backends:
                if alba_backend.abm_cluster is None or len(
                        alba_backend.nsm_clusters) == 0:
                    changes_required = True
                    break

            if changes_required:
                # Retrieve ABM and NSM clusters
                abm_cluster_info = []
                nsm_cluster_info = []
                for cluster_name in Configuration.list('/ovs/arakoon'):
                    try:
                        metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(
                            cluster_name=cluster_name)
                        if metadata[
                                'cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.ABM:
                            abm_cluster_info.append(metadata)
                        elif metadata[
                                'cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.NSM:
                            nsm_cluster_info.append(metadata)
                    except NotFoundException:
                        continue

                # Retrieve NSM Arakoon cluster information
                cluster_arakoon_map = {}
                for cluster_info in abm_cluster_info + nsm_cluster_info:
                    cluster_name = cluster_info['cluster_name']
                    arakoon_config = ArakoonClusterConfig(
                        cluster_id=cluster_name)
                    cluster_arakoon_map[
                        cluster_name] = arakoon_config.export_dict()

                storagerouter_map = dict(
                    (storagerouter.machine_id, storagerouter) for storagerouter
                    in StorageRouterList.get_storagerouters())
                alba_backend_id_map = dict((alba_backend.alba_id, alba_backend)
                                           for alba_backend in alba_backends)
                for cluster_info in abm_cluster_info:
                    internal = cluster_info['internal']
                    cluster_name = cluster_info['cluster_name']
                    config_location = Configuration.get_configuration_path(
                        key=ArakoonClusterConfig.CONFIG_KEY.format(
                            cluster_name))
                    try:
                        alba_id = AlbaCLI.run(command='get-alba-id',
                                              config=config_location,
                                              named_params={'attempts':
                                                            3})['id']
                        nsm_hosts = AlbaCLI.run(command='list-nsm-hosts',
                                                config=config_location,
                                                named_params={'attempts': 3})
                    except RuntimeError:
                        continue

                    alba_backend = alba_backend_id_map.get(alba_id)
                    if alba_backend is None:  # ALBA Backend with ID not found in model
                        continue
                    if alba_backend.abm_cluster is not None and len(
                            alba_backend.nsm_clusters
                    ) > 0:  # Clusters already exist
                        continue

                    # Create ABM Cluster
                    if alba_backend.abm_cluster is None:
                        abm_cluster = ABMCluster()
                        abm_cluster.name = cluster_name
                        abm_cluster.alba_backend = alba_backend
                        abm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(
                            cluster_name)
                        abm_cluster.save()
                    else:
                        abm_cluster = alba_backend.abm_cluster

                    # Create ABM Services
                    abm_arakoon_config = cluster_arakoon_map[cluster_name]
                    abm_arakoon_config.pop('global')
                    arakoon_nodes = abm_arakoon_config.keys()
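                    # With 'global' popped, the remaining keys are the Arakoon node
                    # (machine) IDs, matched against StorageRouter machine_ids below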
                    if internal is False:
                        services_to_create = 1
                    else:
                        if set(arakoon_nodes).difference(
                                set(storagerouter_map.keys())):
                            continue
                        services_to_create = len(arakoon_nodes)
                    for index in range(services_to_create):
                        service = Service()
                        service.name = 'arakoon-{0}-abm'.format(
                            alba_backend.name)
                        service.type = ServiceTypeList.get_by_name(
                            ServiceType.SERVICE_TYPES.ALBA_MGR)
                        if internal is True:
                            arakoon_node_config = abm_arakoon_config[
                                arakoon_nodes[index]]
                            service.ports = [
                                arakoon_node_config['client_port'],
                                arakoon_node_config['messaging_port']
                            ]
                            service.storagerouter = storagerouter_map[
                                arakoon_nodes[index]]
                        else:
                            service.ports = []
                            service.storagerouter = None
                        service.save()

                        abm_service = ABMService()
                        abm_service.service = service
                        abm_service.abm_cluster = abm_cluster
                        abm_service.save()

                    # Create NSM Clusters
                    for cluster_index, nsm_host in enumerate(
                            sorted(nsm_hosts,
                                   key=lambda host: ExtensionsToolbox.
                                   advanced_sort(host['cluster_id'], '_'))):
                        nsm_cluster_name = nsm_host['cluster_id']
                        nsm_arakoon_config = cluster_arakoon_map.get(
                            nsm_cluster_name)
                        if nsm_arakoon_config is None:
                            continue

                        number = cluster_index if internal is False else int(
                            nsm_cluster_name.split('_')[-1])
                        nsm_cluster = NSMCluster()
                        nsm_cluster.name = nsm_cluster_name
                        nsm_cluster.number = number
                        nsm_cluster.alba_backend = alba_backend
                        nsm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(
                            nsm_cluster_name)
                        nsm_cluster.save()

                        # Create NSM Services
                        nsm_arakoon_config.pop('global')
                        arakoon_nodes = nsm_arakoon_config.keys()
                        if internal is False:
                            services_to_create = 1
                        else:
                            if set(arakoon_nodes).difference(
                                    set(storagerouter_map.keys())):
                                continue
                            services_to_create = len(arakoon_nodes)
                        for service_index in range(services_to_create):
                            service = Service()
                            service.name = 'arakoon-{0}-nsm_{1}'.format(
                                alba_backend.name, number)
                            service.type = ServiceTypeList.get_by_name(
                                ServiceType.SERVICE_TYPES.NS_MGR)
                            if internal is True:
                                arakoon_node_config = nsm_arakoon_config[
                                    arakoon_nodes[service_index]]
                                service.ports = [
                                    arakoon_node_config['client_port'],
                                    arakoon_node_config['messaging_port']
                                ]
                                service.storagerouter = storagerouter_map[
                                    arakoon_nodes[service_index]]
                            else:
                                service.ports = []
                                service.storagerouter = None
                            service.save()

                            nsm_service = NSMService()
                            nsm_service.service = service
                            nsm_service.nsm_cluster = nsm_cluster
                            nsm_service.save()

            # Clean up all junction services no longer linked to an ALBA Backend
            all_nsm_services = [
                service.nsm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.NS_MGR).services
                if service.nsm_service.nsm_cluster is None
            ]
            all_abm_services = [
                service.abm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.ALBA_MGR).services
                if service.abm_service.abm_cluster is None
            ]
            for abm_service in all_abm_services:
                abm_service.delete()
                abm_service.service.delete()
            for nsm_service in all_nsm_services:
                nsm_service.delete()
                nsm_service.service.delete()

            ################################
            # Introduction of Active Drive #
            ################################
            # Update slot_id and Alba Node relation for all OSDs
            client = PersistentFactory.get_client()
            disk_osd_map = {}
            for key, data in client.prefix_entries('ovs_data_albaosd_'):
                alba_disk_guid = data.get('alba_disk', {}).get('guid')
                if alba_disk_guid is not None:
                    if alba_disk_guid not in disk_osd_map:
                        disk_osd_map[alba_disk_guid] = []
                    disk_osd_map[alba_disk_guid].append(
                        key.replace('ovs_data_albaosd_', ''))
                try:
                    value = client.get(key)
                    value.pop('alba_disk', None)
                    client.set(key=key, value=value)
                except Exception:
                    pass  # We don't care if there is leftover AlbaDisk information in _data, but it's cleaner not to keep it

            alba_guid_node_map = dict(
                (an.guid, an) for an in AlbaNodeList.get_albanodes())
            for key, data in client.prefix_entries('ovs_data_albadisk_'):
                alba_disk_guid = key.replace('ovs_data_albadisk_', '')
                alba_node_guid = data.get('alba_node', {}).get('guid')
                if alba_disk_guid in disk_osd_map and alba_node_guid in alba_guid_node_map and len(
                        data.get('aliases', [])) > 0:
                    slot_id = data['aliases'][0].split('/')[-1]
                    for osd_guid in disk_osd_map[alba_disk_guid]:
                        try:
                            osd = AlbaOSD(osd_guid)
                        except ObjectNotFoundException:
                            continue
                        osd.slot_id = slot_id
                        osd.alba_node = alba_guid_node_map[alba_node_guid]
                        osd.save()
                client.delete(key=key, must_exist=False)

            # Remove unique constraints for AlbaNode IP
            for key in client.prefix('ovs_unique_albanode_ip_'):
                client.delete(key=key, must_exist=False)

            # Remove relation for all Alba Disks
            for key in client.prefix('ovs_reverseindex_albadisk_'):
                client.delete(key=key, must_exist=False)

            # Remove the relation between AlbaNode and AlbaDisk
            for key in client.prefix('ovs_reverseindex_albanode_'):
                if '|disks|' in key:
                    client.delete(key=key, must_exist=False)

        return DALMigrator.THIS_VERSION
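
The unique-constraint bookkeeping in the migration above boils down to a single key layout. A small illustration with hypothetical values (the 'vdisk'/'name'/'mydisk' names below are made up for the example):

    import hashlib

    classname, prop_name, prop_value = 'vdisk', 'name', 'mydisk'
    object_key = 'ovs_data_vdisk_some-guid'
    unique_key = 'ovs_unique_{0}_{1}_{2}'.format(classname,
                                                 prop_name,
                                                 hashlib.sha1(str(prop_value)).hexdigest())
    # client.set(unique_key, object_key) maps the hashed property value back to
    # the object owning it, which is how uniqueness can be asserted on writes
Exemple #53
0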
    def setUpClass(cls):
        """
        Sets up the unittest, mocking a certain set of 3rd party libraries and extensions.
        This makes sure the unittests can be executed without those libraries installed
        """
        cls.factory = None
        cls.initial_data = None
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()

        admin_group = Group()
        admin_group.name = 'administrators'
        admin_group.description = 'Administrators'
        admin_group.save()
        viewers_group = Group()
        viewers_group.name = 'viewers'
        viewers_group.description = 'Viewers'
        viewers_group.save()

        # Create users
        admin = User()
        admin.username = '******'
        admin.password = hashlib.sha256('admin').hexdigest()
        admin.is_active = True
        admin.group = admin_group
        admin.save()
        admin_npg = User()
        admin_npg.username = '******'
        admin_npg.password = hashlib.sha256('admin_npg').hexdigest()
        admin_npg.is_active = True
        admin_npg.group = admin_group
        admin_npg.save()
        admin_na = User()
        admin_na.username = '******'
        admin_na.password = hashlib.sha256('admin_na').hexdigest()
        admin_na.is_active = False
        admin_na.group = admin_group
        admin_na.save()
        user = User()
        user.username = '******'
        user.password = hashlib.sha256('user').hexdigest()
        user.is_active = True
        user.group = viewers_group
        user.save()
        sort_combinations = [('bb', 'aa'), ('aa', 'cc'), ('bb', 'dd'), ('aa', 'bb')]  # No logical ordering
        for username, password in sort_combinations:
            sort_user = User()
            sort_user.username = username
            sort_user.password = password
            sort_user.is_active = True
            sort_user.group = viewers_group
            sort_user.save()

        # Create internal OAuth 2 clients
        admin_client = Client()
        admin_client.ovs_type = 'INTERNAL'
        admin_client.grant_type = 'PASSWORD'
        admin_client.user = admin
        admin_client.save()
        admin_na_client = Client()
        admin_na_client.ovs_type = 'INTERNAL'
        admin_na_client.grant_type = 'PASSWORD'
        admin_na_client.user = admin_na
        admin_na_client.save()
        user_client = Client()
        user_client.ovs_type = 'INTERNAL'
        user_client.grant_type = 'PASSWORD'
        user_client.user = user
        user_client.save()

        # Create roles
        read_role = Role()
        read_role.code = 'read'
        read_role.name = 'Read'
        read_role.description = 'Can read objects'
        read_role.save()
        write_role = Role()
        write_role.code = 'write'
        write_role.name = 'Write'
        write_role.description = 'Can write objects'
        write_role.save()
        manage_role = Role()
        manage_role.code = 'manage'
        manage_role.name = 'Manage'
        manage_role.description = 'Can manage the system'
        manage_role.save()

        # Attach groups to roles
        mapping = [
            (admin_group, [read_role, write_role, manage_role]),
            (viewers_group, [read_role])
        ]
        for setting in mapping:
            for role in setting[1]:
                rolegroup = RoleGroup()
                rolegroup.group = setting[0]
                rolegroup.role = role
                rolegroup.save()
            for user in setting[0].users:
                for role in setting[1]:
                    for client in user.clients:
                        roleclient = RoleClient()
                        roleclient.client = client
                        roleclient.role = role
                        roleclient.save()

        cls.initial_data = PersistentFactory.store._read(), VolatileFactory.store._read()

        sys.modules['backend.serializers.serializers'] = Serializers

        sys.path.append('/opt/OpenvStorage')
        sys.path.append('/opt/OpenvStorage/webapps')
        os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
        from django.conf import settings
        settings.VERSION = (1, 2, 3)
        from django.test import RequestFactory
        cls.factory = RequestFactory()

        fakesleep.monkey_patch()
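
The last line of the setup deserves a note: fakesleep swaps out the real clock for the duration of the tests. A standalone sketch of what that monkey-patch does, assuming the fakesleep package from PyPI that this code appears to use:

    import time
    import fakesleep

    fakesleep.monkey_patch()    # time.time()/time.sleep() now use a fake clock
    start = time.time()
    time.sleep(300)             # returns instantly, advancing the fake clock
    assert time.time() - start >= 300
    fakesleep.monkey_restore()  # restore the real time functions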
Exemple #54
0
    def __init__(self, guid=None, data=None, datastore_wins=False, volatile=False, _hook=None):
        """
        Loads an object with a given guid. If no guid is given, a new object
        is generated with a new guid.
        * guid: The guid indicating which object should be loaded
        * datastoreWins: Optional boolean indicating save conflict resolve management.
        ** True: when saving, external modified fields will not be saved
        ** False: when saving, all changed data will be saved, regardless of external updates
        ** None: in case changed field were also changed externally, an error will be raised
        """

        # Initialize super class
        super(DataObject, self).__init__()

        # Initialize internal fields
        self._frozen = False
        self._datastore_wins = datastore_wins
        self._guid = None    # Guid identifier of the object
        self._original = {}  # Original data copy
        self._metadata = {}  # Some metadata, mainly used for unit testing
        self._data = {}      # Internal data storage
        self._objects = {}   # Internal objects storage

        # Initialize public fields
        self.dirty = False
        self.volatile = volatile

        # Worker fields/objects
        self._classname = self.__class__.__name__.lower()

        # Rebuild _relation types
        hybrid_structure = HybridRunner.get_hybrids()
        for relation in self._relations:
            if relation.foreign_type is not None:
                identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                    relation.foreign_type = Descriptor().load(hybrid_structure[identifier]).get_object()

        # Init guid
        self._new = False
        if guid is None:
            self._guid = str(uuid.uuid4())
            self._new = True
        else:
            self._guid = str(guid)

        # Build base keys
        self._key = '{0}_{1}_{2}'.format(DataObject.NAMESPACE, self._classname, self._guid)

        # Worker mutexes
        self._mutex_version = volatile_mutex('ovs_dataversion_{0}_{1}'.format(self._classname, self._guid))

        # Load data from cache or persistent backend where appropriate
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._metadata['cache'] = None
        if self._new:
            self._data = {}
        else:
            if data is not None:
                self._data = copy.deepcopy(data)
                self._metadata['cache'] = None
            else:
                self._data = self._volatile.get(self._key)
                if self._data is None:
                    self._metadata['cache'] = False
                    try:
                        self._data = self._persistent.get(self._key)
                    except KeyNotFoundException:
                        raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                            self.__class__.__name__, self._guid
                        ))
                else:
                    self._metadata['cache'] = True

        # Set default values on new fields
        for prop in self._properties:
            if prop.name not in self._data:
                self._data[prop.name] = prop.default
            self._add_property(prop)

        # Load relations
        for relation in self._relations:
            if relation.name not in self._data:
                if relation.foreign_type is None:
                    cls = self.__class__
                else:
                    cls = relation.foreign_type
                self._data[relation.name] = Descriptor(cls).descriptor
            self._add_relation_property(relation)

        # Add wrapped properties
        for dynamic in self._dynamics:
            self._add_dynamic_property(dynamic)

        # Load foreign keys
        relations = RelationMapper.load_foreign_relations(self.__class__)
        if relations is not None:
            for key, info in relations.iteritems():
                self._objects[key] = {'info': info,
                                      'data': None}
                self._add_list_property(key, info['list'])

        if _hook is not None and hasattr(_hook, '__call__'):
            _hook()

        if not self._new:
            # Re-cache the object, if required
            if self._metadata['cache'] is False:
                # The data wasn't loaded from the cache, so caching is required now
                try:
                    self._mutex_version.acquire(30)
                    this_version = self._data['_version']
                    store_version = self._persistent.get(self._key)['_version']
                    if this_version == store_version:
                        self._volatile.set(self._key, self._data)
                except KeyNotFoundException:
                    raise ObjectNotFoundException('{0} with guid \'{1}\' could not be found'.format(
                        self.__class__.__name__, self._guid
                    ))
                finally:
                    self._mutex_version.release()

        # Freeze property creation
        self._frozen = True

        # Optionally, initialize some fields
        if data is not None:
            for prop in self._properties:
                if prop.name in data:
                    setattr(self, prop.name, data[prop.name])

        # Store original data
        self._original = copy.deepcopy(self._data)
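
A usage sketch for the loader above, assuming some hybrid subclass of DataObject (the VDisk name is illustrative):

    disk = VDisk()             # no guid: a new object with a freshly generated guid
    disk.save()
    loaded = VDisk(disk.guid)  # served from the volatile cache when possible,
                               # falling back to the persistent store otherwise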
Exemple #55
0
        def new_function(*args, **kwargs):
            """
            Wrapped function
            :param args: Arguments without default values
            :param kwargs: Arguments with default values
            """
            def log_message(message, level='info'):
                """
                Log a message with some additional information
                :param message: Message to log
                :param level:   Log level
                :return:        None
                """
                if level not in ('info', 'warning', 'debug', 'error'):
                    raise ValueError(
                        'Unsupported log level "{0}" specified'.format(level))
                complete_message = 'Ensure single {0} mode - ID {1} - {2}'.format(
                    mode, now, message)
                getattr(logger, level)(complete_message)

            def update_value(key, append, value_to_store=None):
                """
                Store the specified value in the PersistentFactory
                :param key:            Key to store the value for
                :param append:         If True, the specified value will be appended; otherwise the element at index 0 will be popped
                :param value_to_store: Value to append to the list
                :return:               Updated value
                """
                with VolatileMutex(name=key, wait=5):
                    if persistent_client.exists(key):
                        val = persistent_client.get(key)
                        if append is True and value_to_store is not None:
                            val['values'].append(value_to_store)
                        elif append is False and len(val['values']) > 0:
                            val['values'].pop(0)
                    else:
                        log_message('Setting initial value for key {0}'.format(key))
                        val = {'mode': mode, 'values': []}
                    persistent_client.set(key, val)
                return val

            now = '{0}_{1}'.format(
                int(time.time()), ''.join(
                    random.choice(string.ascii_letters + string.digits)
                    for _ in range(10)))
            task_names = [
                task_name
            ] if extra_task_names is None else [task_name] + extra_task_names
            persistent_key = '{0}_{1}'.format(ENSURE_SINGLE_KEY, task_name)
            persistent_client = PersistentFactory.get_client()

            if mode == 'DEFAULT':
                with VolatileMutex(persistent_key, wait=5):
                    for task in task_names:
                        key_to_check = '{0}_{1}'.format(
                            ENSURE_SINGLE_KEY, task)
                        if persistent_client.exists(key_to_check):
                            log_message(
                                'Execution of task {0} discarded'.format(
                                    task_name))
                            return None
                    log_message('Setting key {0}'.format(persistent_key))
                    persistent_client.set(persistent_key, {'mode': mode})

                try:
                    output = function(*args, **kwargs)
                    log_message(
                        'Task {0} finished successfully'.format(task_name))
                    return output
                finally:
                    with VolatileMutex(persistent_key, wait=5):
                        if persistent_client.exists(persistent_key):
                            log_message(
                                'Deleting key {0}'.format(persistent_key))
                            persistent_client.delete(persistent_key)

            elif mode == 'CHAINED':
                if extra_task_names is not None:
                    log_message('Extra tasks are not allowed in this mode',
                                level='error')
                    raise ValueError(
                        'Ensure single {0} mode - ID {1} - Extra tasks are not allowed in this mode'
                        .format(mode, now))

                # 1. Create key to be stored in arakoon and update kwargs with args
                timeout = kwargs.pop(
                    'chain_timeout'
                ) if 'chain_timeout' in kwargs else global_timeout
                function_info = inspect.getargspec(function)
                kwargs_dict = {}
                for index, arg in enumerate(args):
                    kwargs_dict[function_info.args[index]] = arg
                kwargs_dict.update(kwargs)
                params_info = 'with params {0}'.format(
                    kwargs_dict) if kwargs_dict else 'with default params'

                # 2. Set the key in arakoon if non-existent
                value = update_value(key=persistent_key, append=True)

                # 3. Validate whether another job with same params is being executed, skip if so
                for item in value['values'][1:]:  # 1st element is the job being processed; check all queued jobs for identical params
                    if item['kwargs'] == kwargs_dict:
                        log_message(
                            'Execution of task {0} {1} discarded because of identical parameters'
                            .format(task_name, params_info))
                        return None
                log_message('New task {0} {1} scheduled for execution'.format(
                    task_name, params_info))
                update_value(key=persistent_key,
                             append=True,
                             value_to_store={
                                 'kwargs': kwargs_dict,
                                 'timestamp': now
                             })

                # 4. Poll the arakoon to see whether this call is the first in list, if so --> execute, else wait
                first_element = None
                counter = 0
                while first_element != now and counter < timeout:
                    if persistent_client.exists(persistent_key):
                        value = persistent_client.get(persistent_key)
                        first_element = value['values'][0]['timestamp']

                    if first_element == now:
                        try:
                            if counter != 0:
                                current_time = int(time.time())
                                starting_time = int(now.split('_')[0])
                                log_message(
                                    'Task {0} {1} had to wait {2} seconds before being able to start'
                                    .format(task_name, params_info,
                                            current_time - starting_time))
                            output = function(*args, **kwargs)
                            log_message(
                                'Task {0} finished successfully'.format(
                                    task_name))
                        finally:
                            update_value(key=persistent_key, append=False)
                        return output
                    counter += 1
                    time.sleep(1)
                    if counter == timeout:
                        update_value(key=persistent_key, append=False)
                        log_message(
                            'Could not start task {0} {1} within expected time ({2}s). Removed it from queue'
                            .format(task_name, params_info, timeout),
                            level='error')
                        raise EnsureSingleTimeoutReached(
                            'Ensure single {0} mode - ID {1} - Task {2} could not be started within timeout of {3}s'
                            .format(mode, now, task_name, timeout))
            else:
                raise ValueError(
                    'Unsupported mode "{0}" provided'.format(mode))
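
The DEFAULT branch above reduces to a guard-key protocol. A minimal self-contained sketch of that idea, with a plain dict standing in for the persistent client (illustrative only, not the real implementation):

    _keys = {}

    def run_single(task_name, function):
        key = 'ensure_single_{0}'.format(task_name)
        if key in _keys:
            return None           # a run is already in flight: discard this one
        _keys[key] = {'mode': 'DEFAULT'}
        try:
            return function()
        finally:
            _keys.pop(key, None)  # always release the guard key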
Exemple #56
0
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required
        If previous_version is for example 0 and this script is at
        version 3 it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(random.choice(string.ascii_letters +
                                                                  string.digits +
                                                                  '|_=+*#@!/-[]{}<>.?,\'";:~')
                                                    for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
            mapping = [
                (admin_group, [read_role, write_role, manage_role]),
                (viewers_group, [read_role])
            ]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'), ('Amazon', 'amazon_s3'), ('Swift', 'swift_s3'),
                                      ('Local', 'local'), ('Distributed', 'distributed'), ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [ServiceType.SERVICE_TYPES.MD_SERVER, ServiceType.SERVICE_TYPES.ALBA_PROXY, ServiceType.SERVICE_TYPES.ARAKOON]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry] and 'hybrids' in data[entry]['source']:
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'], data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                if storagedriver is None:  # No StorageDriver matched this MDS service's vpool
                    continue
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_md'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.DB, StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(storagedriver._data.get('mountpoint_temp'),
                                                   storagedriver.vpool.name,
                                                   mds_service.number)] = (DiskPartition.ROLES.SCRUB, StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink({sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {'mountpoint_md': (DiskPartition.ROLES.DB, {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                                                                'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                                     'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE, {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                                     'mountpoint_foc': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_dtl': (DiskPartition.ROLES.WRITE, {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                                     'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                                     'mountpoint_writecaches': (DiskPartition.ROLES.WRITE, {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key], list)
                                entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems():
                                            number = 0
                                            # Find the highest sequence number already in use for this
                                            # role/sub-role on this partition; the new entry gets the next one
                                            for sd_partition in storagedriver.partitions:
                                                if sd_partition.role == role and sd_partition.sub_role == subrole and sd_partition.partition_guid == partition.guid:
                                                    number = max(sd_partition.number, number)
                                            sd_partition = StorageDriverPartition()
                                            sd_partition.role = role
                                            sd_partition.sub_role = subrole
                                            sd_partition.partition = partition
                                            sd_partition.storagedriver = storagedriver
                                            sd_partition.size = None
                                            sd_partition.number = number + 1
                                            sd_partition.save()
                                            if folder:
                                                source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                            else:
                                                source = entry
                                            client = SSHClient(storagedriver.storagerouter, username='******')
                                            path = sd_partition.path.rsplit('/', 1)[0]
                                            if path:
                                                client.dir_create(path)
                                                client.dir_chown(path, 'ovs', 'ovs')
                                            client.symlink({sd_partition.path: source})
                                            migrated_objects[source] = sd_partition
                                            if is_list:
                                                storagedriver._data[key].remove(entry)
                                                if len(storagedriver._data[key]) == 0:
                                                    del storagedriver._data[key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
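                    # Fill in the sizes of the freshly created StorageDriverPartitions
                    # using the volumedriver's own configuration file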
                    config = StorageDriverConfiguration('storagedriver', storagedriver.vpool_guid, storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size) if size is not None else None
                            migrated_objects[path].save()
                    for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size) if size is not None else None
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
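                    # A StorageRouter is considered RDMA-capable when at least one InfiniBand
                    # port under /sys/class/infiniband reports an ACTIVE state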
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join([root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter, username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
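                        # Clearing the sub-role changes the dynamically computed path; link the
                        # old MDS-specific directory to the new location when it still exists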
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)  # Reload the module so the newly introduced 'status' property is picked up
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk instead of using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
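            # Drop all cached list results and old-style reverse indexes;
            # the reverse indexes are rebuilt below in their persistent format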
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
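            # Rewrite every object under its own key so the stored data passes
            # through the current serializer (a plain get followed by a set)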
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
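            # Key layout: ovs_reverseindex_<remote class>_<remote guid>|<foreign key>|<referring guid>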
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {'type': DataList.where_operator.AND,
                                             'items': []})
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        # A relation without a foreign_type points to the own class (self-reference)
                        rcls = cls if relation.foreign_type is None else relation.foreign_type
                        rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()  # Clear the entire volatile cache
            except Exception:
                pass  # Best-effort flush; failures must not abort the migration
            from ovs.dal.lists.vdisklist import VDiskList
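            # Persist lba_size and cluster_multiplier on the vDisk itself; these values
            # never change, so they no longer have to be served by a dynamic property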
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {'lba_size': vdisk.info['lba_size'],
                                      'cluster_multiplier': vdisk.info['cluster_multiplier']}
                    vdisk.save()
                except Exception:
                    pass  # vDisk info could not be retrieved; skip this vDisk

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
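                # Wrap the existing metadata under a 'backend' key, rename the old
                # 'metadata' entry to 'arakoon_config' and default the new fragment cache flags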
                vpool.metadata = {'backend': vpool.metadata}
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
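
The version-gating pattern used throughout migrate() can be reduced to a minimal, self-contained sketch (a hypothetical migrator, not part of the OVS codebase): each block only runs when the stored version predates the version it introduces and then bumps working_version, which keeps re-runs idempotent and allows version numbers to skip (e.g. 7 to 10 above).

def migrate(previous_version):
    """
    Minimal sketch of the version-gated migration pattern (hypothetical example)
    :param previous_version: The version stored for this migrator before this run
    """
    working_version = previous_version

    if working_version < 1:
        # Changes introduced by version 1 would go here
        working_version = 1

    if working_version < 2:
        # Changes introduced by version 2; already-migrated setups skip this block
        working_version = 2

    return working_version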