Ejemplo n.º 1
0
    def __init__(self, object_type, query, key=None):
        """
        Builds a DataList that will run ``query`` against objects of ``object_type``.

        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute
        :param key: Optional explicit key under which the result must be cached
        """
        super(DataList, self).__init__()

        # Without an explicit key, derive a deterministic one from the query itself
        # so identical queries share the same cache entry.
        if key is None:
            query_id = copy.deepcopy(query)
            query_id['object'] = object_type.__name__
            digest = hashlib.sha256(json.dumps(query_id)).hexdigest()
            self._key = '{0}_{1}'.format(DataList.NAMESPACE, digest)
        else:
            self._key = '{0}_{1}'.format(DataList.NAMESPACE, key)

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._object_type = object_type
        self._can_cache = True
        self._data = {}
        self._objects = {}
        self._guids = None
        self._executed = False
        self._shallow_sort = True
        self.from_cache = None
Ejemplo n.º 2
0
    def __init__(self, object_type=None, guid=None):
        """
        Builds a descriptor for ``object_type``. Optionally ``guid`` already points
        at the instance that should be instantiated later on.
        """
        # Initialize super class
        super(Descriptor, self).__init__()

        self.initialized = object_type is not None
        if not self.initialized:
            return

        # Descriptors are cached per type: building one needs filesystem inspection.
        key = 'ovs_descriptor_{0}'.format(re.sub(r'[\W_]+', '', str(object_type)))
        self._volatile = VolatileFactory.get_client()
        self._descriptor = self._volatile.get(key)
        if self._descriptor is None:
            Toolbox.log_cache_hit('descriptor', False)
            filename = inspect.getfile(object_type).replace('.pyc', '.py')
            name = filename.replace(os.path.dirname(filename) + os.path.sep, '').replace('.py', '')
            source = os.path.relpath(filename, os.path.dirname(__file__))
            identifier = name + '_' + hashlib.sha256(name + source + object_type.__name__).hexdigest()
            self._descriptor = {'name': name,
                                'source': source,
                                'type': object_type.__name__,
                                'identifier': identifier}
            self._volatile.set(key, self._descriptor)
        else:
            Toolbox.log_cache_hit('descriptor', True)
        self._descriptor['guid'] = guid
Ejemplo n.º 3
0
 def calculate_delta(key, dynamic, current_stats):
     """
     Calculate per-second statistics deltas between the current and previously
     cached statistics, storing them as '<stat>_ps' entries in current_stats.
     :param key: Key to retrieve from volatile factory
     :param dynamic: Dynamic property descriptor; its timeout drives cache expiry
     :param current_stats: Current statistics to compare with (mutated in place)
     :return: None
     """
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(key, 'statistics_previous')
     previous_stats = volatile.get(prev_key, default={})
     # Iterate over a snapshot of the keys: '<stat>_ps' entries are added to
     # current_stats inside the loop, and mutating a dict while iterating its
     # live key view raises a RuntimeError on Python 3. The loop variable is
     # also renamed so it no longer shadows the 'key' parameter.
     for stat in list(current_stats.keys()):
         if stat == 'timestamp' or '_latency' in stat or '_distribution' in stat:
             continue
         delta = current_stats['timestamp'] - previous_stats.get(
             'timestamp', current_stats['timestamp'])
         if delta == 0:
             # No time elapsed since the previous sample: keep the old rate
             current_stats['{0}_ps'.format(stat)] = previous_stats.get(
                 '{0}_ps'.format(stat), 0)
         elif delta > 0 and stat in previous_stats:
             # Clamp at 0 so counter resets do not produce negative rates
             current_stats['{0}_ps'.format(stat)] = max(
                 0, (current_stats[stat] - previous_stats[stat]) / delta)
         else:
             current_stats['{0}_ps'.format(stat)] = 0
     volatile.set(prev_key, current_stats, dynamic.timeout * 10)
Ejemplo n.º 4
0
        def new_function(*args, **kwargs):
            """
            Rate-limiting wrapper: tracks call timestamps per client IP in the
            volatile cache and raises Throttled when the configured limit
            ('amount' calls per 'per' seconds, from the enclosing decorator)
            is exceeded.
            """
            request = _find_request(args)

            now = time.time()
            # One rate-limit bucket per decorated function per client IP
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [], 'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        # Still inside an active penalty window
                        logger.warning(
                            'Call {0} is being throttled with a wait of {1}'.
                            format(key, active_timeout - now))
                        raise Throttled(wait=active_timeout - now)
                    else:
                        # Penalty window expired; clear it
                        rate_info['timeout'] = None
                # Keep only calls inside the sliding window and record this one
                rate_info['calls'] = [
                    call for call in rate_info['calls'] if call > (now - per)
                ] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    # Limit exceeded: start a new penalty window before raising
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning(
                        'Call {0} is being throttled with a wait of {1}'.
                        format(key, timeout))
                    raise Throttled(wait=timeout)
                client.set(key, rate_info)
            return f(*args, **kwargs)
Ejemplo n.º 5
0
    def sync_with_reality(storagerouter_guid=None, max_attempts=3):
        """
        Try to run _sync_with_reality, retrying in case of failure.
        Always runs the sync synchronously, as tasks calling this expect it
        to have happened by the time this returns.
        :param storagerouter_guid: Guid of the StorageRouter to sync
        :param max_attempts: Maximum number of attempts before giving up
        :return: Result of DiskController._sync_with_reality
        :raises RuntimeError: When all attempts failed
        """
        cache = VolatileFactory.get_client()
        mutex = VolatileMutex('ovs_disk_sync_with_reality_{0}'.format(storagerouter_guid))

        key = 'ovs_dedupe_sync_with_reality_{0}'.format(storagerouter_guid)
        attempt = 1
        # '<=' so that exactly max_attempts attempts are made; the previous
        # '<' condition stopped one attempt short of the documented maximum.
        while attempt <= max_attempts:
            # Revoke a previously scheduled identical task, if any
            task_id = cache.get(key)
            if task_id:
                revoke(task_id)
            try:
                mutex.acquire(wait=120)
                return DiskController._sync_with_reality(storagerouter_guid)
            except Exception as ex:
                logger.warning('Sync with reality failed. {0}'.format(ex))
                attempt += 1
                time.sleep(attempt * 30)  # Back off longer after each failure
            finally:
                mutex.release()

        # Report the actual configured maximum instead of a hard-coded '3'
        raise RuntimeError('Sync with reality failed after {0} attempts'.format(max_attempts))
 def _load_backend_info(_connection_info, _alba_backend_guid,
                        _exceptions):
     """
     Worker that collects the linked Backend guids of one remote ALBA Backend
     and merges them into the shared 'guids' set (closure variable).
     """
     # NOTE(review): '_exceptions' is appended to below, so it must be a shared
     # *mutable* container (e.g. a list) for failures to be visible outside this
     # thread. The original comment claimed it must be "immutable", which
     # contradicts the append calls — presumably a typo for "mutable".
     client = OVSClient.get_instance(
         connection_info=_connection_info,
         cache_store=VolatileFactory.get_client())
     try:
         new_guids = client.get(
             '/alba/backends/{0}/'.format(_alba_backend_guid),
             params={'contents':
                     'linked_backend_guids'})['linked_backend_guids']
         with lock:  # 'guids' is shared across worker threads
             guids.update(new_guids)
     except HttpNotFoundException:
         pass  # ALBA Backend has been deleted, we don't care we can't find the linked guids
     except HttpForbiddenException as fe:
         AlbaBackend._logger.exception(
             'Collecting remote ALBA Backend information failed due to permission issues. {0}'
             .format(fe))
         _exceptions.append('not_allowed')
     except Exception as ex:
         AlbaBackend._logger.exception(
             'Collecting remote ALBA Backend information failed with error: {0}'
             .format(ex))
         _exceptions.append('unknown')
Ejemplo n.º 7
0
    def __init__(self, ip, port, credentials=None, verify=False, version="*", raw_response=False):
        """
        Initializes the object with credentials and connection information
        """
        if credentials is not None and len(credentials) != 2:
            raise RuntimeError(
                "Credentials should be None (no authentication) or a tuple containing client_id and client_secret (authenticated)"
            )
        self.ip = ip
        self.port = port
        self.client_id = credentials[0] if credentials is not None else None
        self.client_secret = credentials[1] if credentials is not None else None
        self._url = "https://{0}:{1}/api".format(ip, port)
        self._key = hashlib.sha256(
            "{0}{1}{2}{3}".format(self.ip, self.port, self.client_id, self.client_secret)
        ).hexdigest()
        self._token = None
        self._verify = verify
        self._version = version
        self._raw_response = raw_response
        try:
            from ovs.extensions.storage.volatilefactory import VolatileFactory

            self._volatile_client = VolatileFactory.get_client()
        except ImportError:
            self._volatile_client = None
Ejemplo n.º 8
0
    def _clean():
        """
        Resets every mocked subsystem and storage backend between unittests and
        returns the fresh (volatile, persistent) client pair.
        """
        volatile = VolatileFactory.get_client()
        persistent = PersistentFactory.get_client()

        # Reset each mock's internal state via its private _clean hook
        # noinspection PyProtectedMember
        volatile._clean()
        # noinspection PyProtectedMember
        persistent._clean()
        # noinspection PyProtectedMember
        SSHClient._clean()
        # noinspection PyProtectedMember
        SystemdMock._clean()
        # noinspection PyProtectedMember
        MDSClient._clean()
        # noinspection PyProtectedMember
        Decorators._clean()
        # noinspection PyProtectedMember
        MockedSSHClient._clean()
        # noinspection PyProtectedMember
        StorageRouterClient._clean()

        # Drop captured logs, test hooks and patched function pointers
        Logger._logs = {}
        DataList._test_hooks = {}
        Toolbox._function_pointers = {}
        # Clean underlying persistent store
        Configuration.get_client()._clean()

        # Remove unittest Arakoon config files left on disk
        for file_name in glob.glob(
                ArakoonClusterConfig.CONFIG_FILE.format('unittest*')):
            os.remove(file_name)

        # Remove unittest scratch directories
        for full_path in glob.glob(DalHelper.UNITTEST_DIR.format('*')):
            shutil.rmtree(full_path)

        return volatile, persistent
Ejemplo n.º 9
0
 def load_foreign_relations(object_type):
     """
     Returns a mapping of every relation pointing towards the given hybrid
     object type. The mapping is cached in volatile storage so subsequent
     lookups are faster.
     """
     relation_key = 'ovs_relations_{0}'.format(object_type.__name__.lower())
     volatile = VolatileFactory.get_client()
     cached = volatile.get(relation_key)
     if cached is not None:
         Toolbox.log_cache_hit('relations', True)
         return cached

     Toolbox.log_cache_hit('relations', False)
     relation_info = {}
     hybrid_structure = HybridRunner.get_hybrids()
     for class_descriptor in hybrid_structure.values():  # Extended objects
         cls = Descriptor().load(class_descriptor).get_object()
         for relation in cls._relations:
             if relation.foreign_type is None:
                 # Self-referencing relation
                 remote_class = cls
             else:
                 identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                 redirected = (identifier in hybrid_structure and
                               identifier != hybrid_structure[identifier]['identifier'])
                 if redirected:
                     # The foreign type was extended; follow the redirection
                     remote_class = Descriptor().load(hybrid_structure[identifier]).get_object()
                 else:
                     remote_class = relation.foreign_type
             if remote_class.__name__ == object_type.__name__:
                 relation_info[relation.foreign_key] = {'class': Descriptor(cls).descriptor,
                                                        'key': relation.name,
                                                        'list': not relation.onetoone}
     volatile.set(relation_key, relation_info)
     return relation_info
Ejemplo n.º 10
0
        def new_function(*args, **kwargs):
            """
            Rate-limiting wrapper: tracks call timestamps per client IP in the
            volatile cache and raises Throttled once the configured limit
            ('amount' calls per 'per' seconds, from the enclosing decorator)
            is exceeded.
            """
            request = _find_request(args)

            now = time.time()
            # One rate-limit bucket per decorated function per client IP
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            with VolatileMutex(key):
                rate_info = client.get(key, {'calls': [],
                                             'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        # Still inside an active penalty window
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        raise Throttled(wait=active_timeout - now)
                    else:
                        # Penalty window expired; clear it
                        rate_info['timeout'] = None
                # Keep only calls inside the sliding window and record this one
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    # Limit exceeded: persist a new penalty window before raising
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    raise Throttled(wait=timeout)
                client.set(key, rate_info)
            return f(*args, **kwargs)
Ejemplo n.º 11
0
 def __init__(self,
              ip,
              port,
              credentials=None,
              verify=False,
              version='*',
              raw_response=False):
     """
     Keeps the connection settings and derives a cache key so tokens can be
     reused across clients talking to the same endpoint.
     """
     if credentials is not None and len(credentials) != 2:
         raise RuntimeError(
             'Credentials should be None (no authentication) or a tuple containing client_id and client_secret (authenticated)'
         )
     self.ip = ip
     self.port = port
     has_credentials = credentials is not None
     self.client_id = credentials[0] if has_credentials else None
     self.client_secret = credentials[1] if has_credentials else None
     self._url = 'https://{0}:{1}/api'.format(ip, port)
     # Fingerprint of endpoint + credentials, used as cache key
     fingerprint = '{0}{1}{2}{3}'.format(self.ip, self.port, self.client_id, self.client_secret)
     self._key = hashlib.sha256(fingerprint).hexdigest()
     self._token = None
     self._verify = verify
     self._version = version
     self._raw_response = raw_response
     try:
         from ovs.extensions.storage.volatilefactory import VolatileFactory
         self._volatile_client = VolatileFactory.get_client()
     except ImportError:
         # Storage extensions unavailable: run without a volatile cache
         self._volatile_client = None
Ejemplo n.º 12
0
 def new_function(self, request, *args, **kwargs):
     """
     Rate-limiting wrapper: tracks call timestamps per client IP in the
     volatile cache, guarded by a mutex, and raises Throttled once the
     configured limit ('amount' calls per 'per' seconds, from the enclosing
     decorator) is exceeded.
     """
     now = time.time()
     # One rate-limit bucket per decorated function per client IP
     key = 'ovs_api_limit_{0}.{1}_{2}'.format(
         f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
     client = VolatileFactory.get_client()
     mutex = VolatileMutex(key)
     try:
         mutex.acquire()
         rate_info = client.get(key, {'calls': [], 'timeout': None})
         active_timeout = rate_info['timeout']
         if active_timeout is not None:
             if active_timeout > now:
                 # Still inside an active penalty window
                 raise Throttled(wait=active_timeout - now)
             else:
                 # Penalty window expired; clear it
                 rate_info['timeout'] = None
         # Keep only calls inside the sliding window and record this one
         rate_info['calls'] = [
             call for call in rate_info['calls'] if call > (now - per)
         ] + [now]
         calls = len(rate_info['calls'])
         if calls > amount:
             # Limit exceeded: persist a new penalty window before raising
             rate_info['timeout'] = now + timeout
             client.set(key, rate_info)
             raise Throttled(wait=timeout)
         client.set(key, rate_info)
     finally:
         mutex.release()
     return f(self, request, *args, **kwargs)
Ejemplo n.º 13
0
        def new_function(*args, **kwargs):
            """
            Rate-limiting wrapper: tracks call timestamps per client IP in the
            volatile cache and raises HttpTooManyRequestsException (HTTP 429)
            once the configured limit ('amount' calls per 'per' seconds, from
            the enclosing decorator) is exceeded.
            """
            request = _find_request(args)

            now = time.time()
            # One rate-limit bucket per decorated function per client IP
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [],
                                             'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        # Still inside an active penalty window
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        raise HttpTooManyRequestsException(error='rate_limit_timeout',
                                                           error_description='Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2)))
                    else:
                        # Penalty window expired; clear it
                        rate_info['timeout'] = None
                # Keep only calls inside the sliding window and record this one
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    # Limit exceeded: persist a new penalty window before raising
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    raise HttpTooManyRequestsException(error='rate_limit_reached',
                                                       error_description='Rate limit reached ({0} in last {1}s)'.format(calls, per))
                client.set(key, rate_info)
            return f(*args, **kwargs)
 def _load_backend_info(_connection_info, _alba_backend_guid):
     """
     Worker that fetches the local summary and live status of one remote ALBA
     Backend and merges the outcome into the shared 'return_value' dict
     (closure variable, guarded by 'lock').
     :param _connection_info: Connection information for the remote cluster
     :param _alba_backend_guid: Guid of the ALBA Backend to query
     :return: None (results are written into 'return_value')
     """
     client = OVSClient.get_instance(
         connection_info=_connection_info,
         cache_store=VolatileFactory.get_client())
     # Default to UNKNOWN until the remote call tells us otherwise
     return_value[_alba_backend_guid][
         'live_status'] = AlbaBackend.STATUSES.UNKNOWN
     try:
         info = client.get(
             '/alba/backends/{0}/'.format(_alba_backend_guid),
             params={'contents': 'local_summary,live_status'})
         with lock:  # 'return_value' is shared across worker threads
             return_value[_alba_backend_guid].update(
                 info['local_summary'])
             return_value[_alba_backend_guid]['live_status'] = info[
                 'live_status']
     except HttpNotFoundException as ex:
         return_value[_alba_backend_guid]['error'] = 'backend_deleted'
         # Use the class-level logger here for consistency with the generic
         # exception handler below (was 'self._logger').
         AlbaBackend._logger.warning(
             'AlbaBackend {0} STATUS set as FAILURE due to HttpNotFoundException: {1}'
             .format(self.name, ex))
         return_value[_alba_backend_guid][
             'live_status'] = AlbaBackend.STATUSES.FAILURE
     except HttpForbiddenException:
         return_value[_alba_backend_guid]['error'] = 'not_allowed'
     except Exception as ex:
         return_value[_alba_backend_guid]['error'] = 'unknown'
         AlbaBackend._logger.exception(
             'Collecting remote ALBA Backend information failed with error: {0}'
             .format(ex))
Ejemplo n.º 15
0
 def new_function(self, request, *args, **kwargs):
     """
     Rate-limiting wrapper around the decorated view: keeps a sliding window
     of call timestamps per client IP in the volatile cache and raises
     Throttled once the limit is exceeded.
     """
     now = time.time()
     # One bucket per decorated function per client IP
     cache_key = 'ovs_api_limit_{0}.{1}_{2}'.format(
         f.__module__, f.__name__,
         request.META['HTTP_X_REAL_IP']
     )
     client = VolatileFactory.get_client()
     mutex = VolatileMutex(cache_key)
     try:
         mutex.acquire()
         state = client.get(cache_key, {'calls': [],
                                        'timeout': None})
         throttle_until = state['timeout']
         if throttle_until is not None:
             if throttle_until > now:
                 # An earlier violation's penalty window is still active
                 raise Throttled(wait=throttle_until - now)
             # Penalty window has expired; clear it
             state['timeout'] = None
         # Drop timestamps that fell out of the sliding window, add this call
         window_start = now - per
         recent_calls = [call for call in state['calls'] if call > window_start]
         recent_calls.append(now)
         state['calls'] = recent_calls
         if len(recent_calls) > amount:
             # Too many calls: persist a fresh penalty window, then raise
             state['timeout'] = now + timeout
             client.set(cache_key, state)
             raise Throttled(wait=timeout)
         client.set(cache_key, state)
     finally:
         mutex.release()
     return f(self, request, *args, **kwargs)
Ejemplo n.º 16
0
 def new_function(self, request, *args, **kwargs):
     """
     Rate-limiting wrapper: tracks call timestamps per client IP in the
     volatile cache and, once the configured limit ('amount' calls per 'per'
     seconds, from the enclosing decorator) is exceeded, returns an
     (HttpResponse, payload, 429) tuple instead of calling the wrapped view.
     """
     now = time.time()
     # One rate-limit bucket per decorated function per client IP
     key = 'ovs_api_limit_{0}.{1}_{2}'.format(
         f.__module__, f.__name__,
         request.META['HTTP_X_REAL_IP']
     )
     client = VolatileFactory.get_client()
     mutex = VolatileMutex(key)
     try:
         mutex.acquire()
         rate_info = client.get(key, {'calls': [],
                                      'timeout': None})
         active_timeout = rate_info['timeout']
         if active_timeout is not None:
             if active_timeout > now:
                 # Still inside an active penalty window: report 429
                 return HttpResponse, {'error_code': 'rate_limit_timeout',
                                       'error': 'Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2))}, 429
             else:
                 # Penalty window expired; clear it
                 rate_info['timeout'] = None
         # Keep only calls inside the sliding window and record this one
         rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
         calls = len(rate_info['calls'])
         if calls > amount:
             # Limit exceeded: persist a new penalty window, then report 429
             rate_info['timeout'] = now + timeout
             client.set(key, rate_info)
             return HttpResponse, {'error_code': 'rate_limit_reached',
                                   'error': 'Rate limit reached ({0} in last {1}s)'.format(calls, per)}, 429
         client.set(key, rate_info)
     finally:
         mutex.release()
     return f(self, request, *args, **kwargs)
Ejemplo n.º 17
0
 def load_foreign_relations(object_type):
     """
     This method will return a mapping of all relations towards a certain hybrid object type.
     The resulting mapping will be stored in volatile storage so it can be fetched faster
     """
     relation_key = 'ovs_relations_{0}'.format(object_type.__name__.lower())
     volatile = VolatileFactory.get_client()
     relation_info = volatile.get(relation_key)
     if relation_info is None:
         Toolbox.log_cache_hit('relations', False)
         relation_info = {}
         hybrid_structure = HybridRunner.get_hybrids()
         for class_descriptor in hybrid_structure.values():  # Extended objects
             cls = Descriptor().load(class_descriptor).get_object()
             for relation in cls._relations:
                 if relation.foreign_type is None:
                     # Self-referencing relation: points back to its own class
                     remote_class = cls
                 else:
                     identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                     # When the foreign type was extended elsewhere, the hybrid
                     # structure redirects its identifier to the extended class
                     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                         remote_class = Descriptor().load(hybrid_structure[identifier]).get_object()
                     else:
                         remote_class = relation.foreign_type
                 itemname = remote_class.__name__
                 if itemname == object_type.__name__:
                     # This relation targets the requested type: record it
                     relation_info[relation.foreign_key] = {'class': Descriptor(cls).descriptor,
                                                            'key': relation.name,
                                                            'list': not relation.onetoone}
         volatile.set(relation_key, relation_info)
     else:
         Toolbox.log_cache_hit('relations', True)
     return relation_info
Ejemplo n.º 18
0
    def __init__(self, object_type, query, key=None):
        """
        Sets up a DataList executing ``query`` for ``object_type``. The result is
        cached under ``key`` or, when omitted, under a hash of the query itself.

        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute
        :param key: A key under which the result must be cached
        """
        super(DataList, self).__init__()

        if key is not None:
            suffix = key
        else:
            # No explicit key: hash the query (including the object type) instead
            query_info = copy.deepcopy(query)
            query_info['object'] = object_type.__name__
            suffix = hashlib.sha256(json.dumps(query_info)).hexdigest()
        self._key = '{0}_{1}'.format(DataList.NAMESPACE, suffix)

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._can_cache = True
        self._object_type = object_type
        self._data = {}
        self._objects = {}
        self._guids = None
        self._executed = False
        self._shallow_sort = True
        self.from_cache = None
Ejemplo n.º 19
0
        def new_function(*args, **kwargs):
            """
            Rate-limiting wrapper: keeps a sliding window of call timestamps
            per client IP in the volatile cache and raises
            HttpTooManyRequestsException (HTTP 429) once the configured limit
            ('amount' calls per 'per' seconds, from the enclosing decorator)
            is exceeded.
            """
            request = _find_request(args)

            now = time.time()
            # One rate-limit bucket per decorated function per client IP
            key = 'ovs_api_limit_{0}.{1}_{2}'.format(
                f.__module__, f.__name__,
                request.META['HTTP_X_REAL_IP']
            )
            client = VolatileFactory.get_client()
            with volatile_mutex(key):
                rate_info = client.get(key, {'calls': [],
                                             'timeout': None})
                active_timeout = rate_info['timeout']
                if active_timeout is not None:
                    if active_timeout > now:
                        # An earlier violation's penalty window is still active
                        logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, active_timeout - now))
                        raise HttpTooManyRequestsException(error='rate_limit_timeout',
                                                           error_description='Rate limit timeout ({0}s remaining)'.format(round(active_timeout - now, 2)))
                    else:
                        # Penalty window expired; clear it
                        rate_info['timeout'] = None
                # Keep only calls inside the sliding window and record this one
                rate_info['calls'] = [call for call in rate_info['calls'] if call > (now - per)] + [now]
                calls = len(rate_info['calls'])
                if calls > amount:
                    # Limit exceeded: persist a new penalty window before raising
                    rate_info['timeout'] = now + timeout
                    client.set(key, rate_info)
                    logger.warning('Call {0} is being throttled with a wait of {1}'.format(key, timeout))
                    raise HttpTooManyRequestsException(error='rate_limit_reached',
                                                       error_description='Rate limit reached ({0} in last {1}s)'.format(calls, per))
                client.set(key, rate_info)
            return f(*args, **kwargs)
Ejemplo n.º 20
0
 def __init__(self, name):
     """
     Initializes a mutex that is backed by the volatile store.
     """
     self.name = name  # Identifier of the lock in the volatile store
     self._volatile = VolatileFactory.get_client()
     self._has_lock = False  # Whether this instance currently owns the lock
     self._start = 0  # Timestamp bookkeeping for lock acquisition
Ejemplo n.º 21
0
    def partial_update(self, storagerouter, request, contents=None):
        """
        Update a StorageRouter, validating its failure-domain (FD) configuration
        and (re)scheduling the MDS and DTL checkup tasks when the FDs changed.
        :param storagerouter: The StorageRouter to update
        :param request: The incoming request carrying the update data
        :param contents: Optional comma-separated list of contents to serialize
        :return: HTTP 202 with the serialized StorageRouter, or HTTP 400 on
                 serializer validation errors
        """
        contents = None if contents is None else contents.split(',')
        previous_primary = storagerouter.primary_failure_domain
        previous_secondary = storagerouter.secondary_failure_domain
        serializer = FullSerializer(StorageRouter,
                                    contents=contents,
                                    instance=storagerouter,
                                    data=request.DATA)
        if serializer.is_valid():
            primary = storagerouter.primary_failure_domain
            secondary = storagerouter.secondary_failure_domain
            if primary is None:
                raise NotAcceptable(
                    'A StorageRouter must have a primary FD configured')
            if secondary is not None:
                if primary.guid == secondary.guid:
                    raise NotAcceptable(
                        'A StorageRouter cannot have the same FD for both primary and secondary'
                    )
                if len(secondary.primary_storagerouters) == 0:
                    raise NotAcceptable(
                        'The secondary FD should be set as primary FD by at least one StorageRouter'
                    )
            # Refuse to orphan an FD that other StorageRouters use as secondary
            if len(previous_primary.secondary_storagerouters) > 0 and len(previous_primary.primary_storagerouters) == 1 and \
                    previous_primary.primary_storagerouters[0].guid == storagerouter.guid and previous_primary.guid != primary.guid:
                raise NotAcceptable(
                    'Cannot change the primary FD as this StorageRouter is the only one serving it while it is used as secondary FD'
                )
            serializer.save()
            if previous_primary != primary or previous_secondary != secondary:
                cache = VolatileFactory.get_client()
                key_mds = 'ovs_dedupe_fdchange_mds_{0}'.format(
                    storagerouter.guid)
                key_dtl = 'ovs_dedupe_fdchange_dtl_{0}'.format(
                    storagerouter.guid)
                task_mds_id = cache.get(key_mds)
                task_dtl_id = cache.get(key_dtl)
                if task_mds_id:
                    # Key exists, task was already scheduled. If task is already running, the revoke message will be ignored
                    revoke(task_mds_id)
                if task_dtl_id:
                    revoke(task_dtl_id)
                async_mds_result = MDSServiceController.mds_checkup.s(
                ).apply_async(countdown=60)
                async_dtl_result = VDiskController.dtl_checkup.s().apply_async(
                    countdown=60)
                cache.set(key_mds, async_mds_result.id,
                          600)  # Store the MDS task id
                # Bug fix: the DTL task id was previously stored under key_mds
                # (overwriting the MDS id) while key_dtl was never written,
                # so DTL checkups could never be revoked/deduplicated.
                cache.set(key_dtl, async_dtl_result.id,
                          600)  # Store the DTL task id

            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        else:
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
Ejemplo n.º 22
0
 def tearDownClass(cls):
     """
     Cleans up after the unittests of this class have run.
     """
     # Restore the real time functions patched by fakesleep
     fakesleep.monkey_restore()
     # Wipe both storage backends so the next test class starts clean
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
Ejemplo n.º 23
0
 def __init__(self, name, wait=None):
     """
     Creates a mutex on top of the volatile store.
     """
     self.name = name    # Identifier of the lock in the volatile store
     self._wait = wait   # Default maximum time to wait when acquiring
     self._volatile = VolatileFactory.get_client()
     self._has_lock = False  # Whether this instance currently owns the lock
     self._start = 0         # Timestamp bookkeeping for lock acquisition
Ejemplo n.º 24
0
 def setUpClass(cls):
     """
     Sets up the unittest, mocking a certain set of 3rd party libraries and
     extensions so the tests can run without those libraries installed.
     """
     # Start from empty mocked stores
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
Ejemplo n.º 25
0
 def setUpClass(cls):
     """
     Prepares the unittest: the mocked storage clients are fetched and wiped
     so every test class starts from a known-empty state.
     """
     cls.volatile = VolatileFactory.get_client()
     cls.volatile.clean()
     cls.persistent = PersistentFactory.get_client()
     cls.persistent.clean()
Ejemplo n.º 26
0
 def __init__(self, name, wait=None):
     """
     Creates a volatile mutex with its own logger.
     """
     self._logger = LogHandler.get('extensions', 'volatile mutex')
     self._volatile = VolatileFactory.get_client()
     self.name = name    # Identifier of the lock in the volatile store
     self._wait = wait   # Default maximum time to wait when acquiring
     self._has_lock = False  # Whether this instance currently owns the lock
     self._start = 0         # Timestamp bookkeeping for lock acquisition
Ejemplo n.º 27
0
    def tearDownClass(cls):
        """
        Undoes the changes made during setUpClass.
        """
        # Drop any configuration data written during the tests
        Configuration._unittest_data = {}

        # Flush both mocked storage clients
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
Ejemplo n.º 28
0
    def tearDownClass(cls):
        """
        Reverts the state built up in setUpClass.
        """
        # Configuration data is kept in a class attribute during unittests
        Configuration._unittest_data = {}

        # Empty both mocked stores for the next test class
        cls.volatile = VolatileFactory.get_client()
        cls.volatile.clean()
        cls.persistent = PersistentFactory.get_client()
        cls.persistent.clean()
Ejemplo n.º 29
0
 def log_cache_hit(cache_type, hit):
     """
     Registers a cache hit or miss for the given cache type.
     :param cache_type: Name of the cache (e.g. 'descriptor', 'relations')
     :param hit: True for a hit, False for a miss
     :return: None
     """
     volatile = VolatileFactory.get_client()
     key = 'ovs_stats_cache_{0}_{1}'.format(cache_type, 'hit' if hit else 'miss')
     # Best effort only: statistics bookkeeping must never break the caller.
     # 'except Exception' instead of a bare 'except' so SystemExit and
     # KeyboardInterrupt are no longer swallowed.
     try:
         succeeded = volatile.incr(key)
         if not succeeded:
             # incr fails when the counter does not exist yet; initialize it
             volatile.set(key, 1)
     except Exception:
         pass
Ejemplo n.º 30
0
    def set_domains(self, storagerouter, domain_guids, recovery_domain_guids):
        """
        Configures the given domains to the StorageRouter.
        :param storagerouter: The StorageRouter to update
        :type storagerouter: ovs.dal.hybrids.storagerouter.StorageRouter
        :param domain_guids: A list of Domain guids
        :type domain_guids: list
        :param recovery_domain_guids: A list of Domain guids to set as recovery Domain
        :type recovery_domain_guids: list
        :return: None
        :rtype: None
        """
        change = False
        # Remove junctions that are no longer requested; strip already-present
        # guids from the input lists so only new ones remain afterwards
        for junction in storagerouter.domains:
            if junction.backup is False:
                if junction.domain_guid not in domain_guids:
                    junction.delete()
                    change = True
                else:
                    domain_guids.remove(junction.domain_guid)
            else:
                if junction.domain_guid not in recovery_domain_guids:
                    junction.delete()
                    change = True
                else:
                    recovery_domain_guids.remove(junction.domain_guid)
        # Create junctions for the remaining (new) guids; 'backup' marks a
        # recovery domain
        for domain_guid in domain_guids + recovery_domain_guids:
            junction = StorageRouterDomain()
            junction.domain = Domain(domain_guid)
            junction.backup = domain_guid in recovery_domain_guids
            junction.storagerouter = storagerouter
            junction.save()
            change = True

        # Schedule a task to run after 60 seconds, re-schedule task if another identical task gets triggered
        if change is True:
            cache = VolatileFactory.get_client()
            task_ids = cache.get(StorageRouterViewSet.DOMAIN_CHANGE_KEY)
            if task_ids:
                # Revoke previously scheduled (not yet started) checkups so
                # only the latest change triggers them
                for task_id in task_ids:
                    revoke(task_id)
            task_ids = [
                MDSServiceController.mds_checkup.s().apply_async(
                    countdown=60).id,
                VDiskController.dtl_checkup.s().apply_async(countdown=60).id,
                StorageDriverController.cluster_registry_checkup.s().
                apply_async(countdown=60).id
            ]
            cache.set(StorageRouterViewSet.DOMAIN_CHANGE_KEY, task_ids,
                      600)  # Store the task ids
            storagerouter.invalidate_dynamics(
                ['regular_domains', 'recovery_domains'])
Ejemplo n.º 31
0
 def log_cache_hit(cache_type, hit):
     """
     Registers a cache hit or miss with a specific type

     :param cache_type: Type of the cache (becomes part of the stats key)
     :param hit: True for a cache hit, False for a cache miss
     """
     volatile = VolatileFactory.get_client()
     key = 'ovs_stats_cache_{0}_{1}'.format(cache_type,
                                            'hit' if hit else 'miss')
     try:
         # incr returns a falsy value when the key does not exist yet
         successful = volatile.incr(key)
         if not successful:
             volatile.set(key, 1)
     except Exception:
         # Statistics are best-effort: an unreachable cache must never break
         # the calling code path. A bare 'except' would additionally swallow
         # SystemExit/KeyboardInterrupt, hence the narrowed 'Exception'.
         pass
Ejemplo n.º 32
0
 def _test_store(self, store_type, key=None, value=None):
     # type: (str, str, str) -> bool
     """
     Test specified store type
     :param store_type: name of the store type
     :type: str
     :param key: key content to test
     :type key: str
     :param value: value to put
     :type value: str
     :return: True when the store answered within the allowed attempts, False otherwise
     :rtype: bool
     """
     self.log_message('Testing {0} store...'.format(store_type))
     max_tries = 5
     tries = 0
     while tries < max_tries:
         if store_type == 'arakoon_voldrv':
             try:
                 cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|voldrv'))
                 configuration = Configuration.get('/ovs/arakoon/{0}/config'.format(cluster_name), raw=True)
                 client = PyrakoonStore(cluster=cluster_name, configuration=configuration)
                 client.nop()
                 break
             except Exception as message:
                 self.log_message('  Error during arakoon (voldrv) test: {0}'.format(message), 2)
         else:
             try:
                 if store_type == 'volatile':
                     # Reset the factory so a fresh client is built each attempt
                     VolatileFactory.store = None
                     volatile = VolatileFactory.get_client()
                     volatile.set(key, value)
                     if volatile.get(key) == value:
                         volatile.delete(key)
                         break
                     volatile.delete(key)
                 elif store_type == 'persistent':
                     persistent = PersistentFactory.get_client()
                     persistent.nop()
                     break
             except Exception as message:
                 self.log_message('  Error during {0} store test: {1}'.format(store_type, message), 3)
         key = 'ovs-watcher-{0}'.format(str(uuid.uuid4()))  # Get another key
         time.sleep(1)
         tries += 1
     if tries == max_tries:
         self.log_message('  {0} store not working correctly'.format(store_type), 2)
         return False
     self.log_message('  {0} store OK after {1} tries'.format(store_type, tries))
     # Bug fix: the success path previously fell through and returned None,
     # while the documented return type is a boolean
     return True
Ejemplo n.º 33
0
    def setUpClass(cls):
        """
        Prepares the unittest environment.

        Wipes the persistent and volatile stores, resets the mocked
        StorageRouterClient, patches time.sleep and seeds the configuration
        keys the tests rely on.
        """
        cls.persistent = PersistentFactory.get_client()
        cls.volatile = VolatileFactory.get_client()
        for store in (cls.persistent, cls.volatile):
            store.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        for config_key, config_value in [('/ovs/framework/arakoon_clusters|voldrv', 'voldrv'),
                                         ('/ovs/framework/hosts/1/ports', {'arakoon': [10000, 10100]}),
                                         ('/ovs/framework/rdma', False)]:
            Configuration.set(config_key, config_value)
Ejemplo n.º 34
0
    def setUpClass(cls):
        """
        Prepares the unittest environment.

        Wipes the persistent and volatile stores, resets the mocked
        StorageRouterClient, patches time.sleep and seeds the MDS
        configuration keys the tests rely on.
        """
        cls.persistent = PersistentFactory.get_client()
        cls.volatile = VolatileFactory.get_client()
        for store in (cls.persistent, cls.volatile):
            store.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        for config_key, config_value in [('/ovs/framework/storagedriver|mds_tlogs', 100),
                                         ('/ovs/framework/storagedriver|mds_maxload', 75),
                                         ('/ovs/framework/storagedriver|mds_safety', 2)]:
            Configuration.set(config_key, config_value)
    def setUpClass(cls):
        """
        Prepares the unittest environment.

        Both data stores and the mocked StorageRouterClient are reset,
        time.sleep is patched and the MDS configuration defaults are seeded.
        """
        cls.persistent = PersistentFactory.get_client()
        cls.volatile = VolatileFactory.get_client()
        for store in (cls.persistent, cls.volatile):
            store.clean()
        StorageRouterClient.clean()

        fakesleep.monkey_patch()
        for config_key, config_value in [('/ovs/framework/storagedriver|mds_tlogs', 100),
                                         ('/ovs/framework/storagedriver|mds_maxload', 75),
                                         ('/ovs/framework/storagedriver|mds_safety', 2)]:
            Configuration.set(config_key, config_value)
Ejemplo n.º 36
0
    def set_domains(self, storagerouter, domain_guids, recovery_domain_guids):
        """
        Configures the given domains to the StorageRouter.
        :param storagerouter: The StorageRouter to update
        :type storagerouter: StorageRouter
        :param domain_guids: A list of Domain guids
        :type domain_guids: list
        :param recovery_domain_guids: A list of Domain guids to set as recovery Domain
        :type recovery_domain_guids: list
        :return: None
        :rtype: None
        """
        change = False
        # Remove junctions that are no longer requested; strip already-present
        # guids from the input lists so only new ones remain afterwards
        for junction in storagerouter.domains:
            if junction.backup is False:
                if junction.domain_guid not in domain_guids:
                    junction.delete()
                    change = True
                else:
                    domain_guids.remove(junction.domain_guid)
            else:
                if junction.domain_guid not in recovery_domain_guids:
                    junction.delete()
                    change = True
                else:
                    recovery_domain_guids.remove(junction.domain_guid)
        # Create junctions for the remaining (new) guids; 'backup' marks a
        # recovery domain
        for domain_guid in domain_guids + recovery_domain_guids:
            junction = StorageRouterDomain()
            junction.domain = Domain(domain_guid)
            junction.backup = domain_guid in recovery_domain_guids
            junction.storagerouter = storagerouter
            junction.save()
            change = True

        # Schedule a task to run after 60 seconds, re-schedule task if another identical task gets triggered
        if change is True:
            cache = VolatileFactory.get_client()
            task_id_domain = cache.get(StorageRouterViewSet.DOMAIN_CHANGE_KEY)
            task_id_backup = cache.get(StorageRouterViewSet.RECOVERY_DOMAIN_CHANGE_KEY)
            if task_id_domain:
                revoke(task_id_domain)  # If key exists, task was already scheduled. If task is already running, the revoke message will be ignored
            if task_id_backup:
                revoke(task_id_backup)
            async_mds_result = MDSServiceController.mds_checkup.s().apply_async(countdown=60)
            async_dtl_result = VDiskController.dtl_checkup.s().apply_async(countdown=60)
            cache.set(StorageRouterViewSet.DOMAIN_CHANGE_KEY, async_mds_result.id, 600)  # Store the task id
            cache.set(StorageRouterViewSet.RECOVERY_DOMAIN_CHANGE_KEY, async_dtl_result.id, 600)  # Store the task id
            storagerouter.invalidate_dynamics(['regular_domains', 'recovery_domains'])
Ejemplo n.º 37
0
    def set_domains(self, storagerouter, domain_guids, recovery_domain_guids):
        """
        Configures the given domains to the StorageRouter.
        :param storagerouter: The StorageRouter to update
        :type storagerouter: StorageRouter
        :param domain_guids: A list of Domain guids
        :type domain_guids: list
        :param recovery_domain_guids: A list of Domain guids to set as recovery Domain
        :type recovery_domain_guids: list
        :return: None
        :rtype: None
        """
        change = False
        # Remove junctions that are no longer requested; strip already-present
        # guids from the input lists so only new ones remain afterwards
        for junction in storagerouter.domains:
            if junction.backup is False:
                if junction.domain_guid not in domain_guids:
                    junction.delete()
                    change = True
                else:
                    domain_guids.remove(junction.domain_guid)
            else:
                if junction.domain_guid not in recovery_domain_guids:
                    junction.delete()
                    change = True
                else:
                    recovery_domain_guids.remove(junction.domain_guid)
        # Create junctions for the remaining (new) guids; 'backup' marks a
        # recovery domain
        for domain_guid in domain_guids + recovery_domain_guids:
            junction = StorageRouterDomain()
            junction.domain = Domain(domain_guid)
            junction.backup = domain_guid in recovery_domain_guids
            junction.storagerouter = storagerouter
            junction.save()
            change = True

        # Schedule a task to run after 60 seconds, re-schedule task if another identical task gets triggered
        if change is True:
            cache = VolatileFactory.get_client()
            task_ids = cache.get(StorageRouterViewSet.DOMAIN_CHANGE_KEY)
            if task_ids:
                # Revoke previously scheduled (not yet started) checkups so
                # only the latest change triggers them
                for task_id in task_ids:
                    revoke(task_id)
            task_ids = [
                MDSServiceController.mds_checkup.s().apply_async(countdown=60).id,
                VDiskController.dtl_checkup.s().apply_async(countdown=60).id,
                StorageDriverController.cluster_registry_checkup.s().apply_async(countdown=60).id,
            ]
            cache.set(StorageRouterViewSet.DOMAIN_CHANGE_KEY, task_ids, 600)  # Store the task ids
            storagerouter.invalidate_dynamics(["regular_domains", "recovery_domains"])
Ejemplo n.º 38
0
 def new_function(self, request, *args, **kwargs):
     """
     Wrapped function

     Rate limiter around the decorated API call: at most 'amount' calls per
     'per' seconds per client IP are allowed; once exceeded, further calls
     are refused for 'timeout' seconds. 'f', 'amount', 'per', 'timeout' and
     'logger' come from the enclosing decorator scope.
     """
     now = time.time()
     # One rate-info record per (decorated function, client IP) pair
     key = 'ovs_api_limit_{0}.{1}_{2}'.format(
         f.__module__, f.__name__, request.META['HTTP_X_REAL_IP'])
     client = VolatileFactory.get_client()
     mutex = VolatileMutex(key)
     try:
         # Serialize concurrent requests updating the same rate record
         mutex.acquire()
         rate_info = client.get(key, {'calls': [], 'timeout': None})
         active_timeout = rate_info['timeout']
         if active_timeout is not None:
             if active_timeout > now:
                 # Client is still inside its penalty window: refuse the call
                 logger.warning(
                     'Call {0} is being throttled with a wait of {1}'.
                     format(key, active_timeout - now))
                 return HttpResponse, {
                     'error_code':
                     'rate_limit_timeout',
                     'error':
                     'Rate limit timeout ({0}s remaining)'.format(
                         round(active_timeout - now, 2))
                 }, 429
             else:
                 # Penalty window expired; clear it
                 rate_info['timeout'] = None
         # Keep only the calls inside the sliding window and add this one
         rate_info['calls'] = [
             call for call in rate_info['calls'] if call > (now - per)
         ] + [now]
         calls = len(rate_info['calls'])
         if calls > amount:
             # Window exceeded: start a new penalty timeout
             rate_info['timeout'] = now + timeout
             client.set(key, rate_info)
             logger.warning(
                 'Call {0} is being throttled with a wait of {1}'.
                 format(key, timeout))
             return HttpResponse, {
                 'error_code':
                 'rate_limit_reached',
                 'error':
                 'Rate limit reached ({0} in last {1}s)'.format(
                     calls, per)
             }, 429
         client.set(key, rate_info)
     finally:
         mutex.release()
     return f(self, request, *args, **kwargs)
Ejemplo n.º 39
0
 def _statistics(self, dynamic):
     """
     Loads statistics from the ASD

     Aggregates the raw per-operation counters of this OSD into
     n/min/max/avg buckets and derives an operations-per-second figure by
     comparing against the previously cached snapshot.
     """
     # Maps aggregated stat name -> raw counter sources reported by the OSD
     data_keys = {'apply': ['Apply', 'Apply2'],
                  'multi_get': ['MultiGet', 'MultiGet2'],
                  'range': ['Range'],
                  'range_entries': ['RangeEntries'],
                  'statistics': ['Statistics']}
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(self._key, 'statistics_previous')
     previous_stats = volatile.get(prev_key, default={})
     try:
         all_statistics = self.alba_backend.osd_statistics
         if self.osd_id not in all_statistics:
             return {}
         data = all_statistics[self.osd_id]
         statistics = {'timestamp': time.time()}
         # Seconds since the previous snapshot (0 when there is none)
         delta = statistics['timestamp'] - previous_stats.get('timestamp', statistics['timestamp'])
         for key, sources in data_keys.iteritems():
             if key not in statistics:
                 statistics[key] = {'n': 0, 'max': [], 'min': [], 'avg': []}
             for source in sources:
                 if source in data:
                     statistics[key]['n'] += data[source]['n']
                     statistics[key]['max'].append(data[source]['max'])
                     statistics[key]['min'].append(data[source]['min'])
                     # Weight the average by call count so sources merge correctly
                     statistics[key]['avg'].append(data[source]['avg'] * data[source]['n'])
             statistics[key]['max'] = max(statistics[key]['max']) if len(statistics[key]['max']) > 0 else 0
             statistics[key]['min'] = min(statistics[key]['min']) if len(statistics[key]['min']) > 0 else 0
             if statistics[key]['n'] > 0:
                 statistics[key]['avg'] = sum(statistics[key]['avg']) / float(statistics[key]['n'])
             else:
                 statistics[key]['avg'] = 0
             # Operations per second, relative to the previous snapshot
             if key in previous_stats:
                 if delta < 0:
                     statistics[key]['n_ps'] = 0
                 elif delta == 0:
                     statistics[key]['n_ps'] = previous_stats[key].get('n_ps', 0)
                 else:
                     statistics[key]['n_ps'] = max(0, (statistics[key]['n'] - previous_stats[key]['n']) / delta)
             else:
                 statistics[key]['n_ps'] = 0
         volatile.set(prev_key, statistics, dynamic.timeout * 10)
         return statistics
     except Exception:
         # This might fail every now and then, e.g. on disk removal. Let's ignore for now.
         return {}
Ejemplo n.º 40
0
 def _statistics(self, dynamic):
     """
     Loads statistics from the ASD

     Aggregates the raw per-operation counters of this ASD into
     n/min/max/avg buckets and derives an operations-per-second figure by
     comparing against the previously cached snapshot.
     """
     # Maps aggregated stat name -> raw counter sources reported by the ASD
     data_keys = {'apply': ['Apply', 'Apply2'],
                  'multi_get': ['MultiGet', 'MultiGet2'],
                  'range': ['Range'],
                  'range_entries': ['RangeEntries'],
                  'statistics': ['Statistics']}
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(self._key, 'statistics_previous')
     previous_stats = volatile.get(prev_key, default={})
     try:
         all_statistics = self.alba_backend.asd_statistics
         if self.asd_id not in all_statistics:
             return {}
         data = all_statistics[self.asd_id]
         statistics = {'timestamp': time.time()}
         # Seconds since the previous snapshot (0 when there is none)
         delta = statistics['timestamp'] - previous_stats.get('timestamp', statistics['timestamp'])
         for key, sources in data_keys.iteritems():
             if key not in statistics:
                 statistics[key] = {'n': 0, 'max': [], 'min': [], 'avg': []}
             for source in sources:
                 if source in data:
                     statistics[key]['n'] += data[source]['n']
                     statistics[key]['max'].append(data[source]['max'])
                     statistics[key]['min'].append(data[source]['min'])
                     # Weight the average by call count so sources merge correctly
                     statistics[key]['avg'].append(data[source]['avg'] * data[source]['n'])
             statistics[key]['max'] = max(statistics[key]['max']) if len(statistics[key]['max']) > 0 else 0
             statistics[key]['min'] = min(statistics[key]['min']) if len(statistics[key]['min']) > 0 else 0
             if statistics[key]['n'] > 0:
                 statistics[key]['avg'] = sum(statistics[key]['avg']) / float(statistics[key]['n'])
             else:
                 statistics[key]['avg'] = 0
             # Operations per second, relative to the previous snapshot
             if key in previous_stats:
                 if delta < 0:
                     statistics[key]['n_ps'] = 0
                 elif delta == 0:
                     statistics[key]['n_ps'] = previous_stats[key].get('n_ps', 0)
                 else:
                     statistics[key]['n_ps'] = max(0, (statistics[key]['n'] - previous_stats[key]['n']) / delta)
             else:
                 statistics[key]['n_ps'] = 0
         volatile.set(prev_key, statistics, dynamic.timeout * 10)
         return statistics
     except Exception:
         # This might fail every now and then, e.g. on disk removal. Let's ignore for now.
         return {}
Ejemplo n.º 41
0
 def try_get(key, fallback):
     """
     Returns a value linked to a certain key from the volatile store.
     If not found in the volatile store, it will try fetch it from the persistent
     store. If not found, it returns the fallback

     :param key: The key to look up
     :param fallback: Value to return when the key is in neither store
     :return: The cached/persisted value, or the fallback
     """
     volatile = VolatileFactory.get_client()
     data = volatile.get(key)
     if data is None:
         try:
             persistent = PersistentFactory.get_client()
             data = persistent.get(key)
         except Exception:
             # Key missing or store unreachable: fall back to the default.
             # Narrowed from a bare 'except', which would also swallow
             # SystemExit/KeyboardInterrupt.
             data = fallback
         # Repopulate the volatile cache so the next lookup is fast
         volatile.set(key, data)
     return data
Ejemplo n.º 42
0
 def try_get(key, fallback):
     """
     Returns a value linked to a certain key from the volatile store.
     If not found in the volatile store, it will try fetch it from the persistent
     store. If not found, it returns the fallback

     :param key: The key to look up
     :param fallback: Value to return when the key is in neither store
     :return: The cached/persisted value, or the fallback
     """
     volatile = VolatileFactory.get_client()
     data = volatile.get(key)
     if data is None:
         try:
             persistent = PersistentFactory.get_client()
             data = persistent.get(key)
         except Exception:
             # Key missing or store unreachable: fall back to the default.
             # Narrowed from a bare 'except', which would also swallow
             # SystemExit/KeyboardInterrupt.
             data = fallback
         # Repopulate the volatile cache so the next lookup is fast
         volatile.set(key, data)
     return data
Ejemplo n.º 43
0
    def __init__(self, object_type=None, guid=None, cached=True):
        """
        Initializes a descriptor for a given type. Optionally already providing a guid for the
        instanciator

        :param object_type: Hybrid class to describe; None yields an uninitialized descriptor
        :param guid: Optional guid stored alongside the descriptor
        :param cached: When False, a cached descriptor is rebuilt and re-stored
        """

        # Initialize super class
        super(Descriptor, self).__init__()

        if object_type is None:
            self.initialized = False
        else:
            self.initialized = True
            self._volatile = VolatileFactory.get_client()

            type_name = object_type.__name__
            module_name = object_type.__module__.split('.')[-1]
            fqm_name = 'ovs.dal.hybrids.{0}'.format(module_name)
            try:
                # Validate that the type is importable as a hybrid before describing it
                module = __import__(fqm_name, level=0, fromlist=[type_name])
                _ = getattr(module, type_name)
            except (ImportError, AttributeError):
                logger.info('Received object type {0} is not a hybrid'.format(
                    object_type))
                raise TypeError(
                    'Invalid type for Descriptor: {0}'.format(object_type))
            # Identifier is stable per (type, module) pair
            identifier = '{0}_{1}'.format(type_name,
                                          hashlib.sha1(fqm_name).hexdigest())
            key = 'ovs_descriptor_{0}'.format(identifier)

            self._descriptor = self._volatile.get(key)
            if self._descriptor is None or cached is False:
                if self._descriptor is None:
                    logger.debug(
                        'Object type {0} was translated to {1}.{2}'.format(
                            object_type, fqm_name, type_name))
                Toolbox.log_cache_hit('descriptor', False)
                self._descriptor = {
                    'fqmn': fqm_name,
                    'type': type_name,
                    'identifier': identifier,
                    'version': 3
                }
                self._volatile.set(key, self._descriptor)
            else:
                Toolbox.log_cache_hit('descriptor', True)
            # The guid is per-instance and never part of the cached descriptor
            self._descriptor['guid'] = guid
Ejemplo n.º 44
0
 def retrieve(self):
     """
     Returns statistics information

     Collects stats for every configured memcache node; nodes currently
     marked dead by the client are reported under 'offline'.
     """
     nodes = MemcacheViewSet._get_memcache_nodes()
     client = VolatileFactory.get_client('memcache')
     online_nodes = ['%s:%s' % (node.ip, node.port)
                     for node in client._client.servers
                     if node.deaduntil == 0]
     stats = {'nodes': [], 'offline': []}
     for node in nodes:
         if node not in online_nodes:
             stats['offline'].append(node)
             continue
         node_stats = MemcacheViewSet._node_stats(node)
         node_stats['node'] = node
         stats['nodes'].append(node_stats)
     return Response(stats)
Ejemplo n.º 45
0
 def _save_pks(name, keys):
     """
     Pages and saves a set

     Stores the primary keys as a linked list of pages in the volatile
     store: each page holds up to DataList.partsize_pks keys plus a pointer
     to the next page (None terminates the chain).
     """
     internal_key = 'ovs_primarykeys_{0}_{{0}}'.format(name)
     volatile = VolatileFactory.get_client()
     all_keys = list(keys)
     if len(all_keys) <= DataList.partsize_pks:
         volatile.set(internal_key.format(0), [all_keys, None])
         return
     # Write pages back-to-front so each page can point at its successor
     next_pointer = None
     for offset in reversed(range(0, len(all_keys), DataList.partsize_pks)):
         page = [all_keys[offset:offset + DataList.partsize_pks], next_pointer]
         next_pointer = internal_key.format(offset)
         volatile.set(next_pointer, page)
Ejemplo n.º 46
0
 def calculate_delta(key, dynamic, current_stats):
     """
     Calculate statistics deltas

     Derives per-second values ('<stat>_ps') for every known statistic by
     comparing against the previously cached snapshot, then stores the
     current snapshot for the next run.
     """
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(key, 'statistics_previous')
     previous_stats = volatile.get(prev_key, default={})
     # Seconds since the previous snapshot (0 when there is none); invariant
     # over the loop, so computed once up front
     timestamp_delta = current_stats['timestamp'] - previous_stats.get('timestamp', current_stats['timestamp'])
     for stat_key in current_stats.keys():
         if stat_key not in StorageDriverClient.STAT_KEYS:
             continue
         ps_key = '{0}_ps'.format(stat_key)
         if timestamp_delta < 0:
             current_stats[ps_key] = 0
         elif timestamp_delta == 0:
             current_stats[ps_key] = previous_stats.get(ps_key, 0)
         else:
             current_stats[ps_key] = max(0, (current_stats[stat_key] - previous_stats[stat_key]) / timestamp_delta)
     volatile.set(prev_key, current_stats, dynamic.timeout * 10)
Ejemplo n.º 47
0
 def retrieve(self):
     """
     Returns statistics information

     Gathers per-node stats for every configured memcache node; nodes the
     client currently marks dead end up under 'offline'.
     """
     nodes = MemcacheViewSet._get_memcache_nodes()
     client = VolatileFactory.get_client('memcache')
     online_nodes = ['%s:%s' % (node.ip, node.port)
                     for node in client._client.servers
                     if node.deaduntil == 0]
     stats = {'nodes': [], 'offline': []}
     for node in nodes:
         if node not in online_nodes:
             stats['offline'].append(node)
             continue
         node_stats = MemcacheViewSet._node_stats(node)
         node_stats['node'] = node
         stats['nodes'].append(node_stats)
     return Response(stats)
Ejemplo n.º 48
0
 def calculate_delta(key, dynamic, current_stats):
     """
     Calculate statistics deltas

     Adds per-second values ('<stat>_ps') for every known statistic based
     on the previously cached snapshot, then caches the current snapshot.
     """
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(key, 'statistics_previous')
     previous_stats = volatile.get(prev_key, default={})
     # Seconds since the previous snapshot (0 when there is none); invariant
     # over the loop, so computed once up front
     timestamp_delta = current_stats['timestamp'] - previous_stats.get('timestamp', current_stats['timestamp'])
     for stat_key in current_stats.keys():
         if stat_key not in StorageDriverClient.STAT_KEYS:
             continue
         ps_key = '{0}_ps'.format(stat_key)
         if timestamp_delta < 0:
             current_stats[ps_key] = 0
         elif timestamp_delta == 0:
             current_stats[ps_key] = previous_stats.get(ps_key, 0)
         else:
             current_stats[ps_key] = max(0, (current_stats[stat_key] - previous_stats[stat_key]) / timestamp_delta)
     volatile.set(prev_key, current_stats, dynamic.timeout * 10)
Ejemplo n.º 49
0
 def _save_pks(name, keys):
     """
     Pages and saves a set

     Persists the primary keys to the volatile store as a chain of pages of
     at most DataList.partsize_pks keys; every page carries a pointer to
     the next one, with None marking the end of the chain.
     """
     internal_key = 'ovs_primarykeys_{0}_{{0}}'.format(name)
     volatile = VolatileFactory.get_client()
     all_keys = list(keys)
     if len(all_keys) <= DataList.partsize_pks:
         volatile.set(internal_key.format(0), [all_keys, None])
         return
     # Write pages back-to-front so each page can point at its successor
     next_pointer = None
     for offset in reversed(range(0, len(all_keys), DataList.partsize_pks)):
         page = [all_keys[offset:offset + DataList.partsize_pks], next_pointer]
         next_pointer = internal_key.format(offset)
         volatile.set(next_pointer, page)
Ejemplo n.º 50
0
 def async_sync_with_reality(storagerouter_guid=None, max_attempts=3):
     """
     Calls sync_with_reality, implements dedupe logic
     Keep existing task as it is, some tasks depend on it being sync, call async explicitly
     - if task was already called for this storagerouter, revoke it and call a new one
     - ensures only 1 task runs for a storagerouter and only the last task is executed
     :param storagerouter_guid: Guid of the StorageRouter to sync
     :param max_attempts: Maximum number of attempts passed to the task
     :return: None
     """
     cache = VolatileFactory.get_client()
     dedupe_key = 'ovs_dedupe_sync_with_reality_{0}'.format(storagerouter_guid)
     pending_task_id = cache.get(dedupe_key)
     if pending_task_id:
         # A task was already scheduled for this StorageRouter: revoke it so
         # only the most recent request runs. A task that already started
         # simply ignores the revoke message.
         revoke(pending_task_id)
     scheduled = DiskController.sync_with_reality.s().apply_async(args=[storagerouter_guid, max_attempts], countdown=15)
     cache.set(dedupe_key, scheduled.id, 600)  # Remember the task id for later dedupe
Ejemplo n.º 51
0
 def _statistics(self, dynamic):
     """
     Fetches the Statistics for the vDisk.

     Reads the raw counters from the volumedriver, precalculates the
     configured sums and derives per-second values by comparing against the
     previously cached snapshot.
     """
     client = StorageDriverClient()
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(self._key, 'statistics_previous')
     # Load data from volumedriver
     if self.volume_id and self.vpool:
         try:
             vdiskstats = self.storagedriver_client.statistics_volume(
                 str(self.volume_id))
         except:
             # Volumedriver unreachable or volume gone: fall back to zeroes
             vdiskstats = client.empty_statistics()
     else:
         vdiskstats = client.empty_statistics()
     # Load volumedriver data in dictionary
     vdiskstatsdict = {}
     for key, value in vdiskstats.__class__.__dict__.items():
         # Only copy the known counter properties of the statistics object
         if type(value) is property and key in client.stat_counters:
             vdiskstatsdict[key] = getattr(vdiskstats, key)
     # Precalculate sums
     for key, items in client.stat_sums.iteritems():
         vdiskstatsdict[key] = 0
         for item in items:
             vdiskstatsdict[key] += vdiskstatsdict[item]
     vdiskstatsdict['timestamp'] = time.time()
     # Calculate delta's based on previously loaded dictionary
     previousdict = volatile.get(prev_key, default={})
     for key in vdiskstatsdict.keys():
         if key in client.stat_keys:
             # Seconds since the previous snapshot (0 when there is none)
             delta = vdiskstatsdict['timestamp'] - previousdict.get(
                 'timestamp', vdiskstatsdict['timestamp'])
             if delta < 0:
                 vdiskstatsdict['{0}_ps'.format(key)] = 0
             elif delta == 0:
                 vdiskstatsdict['{0}_ps'.format(key)] = previousdict.get(
                     '{0}_ps'.format(key), 0)
             else:
                 vdiskstatsdict['{0}_ps'.format(key)] = max(
                     0, (vdiskstatsdict[key] - previousdict[key]) / delta)
     # Cache the snapshot for the next delta calculation
     volatile.set(prev_key, vdiskstatsdict, dynamic.timeout * 10)
     # Returning the dictionary
     return vdiskstatsdict
Ejemplo n.º 52
0
    def __init__(self, object_type, query=None, key=None, guids=None):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        :param object_type: The type of the objects that have to be queried
        :param query: The query to execute. Example: {'type': DataList.where_operator.AND, 'items': [('storagedriver_id', DataList.operator.EQUALS, storagedriver_id)]}
        When query is None, it will default to a query which will not do any filtering
        :type query: dict or NoneType
        :param key: A key under which the result must be cached
        :type key: str
        :param guids: List of guids to use as a base
        These guids should be guids of objects related to the object_type param. If no object related to the guid could be found, these guids will not be included in the result
        When guids is None, it will default to querying all items
        :type guids: list[basestring] or NoneType
        """
        # Validation
        self.validate_guids(guids)
        self.validate_query(query)

        # Defaults: an AND-query without items does not filter anything out
        if query is None:
            query = {'type': DataList.where_operator.AND, 'items': []}

        super(DataList, self).__init__()

        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self._can_cache = True  # Whether the result may be cached under self._key
        self._object_type = object_type
        self._data = {}
        self._objects = {}
        self._guids = None  # Result guids; stays None until executed
        self._executed = False  # Set once the query has actually run
        self._shallow_sort = True
        self._provided_guids = guids
        self._provided_keys = None  # Conversion of guids to keys, cached for faster lookup
        self._key = None
        self._provided_key = False  # Keep track whether a key was explicitly set
        self.from_cache = None
        self.from_index = 'none'

        # Derives self._key from the given key (or from the query contents)
        self.set_key(key)
Ejemplo n.º 53
0
    def __init__(self, object_type=None, guid=None, cached=True):
        """
        Initializes a descriptor for a given type. Optionally already providing a guid for the
        instanciator

        :param object_type: Hybrid class to describe; None yields an uninitialized descriptor
        :param guid: Optional guid stored alongside the descriptor
        :param cached: When False, a cached descriptor is rebuilt and re-stored
        """

        # Initialize super class
        super(Descriptor, self).__init__()

        if object_type is None:
            self.initialized = False
        else:
            self.initialized = True
            self._volatile = VolatileFactory.get_client()

            type_name = object_type.__name__
            module_name = object_type.__module__.split('.')[-1]
            fqm_name = 'ovs.dal.hybrids.{0}'.format(module_name)
            try:
                # Validate that the type is importable as a hybrid before describing it
                module = __import__(fqm_name, level=0, fromlist=[type_name])
                _ = getattr(module, type_name)
            except (ImportError, AttributeError):
                logger.info('Received object type {0} is not a hybrid'.format(object_type))
                raise TypeError('Invalid type for Descriptor: {0}'.format(object_type))
            # Identifier is stable per (type, module) pair
            identifier = '{0}_{1}'.format(type_name, hashlib.sha1(fqm_name).hexdigest())
            key = 'ovs_descriptor_{0}'.format(identifier)

            self._descriptor = self._volatile.get(key)
            if self._descriptor is None or cached is False:
                if self._descriptor is None:
                    logger.debug('Object type {0} was translated to {1}.{2}'.format(
                        object_type, fqm_name, type_name
                    ))
                Toolbox.log_cache_hit('descriptor', False)
                self._descriptor = {'fqmn': fqm_name,
                                    'type': type_name,
                                    'identifier': identifier,
                                    'version': 3}
                self._volatile.set(key, self._descriptor)
            else:
                Toolbox.log_cache_hit('descriptor', True)
            # The guid is per-instance and never part of the cached descriptor
            self._descriptor['guid'] = guid
Ejemplo n.º 54
0
 def _get_pks(namespace, name):
     """
     Loads the primary key set information and pages, merges them to a single set
     and returns it. The key set lives in the volatile cache as a linked list of
     pages; if any page is missing, the full set is rebuilt from the persistent
     store and re-saved.
     """
     page_key_template = 'ovs_primarykeys_{0}_{{0}}'.format(name)
     volatile = VolatileFactory.get_client()
     persistent = PersistentFactory.get_client()
     merged = set()
     page_pointer = page_key_template.format(0)
     while page_pointer is not None:
         page = volatile.get(page_pointer)
         if page is None:
             # Cache miss on a page: rebuild the whole set from the persistent store
             prefix = '{0}_{1}_'.format(namespace, name)
             rebuilt = set([key.replace(prefix, '') for key in persistent.prefix(prefix, max_elements=-1)])
             DataList._save_pks(name, rebuilt)
             return rebuilt
         # page is a (keys, next_pointer) pair
         merged.update(page[0])
         page_pointer = page[1]
     return merged
Ejemplo n.º 55
0
    def __init__(self, object_type=None, guid=None):
        """
        Initializes a descriptor for a given type. Optionally already providing a guid for the
        instanciator
        """

        # Initialize super class
        super(Descriptor, self).__init__()

        if object_type is None:
            self.initialized = False
        else:
            self.initialized = True

            key = 'ovs_descriptor_{0}'.format(
                re.sub('[\W_]+', '', str(object_type)))
            self._volatile = VolatileFactory.get_client()
            self._descriptor = self._volatile.get(key)
            if self._descriptor is None:
                Toolbox.log_cache_hit('descriptor', False)
                filename = inspect.getfile(object_type).replace('.pyc', '.py')
                name = filename.replace(
                    os.path.dirname(filename) + os.path.sep,
                    '').replace('.py', '')
                source = os.path.relpath(filename, os.path.dirname(__file__))
                self._descriptor = {
                    'name':
                    name,
                    'source':
                    source,
                    'type':
                    object_type.__name__,
                    'identifier':
                    name + '_' +
                    hashlib.sha256(name + source +
                                   object_type.__name__).hexdigest()
                }
                self._volatile.set(key, self._descriptor)
            else:
                Toolbox.log_cache_hit('descriptor', True)
            self._descriptor['guid'] = guid
Ejemplo n.º 56
0
    def partial_update(self, storagerouter, request, contents=None):
        """
        Update a StorageRouter

        Validates the (possibly partial) failure domain configuration before
        saving. When the failure domains changed, (re)schedules the MDS and DTL
        checkup tasks, revoking any previously scheduled ones first.
        """
        contents = None if contents is None else contents.split(',')
        previous_primary = storagerouter.primary_failure_domain
        previous_secondary = storagerouter.secondary_failure_domain
        serializer = FullSerializer(StorageRouter, contents=contents, instance=storagerouter, data=request.DATA)
        if serializer.is_valid():
            primary = storagerouter.primary_failure_domain
            secondary = storagerouter.secondary_failure_domain
            if primary is None:
                raise NotAcceptable('A StorageRouter must have a primary FD configured')
            if secondary is not None:
                if primary.guid == secondary.guid:
                    raise NotAcceptable('A StorageRouter cannot have the same FD for both primary and secondary')
                if len(secondary.primary_storagerouters) == 0:
                    raise NotAcceptable('The secondary FD should be set as primary FD by at least one StorageRouter')
            # Refuse to orphan an FD that other StorageRouters still rely on as secondary
            if len(previous_primary.secondary_storagerouters) > 0 and len(previous_primary.primary_storagerouters) == 1 and \
                    previous_primary.primary_storagerouters[0].guid == storagerouter.guid and previous_primary.guid != primary.guid:
                raise NotAcceptable('Cannot change the primary FD as this StorageRouter is the only one serving it while it is used as secondary FD')
            serializer.save()
            if previous_primary != primary or previous_secondary != secondary:
                cache = VolatileFactory.get_client()
                key_mds = 'ovs_dedupe_fdchange_mds_{0}'.format(storagerouter.guid)
                key_dtl = 'ovs_dedupe_fdchange_dtl_{0}'.format(storagerouter.guid)
                task_mds_id = cache.get(key_mds)
                task_dtl_id = cache.get(key_dtl)
                if task_mds_id:
                    # Key exists, task was already scheduled. If task is already running, the revoke message will be ignored
                    revoke(task_mds_id)
                if task_dtl_id:
                    revoke(task_dtl_id)
                async_mds_result = MDSServiceController.mds_checkup.s().apply_async(countdown=60)
                async_dtl_result = VDiskController.dtl_checkup.s().apply_async(countdown=60)
                cache.set(key_mds, async_mds_result.id, 600)  # Store the MDS task id
                # Bug fix: the DTL task id was previously stored under key_mds,
                # so key_dtl was never written and the DTL task could never be revoked
                cache.set(key_dtl, async_dtl_result.id, 600)  # Store the DTL task id

            return Response(serializer.data, status=status.HTTP_202_ACCEPTED)
        else:
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Ejemplo n.º 57
0
 def _statistics(self, dynamic):
     """
     Fetches the Statistics for the vDisk.

     Raw counters are read from the volumedriver, summed counters are added,
     and per-second rates (*_ps) are derived by comparing against the snapshot
     stored in the volatile cache by the previous call.
     """
     volatile = VolatileFactory.get_client()
     prev_key = '{0}_{1}'.format(self._key, 'statistics_previous')
     # Load data from volumedriver; fall back to empty statistics when the
     # volume is unknown or the driver call fails (best-effort)
     if self.volume_id and self.vpool:
         try:
             vdiskstats = self.storagedriver_client.statistics_volume(str(self.volume_id))
         except Exception:  # Narrowed from bare except: don't swallow SystemExit/KeyboardInterrupt
             vdiskstats = StorageDriverClient.empty_statistics()
     else:
         vdiskstats = StorageDriverClient.empty_statistics()
     # Load volumedriver data in dictionary, only keeping the known counters
     vdiskstatsdict = {}
     for key, value in vdiskstats.__class__.__dict__.items():
         if type(value) is property and key in StorageDriverClient.stat_counters:
             vdiskstatsdict[key] = getattr(vdiskstats, key)
     # Precalculate sums
     for key, items in StorageDriverClient.stat_sums.iteritems():
         vdiskstatsdict[key] = 0
         for item in items:
             vdiskstatsdict[key] += vdiskstatsdict[item]
     vdiskstatsdict['timestamp'] = time.time()
     # Calculate delta's based on previously loaded dictionary.
     # Snapshot the keys explicitly since '_ps' entries are added while looping.
     previousdict = volatile.get(prev_key, default={})
     for key in list(vdiskstatsdict.keys()):
         if key in StorageDriverClient.stat_keys:
             delta = vdiskstatsdict['timestamp'] - previousdict.get('timestamp',
                                                                    vdiskstatsdict['timestamp'])
             if delta < 0:
                 # Clock moved backwards; rates cannot be trusted
                 vdiskstatsdict['{0}_ps'.format(key)] = 0
             elif delta == 0:
                 # Same timestamp as previous sample: reuse the previous rate
                 vdiskstatsdict['{0}_ps'.format(key)] = previousdict.get('{0}_ps'.format(key), 0)
             else:
                 vdiskstatsdict['{0}_ps'.format(key)] = max(0, (vdiskstatsdict[key] - previousdict[key]) / delta)
     volatile.set(prev_key, vdiskstatsdict, dynamic.timeout * 10)
     # Returning the dictionary
     return vdiskstatsdict
Ejemplo n.º 58
0
    def __init__(self, query, key=None, load=True):
        """
        Initializes a DataList class with a given key (used for optional caching) and a given query
        """
        super(DataList, self).__init__()

        if key is None:
            # No explicit key: derive a deterministic one from the query itself
            identifier = copy.deepcopy(query)
            identifier['object'] = identifier['object'].__name__
            key = hashlib.sha256(json.dumps(identifier)).hexdigest()
        self._key = '{0}_{1}'.format(DataList.namespace, key)
        self._volatile = VolatileFactory.get_client()
        self._persistent = PersistentFactory.get_client()
        self._query = query
        self.data = None
        self.from_cache = False
        self._can_cache = True
        if load is True:
            self._load()