def load_foreign_relations(object_type):
     """
     Returns a mapping of all relations that point towards a certain hybrid object type.
     The resulting mapping is cached in volatile storage so it can be fetched faster
     """
     relation_key = 'ovs_relations_{0}'.format(object_type.__name__.lower())
     volatile = VolatileFactory.get_client()
     relation_info = volatile.get(relation_key)
     if relation_info is None:
         Toolbox.log_cache_hit('relations', False)
         relation_info = {}
         hybrid_structure = HybridRunner.get_hybrids()
         for class_descriptor in hybrid_structure.values():  # Extended objects
             cls = Descriptor().load(class_descriptor).get_object()
             for relation in cls._relations:
                 if relation.foreign_type is None:
                     remote_class = cls
                 else:
                     identifier = Descriptor(relation.foreign_type).descriptor['identifier']
                     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
                         remote_class = Descriptor().load(hybrid_structure[identifier]).get_object()
                     else:
                         remote_class = relation.foreign_type
                 itemname = remote_class.__name__
                 if itemname == object_type.__name__:
                     relation_info[relation.foreign_key] = {'class': Descriptor(cls).descriptor,
                                                            'key': relation.name,
                                                            'list': not relation.onetoone}
         volatile.set(relation_key, relation_info)
     else:
         Toolbox.log_cache_hit('relations', True)
     return relation_info
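A minimal usage sketch of the mapping above. The VDisk hybrid and the RelationMapper import path are assumptions; the actual relation names depend on the model:

    # Sketch only: list every relation pointing at an assumed hybrid type.
    from ovs.dal.relations import RelationMapper  # import path assumed
    from ovs.dal.hybrids.vdisk import VDisk       # assumed hybrid
    relations = RelationMapper.load_foreign_relations(VDisk)
    for foreign_key, info in (relations or {}).iteritems():
        # info['key'] is the relation name on the remote class,
        # info['list'] is True for one-to-many relations
        print foreign_key, info['key'], info['list']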
 def _get_relation_property(self, relation):
     """
     Getter for a complex property (hybrid)
     It loads the object only once and caches it for the lifetime of this object
     """
     attribute = relation.name
     if attribute not in self._objects:
         descriptor = Descriptor().load(self._data[attribute])
         self._objects[attribute] = descriptor.get_object(instantiate=True)
     return self._objects[attribute]
 def __new__(cls, *args, **kwargs):
     """
     Creates a new instance, dispatching to the extended hybrid class when one is registered
     """
     hybrid_structure = HybridRunner.get_hybrids()
     identifier = Descriptor(cls).descriptor['identifier']
     if identifier in hybrid_structure and identifier != hybrid_structure[identifier]['identifier']:
         new_class = Descriptor().load(hybrid_structure[identifier]).get_object()
         return super(cls, new_class).__new__(new_class, *args, **kwargs)
     return super(DataObject, cls).__new__(cls)
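The effect of this __new__ is that constructing a hybrid transparently dispatches to its extended variant when one is registered. A sketch, assuming a plugin registered a hypothetical ExtendedVMachine for VMachine:

    vm = VMachine()               # VMachine import assumed
    # If the hybrid structure maps VMachine's identifier to ExtendedVMachine,
    # __new__ returned an instance of the extended class instead:
    print type(vm).__name__       # 'ExtendedVMachine' when the extension is registered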
Example #5
 def __add__(self, other):
     if not isinstance(other, DataObjectList):
         raise TypeError('Both operands should be of type DataObjectList')
     if Descriptor(self.type) != Descriptor(other.type):
         raise TypeError('Both operands should contain the same data')
     new_dol = DataObjectList(self._query_result, self.type)
     guids = self._guids[:]
     for guid in other._guids:
         if guid not in guids:
             guids.append(guid)
     new_dol._guids = guids
     return new_dol
 def _get_list_property(self, attribute):
     """
     Getter for the list property
     It will execute the related query every time to return a list of hybrid objects that
     refer to this object. The resulting data will be stored or merged into the cached list
     preserving as many already-loaded objects as possible
     """
     info = self._objects[attribute]['info']
     remote_class = Descriptor().load(info['class']).get_object()
     remote_key = info['key']
     datalist = DataList.get_relation_set(remote_class, remote_key,
                                          self.__class__, attribute,
                                          self.guid)
     if self._objects[attribute]['data'] is None:
         self._objects[attribute]['data'] = DataObjectList(
             datalist.data, remote_class)
     else:
         self._objects[attribute]['data'].update(datalist.data)
     if info['list'] is True:
         return self._objects[attribute]['data']
     else:
         data = self._objects[attribute]['data']
         if len(data) > 1:
             raise InvalidRelationException(
                 'More than one element found in {0}'.format(attribute))
         return data[0] if len(data) == 1 else None
 def __eq__(self, other):
     """
     Checks whether two objects are the same.
     """
     if not Descriptor.isinstance(other, self.__class__):
         return False
     return self.__hash__() == other.__hash__()
 def __ne__(self, other):
     """
     Checks whether two objects are not the same.
     """
     if not Descriptor.isinstance(other, self.__class__):
         return True
     return not self.__eq__(other)
Example #9
 def __radd__(self, other):
     # This will typically be called when "other" is not a DataObjectList.
     if other is None:
         return self
     if isinstance(other, list) and other == []:
         return self
     if not isinstance(other, DataObjectList):
         raise TypeError('Both operands should be of type DataObjectList')
     if Descriptor(self.type) != Descriptor(other.type):
         raise TypeError('Both operands should contain the same data')
     new_dol = DataObjectList(self._query_result, self.type)
     guids = self._guids[:]
     for guid in other._guids:
         if guid not in guids:
             guids.append(guid)
     new_dol._guids = guids
     return new_dol
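__radd__ only fires when the left operand cannot handle the addition itself; accepting None and [] lets an accumulator start out empty. A sketch with two hypothetical DataObjectList instances dol_a and dol_b:

    combined = None
    for dol in [dol_a, dol_b]:
        # The first iteration evaluates None + dol_a, which falls back to
        # DataObjectList.__radd__(None) and simply returns dol_a.
        combined = combined + dol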
Example #10
    def __init__(self, endpoint, username='******', password=None):
        """
        Initializes an SSHClient
        """
        from subprocess import check_output
        from ovs.dal.hybrids.storagerouter import StorageRouter
        storagerouter = None
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError('Incorrect IP {0} specified'.format(ip))
        elif Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            storagerouter = StorageRouter(endpoint.guid)
            ip = storagerouter.ip
        else:
            raise ValueError('The endpoint parameter should be either an ip address or a StorageRouter')

        self.ip = ip
        self._client = None
        self.local_ips = [lip.strip() for lip in check_output("ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1", shell=True).strip().splitlines()]
        self.is_local = self.ip in self.local_ips
        self.password = password
        self._unittest_mode = os.environ.get('RUNNING_UNITTESTS') == 'True'

        if self.is_local is False and storagerouter is not None and self._unittest_mode is False:
            process_heartbeat = storagerouter.heartbeats.get('process')
            if process_heartbeat is not None:
                if time.time() - process_heartbeat > 300:
                    message = 'StorageRouter {0} process heartbeat > 300s'.format(ip)
                    SSHClient._logger.error(message)
                    raise UnableToConnectException(message)

        current_user = check_output('whoami', shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = False  # If specified user differs from current executing user, we always use the paramiko SSHClient

        if self._unittest_mode is True:
            self.is_local = True

        if not self.is_local:
            logging.getLogger('paramiko').setLevel(logging.WARNING)
            key = '{0}@{1}'.format(self.ip, self.username)
            if key not in SSHClient.client_cache:
                import paramiko
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.is_connected = types.MethodType(is_connected, client)
                SSHClient.client_cache[key] = client
            self._client = SSHClient.client_cache[key]
        self._connect()
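A construction sketch for this variant (the IP is illustrative; construction attempts the connection straight away). The endpoint may be an IP string or a StorageRouter hybrid, and remote paramiko clients are reused per ip/user pair through SSHClient.client_cache:

    client_a = SSHClient('10.100.1.10', username='root')
    # A second client towards the same ip/user combination reuses the cached
    # paramiko client from SSHClient.client_cache instead of creating a new one:
    client_b = SSHClient('10.100.1.10', username='root')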
Example #11
 def update(self, other):
     """
     This method merges in a datalist, preserving objects that might already
     be cached. It also maintains previous sorting, appending new items to the end of the list.
     The result is:
     * Only entries (guids) from the given list
     * The sorting (guids) of this list
     * Cached objects from both lists
     Note that both lists must have their guids available.
     :param other: The list that must be used to update this list's query results
     :type other: ovs.dal.datalist.DataList
     """
     # Validate the operands and ensure that the guids are available
     if not isinstance(other, DataList):
         raise TypeError('Both operands should be of type DataList')
     if Descriptor(self._object_type) != Descriptor(other._object_type):
         raise TypeError('Both operands should contain the same data')
     if self._executed is False and self._guids is None:
         self._guids = []
         self._data = {}
         self._objects = {}
         self._executed = True
     if other._executed is False and other._guids is None:
         other._execute_query()
     # Maintaining order is very important here
     old_guids = self._guids[:]
     new_guids = other._guids
     self._guids = []
     for guid in old_guids:
         if guid in new_guids:
             self._guids.append(guid)
     # noinspection PyTypeChecker
     for guid in new_guids:
         if guid not in self._guids:
             self._guids.append(guid)
     # Cleaning out old cached objects
     for guid in self._data.keys():
         if guid not in self._guids:
             del self._data[guid]
     for guid in self._objects.keys():
         if guid not in self._guids:
             del self._objects[guid]
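A worked ordering example for update(), using shortened guids:

    # If self._guids == ['a', 'b', 'c'] and other._guids == ['c', 'a', 'd'],
    # then after self.update(other):
    #   self._guids == ['a', 'c', 'd']
    # 'a' and 'c' keep this list's ordering, 'b' (absent in other) is dropped,
    # and 'd' is appended at the end. Cached objects for dropped guids are removed.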
Example #12
    def _build_invalidations(invalidations, object_type, items):
        """
        Builds an invalidation set out of a given object type and query items. It will use type information
        to build the invalidations, and not the actual data.
        """
        def add(class_name, field):
            if class_name not in invalidations:
                invalidations[class_name] = []
            if field not in invalidations[class_name]:
                invalidations[class_name].append(field)

        for item in items:
            if isinstance(item, dict):
                # Recursive
                DataList._build_invalidations(invalidations, object_type, item['items'])
            else:
                path = item[0].split('.')
                value = object_type
                itemcounter = 0
                for pitem in path:
                    itemcounter += 1
                    class_name = value.__name__.lower()
                    if pitem == 'guid':
                        # The guid is a final value which can't be changed so it shouldn't be taken into account
                        break
                    elif pitem in (prop.name for prop in value._properties):
                        # The pitem is in the blueprint, so it's a simple property (e.g. vmachine.name)
                        add(class_name, pitem)
                        break
                    elif pitem in (relation.name for relation in value._relations):
                        # The pitem is in the relations, so it's a relation property (e.g. vdisk.vmachine)
                        add(class_name, pitem)
                        relation = [relation for relation in value._relations if relation.name == pitem][0]
                        if relation.foreign_type is not None:
                            value = relation.foreign_type
                        continue
                    elif pitem.endswith('_guid') and pitem.replace('_guid', '') in (relation.name for relation in value._relations):
                        # The pitem is the guid pointing to a relation, so it can be handled like a simple property (e.g. vdisk.vmachine_guid)
                        add(class_name, pitem.replace('_guid', ''))
                        break
                    elif pitem in (dynamic.name for dynamic in value._dynamics):
                        # The pitem is a dynamic property, which will be ignored anyway
                        break
                    else:
                        # No blueprint and no relation, it might be a foreign relation (e.g. vmachine.vdisks)
                        # this means the pitem most likely contains an index
                        cleaned_pitem = pitem.split('[')[0]
                        relations = RelationMapper.load_foreign_relations(value)
                        if relations is not None:
                            if cleaned_pitem in relations:
                                value = Descriptor().load(relations[cleaned_pitem]['class']).get_object()
                                add(value.__name__.lower(), relations[cleaned_pitem]['key'])
                                continue
                    raise RuntimeError('Invalid path given: {0}, currently pointing to {1}'.format(path, pitem))
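A worked example using the field names from the comments above (it assumes, as those comments do, that VDisk has a 'vmachine' relation and VMachine a 'name' property; the VDisk import is assumed):

    invalidations = {'vdisk': ['__all']}
    items = [('vmachine.name', DataList.operator.EQUALS, 'vm1')]
    DataList._build_invalidations(invalidations, VDisk, items)
    # 'vmachine' is a relation on VDisk, so it is recorded under 'vdisk';
    # 'name' is a simple property on VMachine, so it is recorded under 'vmachine':
    # invalidations == {'vdisk': ['__all', 'vmachine'], 'vmachine': ['name']}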
Example #13
    def remove(self, item):
        """
        Remove an item from the DataList
        :param item: Guid or hybrid object (of the correct type)
        """
        if self._executed is False and self._guids is None:
            self._execute_query()

        guid = None
        if isinstance(item, basestring):
            if item in self._guids:
                guid = item
        else:
            if Descriptor(self._object_type) != Descriptor(item.__class__):
                raise TypeError('Item should be of type {0}'.format(self._object_type))
            guid = item.guid
        if guid is None:
            raise ValueError('Item not in list')
        self._guids.remove(guid)
        self._objects = dict(item for item in self._objects.iteritems() if item[0] in self._guids)
Example #14
 def remove(self, item):
     """
     Remove an item from the data-object list
     Item can be a guid of an object or the object itself
     :param item: Guid or object
     :return: Updated list
     """
     guid = None
     if isinstance(item, basestring):
         if item in self._guids:
             guid = item
     else:
         if Descriptor(self.type) != Descriptor(item.__class__):
             raise TypeError('Item should be of type {0}'.format(self.type))
         guid = item.guid
     if guid is None:
         raise ValueError('Item not in list')
     self._guids.remove(guid)
     self._objects = dict(item for item in self._objects.iteritems()
                          if item[0] in self._guids)
Example #15
 def get_backend_type_by_code(code):
     """
     Returns a single BackendType for the given code. Returns None if no BackendType was found
     """
     backendtypes = DataList({'object': BackendType,
                              'data': DataList.select.GUIDS,
                              'query': {'type': DataList.where_operator.AND,
                                        'items': [('code', DataList.operator.EQUALS, code)]}}).data  # noqa
     if len(backendtypes) == 1:
         return Descriptor(BackendType, backendtypes[0]).get_object(True)
     return None
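A usage sketch; the 'alba' code matches the BackendType modeled in the migration example further down:

    from ovs.dal.lists.backendtypelist import BackendTypeList
    backend_type = BackendTypeList.get_backend_type_by_code('alba')
    if backend_type is not None:
        print backend_type.name   # 'ALBA' in the migration example below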
Example #16
 def __add__(self, other):
     """
     __add__ operator for DataList
     :param other: A DataList instance that must be added to this instance
     """
     if not isinstance(other, DataList):
         raise TypeError('Both operands should be of type DataList')
     if Descriptor(self._object_type) != Descriptor(other._object_type):
         raise TypeError('Both operands should contain the same data')
     if self._executed is False and self._guids is None:
         self._execute_query()
     if other._executed is False and other._guids is None:
         other._execute_query()
     new_datalist = DataList(self._object_type, {})
     guids = self._guids[:]
     for guid in other._guids:
         if guid not in guids:
             guids.append(guid)
     new_datalist._guids = guids
     return new_datalist
Example #17
    def __init__(self, endpoint, username='******', password=None):
        """
        Initializes an SSHClient
        """
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError('Incorrect IP {0} specified'.format(ip))
        elif Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            endpoint = StorageRouter(endpoint.guid)
            process_heartbeat = endpoint.heartbeats.get('process')
            ip = endpoint.ip
            if process_heartbeat is not None:
                if time.time() - process_heartbeat > 300:
                    message = 'StorageRouter {0} process heartbeat > 300s'.format(
                        ip)
                    logger.error(message)
                    raise UnableToConnectException(message)
        else:
            raise ValueError(
                'The endpoint parameter should be either an ip address or a StorageRouter'
            )

        logging.getLogger('paramiko').setLevel(logging.WARNING)
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        self.ip = ip
        local_ips = check_output(
            "ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1",
            shell=True).strip().splitlines()
        self.local_ips = [ip.strip() for ip in local_ips]
        self.is_local = self.ip in self.local_ips

        current_user = check_output('whoami', shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = False  # If specified user differs from current executing user, we always use the paramiko SSHClient
        self.password = password

        if not self.is_local:
            try:
                self._connect()
            except socket.error, ex:
                if 'No route to host' in str(ex):
                    message = 'SocketException: No route to host {0}'.format(
                        ip)
                    logger.error(message)
                    raise UnableToConnectException(message)
                raise
    def __init__(self, endpoint, username="******", password=None):
        """
        Initializes an SSHClient
        """
        storagerouter = None
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError("Incorrect IP {0} specified".format(ip))
        elif Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            storagerouter = StorageRouter(endpoint.guid)
            ip = storagerouter.ip
        else:
            raise ValueError("The endpoint parameter should be either an ip address or a StorageRouter")

        self.ip = ip
        local_ips = (
            check_output("ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1", shell=True)
            .strip()
            .splitlines()
        )
        self.local_ips = [ip.strip() for ip in local_ips]
        self.is_local = self.ip in self.local_ips

        if self.is_local is False and storagerouter is not None:
            process_heartbeat = storagerouter.heartbeats.get("process")
            if process_heartbeat is not None:
                if time.time() - process_heartbeat > 300:
                    message = "StorageRouter {0} process heartbeat > 300s".format(ip)
                    logger.error(message)
                    raise UnableToConnectException(message)

        current_user = check_output("whoami", shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = (
                    False
                )  # If specified user differs from current executing user, we always use the paramiko SSHClient
        self.password = password

        self.client = None
        if not self.is_local:
            logging.getLogger("paramiko").setLevel(logging.WARNING)
            key = "{0}@{1}".format(self.ip, self.username)
            if key not in SSHClient.client_cache:
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.is_connected = types.MethodType(is_connected, client)
                SSHClient.client_cache[key] = client
            self.client = SSHClient.client_cache[key]
Example #19
 def get_by_ip_ports(ip, ports):
     """
     Returns a single Service for the ip/ports. Returns None if no Service was found
     """
     services = DataList({'object': Service,
                          'data': DataList.select.GUIDS,
                          'query': {'type': DataList.where_operator.AND,
                                    'items': [('storagerouter.ip', DataList.operator.EQUALS, ip),
                                              ('ports', DataList.operator.EQUALS, ports)]}}).data
     if len(services) == 1:
         return Descriptor(Service, services[0]).get_object(True)
     return None
 def get_by_component(component, return_as_list=False):
     """
     Returns a single License for the given component. Returns None if no license was found
     """
     # pylint: disable=line-too-long
     licenses = DataList({'object': License,
                          'data': DataList.select.GUIDS,
                          'query': {'type': DataList.where_operator.AND,
                                    'items': [('component', DataList.operator.EQUALS, component)]}}).data  # noqa
     # pylint: enable=line-too-long
     if return_as_list is True:
         return DataObjectList(licenses, License)
     if len(licenses) == 1:
         return Descriptor(License, licenses[0]).get_object(True)
     return None
Example #21
 def get_role_by_code(code):
     """
     Returns a single Role for the given code. Returns None if no Role was found
     """
     roles = DataList({
         'object': Role,
         'data': DataList.select.GUIDS,
         'query': {
             'type': DataList.where_operator.AND,
             'items': [('code', DataList.operator.EQUALS, code)]
         }
     }).data  # noqa
     if len(roles) == 1:
         return Descriptor(Role, roles[0]).get_object(True)
     return None
Example #22
 def get_by_name(name):
     """
     Returns a single ServiceType for the given name. Returns None if no ServiceType was found
     """
     servicetypes = DataList({
         'object': ServiceType,
         'data': DataList.select.GUIDS,
         'query': {
             'type': DataList.where_operator.AND,
             'items': [('name', DataList.operator.EQUALS, name)]
         }
     }).data
     if len(servicetypes) == 1:
         return Descriptor(ServiceType, servicetypes[0]).get_object(True)
     return None
Example #23
 def build_remote_relation(relation):
     key, relation_info = relation
     remote_cls = Descriptor().load(relation_info['class']).get_object()
     _docstring = '{1} instance identifier{3}. One-to-{0} relation with {1}.{2}.'.format(
         'one' if relation_info['list'] is False else 'many',
         remote_cls.__name__, '{0}_guid'.format(relation_info['key']),
         '' if relation_info['list'] is False else 's')
     info = {'description': _docstring, 'readOnly': True}
     if relation_info['list'] is True:
         info['type'] = 'array'
         info['items'] = {'type': 'string'}
         _name = '{0}_guids'.format(key)
     else:
         info['type'] = 'string'
         _name = '{0}_guid'.format(key)
     return _name, info
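Sketched input and output, derived from the format strings above; the descriptor value and relation names are hypothetical:

    name, info = build_remote_relation(('vdisks', {'class': vdisk_descriptor,  # hypothetical descriptor
                                                   'key': 'vmachine',
                                                   'list': True}))
    # name == 'vdisks_guids'
    # info == {'description': 'VDisk instance identifiers. '
    #                         'One-to-many relation with VDisk.vmachine_guid.',
    #          'readOnly': True, 'type': 'array', 'items': {'type': 'string'}}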
 def _set_relation_property(self, relation, value):
     """
     Setter for a complex property (hybrid) that will validate the type
     """
     self.dirty = True
     attribute = relation.name
     if value is None:
         self._objects[attribute] = None
         self._data[attribute]['guid'] = None
     else:
         descriptor = Descriptor(value.__class__).descriptor
         if descriptor['identifier'] != self._data[attribute]['identifier']:
             raise TypeError(
                 'An invalid type was given: {0} instead of {1}'.format(
                     descriptor['type'], self._data[attribute]['type']))
         self._objects[attribute] = value
         self._data[attribute]['guid'] = value.guid
Example #25
 def get_user_by_username(username):
     """
     Returns a single User for the given username. Returns None if no user was found
     """
     # pylint: disable=line-too-long
     users = DataList({
         'object': User,
         'data': DataList.select.GUIDS,
         'query': {
             'type': DataList.where_operator.AND,
             'items': [('username', DataList.operator.EQUALS, username)]
         }
     }).data  # noqa
     # pylint: enable=line-too-long
     if len(users) == 1:
         return Descriptor(User, users[0]).get_object(True)
     return None
Example #26
    def __init__(self,
                 endpoint,
                 username='******',
                 password=None,
                 cached=True,
                 timeout=None):
        """
        Initializes an SSHClient
        :param endpoint: Ip address to connect to / storagerouter
        :type endpoint: basestring | ovs.dal.hybrids.storagerouter.StorageRouter
        :param username: Name of the user to connect as
        :type username: str
        :param password: Password to authenticate the user as. Can be None when ssh keys are in place.
        :type password: str
        :param cached: Cache this SSHClient instance
        :type cached: bool
        :param timeout: An optional timeout (in seconds) for the TCP connect
        :type timeout: float
        """
        from ovs.dal.hybrids.storagerouter import StorageRouter
        storagerouter = None
        if Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            storagerouter = StorageRouter(endpoint.guid)
            endpoint = storagerouter.ip

        unittest_mode = os.environ.get('RUNNING_UNITTESTS') == 'True'

        if storagerouter is not None and unittest_mode is False:
            process_heartbeat = storagerouter.heartbeats.get('process')
            if process_heartbeat is not None:
                if time.time() - process_heartbeat > 300:
                    message = 'StorageRouter {0} process heartbeat > 300s'.format(
                        storagerouter.ip)
                    raise UnableToConnectException(message)

        super(SSHClient, self).__init__(endpoint=endpoint,
                                        username=username,
                                        password=password,
                                        cached=cached,
                                        timeout=timeout)
Example #27
    def _execute_query(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached

        Definitions:
        * <query>: Should be a dictionary:
                   {'type' : DataList.where_operator.XYZ,
                    'items': <items>}
        * <filter>: A tuple defining a single expression:
                    (<field>, DataList.operator.XYZ, <value> [, <ignore_case>])
                    The field is any property you would also find on the given object. In case of
                    properties, you can dot as far as you like.
        * <items>: A list of one or more <query> or <filter> items. This means the query structure is recursive and
                    complex queries are possible (a worked example follows this method)
        """
        from ovs.dal.dataobject import DataObject
        hybrid_structure = HybridRunner.get_hybrids()
        query_object_id = Descriptor(
            self._object_type).descriptor['identifier']
        if query_object_id in hybrid_structure and query_object_id != hybrid_structure[
                query_object_id]['identifier']:
            self._object_type = Descriptor().load(
                hybrid_structure[query_object_id]).get_object()
        object_type_name = self._object_type.__name__.lower()
        prefix = '{0}_{1}_'.format(DataObject.NAMESPACE, object_type_name)

        if self._guids is not None:
            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index, guid in enumerate(self._guids):
                self._data[guid] = {'data': entries[index], 'guid': guid}
            self._executed = True
            return

        cached_data = self._volatile.get(self._key)
        if cached_data is None:
            self.from_cache = False

            query_type = self._query['type']
            query_items = self._query['items']

            invalidations = {object_type_name: ['__all']}
            DataList._build_invalidations(invalidations, self._object_type,
                                          query_items)
            transaction = self._persistent.begin_transaction()
            for class_name, fields in invalidations.iteritems():
                key = '{0}_{1}|{{0}}|{{1}}'.format(DataList.CACHELINK,
                                                   class_name)
                for field in fields:
                    self._persistent.set(key.format(self._key, field),
                                         0,
                                         transaction=transaction)
            self._persistent.apply_transaction(transaction)

            self._guids = []
            self._data = {}
            self._objects = {}
            elements = 0
            for key, data in self._persistent.prefix_entries(prefix):
                elements += 1
                try:
                    guid = key.replace(prefix, '')
                    result, instance = self._filter(
                        {
                            'data': data,
                            'guid': guid
                        }, query_items, query_type)
                    if result is True:
                        self._guids.append(guid)
                        self._data[guid] = {'data': data, 'guid': guid}
                        if not isinstance(instance, dict):
                            self._objects[guid] = instance
                except ObjectNotFoundException:
                    pass

            if 'post_query' in DataList.test_hooks:
                DataList.test_hooks['post_query'](self)

            if self._key is not None and elements > 0 and self._can_cache:
                self._volatile.set(
                    self._key, self._guids,
                    300 + randint(0, 300))  # Cache between 5 and 10 minutes
                # Check whether the cache was invalidated and should be removed again
                for class_name in invalidations:
                    key = '{0}_{1}|{{0}}|'.format(DataList.CACHELINK,
                                                  class_name)
                    if len(list(self._persistent.prefix(key.format(
                            self._key)))) == 0:
                        self._volatile.delete(self._key)
                        break
            self._executed = True
        else:
            self.from_cache = True
            self._guids = cached_data

            keys = ['{0}{1}'.format(prefix, guid) for guid in self._guids]
            successful = False
            tries = 0
            entries = []
            while successful is False:
                tries += 1
                if tries > 5:
                    raise RaceConditionException()
                try:
                    entries = list(self._persistent.get_multi(keys))
                    successful = True
                except KeyNotFoundException as knfe:
                    keys.remove(knfe.message)
                    self._guids.remove(knfe.message.replace(prefix, ''))

            self._data = {}
            self._objects = {}
            for index in xrange(len(self._guids)):
                guid = self._guids[index]
                self._data[guid] = {'data': entries[index], 'guid': guid}
            self._executed = True
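A worked example of the <query> grammar described in the docstring above, written against the dict-based DataList constructor used by the list helpers earlier on this page. The VDisk hybrid, the field names and the OR operator are assumptions:

    query = {'object': VDisk,  # assumed hybrid
             'data': DataList.select.GUIDS,
             'query': {'type': DataList.where_operator.AND,
                       'items': [('name', DataList.operator.EQUALS, 'disk1'),
                                 # <items> may nest another <query>, making
                                 # arbitrarily complex expressions possible:
                                 {'type': DataList.where_operator.OR,  # OR assumed alongside AND
                                  'items': [('vmachine.name', DataList.operator.EQUALS, 'vm1')]}]}}
    vdisk_guids = DataList(query).data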
 def test_objectproperties(self):
     """
     Validates the correctness of all hybrid objects:
     * They should contain all required properties
     * Properties should have the correct type
     * All dynamic properties should be implemented
     """
     # Dynamically test all hybrid properties
     hybrid_structure = HybridRunner.get_hybrids()
     print ''
     print 'Validating hybrids...'
     for class_descriptor in hybrid_structure.values():
         cls = Descriptor().load(class_descriptor).get_object()
         print '* {0}'.format(cls.__name__)
         relation_info = RelationMapper.load_foreign_relations(cls)
         remote_properties_n = []
         remote_properties_1 = []
         if relation_info is not None:
             for key, info in relation_info.iteritems():
                 if info['list'] is True:
                     remote_properties_n.append(key)
                 else:
                     remote_properties_1.append(key)
         # Make sure certain attributes are correctly set
         self.assertIsInstance(
             cls._properties, list,
             '_properties required: {0}'.format(cls.__name__))
         self.assertIsInstance(
             cls._relations, list,
             '_relations required: {0}'.format(cls.__name__))
         self.assertIsInstance(
             cls._dynamics, list,
             '_dynamics required: {0}'.format(cls.__name__))
         # Check types
         allowed_types = [int, float, str, bool, list, dict]
         for prop in cls._properties:
             is_allowed_type = prop.property_type in allowed_types \
                 or isinstance(prop.property_type, list)
             self.assertTrue(
                 is_allowed_type,
                 '_properties types in {0} should be one of {1}'.format(
                     cls.__name__, str(allowed_types)))
         for dynamic in cls._dynamics:
             is_allowed_type = dynamic.return_type in allowed_types \
                 or isinstance(dynamic.return_type, list)
             self.assertTrue(
                 is_allowed_type,
                 '_dynamics types in {0} should be one of {1}'.format(
                     cls.__name__, str(allowed_types)))
         instance = cls()
         for prop in cls._properties:
             self.assertEqual(getattr(instance, prop.name), prop.default,
                              'Default property set correctly')
         # Make sure the type can be instantiated
         self.assertIsNotNone(instance.guid)
         properties = []
         for item in dir(instance):
             if hasattr(cls, item) and isinstance(getattr(cls, item),
                                                  property):
                 properties.append(item)
         # All dynamic properties (expiries) should be implemented
         missing_props = []
         for dynamic in instance._dynamics:
             if dynamic.name not in properties:
                 missing_props.append(dynamic.name)
             else:  # ... and should work
                 _ = getattr(instance, dynamic.name)
         self.assertEqual(
             len(missing_props), 0,
             'Missing dynamic properties in {0}: {1}'.format(
                 cls.__name__, missing_props))
         # And all properties should be either in the blueprint, relations or expiry
         missing_metadata = []
         for found_prop in properties:
             found = found_prop in [prop.name for prop in cls._properties] \
                 or found_prop in [relation.name for relation in cls._relations] \
                 or found_prop in ['{0}_guid'.format(relation.name) for relation in cls._relations] \
                 or found_prop in [dynamic.name for dynamic in cls._dynamics] \
                 or found_prop in remote_properties_n \
                 or found_prop in remote_properties_1 \
                 or found_prop in ['{0}_guids'.format(key) for key in remote_properties_n] \
                 or found_prop in ['{0}_guid'.format(key) for key in remote_properties_1] \
                 or found_prop == 'guid'
             if not found:
                 missing_metadata.append(found_prop)
         self.assertEqual(
             len(missing_metadata), 0,
             'Missing metadata for properties in {0}: {1}'.format(
                 cls.__name__, missing_metadata))
         instance.delete()
Example #29
    def migrate(previous_version):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        """

        working_version = previous_version

        if working_version == 0:
            from ovs.dal.hybrids.servicetype import ServiceType
            # Initial version:
            # * Add any basic configuration or model entries

            # Add backends
            for backend_type_info in [('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.NS_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_MGR,
                    ServiceType.SERVICE_TYPES.ALBA_S3_TRANSACTION
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        elif working_version < DALMigrator.THIS_VERSION:
            import hashlib
            from ovs.dal.exceptions import ObjectNotFoundException
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.hybrids.albaabmcluster import ABMCluster
            from ovs.dal.hybrids.albaosd import AlbaOSD
            from ovs.dal.hybrids.albansmcluster import NSMCluster
            from ovs.dal.hybrids.j_abmservice import ABMService
            from ovs.dal.hybrids.j_nsmservice import NSMService
            from ovs.dal.hybrids.service import Service
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.albabackendlist import AlbaBackendList
            from ovs.dal.lists.albanodelist import AlbaNodeList
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.db.arakooninstaller import ArakoonClusterConfig, ArakoonInstaller
            from ovs.extensions.generic.configuration import Configuration, NotFoundException
            from ovs_extensions.generic.toolbox import ExtensionsToolbox
            from ovs.extensions.plugins.albacli import AlbaCLI
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            # Migrate unique constraints & indexes
            client = PersistentFactory.get_client()
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                classname = cls.__name__.lower()
                unique_key = 'ovs_unique_{0}_{{0}}_'.format(classname)
                index_prefix = 'ovs_index_{0}|{{0}}|'.format(classname)
                index_key = 'ovs_index_{0}|{{0}}|{{1}}'.format(classname)
                uniques = []
                indexes = []
                # noinspection PyProtectedMember
                for prop in cls._properties:
                    if prop.unique is True and len([
                            k for k in client.prefix(
                                unique_key.format(prop.name))
                    ]) == 0:
                        uniques.append(prop.name)
                    if prop.indexed is True and len([
                            k for k in client.prefix(
                                index_prefix.format(prop.name))
                    ]) == 0:
                        indexes.append(prop.name)
                if len(uniques) > 0 or len(indexes) > 0:
                    prefix = 'ovs_data_{0}_'.format(classname)
                    for key, data in client.prefix_entries(prefix):
                        for property_name in uniques:
                            ukey = '{0}{1}'.format(
                                unique_key.format(property_name),
                                hashlib.sha1(str(
                                    data[property_name])).hexdigest())
                            client.set(ukey, key)
                        for property_name in indexes:
                            if property_name not in data:
                                continue  # This is the case when there's a new indexed property added.
                            ikey = index_key.format(
                                property_name,
                                hashlib.sha1(str(
                                    data[property_name])).hexdigest())
                            index = list(
                                client.get_multi([ikey], must_exist=False))[0]
                            transaction = client.begin_transaction()
                            if index is None:
                                client.assert_value(ikey,
                                                    None,
                                                    transaction=transaction)
                                client.set(ikey, [key],
                                           transaction=transaction)
                            elif key not in index:
                                client.assert_value(ikey,
                                                    index[:],
                                                    transaction=transaction)
                                client.set(ikey,
                                           index + [key],
                                           transaction=transaction)
                            client.apply_transaction(transaction)

            #############################################
            # Introduction of ABMCluster and NSMCluster #
            #############################################
            # Verify presence of unchanged ALBA Backends
            alba_backends = AlbaBackendList.get_albabackends()
            changes_required = False
            for alba_backend in alba_backends:
                if alba_backend.abm_cluster is None or len(
                        alba_backend.nsm_clusters) == 0:
                    changes_required = True
                    break

            if changes_required:
                # Retrieve ABM and NSM clusters
                abm_cluster_info = []
                nsm_cluster_info = []
                for cluster_name in Configuration.list('/ovs/arakoon'):
                    try:
                        metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(
                            cluster_name=cluster_name)
                        if metadata[
                                'cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.ABM:
                            abm_cluster_info.append(metadata)
                        elif metadata[
                                'cluster_type'] == ServiceType.ARAKOON_CLUSTER_TYPES.NSM:
                            nsm_cluster_info.append(metadata)
                    except NotFoundException:
                        continue

                # Retrieve NSM Arakoon cluster information
                cluster_arakoon_map = {}
                for cluster_info in abm_cluster_info + nsm_cluster_info:
                    cluster_name = cluster_info['cluster_name']
                    arakoon_config = ArakoonClusterConfig(
                        cluster_id=cluster_name)
                    cluster_arakoon_map[
                        cluster_name] = arakoon_config.export_dict()

                storagerouter_map = dict(
                    (storagerouter.machine_id, storagerouter) for storagerouter
                    in StorageRouterList.get_storagerouters())
                alba_backend_id_map = dict((alba_backend.alba_id, alba_backend)
                                           for alba_backend in alba_backends)
                for cluster_info in abm_cluster_info:
                    internal = cluster_info['internal']
                    cluster_name = cluster_info['cluster_name']
                    config_location = Configuration.get_configuration_path(
                        key=ArakoonClusterConfig.CONFIG_KEY.format(
                            cluster_name))
                    try:
                        alba_id = AlbaCLI.run(command='get-alba-id',
                                              config=config_location,
                                              named_params={'attempts':
                                                            3})['id']
                        nsm_hosts = AlbaCLI.run(command='list-nsm-hosts',
                                                config=config_location,
                                                named_params={'attempts': 3})
                    except RuntimeError:
                        continue

                    alba_backend = alba_backend_id_map.get(alba_id)
                    if alba_backend is None:  # ALBA Backend with ID not found in model
                        continue
                    if alba_backend.abm_cluster is not None and len(
                            alba_backend.nsm_clusters
                    ) > 0:  # Clusters already exist
                        continue

                    # Create ABM Cluster
                    if alba_backend.abm_cluster is None:
                        abm_cluster = ABMCluster()
                        abm_cluster.name = cluster_name
                        abm_cluster.alba_backend = alba_backend
                        abm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(
                            cluster_name)
                        abm_cluster.save()
                    else:
                        abm_cluster = alba_backend.abm_cluster

                    # Create ABM Services
                    abm_arakoon_config = cluster_arakoon_map[cluster_name]
                    abm_arakoon_config.pop('global')
                    arakoon_nodes = abm_arakoon_config.keys()
                    if internal is False:
                        services_to_create = 1
                    else:
                        if set(arakoon_nodes).difference(
                                set(storagerouter_map.keys())):
                            continue
                        services_to_create = len(arakoon_nodes)
                    for index in range(services_to_create):
                        service = Service()
                        service.name = 'arakoon-{0}-abm'.format(
                            alba_backend.name)
                        service.type = ServiceTypeList.get_by_name(
                            ServiceType.SERVICE_TYPES.ALBA_MGR)
                        if internal is True:
                            arakoon_node_config = abm_arakoon_config[
                                arakoon_nodes[index]]
                            service.ports = [
                                arakoon_node_config['client_port'],
                                arakoon_node_config['messaging_port']
                            ]
                            service.storagerouter = storagerouter_map[
                                arakoon_nodes[index]]
                        else:
                            service.ports = []
                            service.storagerouter = None
                        service.save()

                        abm_service = ABMService()
                        abm_service.service = service
                        abm_service.abm_cluster = abm_cluster
                        abm_service.save()

                    # Create NSM Clusters
                    for cluster_index, nsm_host in enumerate(
                            sorted(nsm_hosts,
                                   key=lambda host: ExtensionsToolbox.
                                   advanced_sort(host['cluster_id'], '_'))):
                        nsm_cluster_name = nsm_host['cluster_id']
                        nsm_arakoon_config = cluster_arakoon_map.get(
                            nsm_cluster_name)
                        if nsm_arakoon_config is None:
                            continue

                        number = cluster_index if internal is False else int(
                            nsm_cluster_name.split('_')[-1])
                        nsm_cluster = NSMCluster()
                        nsm_cluster.name = nsm_cluster_name
                        nsm_cluster.number = number
                        nsm_cluster.alba_backend = alba_backend
                        nsm_cluster.config_location = ArakoonClusterConfig.CONFIG_KEY.format(
                            nsm_cluster_name)
                        nsm_cluster.save()

                        # Create NSM Services
                        nsm_arakoon_config.pop('global')
                        arakoon_nodes = nsm_arakoon_config.keys()
                        if internal is False:
                            services_to_create = 1
                        else:
                            if set(arakoon_nodes).difference(
                                    set(storagerouter_map.keys())):
                                continue
                            services_to_create = len(arakoon_nodes)
                        for service_index in range(services_to_create):
                            service = Service()
                            service.name = 'arakoon-{0}-nsm_{1}'.format(
                                alba_backend.name, number)
                            service.type = ServiceTypeList.get_by_name(
                                ServiceType.SERVICE_TYPES.NS_MGR)
                            if internal is True:
                                arakoon_node_config = nsm_arakoon_config[
                                    arakoon_nodes[service_index]]
                                service.ports = [
                                    arakoon_node_config['client_port'],
                                    arakoon_node_config['messaging_port']
                                ]
                                service.storagerouter = storagerouter_map[
                                    arakoon_nodes[service_index]]
                            else:
                                service.ports = []
                                service.storagerouter = None
                            service.save()

                            nsm_service = NSMService()
                            nsm_service.service = service
                            nsm_service.nsm_cluster = nsm_cluster
                            nsm_service.save()

            # Clean up all junction services no longer linked to an ALBA Backend
            all_nsm_services = [
                service.nsm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.NS_MGR).services
                if service.nsm_service.nsm_cluster is None
            ]
            all_abm_services = [
                service.abm_service for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.ALBA_MGR).services
                if service.abm_service.abm_cluster is None
            ]
            for abm_service in all_abm_services:
                abm_service.delete()
                abm_service.service.delete()
            for nsm_service in all_nsm_services:
                nsm_service.delete()
                nsm_service.service.delete()

            ################################
            # Introduction of Active Drive #
            ################################
            # Update slot_id and Alba Node relation for all OSDs
            client = PersistentFactory.get_client()
            disk_osd_map = {}
            for key, data in client.prefix_entries('ovs_data_albaosd_'):
                alba_disk_guid = data.get('alba_disk', {}).get('guid')
                if alba_disk_guid is not None:
                    if alba_disk_guid not in disk_osd_map:
                        disk_osd_map[alba_disk_guid] = []
                    disk_osd_map[alba_disk_guid].append(
                        key.replace('ovs_data_albaosd_', ''))
                try:
                    value = client.get(key)
                    value.pop('alba_disk', None)
                    client.set(key=key, value=value)
                except Exception:
                    pass  # We don't care if we would have any leftover AlbaDisk information in _data, but it's cleaner not to

            alba_guid_node_map = dict(
                (an.guid, an) for an in AlbaNodeList.get_albanodes())
            for key, data in client.prefix_entries('ovs_data_albadisk_'):
                alba_disk_guid = key.replace('ovs_data_albadisk_', '')
                alba_node_guid = data.get('alba_node', {}).get('guid')
                if alba_disk_guid in disk_osd_map and alba_node_guid in alba_guid_node_map and len(
                        data.get('aliases', [])) > 0:
                    slot_id = data['aliases'][0].split('/')[-1]
                    for osd_guid in disk_osd_map[alba_disk_guid]:
                        try:
                            osd = AlbaOSD(osd_guid)
                        except ObjectNotFoundException:
                            continue
                        osd.slot_id = slot_id
                        osd.alba_node = alba_guid_node_map[alba_node_guid]
                        osd.save()
                client.delete(key=key, must_exist=False)

            # Remove unique constraints for AlbaNode IP
            for key in client.prefix('ovs_unique_albanode_ip_'):
                client.delete(key=key, must_exist=False)

            # Remove relation for all Alba Disks
            for key in client.prefix('ovs_reverseindex_albadisk_'):
                client.delete(key=key, must_exist=False)

            # Remove the relation between AlbaNode and AlbaDisk
            for key in client.prefix('ovs_reverseindex_albanode_'):
                if '|disks|' in key:
                    client.delete(key=key, must_exist=False)

        return DALMigrator.THIS_VERSION
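The unique/index migration above results in the following key layout (class and property names illustrative, digests abbreviated):

    # ovs_unique_<class>_<property>_<sha1(value)>  -> data key of the owning object
    #   e.g. ovs_unique_user_username_ab12...      -> 'ovs_data_user_<guid>'
    # ovs_index_<class>|<property>|<sha1(value)>   -> list of data keys sharing that value
    #   e.g. ovs_index_user|username|ab12...       -> ['ovs_data_user_<guid>', ...]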
Example #30
    def migrate(previous_version):
        """
        Migrates from any version to any version, running all migrations required.
        If previous_version is for example 1 and this script is at
        version 3, it will execute two steps:
          - 1 > 2
          - 2 > 3
        @param previous_version: The previous version from which to start the migration.
        """

        working_version = previous_version

        # Version 1 introduced:
        # - The datastore is still empty, add defaults
        if working_version < 1:
            from ovs.dal.hybrids.user import User
            from ovs.dal.hybrids.group import Group
            from ovs.dal.hybrids.role import Role
            from ovs.dal.hybrids.client import Client
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.hybrids.j_rolegroup import RoleGroup
            from ovs.dal.hybrids.j_roleclient import RoleClient
            from ovs.dal.hybrids.backendtype import BackendType
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.hybrids.branding import Branding
            from ovs.dal.lists.backendtypelist import BackendTypeList

            # Create groups
            admin_group = Group()
            admin_group.name = 'administrators'
            admin_group.description = 'Administrators'
            admin_group.save()
            viewers_group = Group()
            viewers_group.name = 'viewers'
            viewers_group.description = 'Viewers'
            viewers_group.save()

            # Create users
            admin = User()
            admin.username = '******'
            admin.password = hashlib.sha256('admin').hexdigest()
            admin.is_active = True
            admin.group = admin_group
            admin.save()

            # Create internal OAuth 2 clients
            admin_pw_client = Client()
            admin_pw_client.ovs_type = 'INTERNAL'
            admin_pw_client.grant_type = 'PASSWORD'
            admin_pw_client.user = admin
            admin_pw_client.save()
            admin_cc_client = Client()
            admin_cc_client.ovs_type = 'INTERNAL'
            admin_cc_client.grant_type = 'CLIENT_CREDENTIALS'
            admin_cc_client.client_secret = ''.join(
                random.choice(string.ascii_letters + string.digits +
                              '|_=+*#@!/-[]{}<>.?,\'";:~') for _ in range(128))
            admin_cc_client.user = admin
            admin_cc_client.save()

            # Create roles
            read_role = Role()
            read_role.code = 'read'
            read_role.name = 'Read'
            read_role.description = 'Can read objects'
            read_role.save()
            write_role = Role()
            write_role.code = 'write'
            write_role.name = 'Write'
            write_role.description = 'Can write objects'
            write_role.save()
            manage_role = Role()
            manage_role.code = 'manage'
            manage_role.name = 'Manage'
            manage_role.description = 'Can manage the system'
            manage_role.save()

            # Attach groups to roles
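            # Each mapping entry is (group, roles): the group gets a RoleGroup per role
            # and every client of every user in the group gets a RoleClient per role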
            mapping = [(admin_group, [read_role, write_role, manage_role]),
                       (viewers_group, [read_role])]
            for setting in mapping:
                for role in setting[1]:
                    rolegroup = RoleGroup()
                    rolegroup.group = setting[0]
                    rolegroup.role = role
                    rolegroup.save()
                for user in setting[0].users:
                    for role in setting[1]:
                        for client in user.clients:
                            roleclient = RoleClient()
                            roleclient.client = client
                            roleclient.role = role
                            roleclient.save()

            # Add backends
            for backend_type_info in [('Ceph', 'ceph_s3'),
                                      ('Amazon', 'amazon_s3'),
                                      ('Swift', 'swift_s3'),
                                      ('Local', 'local'),
                                      ('Distributed', 'distributed'),
                                      ('ALBA', 'alba')]:
                code = backend_type_info[1]
                backend_type = BackendTypeList.get_backend_type_by_code(code)
                if backend_type is None:
                    backend_type = BackendType()
                backend_type.name = backend_type_info[0]
                backend_type.code = code
                backend_type.save()

            # Add service types
            for service_type_info in [
                    ServiceType.SERVICE_TYPES.MD_SERVER,
                    ServiceType.SERVICE_TYPES.ALBA_PROXY,
                    ServiceType.SERVICE_TYPES.ARAKOON
            ]:
                service_type = ServiceType()
                service_type.name = service_type_info
                service_type.save()

            # Branding
            branding = Branding()
            branding.name = 'Default'
            branding.description = 'Default bootstrap theme'
            branding.css = 'bootstrap-default.min.css'
            branding.productname = 'Open vStorage'
            branding.is_default = True
            branding.save()
            slate = Branding()
            slate.name = 'Slate'
            slate.description = 'Dark bootstrap theme'
            slate.css = 'bootstrap-slate.min.css'
            slate.productname = 'Open vStorage'
            slate.is_default = False
            slate.save()

            # Failure Domain
            failure_domain = FailureDomain()
            failure_domain.name = 'Default'
            failure_domain.save()

            # We're now at version 1
            working_version = 1

        # Version 2 introduced:
        # - new Descriptor format
        if working_version < 2:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
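            # Rewrite every serialized Descriptor in the stored data to the new format
            # and bump the object's _version so the change is persisted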
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if (isinstance(data[entry], dict)
                            and 'source' in data[entry]
                            and 'hybrids' in data[entry]['source']):
                        filename = data[entry]['source']
                        if not filename.startswith('/'):
                            filename = '/opt/OpenvStorage/ovs/dal/{0}'.format(
                                filename)
                        module = imp.load_source(data[entry]['name'], filename)
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            # We're now at version 2
            working_version = 2

        # Version 3 introduced:
        # - new Descriptor format
        if working_version < 3:
            import imp
            from ovs.dal.helpers import Descriptor
            from ovs.extensions.storage.persistentfactory import PersistentFactory

            client = PersistentFactory.get_client()
            keys = client.prefix('ovs_data')
            for key in keys:
                data = client.get(key)
                modified = False
                for entry in data.keys():
                    if isinstance(data[entry], dict) and 'source' in data[entry]:
                        module = imp.load_source(data[entry]['name'],
                                                 data[entry]['source'])
                        cls = getattr(module, data[entry]['type'])
                        new_data = Descriptor(cls, cached=False).descriptor
                        if 'guid' in data[entry]:
                            new_data['guid'] = data[entry]['guid']
                        data[entry] = new_data
                        modified = True
                if modified is True:
                    data['_version'] += 1
                    client.set(key, data)

            working_version = 3

        # Version 4 introduced:
        # - Flexible SSD layout
        if working_version < 4:
            import os
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.hybrids.j_storagedriverpartition import StorageDriverPartition
            from ovs.dal.hybrids.servicetype import ServiceType
            from ovs.dal.lists.servicetypelist import ServiceTypeList
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            from ovs.extensions.storageserver.storagedriver import StorageDriverConfiguration
            for service in ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.MD_SERVER).services:
                mds_service = service.mds_service
                storagedriver = None
                for current_storagedriver in service.storagerouter.storagedrivers:
                    if current_storagedriver.vpool_guid == mds_service.vpool_guid:
                        storagedriver = current_storagedriver
                        break
                if storagedriver is None:
                    # No StorageDriver serves this MDS service's vPool on this StorageRouter
                    continue
                tasks = {}
                if storagedriver._data.get('mountpoint_md'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_md'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.DB,
                            StorageDriverPartition.SUBROLE.MDS)
                if storagedriver._data.get('mountpoint_temp'):
                    tasks['{0}/mds_{1}_{2}'.format(
                        storagedriver._data.get('mountpoint_temp'),
                        storagedriver.vpool.name, mds_service.number)] = (
                            DiskPartition.ROLES.SCRUB,
                            StorageDriverPartition.SUBROLE.MDS)
                for disk in service.storagerouter.disks:
                    for partition in disk.partitions:
                        for directory, (role, subrole) in tasks.iteritems():
                            with remote(storagedriver.storagerouter.ip, [os],
                                        username='******') as rem:
                                stat_dir = directory
                                while not rem.os.path.exists(stat_dir) and stat_dir != '/':
                                    stat_dir = stat_dir.rsplit('/', 1)[0]
                                    if not stat_dir:
                                        stat_dir = '/'
                                inode = rem.os.stat(stat_dir).st_dev
                            if partition.inode == inode:
                                if role not in partition.roles:
                                    partition.roles.append(role)
                                    partition.save()
                                number = 0
                                migrated = False
                                for sd_partition in storagedriver.partitions:
                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                        if sd_partition.mds_service == mds_service:
                                            migrated = True
                                            break
                                        if sd_partition.partition_guid == partition.guid:
                                            number = max(
                                                sd_partition.number, number)
                                if migrated is False:
                                    sd_partition = StorageDriverPartition()
                                    sd_partition.role = role
                                    sd_partition.sub_role = subrole
                                    sd_partition.partition = partition
                                    sd_partition.storagedriver = storagedriver
                                    sd_partition.mds_service = mds_service
                                    sd_partition.size = None
                                    sd_partition.number = number + 1
                                    sd_partition.save()
                                    client = SSHClient(
                                        storagedriver.storagerouter,
                                        username='******')
                                    path = sd_partition.path.rsplit('/', 1)[0]
                                    if path:
                                        client.dir_create(path)
                                        client.dir_chown(path, 'ovs', 'ovs')
                                    client.dir_create(directory)
                                    client.dir_chown(directory, 'ovs', 'ovs')
                                    client.symlink(
                                        {sd_partition.path: directory})
            for storagedriver in StorageDriverList.get_storagedrivers():
                migrated_objects = {}
                for disk in storagedriver.storagerouter.disks:
                    for partition in disk.partitions:
                        # Process all mountpoints that are unique and don't have a specified size
                        for key, (role, sr_info) in {
                                'mountpoint_md': (DiskPartition.ROLES.DB,
                                                  {'metadata_{0}': StorageDriverPartition.SUBROLE.MD,
                                                   'tlogs_{0}': StorageDriverPartition.SUBROLE.TLOG}),
                                'mountpoint_fragmentcache': (DiskPartition.ROLES.WRITE,
                                                             {'fcache_{0}': StorageDriverPartition.SUBROLE.FCACHE}),
                                'mountpoint_foc': (DiskPartition.ROLES.WRITE,
                                                   {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                'mountpoint_dtl': (DiskPartition.ROLES.WRITE,
                                                   {'fd_{0}': StorageDriverPartition.SUBROLE.FD,
                                                    'dtl_{0}': StorageDriverPartition.SUBROLE.DTL}),
                                'mountpoint_readcaches': (DiskPartition.ROLES.READ, {'': None}),
                                'mountpoint_writecaches': (DiskPartition.ROLES.WRITE,
                                                           {'sco_{0}': StorageDriverPartition.SUBROLE.SCO})}.iteritems():
                            if key in storagedriver._data:
                                is_list = isinstance(storagedriver._data[key], list)
                                entries = storagedriver._data[key][:] if is_list is True else [storagedriver._data[key]]
                                for entry in entries:
                                    if not entry:
                                        if is_list:
                                            storagedriver._data[key].remove(entry)
                                            if len(storagedriver._data[key]) == 0:
                                                del storagedriver._data[key]
                                        else:
                                            del storagedriver._data[key]
                                    else:
                                        with remote(storagedriver.storagerouter.ip, [os], username='******') as rem:
                                            inode = rem.os.stat(entry).st_dev
                                        if partition.inode == inode:
                                            if role not in partition.roles:
                                                partition.roles.append(role)
                                                partition.save()
                                            for folder, subrole in sr_info.iteritems():
                                                number = 0
                                                migrated = False
                                                for sd_partition in storagedriver.partitions:
                                                    if sd_partition.role == role and sd_partition.sub_role == subrole:
                                                        if sd_partition.partition_guid == partition.guid:
                                                            number = max(sd_partition.number, number)
                                                if migrated is False:
                                                    sd_partition = StorageDriverPartition()
                                                    sd_partition.role = role
                                                    sd_partition.sub_role = subrole
                                                    sd_partition.partition = partition
                                                    sd_partition.storagedriver = storagedriver
                                                    sd_partition.size = None
                                                    sd_partition.number = number + 1
                                                    sd_partition.save()
                                                    if folder:
                                                        source = '{0}/{1}'.format(entry, folder.format(storagedriver.vpool.name))
                                                    else:
                                                        source = entry
                                                    client = SSHClient(storagedriver.storagerouter, username='******')
                                                    path = sd_partition.path.rsplit('/', 1)[0]
                                                    if path:
                                                        client.dir_create(path)
                                                        client.dir_chown(path, 'ovs', 'ovs')
                                                    client.symlink({sd_partition.path: source})
                                                    migrated_objects[source] = sd_partition
                                            if is_list:
                                                storagedriver._data[key].remove(entry)
                                                if len(storagedriver._data[key]) == 0:
                                                    del storagedriver._data[key]
                                            else:
                                                del storagedriver._data[key]
                                            storagedriver.save()
                if 'mountpoint_bfs' in storagedriver._data:
                    storagedriver.mountpoint_dfs = storagedriver._data['mountpoint_bfs']
                    if not storagedriver.mountpoint_dfs:
                        storagedriver.mountpoint_dfs = None
                    del storagedriver._data['mountpoint_bfs']
                    storagedriver.save()
                if 'mountpoint_temp' in storagedriver._data:
                    del storagedriver._data['mountpoint_temp']
                    storagedriver.save()
                if migrated_objects:
                    print 'Loading sizes'
                    config = StorageDriverConfiguration(
                        'storagedriver', storagedriver.vpool_guid,
                        storagedriver.storagedriver_id)
                    config.load()
                    for readcache in config.configuration.get('content_addressed_cache', {}).get('clustercache_mount_points', []):
                        path = readcache.get('path', '').rsplit('/', 1)[0]
                        size = int(readcache['size'].strip('KiB')) * 1024 if 'size' in readcache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size) if size is not None else None
                            migrated_objects[path].save()
                    for writecache in config.configuration.get('scocache', {}).get('scocache_mount_points', []):
                        path = writecache.get('path', '')
                        size = int(writecache['size'].strip('KiB')) * 1024 if 'size' in writecache else None
                        if path in migrated_objects:
                            migrated_objects[path].size = long(size) if size is not None else None
                            migrated_objects[path].save()

            working_version = 4

        # Version 5 introduced:
        # - Failure Domains
        if working_version < 5:
            import os
            from ovs.dal.hybrids.failuredomain import FailureDomain
            from ovs.dal.lists.failuredomainlist import FailureDomainList
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            from ovs.extensions.generic.remote import remote
            from ovs.extensions.generic.sshclient import SSHClient
            failure_domains = FailureDomainList.get_failure_domains()
            if len(failure_domains) > 0:
                failure_domain = failure_domains[0]
            else:
                failure_domain = FailureDomain()
                failure_domain.name = 'Default'
                failure_domain.save()
            for storagerouter in StorageRouterList.get_storagerouters():
                change = False
                if storagerouter.primary_failure_domain is None:
                    storagerouter.primary_failure_domain = failure_domain
                    change = True
                if storagerouter.rdma_capable is None:
                    client = SSHClient(storagerouter, username='******')
                    rdma_capable = False
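                    # A StorageRouter is considered RDMA-capable as soon as one InfiniBand
                    # port under /sys/class/infiniband reports an ACTIVE state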
                    with remote(client.ip, [os], username='******') as rem:
                        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
                            for directory in dirs:
                                ports_dir = '/'.join([root, directory, 'ports'])
                                if not rem.os.path.exists(ports_dir):
                                    continue
                                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                                    if sub_root != ports_dir:
                                        continue
                                    for sub_directory in sub_dirs:
                                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                                        if rem.os.path.exists(state_file):
                                            if 'ACTIVE' in client.run('cat {0}'.format(state_file)):
                                                rdma_capable = True
                    storagerouter.rdma_capable = rdma_capable
                    change = True
                if change is True:
                    storagerouter.save()

            working_version = 5

        # Version 6 introduced:
        # - Distributed scrubbing
        if working_version < 6:
            from ovs.dal.hybrids.diskpartition import DiskPartition
            from ovs.dal.lists.storagedriverlist import StorageDriverList
            from ovs.extensions.generic.sshclient import SSHClient
            for storage_driver in StorageDriverList.get_storagedrivers():
                root_client = SSHClient(storage_driver.storagerouter,
                                        username='******')
                for partition in storage_driver.partitions:
                    if partition.role == DiskPartition.ROLES.SCRUB:
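                        # Dropping the sub-role below changes the computed path; the old
                        # MDS scrub directory is symlinked so existing data stays reachable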
                        old_path = partition.path
                        partition.sub_role = None
                        partition.save()
                        partition.invalidate_dynamics(['folder', 'path'])
                        if root_client.dir_exists(partition.path):
                            continue  # New directory already exists
                        if '_mds_' in old_path:
                            if root_client.dir_exists(old_path):
                                root_client.symlink({partition.path: old_path})
                        if not root_client.dir_exists(partition.path):
                            root_client.dir_create(partition.path)
                        root_client.dir_chmod(partition.path, 0777)

            working_version = 6

        # Version 7 introduced:
        # - vPool status
        if working_version < 7:
            from ovs.dal.hybrids import vpool
            reload(vpool)
            from ovs.dal.hybrids.vpool import VPool
            from ovs.dal.lists.vpoollist import VPoolList
            for _vpool in VPoolList.get_vpools():
                vpool = VPool(_vpool.guid)
                if hasattr(vpool, 'status') and vpool.status is None:
                    vpool.status = VPool.STATUSES.RUNNING
                    vpool.save()

            working_version = 7

        # Version 10 introduced:
        # - Reverse indexes are stored in persistent store
        # - Store more non-changing metadata on disk instead of using a dynamic property
        if working_version < 10:
            from ovs.dal.helpers import HybridRunner, Descriptor
            from ovs.dal.datalist import DataList
            from ovs.extensions.storage.persistentfactory import PersistentFactory
            from ovs.extensions.storage.volatilefactory import VolatileFactory
            persistent = PersistentFactory.get_client()
            for prefix in ['ovs_listcache', 'ovs_reverseindex']:
                for key in persistent.prefix(prefix):
                    persistent.delete(key)
            for key in persistent.prefix('ovs_data_'):
                persistent.set(key, persistent.get(key))
            base_reverse_key = 'ovs_reverseindex_{0}_{1}|{2}|{3}'
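            # Key layout: ovs_reverseindex_<remote class>_<remote guid>|<foreign key>|<own guid>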
            hybrid_structure = HybridRunner.get_hybrids()
            for class_descriptor in hybrid_structure.values():
                cls = Descriptor().load(class_descriptor).get_object()
                all_objects = DataList(cls, {
                    'type': DataList.where_operator.AND,
                    'items': []
                })
                for item in all_objects:
                    guid = item.guid
                    for relation in item._relations:
                        if relation.foreign_type is None:
                            rcls = cls
                        else:
                            rcls = relation.foreign_type
                        rclsname = rcls.__name__.lower()
                        key = relation.name
                        rguid = item._data[key]['guid']
                        if rguid is not None:
                            reverse_key = base_reverse_key.format(
                                rclsname, rguid, relation.foreign_key, guid)
                            persistent.set(reverse_key, 0)
            volatile = VolatileFactory.get_client()
            try:
                volatile._client.flush_all()
            except Exception:
                pass
            from ovs.dal.lists.vdisklist import VDiskList
            for vdisk in VDiskList.get_vdisks():
                try:
                    vdisk.metadata = {
                        'lba_size': vdisk.info['lba_size'],
                        'cluster_multiplier': vdisk.info['cluster_multiplier']
                    }
                    vdisk.save()
                except Exception:
                    pass

            working_version = 10

        # Version 11 introduced:
        # - ALBA accelerated ALBA, meaning different vpool.metadata information
        if working_version < 11:
            from ovs.dal.lists.vpoollist import VPoolList

            for vpool in VPoolList.get_vpools():
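                # Nest the old, flat metadata under a 'backend' key and rename its
                # 'metadata' entry to 'arakoon_config'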
                vpool.metadata = {'backend': vpool.metadata}
                if 'metadata' in vpool.metadata['backend']:
                    vpool.metadata['backend']['arakoon_config'] = vpool.metadata['backend'].pop('metadata')
                if 'backend_info' in vpool.metadata['backend']:
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_read'] = True
                    vpool.metadata['backend']['backend_info']['fragment_cache_on_write'] = False
                vpool.save()
            working_version = 11

        return working_version
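
The migrate() above is a version-gated pipeline: each block only runs when the starting version is below its target and then advances working_version, so a migration can resume from any intermediate version. A minimal, self-contained sketch of the same pattern (the step registry and callables are illustrative, not part of the original code):

 def run_migrations(previous_version, steps):
     # steps maps a target version to the callable that upgrades to it;
     # gates fire in ascending order, exactly like the if-blocks above
     working_version = previous_version
     for target in sorted(steps):
         if working_version < target:
             steps[target]()
             working_version = target
     return working_version

 # run_migrations(0, {1: create_defaults, 2: rewrite_descriptors}) runs both
 # steps; run_migrations(1, ...) only the second.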
Exemple #31
    def get(self):
        """
        Returns the OpenAPI (Swagger 2.0) specification for this API
        """
        version = settings.VERSION[-1]
        data = {'swagger': '2.0',
                'info': {'title': 'Open vStorage',
                         'description': 'The Open vStorage API.',
                         'version': str(version)},
                'basePath': '/api',
                'schemes': ['https'],
                'consumes': ['application/json'],
                'produces': ['application/json; version={0}'.format(version)],
                'paths': {'/': {'get': {'summary': 'Retrieve API metadata',
                                        'operationId': 'api',
                                        'responses': {'200': {'description': 'API metadata',
                                                              'schema': {'type': 'object',
                                                                         'title': 'APIMetadata',
                                                                         'properties': {'authenticated': {'type': 'boolean',
                                                                                                          'description': 'Indicates whether the client is authenticated.'},
                                                                                        'authentication_state': {'type': 'string',
                                                                                                                 'description': 'Provides more information on the "authenticated" state of a client.',
                                                                                                                 'enum': ['unauthenticated',
                                                                                                                          'invalid_authorization_type',
                                                                                                                          'invalid_token',
                                                                                                                          'token_expired',
                                                                                                                          'inactive_user',
                                                                                                                          'authenticated',
                                                                                                                          'unexpected_exception']},
                                                                                        'authentication_metadata': {'type': 'object',
                                                                                                                    'title': 'AuthenticationMetadata',
                                                                                                                    'description': 'Contains information on the usage of an optional 3rd party OAuth2.0 authentication service.',
                                                                                                                    'properties': {'ip': {'type': 'string',
                                                                                                                                          'description': 'The IP address of the current node.'},
                                                                                                                                   'mode': {'type': 'string',
                                                                                                                                            'description': 'Indicates whether the "local" or a "remote" authentication endpoint should be used.',
                                                                                                                                            'enum': ['local',
                                                                                                                                                     'remote']},
                                                                                                                                   'authorize_uri': {'type': 'string',
                                                                                                                                                     'description': 'The URI to which the user has to be redirected to authenticate.',
                                                                                                                                   'client_id': {'type': 'string',
                                                                                                                                                 'description': 'The client identifier to be used when authenticating.'},
                                                                                                                                   'scope': {'type': 'string',
                                                                                                                                             'description': 'The scope that has to be requested from the authentication endpoint.'}},
                                                                                                                    'required': []},
                                                                                        'username': {'type': 'string',
                                                                                                     'description': 'The username of the client or null if not available.'},
                                                                                        'userguid': {'type': 'string',
                                                                                                     'description': 'The GUID (primary key) of the client\'s user or null if not available.'},
                                                                                        'roles': {'type': 'array',
                                                                                                  'description': 'An array of the scopes that were granted to the client.',
                                                                                                  'items': {'type': 'string'}},
                                                                                        'identification': {'type': 'object',
                                                                                                           'title': 'APIIdentification',
                                                                                                           'description': 'Contains identification information about the API/environment.',
                                                                                                           'properties': {'cluster_id': {'type': 'string',
                                                                                                                                         'description': 'Environment identification string.'}},
                                                                                                           'required': ['cluster_id']},
                                                                                        'storagerouter_ips': {'type': 'array',
                                                                                                              'description': 'An array containing the IP addresses of all StorageRouters in the environment.',
                                                                                                              'items': {'type': 'string'}},
                                                                                        'versions': {'type': 'array',
                                                                                                     'description': 'An array of all versions that this instance of the API supports.',
                                                                                                     'items': {'type': 'integer'}},
                                                                                        'plugins': {}},
                                                                         'required': ['authenticated',
                                                                                      'authentication_state',
                                                                                      'authentication_metadata',
                                                                                      'username',
                                                                                      'userguid',
                                                                                      'roles',
                                                                                      'identification',
                                                                                      'storagerouter_ips',
                                                                                      'versions',
                                                                                      'plugins']}}}}}},
                'definitions': {'APIError': {'type': 'object',
                                             'properties': {'error': {'type': 'string',
                                                                      'description': 'An error code'},
                                                            'error_description': {'type': 'string',
                                                                                  'description': 'Descriptive error message'}},
                                             'required': ['error', 'error_description']}},
                'securityDefinitions': {'oauth2': {'type': 'oauth2',
                                                   'flow': 'password',
                                                   'tokenUrl': 'oauth2/token',
                                                   'scopes': {'read': 'Read access',
                                                              'write': 'Write access',
                                                              'manage': 'Management access'}}},
                'security': [{'oauth2': ['read', 'write', 'manage']}]}

        # Plugin information
        plugins = {}
        for backend_type in BackendTypeList.get_backend_types():
            if backend_type.has_plugin is True:
                if backend_type.code not in plugins:
                    plugins[backend_type.code] = []
                plugins[backend_type.code] += ['backend', 'gui']
        generic_plugins = Configuration.get('/ovs/framework/plugins/installed|generic')
        for plugin_name in generic_plugins:
            if plugin_name not in plugins:
                plugins[plugin_name] = []
            plugins[plugin_name] += ['gui']

        data['paths']['/']['get']['responses']['200']['schema']['properties']['plugins'] = {
            'type': 'object',
            'title': 'PluginMetadata',
            'description': 'Contains information about plugins active in the system. Each property represents a plugin and lists the areas where it provides functionality.',
            'properties': {plugin: {'type': 'array',
                                    'description': 'An array of all areas the plugin provides functionality.',
                                    'items': {'type': 'string'}} for (plugin, info) in plugins.iteritems()},
            'required': []
        }

        # API paths
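        # Note: load_parameters() below derives Swagger query parameters from the
        # Sphinx-style docstrings of the view methods; a docstring in the expected
        # shape contains lines like (hypothetical example):
        #     :param timeout: Maximum time to wait, in seconds
        #     :type timeout: int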
        def load_parameters(_fun):
            # Parameters by @load decorators
            parameter_info = []
            mandatory_args = _fun.ovs_metadata['load']['mandatory']
            optional_args = _fun.ovs_metadata['load']['optional']
            object_type = _fun.ovs_metadata['load']['object_type']
            entries = ['version', 'request', 'local_storagerouter', 'pk', 'contents']
            if object_type is not None:
                object_arg = object_type.__name__.lower()
                if object_arg in mandatory_args or object_arg in optional_args:
                    parameter_info.append({'name': 'guid',
                                           'in': 'path',
                                           'description': 'Identifier of the object on which the call is applied.',
                                           'required': True,
                                           'type': 'string'})
                entries.append(object_arg)
            for entry in entries:
                if entry in mandatory_args:
                    mandatory_args.remove(entry)
                if entry in optional_args:
                    optional_args.remove(entry)
            docs = _fun.__doc__
            doc_info = {}
            if docs is not None:
                for match in re.finditer(':(param|type) (.*?): (.*)', docs, re.MULTILINE):
                    entries = match.groups()
                    if entries[1] not in doc_info:
                        doc_info[entries[1]] = {}
                    doc_info[entries[1]][entries[0]] = entries[2]
            for argument in mandatory_args + optional_args:
                info = {'name': argument,
                        'in': 'query',
                        'required': argument in mandatory_args,
                        'type': 'string'}
                if argument in doc_info:
                    description = doc_info[argument].get('param')
                    if description:
                        info['description'] = description
                    type_info = doc_info[argument].get('type')
                    if type_info:
                        if type_info in ['int', 'long']:
                            info['type'] = 'integer'
                        elif type_info in ['float']:
                            info['type'] = 'number'
                        elif type_info in ['bool']:
                            info['type'] = 'boolean'
                        elif type_info in ['str', 'basestring', 'unicode']:
                            info['type'] = 'string'
                        elif type_info in ['dict']:
                            info['type'] = 'object'
                parameter_info.append(info)
            # Parameters by @returns_* decorators
            return_info = _fun.ovs_metadata.get('returns', None)
            if return_info is not None:
                # Extra parameters
                params = return_info['parameters']
                fields = []
                if 'contents' in params or 'sorting' in params:
                    _cls = return_info['object_type']
                    fields = [prop.name for prop in _cls._properties] + \
                             ['{0}_guid'.format(rel.name) for rel in _cls._relations] + \
                             [dynamic.name for dynamic in _cls._dynamics]
                    relation_info = RelationMapper.load_foreign_relations(_cls)
                    if relation_info is not None:
                        fields += [('{0}_guid' if rel_info['list'] is False else '{0}_guids').format(key)
                                   for key, rel_info in relation_info.iteritems()]
                    fields = fields + ['-{0}'.format(field) for field in fields]
                for parameter in params:
                    if parameter == 'contents':
                        parameter_info.append({'name': 'contents',
                                               'in': 'query',
                                               'description': 'Specify the returned contents.',
                                               'required': True,
                                               'collectionFormat': 'csv',
                                               'type': 'array',
                                               'enum': ['_dynamics', '_relations', 'guid'] + fields,
                                               'items': {'type': 'string'}})
                    elif parameter == 'paging':
                        parameter_info.append({'name': 'page',
                                               'in': 'query',
                                               'description': 'Specifies the page to be returned.',
                                               'required': False,
                                               'type': 'integer'})
                        parameter_info.append({'name': 'page_size',
                                               'in': 'query',
                                               'description': 'Specifies the size of a page. Supported values: 10, 25, 50 and 100. Requires "page" to be set.',
                                               'required': False,
                                               'type': 'integer'})
                    elif parameter == 'sorting':
                        parameter_info.append({'name': 'sort',
                                               'in': 'query',
                                               'description': 'Specifies the sorting of the list.',
                                               'required': False,
                                               'default': params[parameter],
                                               'enum': ['guid', '-guid'] + fields,
                                               'type': 'array',
                                               'items': {'type': 'string'}})
            return parameter_info

        def load_response(_fun):
            response_code = '200'
            response_schema = None
            return_info = _fun.ovs_metadata.get('returns', None)
            if return_info is not None:
                return_type, _return_code = return_info['returns']
                if _return_code is not None:
                    response_code = _return_code
                if return_type == 'object':
                    _cls = return_info['object_type']
                    response_schema = {'$ref': '#/definitions/{0}'.format(_cls.__name__)}
                elif return_type == 'list':
                    _cls = return_info['object_type']
                    class_schema = {'$ref': '#/definitions/{0}'.format(_cls.__name__)}
                    fields = [prop.name for prop in _cls._properties] + \
                             ['{0}_guid'.format(rel.name) for rel in _cls._relations] + \
                             [dynamic.name for dynamic in _cls._dynamics]
                    relation_info = RelationMapper.load_foreign_relations(_cls)
                    if relation_info is not None:
                        fields += [('{0}_guid' if rel_info['list'] is False else '{0}_guids').format(key)
                                   for key, rel_info in relation_info.iteritems()]
                    fields = fields + ['-{0}'.format(field) for field in fields]
                    response_schema = {'type': 'object',
                                       'title': 'DataList',
                                       'properties': {'_contents': {'type': 'array',
                                                                    'description': 'Requested contents.',
                                                                    'items': {'type': 'string'},
                                                                    'required': True,
                                                                    'collectionFormat': 'csv',
                                                                    'enum': ['_dynamics', '_relations', 'guid'] + fields},
                                                      '_paging': {'type': 'object',
                                                                  'title': 'PagingMetadata',
                                                                  'properties': {'total_items': {'type': 'integer',
                                                                                                 'description': 'Total items available.'},
                                                                                 'max_page': {'type': 'integer',
                                                                                              'description': 'Last page available.'},
                                                                                 'end_number': {'type': 'integer',
                                                                                                'description': '1-based index of the last item in the current page.'},
                                                                                 'current_page': {'type': 'integer',
                                                                                                  'description': 'Current page number.'},
                                                                                 'page_size': {'type': 'integer',
                                                                                               'description': 'Number of items in the current page.'},
                                                                                 'start_number': {'type': 'integer',
                                                                                                  'description': '1-based index of the first item in the current page.'}},
                                                                  'required': ['total_items', 'max_page', 'end_number', 'current_page', 'page_size', 'start_number']},
                                                      '_sorting': {'type': 'array',
                                                                   'description': 'Applied sorting.',
                                                                   'items': {'type': 'string'},
                                                                   'required': True,
                                                                   'collectionFormat': 'csv',
                                                                   'enum': ['-guid', 'guid'] + fields},
                                                      'data': {'type': 'array',
                                                               'description': 'List of serialized {0}s.'.format(_cls.__name__),
                                                               'required': True,
                                                               'items': class_schema}},
                                       'required': ['_contents', '_paging', '_sorting', 'data']}
                else:
                    docs = _fun.__doc__
                    doc_info = {}
                    if docs is not None:
                        for match in re.finditer(':(return|rtype): (.*)', docs, re.MULTILINE):
                            entries = match.groups()
                            doc_info[entries[0]] = entries[1]
                    if return_type == 'task':
                        task_return = ''
                        if 'return' in doc_info:
                            task_return = ' The task returns: {0}'.format(doc_info['return'])
                        response_schema = {'type': 'string',
                                           'description': 'A task identifier.{0}'.format(task_return)}
                    elif return_type is None:
                        response_schema = {'type': 'string'}
                        if 'return' in doc_info:
                            response_schema['description'] = doc_info['return']
                        if 'rtype' in doc_info:
                            type_info = doc_info['rtype']
                            if type_info in ['int', 'long']:
                                response_schema['type'] = 'integer'
                            elif type_info in ['float']:
                                response_schema['type'] = 'number'
                            elif type_info in ['bool']:
                                response_schema['type'] = 'boolean'
                            elif type_info in ['str', 'basestring', 'unicode']:
                                response_schema['type'] = 'string'
                            elif type_info in ['dict']:
                                response_schema['type'] = 'object'
                            elif type_info in ['None']:
                                response_schema = None
                                response_code = '204'
            return response_code, response_schema

        paths = data['paths']
        path = '/'.join([os.path.dirname(__file__), 'backend', 'views'])
        for filename in os.listdir(path):
            if os.path.isfile('/'.join([path, filename])) and filename.endswith('.py'):
                name = filename.replace('.py', '')
                module = imp.load_source(name, '/'.join([path, filename]))
                for member in inspect.getmembers(module):
                    if inspect.isclass(member[1]) \
                            and member[1].__module__ == name \
                            and 'ViewSet' in [base.__name__ for base in member[1].__bases__]:
                        cls = member[1]
                        if hasattr(cls, 'skip_spec') and cls.skip_spec is True:
                            continue
                        base_calls = {'list': ['get', '/{0}/'],
                                      'retrieve': ['get', '/{0}/{{guid}}/'],
                                      'create': ['post', '/{0}/'],
                                      'destroy': ['delete', '/{0}/{{guid}}/'],
                                      'partial_update': ['patch', '/{0}/{{guid}}/']}
                        for call, route_data in base_calls.iteritems():
                            if hasattr(cls, call):
                                fun = getattr(cls, call)
                                docstring = fun.__doc__.strip().split('\n')[0]
                                parameters = load_parameters(fun)
                                return_code, schema = load_response(fun)
                                route = {route_data[0]: {'summary': docstring,
                                                         'operationId': '{0}.{1}'.format(member[1].prefix, call),
                                                         'responses': {return_code: {'description': docstring},
                                                                       'default': {'description': 'Error payload',
                                                                                   'schema': {'$ref': '#/definitions/APIError'}}},
                                                         'parameters': parameters}}
                                if schema is not None:
                                    route[route_data[0]]['responses'][return_code]['schema'] = schema
                                current_path = route_data[1].format(member[1].prefix)
                                if current_path not in paths:
                                    paths[current_path] = {}
                                paths[current_path].update(route)
                        funs = [fun[1] for fun in inspect.getmembers(cls, predicate=inspect.ismethod)
                                if fun[0] not in base_calls.keys()]
                        for fun in funs:
                            if hasattr(fun, 'bind_to_methods'):
                                routes = {}
                                docstring = fun.__doc__.strip().split('\n')[0]
                                parameters = load_parameters(fun)
                                return_code, schema = load_response(fun)
                                fun_name = fun.__name__  # Use a separate variable; 'name' still holds the module name checked above
                                for verb in fun.bind_to_methods:
                                    routes[verb] = {'summary': docstring,
                                                    'operationId': '{0}.{1}_{2}'.format(member[1].prefix, verb, fun_name),
                                                    'responses': {return_code: {'description': docstring},
                                                                  'default': {'description': 'Error payload',
                                                                              'schema': {'$ref': '#/definitions/APIError'}}},
                                                    'parameters': parameters}
                                    if schema is not None:
                                        routes[verb]['responses'][return_code]['schema'] = schema
                                paths['/{0}/{{guid}}/{1}/'.format(member[1].prefix, fun_name)] = routes

        # DataObject / hybrids
        def build_property(prop):
            _docstring = prop.docstring or prop.name
            _docstring = _docstring.replace('None', 'null').replace('True', 'true').replace('False', 'false')
            info = {'description': _docstring}
            if prop.default is not None:
                info['default'] = prop.default
            if prop.property_type == int:
                info['type'] = 'integer'
            elif prop.property_type == float:
                info['type'] = 'number'
            elif prop.property_type == long:
                info['type'] = 'integer'
            elif prop.property_type == str:
                info['type'] = 'string'
            elif prop.property_type == bool:
                info['type'] = 'boolean'
            elif prop.property_type == list:
                info['type'] = 'array'
            elif prop.property_type == dict:
                info['type'] = 'object'
            elif prop.property_type == set:
                info['type'] = 'array'
            elif isinstance(prop.property_type, list):  # enumerator
                info['type'] = 'string'
                info['enum'] = prop.property_type
            return info

        def build_relation(_cls, relation):
            itemtype = relation.foreign_type.__name__ if relation.foreign_type is not None else _cls.__name__
            _docstring = '{1} instance identifier{3}. One-to-{0} relation with {1}.{2}.'.format(
                'one' if relation.onetoone is True else 'many',
                itemtype,
                ('{0}_guid' if relation.onetoone is True else '{0}_guids').format(relation.foreign_key),
                '' if relation.mandatory is True else ', null if relation is not set'
            )
            info = {'description': _docstring,
                    'type': 'string'}
            return '{0}_guid'.format(relation.name), info

        def build_dynamic(_cls, dynamic):
            _docstring = dynamic.name
            if hasattr(_cls, '_{0}'.format(dynamic.name)):
                docs = getattr(_cls, '_{0}'.format(dynamic.name)).__doc__
                if docs is not None:
                    _docstring = docs.strip().split('\n')[0]
                    _docstring = _docstring.replace('None', 'null').replace('True', 'true').replace('False', 'false')
            _docstring = '{0} (dynamic property, cache timeout: {1}s)'.format(_docstring, dynamic.timeout)
            info = {'description': _docstring,
                    'readOnly': True}
            if dynamic.return_type == int:
                info['type'] = 'integer'
            elif dynamic.return_type == float:
                info['type'] = 'number'
            elif dynamic.return_type == long:
                info['type'] = 'integer'
            elif dynamic.return_type == str:
                info['type'] = 'string'
            elif dynamic.return_type == bool:
                info['type'] = 'boolean'
            elif dynamic.return_type == list:
                info['type'] = 'array'
            elif dynamic.return_type == dict:
                info['type'] = 'object'
            elif dynamic.return_type == set:
                info['type'] = 'array'
            elif isinstance(dynamic.return_type, list):  # enumerator
                info['type'] = 'string'
                info['enum'] = dynamic.return_type
            return info

        def build_remote_relation(relation):
            key, relation_info = relation
            remote_cls = Descriptor().load(relation_info['class']).get_object()
            _docstring = '{1} instance identifier{3}. One-to-{0} relation with {1}.{2}.'.format(
                'one' if relation_info['list'] is False else 'many',
                remote_cls.__name__,
                '{0}_guid'.format(relation_info['key']),
                '' if relation_info['list'] is False else 's'
            )
            info = {'description': _docstring,
                    'readOnly': True}
            if relation_info['list'] is True:
                info['type'] = 'array'
                info['items'] = {'type': 'string'}
                _name = '{0}_guids'.format(key)
            else:
                info['type'] = 'string'
                _name = '{0}_guid'.format(key)
            return _name, info

        def get_properties(_cls):
            properties = {}
            properties.update({prop.name: build_property(prop) for prop in _cls._properties})
            properties.update(dict(build_relation(_cls, relation) for relation in _cls._relations))
            properties.update({dynamic.name: build_dynamic(_cls, dynamic) for dynamic in _cls._dynamics})
            relation_info = RelationMapper.load_foreign_relations(_cls)
            if relation_info is not None:
                properties.update(dict(build_remote_relation(relation) for relation in relation_info.iteritems()))
            return properties
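
        # For illustration only, with hypothetical names: a hybrid with one plain
        # property, one relation and one dynamic would yield something like
        #     {'name': {'description': 'Name of the disk.', 'type': 'string'},
        #      'machine_guid': {'description': 'Machine instance identifier. One-to-many relation with Machine.disks_guids.',
        #                       'type': 'string'},
        #      'used_size': {'description': 'Used size (dynamic property, cache timeout: 5s)',
        #                    'readOnly': True, 'type': 'integer'}}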

        def get_required_properties(_cls):
            required = []
            for prop in _cls._properties:
                if prop.mandatory is True:
                    required.append(prop.name)
            for relation in _cls._relations:
                if relation.mandatory is True:
                    required.append('{0}_guid'.format(relation.name))
            return required

        definitions = data['definitions']
        definitions['DataObject'] = {'type': 'object',
                                     'title': 'DataObject',
                                     'description': 'Root object inherited by all hybrid objects. Shall not be used directly.',
                                     'properties': {'guid': {'type': 'string',
                                                             'description': 'Identifier of the object.'}},
                                     'required': ['guid']}
        hybrid_structure = HybridRunner.get_hybrids()
        for class_descriptor in hybrid_structure.values():
            cls = Descriptor().load(class_descriptor).get_object()
            definitions[cls.__name__] = {'description': cls.__doc__.strip().split('\n')[0],
                                         'allOf': [{'$ref': '#/definitions/DataObject'},
                                                   {'type': 'object',
                                                    'properties': get_properties(cls),
                                                    'required': get_required_properties(cls)}]}
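
        # For illustration only: the entry generated for a hypothetical 'Disk'
        # hybrid has the shape
        #     {'description': 'First line of the Disk docstring.',
        #      'allOf': [{'$ref': '#/definitions/DataObject'},
        #                {'type': 'object',
        #                 'properties': {...},  # as assembled by get_properties
        #                 'required': ['name']}]}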

        return data
Exemple #32
    def _load(self):
        """
        Tries to load the result for the given key from the volatile cache, or executes the query
        if not yet available. Afterwards (if a key is given), the result will be (re)cached
        """
        self.data = self._volatile.get(self._key) if self._key is not None else None
        if self.data is None:
            # The query should be a dictionary:
            #     {'object': Disk,  # Object on which the query should be executed
            #      'data'  : DataList.select.XYZ,  # The requested result
            #      'query' : <query>}  # The actual query
            # Where <query> is a query(group) dictionary:
            #     {'type' : DataList.where_operator.ABC,  # How the items should be combined (AND/OR)
            #      'items': <items>}  # The items in the group
            # Where <items> is any combination of one or more <filter> or <query>
            # A <filter> tuple example:
            #     (<field>, DataList.operator.GHI, <value>)  # For example EQUALS
            # The <field> is any property you would also find on the given object; you can
            # dot through relations as deep as you like. Since a <query> group can contain
            # nested <query> groups, AND and OR can be combined in any possible combination.
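            #
            # A hypothetical example, for illustration only (Disk and the fields
            # used here are assumptions):
            #     {'object': Disk,
            #      'data'  : DataList.select.GUIDS,
            #      'query' : {'type' : DataList.where_operator.AND,
            #                 'items': [('size', DataList.operator.GT, 100),
            #                           {'type' : DataList.where_operator.OR,
            #                            'items': [('name', DataList.operator.EQUALS, 'disk1'),
            #                                      ('name', DataList.operator.EQUALS, 'disk2')]}]}}
            # This would match disks with size > 100 whose name is 'disk1' or 'disk2'.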

            Toolbox.log_cache_hit('datalist', False)
            hybrid_structure = HybridRunner.get_hybrids()

            items = self._query['query']['items']
            query_type = self._query['query']['type']
            query_data = self._query['data']
            query_object = self._query['object']
            query_object_id = Descriptor(query_object).descriptor['identifier']
            if query_object_id in hybrid_structure and query_object_id != hybrid_structure[query_object_id]['identifier']:
                query_object = Descriptor().load(hybrid_structure[query_object_id]).get_object()

            invalidations = {query_object.__name__.lower(): ['__all']}
            DataList._build_invalidations(invalidations, query_object, items)

            for class_name in invalidations:
                key = '{0}_{1}'.format(DataList.cachelink, class_name)
                mutex = VolatileMutex('listcache_{0}'.format(class_name))
                try:
                    mutex.acquire(60)
                    cache_list = Toolbox.try_get(key, {})
                    current_fields = cache_list.get(self._key, [])
                    current_fields = list(set(current_fields + ['__all'] + invalidations[class_name]))
                    cache_list[self._key] = current_fields
                    self._volatile.set(key, cache_list)
                    self._persistent.set(key, cache_list)
                finally:
                    mutex.release()

            self.from_cache = False
            namespace = query_object()._namespace
            name = query_object.__name__.lower()
            guids = DataList.get_pks(namespace, name)

            if query_data == DataList.select.COUNT:
                self.data = 0
            else:
                self.data = []

            for guid in guids:
                try:
                    instance = query_object(guid)
                    if query_type == DataList.where_operator.AND:
                        include = self._exec_and(instance, items)
                    elif query_type == DataList.where_operator.OR:
                        include = self._exec_or(instance, items)
                    else:
                        raise NotImplementedError('The given operator is not yet implemented.')
                    if include:
                        if query_data == DataList.select.COUNT:
                            self.data += 1
                        elif query_data == DataList.select.GUIDS:
                            self.data.append(guid)
                        else:
                            raise NotImplementedError('The given selector type is not implemented')
                except ObjectNotFoundException:
                    pass

            if self._post_query_hook is not None:
                self._post_query_hook(self)

            if self._key is not None and len(guids) > 0 and self._can_cache:
                invalidated = False
                for class_name in invalidations:
                    key = '{0}_{1}'.format(DataList.cachelink, class_name)
                    cache_list = Toolbox.try_get(key, {})
                    if self._key not in cache_list:
                        invalidated = True
                # If the key under which the list should be saved was invalidated after the
                # invalidations were stored, the returned list is most likely outdated. That
                # is acceptable for this one result, but the list won't be cached.
                if invalidated is False:
                    self._volatile.set(self._key, self.data, 300 + randint(0, 300))  # Cache between 5 and 10 minutes
        else:
            Toolbox.log_cache_hit('datalist', True)
            self.from_cache = True
        return self
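
A minimal usage sketch tying the query format and _load together; the DataList constructor signature, the module paths and the Disk hybrid are assumptions for illustration:

    from ovs.dal.datalist import DataList
    from ovs.dal.hybrids.disk import Disk

    disk_guids = DataList({'object': Disk,
                           'data': DataList.select.GUIDS,
                           'query': {'type': DataList.where_operator.AND,
                                     'items': [('size', DataList.operator.GT, 100)]}})._load().data
    # _load() returns the DataList itself; .data then holds the matching guids
    # (or an integer when DataList.select.COUNT was requested)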
Exemple #33
    def __init__(self, endpoint, username='******', password=None):
        """
        Initializes an SSHClient
        """
        from subprocess import check_output
        from ovs.dal.hybrids.storagerouter import StorageRouter
        storagerouter = None
        if isinstance(endpoint, basestring):
            ip = endpoint
            if not re.findall(SSHClient.IP_REGEX, ip):
                raise ValueError('Incorrect IP {0} specified'.format(ip))
        elif Descriptor.isinstance(endpoint, StorageRouter):
            # Refresh the object before checking its attributes
            storagerouter = StorageRouter(endpoint.guid)
            ip = storagerouter.ip
        else:
            raise ValueError('The endpoint parameter should be either an ip address or a StorageRouter')

        self.ip = ip
        self._client = None
        self.local_ips = [
            lip.strip() for lip in check_output(
                "ip a | grep 'inet ' | sed 's/\s\s*/ /g' | cut -d ' ' -f 3 | cut -d '/' -f 1",
                shell=True).strip().splitlines()
        ]
        self.is_local = self.ip in self.local_ips
        self.password = password
        self._unittest_mode = os.environ.get('RUNNING_UNITTESTS') == 'True'

        if self.is_local is False and storagerouter is not None and self._unittest_mode is False:
            process_heartbeat = storagerouter.heartbeats.get('process')
            if process_heartbeat is not None:
                if time.time() - process_heartbeat > 300:
                    message = 'StorageRouter {0} process heartbeat > 300s'.format(ip)
                    SSHClient._logger.error(message)
                    raise UnableToConnectException(message)

        current_user = check_output('whoami', shell=True).strip()
        if username is None:
            self.username = current_user
        else:
            self.username = username
            if username != current_user:
                self.is_local = False  # If specified user differs from current executing user, we always use the paramiko SSHClient

        if self._unittest_mode is True:
            self.is_local = True

        if not self.is_local:
            logging.getLogger('paramiko').setLevel(logging.WARNING)
            key = '{0}@{1}'.format(self.ip, self.username)
            if key not in SSHClient.client_cache:
                import paramiko
                client = paramiko.SSHClient()
                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                client.is_connected = types.MethodType(is_connected, client)
                SSHClient.client_cache[key] = client
            self._client = SSHClient.client_cache[key]
        self._connect()
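
A minimal usage sketch; the module path, endpoint and credentials are assumptions for illustration:

    from ovs.extensions.generic.sshclient import SSHClient

    client = SSHClient('10.100.10.2', username='root', password='rooter')
    # For a local IP (or in unit tests) commands run through subprocess; for a
    # remote IP a paramiko client is created and cached per ip/username pair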