Example #1
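Pushes the current RabbitMQ protocol, credentials and endpoints into the event-publisher settings of every storagedriver registered under /ovs/vpools.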
    def _configure_amqp_to_volumedriver():
        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Update existing vPools')
        login = Configuration.get('/ovs/framework/messagequeue|user')
        password = Configuration.get('/ovs/framework/messagequeue|password')
        protocol = Configuration.get('/ovs/framework/messagequeue|protocol')

        uris = []
        for endpoint in Configuration.get(
                '/ovs/framework/messagequeue|endpoints'):
            uris.append({
                'amqp_uri':
                '{0}://{1}:{2}@{3}'.format(protocol, login, password, endpoint)
            })

        if Configuration.dir_exists('/ovs/vpools'):
            for vpool_guid in Configuration.list('/ovs/vpools'):
                for storagedriver_id in Configuration.list(
                        '/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                    storagedriver_config = StorageDriverConfiguration(
                        vpool_guid, storagedriver_id)
                    storagedriver_config.configure_event_publisher(
                        events_amqp_routing_key=Configuration.get(
                            '/ovs/framework/messagequeue|queues.storagedriver'
                        ),
                        events_amqp_uris=uris)
                    storagedriver_config.save()
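Most of the examples on this page share the same guard-then-walk pattern: check dir_exists() before list() so a missing subtree does not raise. A minimal sketch, assuming only the Configuration client imported in the snippets below; iter_storagedriver_ids is an invented helper name, not part of the framework:

    from ovs.extensions.generic.configuration import Configuration

    def iter_storagedriver_ids():
        # Yield (vpool_guid, storagedriver_id) for every registered host.
        # dir_exists() guards the walk so a fresh setup without vPools is a no-op.
        if not Configuration.dir_exists('/ovs/vpools'):
            return
        for vpool_guid in Configuration.list('/ovs/vpools'):
            hosts_path = '/ovs/vpools/{0}/hosts'.format(vpool_guid)
            for storagedriver_id in Configuration.list(hosts_path):
                yield vpool_guid, storagedriver_id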
Example #2
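Discovers ASD and S3 nodes registered in configuration management and models any that are not yet known, returning them keyed by guid.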
    def discover_nodes(cls):
        # type: () -> Dict[str, AlbaNode]
        """
        Discover nodes by querying the config mgmt
        :return: The discovered nodes, mapped by their guid
        :rtype: Dict[str, AlbaNode]
        """
        nodes = {}
        model_node_ids = set(node.node_id
                             for node in AlbaNodeList.get_albanodes())
        found_node_ids = set()
        node_ids_by_type = {}
        for node_type, base_config_path in {
                AlbaNode.NODE_TYPES.ASD: ASD_NODE_BASE_PATH,
                AlbaNode.NODE_TYPES.S3: S3_NODE_BASE_PATH
        }.iteritems():
            if Configuration.dir_exists(base_config_path):
                node_ids = Configuration.list(base_config_path)
                node_ids_by_type[node_type] = node_ids

        for node_type, node_ids in node_ids_by_type.iteritems():
            for node_id in node_ids:
                if node_id not in model_node_ids and node_id not in found_node_ids:
                    node = cls.model_volatile_node(node_id, node_type)
                    nodes[node.guid] = node
                    found_node_ids.add(node.node_id)
        return nodes
Example #3
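Scans the Arakoon configuration root for an externally managed, unused cluster of the requested type and claims it by flipping its in_use flag, optionally while holding a mutex.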
    def get_unused_arakoon_metadata_and_claim(cluster_type, locked=True):
        """
        Retrieve arakoon cluster information based on its type
        :param cluster_type: Type of arakoon cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str
        :param locked: Execute this in a locked context
        :type locked: bool
        :return: Metadata of the arakoon cluster
        :rtype: dict
        """
        cluster_type = cluster_type.upper()
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Unsupported arakoon cluster type provided. Please choose from {0}'.format(', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))
        if not Configuration.dir_exists(ArakoonInstaller.CONFIG_ROOT):
            return None

        mutex = volatile_mutex('claim_arakoon_metadata', wait=10)
        try:
            if locked is True:
                mutex.acquire()

            for cluster_name in Configuration.list(ArakoonInstaller.CONFIG_ROOT):
                config = ArakoonClusterConfig(cluster_id=cluster_name, filesystem=False)
                config.load_config()
                arakoon_client = ArakoonInstaller.build_client(config)
                if arakoon_client.exists(ArakoonInstaller.METADATA_KEY):
                    metadata = json.loads(arakoon_client.get(ArakoonInstaller.METADATA_KEY))
                    if metadata['cluster_type'] == cluster_type and metadata['in_use'] is False and metadata['internal'] is False:
                        metadata['in_use'] = True
                        arakoon_client.set(ArakoonInstaller.METADATA_KEY, json.dumps(metadata, indent=4))
                        return metadata
        finally:
            if locked is True:
                mutex.release()
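A possible call site for the claim helper above, assuming it is exposed on ArakoonInstaller as the surrounding calls suggest; note that it returns None both when the config root is missing and when nothing matched:

    # Illustrative only: 'FWK' stands in for one of ServiceType.ARAKOON_CLUSTER_TYPES.
    metadata = ArakoonInstaller.get_unused_arakoon_metadata_and_claim(cluster_type='FWK')
    if metadata is None:
        # Either no clusters are registered yet, or every external cluster
        # of this type is already in use.
        pass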
Example #4
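Re-points the volume registry and the distributed lock store of every storagedriver under /ovs/vpools at the nodes of the given Arakoon cluster.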
 def _configure_arakoon_to_volumedriver(cluster_name):
     StorageDriverController._logger.info('Update existing vPools')
     config = ArakoonClusterConfig(cluster_id=cluster_name)
     arakoon_nodes = []
     for node in config.nodes:
         arakoon_nodes.append({
             'host': node.ip,
             'port': node.client_port,
             'node_id': node.name
         })
     if Configuration.dir_exists('/ovs/vpools'):
         for vpool_guid in Configuration.list('/ovs/vpools'):
             for storagedriver_id in Configuration.list(
                     '/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                 storagedriver_config = StorageDriverConfiguration(
                     'storagedriver', vpool_guid, storagedriver_id)
                 storagedriver_config.load()
                 storagedriver_config.configure_volume_registry(
                     vregistry_arakoon_cluster_id=cluster_name,
                     vregistry_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.configure_distributed_lock_store(
                     dls_type='Arakoon',
                     dls_arakoon_cluster_id=cluster_name,
                     dls_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.save()
Example #5
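Reverts a vPool create or extend: restores the status, cleans up created directories and storagedriver model objects, and removes the vPool and its configuration tree once no storagedrivers remain.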
    def _revert_vpool_status(vpool,
                             status=VPool.STATUSES.RUNNING,
                             storagedriver=None,
                             client=None,
                             dirs_created=None):
        """
        Remove the vPool being created or revert the vPool being extended
        :return: None
        :rtype: NoneType
        """
        vpool.status = status
        vpool.save()

        if status == VPool.STATUSES.RUNNING:
            if dirs_created:
                try:
                    client.dir_delete(directories=dirs_created)
                except Exception:
                    StorageRouterController._logger.warning(
                        'Failed to clean up following directories: {0}'.format(
                            ', '.join(dirs_created)))

            if storagedriver is not None:
                for sdp in storagedriver.partitions:
                    sdp.delete()
                for proxy in storagedriver.alba_proxies:
                    proxy.delete()
                storagedriver.delete()
            if len(vpool.storagedrivers) == 0:
                vpool.delete()
                if Configuration.dir_exists(
                        key='/ovs/vpools/{0}'.format(vpool.guid)):
                    Configuration.delete(
                        key='/ovs/vpools/{0}'.format(vpool.guid))
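Example #6
Removes an ALBA node: its OSDs and disks, its maintenance services (connection errors are only logged), its /ovs/alba/asdnodes configuration subtree and finally the model object itself.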
    def remove_node(node_guid):
        """
        Removes an ALBA node
        :param node_guid: Guid of the ALBA node to remove
        :type node_guid: str
        :return: None
        """
        node = AlbaNode(node_guid)
        for disk in node.disks:
            for osd in disk.osds:
                AlbaNodeController.remove_asd(node_guid=osd.alba_disk.alba_node_guid, asd_id=osd.osd_id, expected_safety=None)
            AlbaNodeController.remove_disk(node_guid=disk.alba_node_guid, device_alias=disk.aliases[0])

        try:
            for service_name in node.client.list_maintenance_services():
                node.client.remove_maintenance_service(service_name)
        except (requests.ConnectionError, requests.Timeout):
            AlbaNodeController._logger.exception('Could not connect to node {0} to retrieve the maintenance services'.format(node.guid))
        except InvalidCredentialsError:
            AlbaNodeController._logger.warning('Failed to retrieve the maintenance services for ALBA node {0}'.format(node.node_id))

        if Configuration.dir_exists('/ovs/alba/asdnodes/{0}'.format(node.node_id)):
            Configuration.delete('/ovs/alba/asdnodes/{0}'.format(node.node_id))

        node.delete()
Example #7
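Syncs every ASD node listed under /ovs/alba/asdnodes into the model, creating an AlbaNode where none exists yet and linking it to the StorageRouter with the same IP.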
 def model_albanodes(**kwargs):
     """
     Add all ALBA nodes known to the config platform to the model
     :param kwargs: Kwargs containing information regarding the node
     :type kwargs: dict
     :return: None
     :rtype: NoneType
     """
     _ = kwargs
     if Configuration.dir_exists('/ovs/alba/asdnodes'):
         for node_id in Configuration.list('/ovs/alba/asdnodes'):
             node = AlbaNodeList.get_albanode_by_node_id(node_id)
             if node is None:
                 node = AlbaNode()
             main_config = Configuration.get(
                 '/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
             node.type = 'ASD'
             node.node_id = node_id
             node.ip = main_config['ip']
             node.port = main_config['port']
             node.username = main_config['username']
             node.password = main_config['password']
             node.storagerouter = StorageRouterList.get_by_ip(
                 main_config['ip'])
             node.save()
Example #8
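Collects the client and messaging ports already claimed by Arakoon nodes on this machine and asks System.get_free_ports() for two free ports outside that set.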
    def _get_free_ports(client):
        node_name = System.get_my_machine_id(client)
        clusters = []
        exclude_ports = []
        if Configuration.dir_exists(ArakoonInstaller.CONFIG_ROOT):
            for cluster_name in Configuration.list(ArakoonInstaller.CONFIG_ROOT):
                config = ArakoonClusterConfig(cluster_name, False)
                config.load_config()
                for node in config.nodes:
                    if node.name == node_name:
                        clusters.append(cluster_name)
                        exclude_ports.append(node.client_port)
                        exclude_ports.append(node.messaging_port)

        ports = System.get_free_ports(Configuration.get('/ovs/framework/hosts/{0}/ports|arakoon'.format(node_name)), exclude_ports, 2, client)
        ArakoonInstaller._logger.debug('  Loaded free ports {0} based on existing clusters {1}'.format(ports, clusters))
        return ports
Example #9
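Recursively walks the configuration tree from /ovs and re-saves every leaf so it is rewritten with the new indentation logic, falling back to raw mode for values that do not parse.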
 def _resave_all_config_entries(config_path='/ovs'):
     """
     Recursive function which checks whether each config management key is a directory.
     If it is not a directory, the config is retrieved and saved again using the new indentation logic.
     """
     for item in Configuration.list(config_path):
         new_path = config_path + '/' + item
         print new_path
         if Configuration.dir_exists(new_path) is True:
             _resave_all_config_entries(config_path=new_path)
         else:
             try:
                 _config = Configuration.get(new_path)
                 Configuration.set(new_path, _config)
             except:
                 _config = Configuration.get(new_path, raw=True)
                 Configuration.set(new_path, _config, raw=True)
Example #10
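A variant of Example #1 that uses the three-argument StorageDriverConfiguration constructor and an explicit load() before configuring the event publisher.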
    def _configure_amqp_to_volumedriver():
        Toolbox.log(logger=NodeTypeController._logger, messages='Update existing vPools')
        login = Configuration.get('/ovs/framework/messagequeue|user')
        password = Configuration.get('/ovs/framework/messagequeue|password')
        protocol = Configuration.get('/ovs/framework/messagequeue|protocol')

        uris = []
        for endpoint in Configuration.get('/ovs/framework/messagequeue|endpoints'):
            uris.append({'amqp_uri': '{0}://{1}:{2}@{3}'.format(protocol, login, password, endpoint)})

        if Configuration.dir_exists('/ovs/vpools'):
            for vpool_guid in Configuration.list('/ovs/vpools'):
                for storagedriver_id in Configuration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                    storagedriver_config = StorageDriverConfiguration('storagedriver', vpool_guid, storagedriver_id)
                    storagedriver_config.load()
                    storagedriver_config.configure_event_publisher(events_amqp_routing_key=Configuration.get('/ovs/framework/messagequeue|queues.storagedriver'),
                                                                   events_amqp_uris=uris)
                    storagedriver_config.save()
Example #11
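The same claim logic as Example #3, only wrapped differently.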
    def get_unused_arakoon_metadata_and_claim(cluster_type, locked=True):
        """
        Retrieve arakoon cluster information based on its type
        :param cluster_type: Type of arakoon cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str
        :param locked: Execute this in a locked context
        :type locked: bool
        :return: Metadata of the arakoon cluster
        :rtype: dict
        """
        cluster_type = cluster_type.upper()
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError(
                'Unsupported arakoon cluster type provided. Please choose from {0}'
                .format(', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))
        if not Configuration.dir_exists(ArakoonInstaller.CONFIG_ROOT):
            return None

        mutex = volatile_mutex('claim_arakoon_metadata', wait=10)
        try:
            if locked is True:
                mutex.acquire()

            for cluster_name in Configuration.list(
                    ArakoonInstaller.CONFIG_ROOT):
                config = ArakoonClusterConfig(cluster_id=cluster_name,
                                              filesystem=False)
                config.load_config()
                arakoon_client = ArakoonInstaller.build_client(config)
                if arakoon_client.exists(ArakoonInstaller.METADATA_KEY):
                    metadata = json.loads(
                        arakoon_client.get(ArakoonInstaller.METADATA_KEY))
                    if (metadata['cluster_type'] == cluster_type
                            and metadata['in_use'] is False
                            and metadata['internal'] is False):
                        metadata['in_use'] = True
                        arakoon_client.set(ArakoonInstaller.METADATA_KEY,
                                           json.dumps(metadata, indent=4))
                        return metadata
        finally:
            if locked is True:
                mutex.release()
Example #12
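A variant of Example #4 that constructs the ArakoonClusterConfig with filesystem=False, loads it explicitly and saves with reload_config=True.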
 def _configure_arakoon_to_volumedriver(cluster_name):
     StorageDriverController._logger.info('Update existing vPools')
     config = ArakoonClusterConfig(cluster_id=cluster_name, filesystem=False)
     config.load_config()
     arakoon_nodes = []
     for node in config.nodes:
         arakoon_nodes.append({'host': node.ip,
                               'port': node.client_port,
                               'node_id': node.name})
     if Configuration.dir_exists('/ovs/vpools'):
         for vpool_guid in Configuration.list('/ovs/vpools'):
             for storagedriver_id in Configuration.list('/ovs/vpools/{0}/hosts'.format(vpool_guid)):
                 storagedriver_config = StorageDriverConfiguration('storagedriver', vpool_guid, storagedriver_id)
                 storagedriver_config.load()
                 storagedriver_config.configure_volume_registry(vregistry_arakoon_cluster_id=cluster_name,
                                                                vregistry_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.configure_distributed_lock_store(dls_type='Arakoon',
                                                                       dls_arakoon_cluster_id=cluster_name,
                                                                       dls_arakoon_cluster_nodes=arakoon_nodes)
                 storagedriver_config.save(reload_config=True)
Example #13
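The same model sync as Example #7.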
 def model_albanodes(**kwargs):
     """
     Add all ALBA nodes known to the config platform to the model
     :param kwargs: Kwargs containing information regarding the node
     :type kwargs: dict
     :return: None
     """
     _ = kwargs
     if Configuration.dir_exists('/ovs/alba/asdnodes'):
         for node_id in Configuration.list('/ovs/alba/asdnodes'):
             node = AlbaNodeList.get_albanode_by_node_id(node_id)
             if node is None:
                 node = AlbaNode()
             main_config = Configuration.get('/ovs/alba/asdnodes/{0}/config/main'.format(node_id))
             node.type = 'ASD'
             node.node_id = node_id
             node.ip = main_config['ip']
             node.port = main_config['port']
             node.username = main_config['username']
             node.password = main_config['password']
             node.storagerouter = StorageRouterList.get_by_ip(main_config['ip'])
             node.save()
Example #14
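The same free-port lookup as Example #8.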
    def _get_free_ports(client):
        node_name = System.get_my_machine_id(client)
        clusters = []
        exclude_ports = []
        if Configuration.dir_exists(ArakoonInstaller.CONFIG_ROOT):
            for cluster_name in Configuration.list(
                    ArakoonInstaller.CONFIG_ROOT):
                config = ArakoonClusterConfig(cluster_name, False)
                config.load_config()
                for node in config.nodes:
                    if node.name == node_name:
                        clusters.append(cluster_name)
                        exclude_ports.append(node.client_port)
                        exclude_ports.append(node.messaging_port)

        ports = System.get_free_ports(
            Configuration.get(
                '/ovs/framework/hosts/{0}/ports|arakoon'.format(node_name)),
            exclude_ports, 2, client)
        ArakoonInstaller._logger.debug(
            '  Loaded free ports {0} based on existing clusters {1}'.format(
                ports, clusters))
        return ports
Example #15
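An installer-based variant of Example #5 that additionally reverts the cluster registry settings when the vPool is in FAILURE status.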
    def revert_vpool(self, status):
        """
        Remove the vPool being created or revert the vPool being extended
        :param status: Status to put the vPool in
        :type status: ovs.dal.hybrids.vpool.VPool.STATUSES
        :return: None
        :rtype: NoneType
        """
        self.vpool.status = status
        self.vpool.save()

        if status == VPool.STATUSES.RUNNING:
            if self.sr_installer is not None:
                try:
                    self.sr_installer.root_client.dir_delete(
                        directories=self.sr_installer.created_dirs)
                except Exception:
                    self._logger.warning(
                        'Failed to clean up following directories: {0}'.format(
                            ', '.join(self.sr_installer.created_dirs)))

            if self.sd_installer is not None and self.sd_installer.storagedriver is not None:
                for sdp in self.sd_installer.storagedriver.partitions:
                    sdp.delete()
                for proxy in self.sd_installer.storagedriver.alba_proxies:
                    proxy.delete()
                self.sd_installer.storagedriver.delete()
            if len(self.vpool.storagedrivers) == 0:
                self.vpool.delete()
                if Configuration.dir_exists(
                        key='/ovs/vpools/{0}'.format(self.vpool.guid)):
                    Configuration.delete(
                        key='/ovs/vpools/{0}'.format(self.vpool.guid))
        elif status == VPool.STATUSES.FAILURE:
            # In case of failure status the cluster registry settings have already been adapted, so revert
            self.configure_cluster_registry(
                exclude=[self.sd_installer.storagedriver])
Example #16
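Creates a new Arakoon cluster: checks for name collisions on the filesystem or in configuration management, cleans leftover data, reserves two free ports under a mutex and deploys the configuration.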
    def create_cluster(cluster_name,
                       cluster_type,
                       ip,
                       base_dir,
                       plugins=None,
                       locked=True,
                       internal=True,
                       filesystem=False,
                       ports=None):
        """
        Always creates a cluster but marks its usage according to the internal flag
        :param cluster_name: Name of the cluster
        :type cluster_name: str
        :param cluster_type: Type of the cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str
        :param ip: IP address of the first node of the new cluster
        :type ip: str
        :param base_dir: Base directory that should contain the data and tlogs
        :type base_dir: str
        :param plugins: Plugins that should be added to the configuration file
        :type plugins: list
        :param locked: Indicates whether the create should run in a locked context (e.g. to prevent port conflicts)
        :type locked: bool
        :param internal: Is cluster internally managed by OVS
        :type internal: bool
        :param filesystem: Indicates whether the configuration should be on the filesystem or in a configuration cluster
        :type filesystem: bool
        :param ports: A list of ports to be used for this cluster's node
        :type ports: list
        :return: Ports used by arakoon cluster
        :rtype: dict
        """
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Cluster type {0} is not supported. Please choose from {1}'.format(
                cluster_type, ', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))

        client = SSHClient(ip, username=ArakoonInstaller.SSHCLIENT_USER)
        if filesystem is True:
            exists = client.file_exists(
                ArakoonClusterConfig.CONFIG_FILE.format(cluster_name))
        else:
            exists = Configuration.dir_exists(
                '/ovs/arakoon/{0}'.format(cluster_name))
        if exists is True:
            raise ValueError(
                'An Arakoon cluster with name "{0}" already exists'.format(
                    cluster_name))

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1}'.format(
            cluster_name, ip))

        node_name = System.get_my_machine_id(client)
        base_dir = base_dir.rstrip('/')
        home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(
            base_dir, cluster_name)
        tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(
            base_dir, cluster_name)
        ArakoonInstaller.clean_leftover_arakoon_data(ip, [home_dir, tlog_dir])

        port_mutex = None
        try:
            if locked is True:
                from ovs.extensions.generic.volatilemutex import volatile_mutex
                port_mutex = volatile_mutex(
                    'arakoon_install_ports_{0}'.format(ip))
                port_mutex.acquire(wait=60)
            if ports is None:
                ports = ArakoonInstaller._get_free_ports(client)
            config = ArakoonClusterConfig(cluster_name, filesystem, plugins)
            config.nodes.append(
                ArakoonNodeConfig(
                    name=node_name,
                    ip=ip,
                    client_port=ports[0],
                    messaging_port=ports[1],
                    log_sinks=LogHandler.get_sink_path('arakoon_server'),
                    crash_log_sinks=LogHandler.get_sink_path(
                        'arakoon_server_crash'),
                    home=home_dir,
                    tlog_dir=tlog_dir))
            metadata = {
                'internal': internal,
                'cluster_name': cluster_name,
                'cluster_type': cluster_type.upper(),
                'in_use': False
            }
            ArakoonInstaller._deploy(config, filesystem=filesystem)
        finally:
            if port_mutex is not None:
                port_mutex.release()

        ArakoonInstaller._logger.debug(
            'Creating cluster {0} on {1} completed'.format(cluster_name, ip))
        return {
            'metadata': metadata,
            'client_port': ports[0],
            'messaging_port': ports[1]
        }
Example #17
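A variant of Example #16 where plugins is a dict and _deploy() also registers the service, whose metadata is returned alongside the ports.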
    def create_cluster(cluster_name, cluster_type, ip, base_dir, plugins=None, locked=True, internal=True, filesystem=False, ports=None):
        """
        Always creates a cluster but marks its usage according to the internal flag
        :param cluster_name: Name of the cluster
        :type cluster_name: str
        :param cluster_type: Type of the cluster (See ServiceType.ARAKOON_CLUSTER_TYPES)
        :type cluster_type: str
        :param ip: IP address of the first node of the new cluster
        :type ip: str
        :param base_dir: Base directory that should contain the data and tlogs
        :type base_dir: str
        :param plugins: Plugins that should be added to the configuration file
        :type plugins: dict
        :param locked: Indicates whether the create should run in a locked context (e.g. to prevent port conflicts)
        :type locked: bool
        :param internal: Is cluster internally managed by OVS
        :type internal: bool
        :param filesystem: Indicates whether the configuration should be on the filesystem or in a configuration cluster
        :type filesystem: bool
        :param ports: A list of ports to be used for this cluster's node
        :type ports: list
        :return: Ports used by arakoon cluster
        :rtype: dict
        """
        if cluster_type not in ServiceType.ARAKOON_CLUSTER_TYPES:
            raise ValueError('Cluster type {0} is not supported. Please choose from {1}'.format(cluster_type, ', '.join(ServiceType.ARAKOON_CLUSTER_TYPES)))

        client = SSHClient(ip, username=ArakoonInstaller.SSHCLIENT_USER)
        if filesystem is True:
            exists = client.file_exists(ArakoonClusterConfig.CONFIG_FILE.format(cluster_name))
        else:
            exists = Configuration.dir_exists('/ovs/arakoon/{0}'.format(cluster_name))
        if exists is True:
            raise ValueError('An Arakoon cluster with name "{0}" already exists'.format(cluster_name))

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))

        node_name = System.get_my_machine_id(client)
        base_dir = base_dir.rstrip('/')
        home_dir = ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name)
        tlog_dir = ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)
        ArakoonInstaller.clean_leftover_arakoon_data(ip, [home_dir, tlog_dir])

        port_mutex = None
        try:
            if locked is True:
                from ovs.extensions.generic.volatilemutex import volatile_mutex
                port_mutex = volatile_mutex('arakoon_install_ports_{0}'.format(ip))
                port_mutex.acquire(wait=60)
            if ports is None:
                ports = ArakoonInstaller._get_free_ports(client)
            config = ArakoonClusterConfig(cluster_name, filesystem, plugins.keys() if plugins is not None else None)
            config.nodes.append(ArakoonNodeConfig(name=node_name,
                                                  ip=ip,
                                                  client_port=ports[0],
                                                  messaging_port=ports[1],
                                                  log_sinks=LogHandler.get_sink_path('arakoon_server'),
                                                  crash_log_sinks=LogHandler.get_sink_path('arakoon_server_crash'),
                                                  home=home_dir,
                                                  tlog_dir=tlog_dir))
            metadata = {'internal': internal,
                        'cluster_name': cluster_name,
                        'cluster_type': cluster_type.upper(),
                        'in_use': False}
            service_metadata = ArakoonInstaller._deploy(config=config,
                                                        filesystem=filesystem,
                                                        plugins=plugins.values() if plugins is not None else None,
                                                        delay_service_registration=cluster_type == ServiceType.ARAKOON_CLUSTER_TYPES.CFG)[ip]
        finally:
            if port_mutex is not None:
                port_mutex.release()

        ArakoonInstaller._logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
        return {'metadata': metadata,
                'client_port': ports[0],
                'messaging_port': ports[1],
                'service_metadata': service_metadata}
Example #18
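A framework migration step: removes the obsolete storagedriver config directory, moves the MDS safety settings to the vPool level and introduces the edition key.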
    def migrate(previous_version, master_ips=None, extra_ips=None):
        """
        Migrates from a given version to the current version. It uses 'previous_version' to be smart
        wherever possible, but the code should be able to migrate any version towards the expected version.
        When this is not possible, the code can set a minimum version and raise when it is not met.
        :param previous_version: The previous version from which to start the migration
        :type previous_version: float
        :param master_ips: IP addresses of the MASTER nodes
        :type master_ips: list or None
        :param extra_ips: IP addresses of the EXTRA nodes
        :type extra_ips: list or None
        """

        _ = master_ips, extra_ips
        working_version = previous_version

        # From here on, all actual migration should happen to get to the expected state for THIS RELEASE
        if working_version < ExtensionMigrator.THIS_VERSION:
            try:
                from ovs.dal.lists.storagerouterlist import StorageRouterList
                from ovs.dal.lists.vpoollist import VPoolList
                from ovs.extensions.generic.configuration import Configuration
                from ovs.extensions.services.servicefactory import ServiceFactory
                from ovs.extensions.generic.sshclient import SSHClient
                from ovs.extensions.generic.system import System
                local_machine_id = System.get_my_machine_id()
                local_ip = Configuration.get(
                    '/ovs/framework/hosts/{0}/ip'.format(local_machine_id))
                local_client = SSHClient(endpoint=local_ip, username='******')

                # Multiple Proxies
                if local_client.dir_exists(directory='/opt/OpenvStorage/config/storagedriver/storagedriver'):
                    local_client.dir_delete(directories=['/opt/OpenvStorage/config/storagedriver/storagedriver'])

                # MDS safety granularity on vPool level
                mds_safety_key = '/ovs/framework/storagedriver'
                if Configuration.exists(key=mds_safety_key):
                    current_mds_settings = Configuration.get(
                        key=mds_safety_key)
                    for vpool in VPoolList.get_vpools():
                        vpool_key = '/ovs/vpools/{0}'.format(vpool.guid)
                        if Configuration.dir_exists(key=vpool_key):
                            Configuration.set(
                                key='{0}/mds_config'.format(vpool_key),
                                value=current_mds_settings)
                    Configuration.delete(key=mds_safety_key)

                # Introduction of edition key
                if Configuration.get(key=Configuration.EDITION_KEY, default=None) not in [PackageFactory.EDITION_COMMUNITY,
                                                                                          PackageFactory.EDITION_ENTERPRISE]:
                    for storagerouter in StorageRouterList.get_storagerouters():
                        try:
                            Configuration.set(key=Configuration.EDITION_KEY,
                                              value=storagerouter.features['alba']['edition'])
                            break
                        except:
                            continue

            except:
                ExtensionMigrator._logger.exception(
                    'Error occurred while executing the migration code')
                # Don't update migration version with latest version, resulting in next migration trying again to execute this code
                return ExtensionMigrator.THIS_VERSION - 1

        return ExtensionMigrator.THIS_VERSION
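Example #19
Lists the modeled ALBA nodes or, when discover=True, builds volatile AlbaNode objects from the /ovs/alba/asdnodes configuration tree, optionally restricted to a single IP and node ID.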
    def list(self, discover=False, ip=None, node_id=None):
        """
        Lists all available ALBA Nodes
        :param discover: If True and an IP is provided, return a list containing that single ALBA node; if True and no IP is provided, return all ALBA nodes; otherwise return the modeled ALBA nodes
        :type discover: bool
        :param ip: IP of ALBA node to retrieve
        :type ip: str
        :param node_id: ID of the ALBA node
        :type node_id: str
        """
        if discover is False and (ip is not None or node_id is not None):
            raise HttpNotAcceptableException(error_description='Discover is mutually exclusive with IP and nodeID',
                                             error='invalid_data')
        if (ip is None and node_id is not None) or (ip is not None and node_id is None):
            raise HttpNotAcceptableException(error_description='Both IP and nodeID need to be specified',
                                             error='invalid_data')

        if discover is False:
            return AlbaNodeList.get_albanodes()

        if ip is not None:
            node = AlbaNode(volatile=True)
            node.ip = ip
            node.type = 'ASD'
            node.node_id = node_id
            node.port = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            data = node.client.get_metadata()
            if data['_success'] is False and data['_error'] == 'Invalid credentials':
                raise HttpNotAcceptableException(error_description='Invalid credentials',
                                                 error='invalid_data')
            if data['node_id'] != node_id:
                raise HttpNotAcceptableException(error_description='Unexpected node identifier. {0} vs {1}'.format(data['node_id'], node_id),
                                                 error='invalid_data')
            node_list = DataList(AlbaNode, {})
            node_list._executed = True
            node_list._guids = [node.guid]
            node_list._objects = {node.guid: node}
            node_list._data = {node.guid: {'guid': node.guid, 'data': node._data}}
            return node_list

        nodes = {}
        model_node_ids = [node.node_id for node in AlbaNodeList.get_albanodes()]
        found_node_ids = []
        asd_node_ids = []
        if Configuration.dir_exists('/ovs/alba/asdnodes'):
            asd_node_ids = Configuration.list('/ovs/alba/asdnodes')

        for node_id in asd_node_ids:
            node = AlbaNode(volatile=True)
            node.type = 'ASD'
            node.node_id = node_id
            node.ip = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|ip'.format(node_id))
            node.port = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|port'.format(node_id))
            node.username = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|username'.format(node_id))
            node.password = Configuration.get('/ovs/alba/asdnodes/{0}/config/main|password'.format(node_id))
            if node.node_id not in model_node_ids and node.node_id not in found_node_ids:
                nodes[node.guid] = node
                found_node_ids.append(node.node_id)
        node_list = DataList(AlbaNode, {})
        node_list._executed = True
        node_list._guids = nodes.keys()
        node_list._objects = nodes
        node_list._data = dict([(node.guid, {'guid': node.guid, 'data': node._data}) for node in nodes.values()])
        return node_list
Example #20
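Asserts that a freshly configured ALBA backend without claimed disks has the expected attributes, ABM/NSM services, configuration tree layout, maintenance agents and a reachable Arakoon master.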
    def validate_alba_backend_sanity_without_claimed_disks(alba_backend):
        """
        Validate whether the ALBA backend is configured correctly
        :param alba_backend: ALBA backend
        :return: None
        """
        # Attribute validation
        assert alba_backend.available is True,\
            'ALBA backend {0} is not available'.format(alba_backend.backend.name)
        assert len(alba_backend.presets) >= 1,\
            'No preset found for ALBA backend {0}'.format(alba_backend.backend.name)
        assert len([default for default in alba_backend.presets if default['is_default'] is True]) == 1,\
            'Could not find default preset for backend {0}'.format(alba_backend.backend.name)
        assert alba_backend.backend.backend_type.code == 'alba',\
            'Backend type for ALBA backend is {0}'.format(alba_backend.backend.backend_type.code)
        assert alba_backend.backend.status == 'RUNNING',\
            'Status for ALBA backend is {0}'.format(alba_backend.backend.status)

        # Validate ABM and NSM services
        storagerouters = GeneralStorageRouter.get_storage_routers()
        storagerouters_with_db_role = [sr for sr in storagerouters if GeneralStorageRouter.has_roles(storagerouter=sr, roles='DB') is True and sr.node_type == 'MASTER']

        assert len(alba_backend.abm_services) == len(storagerouters_with_db_role),\
            'Not enough ABM services found'
        assert len(alba_backend.nsm_services) == len(storagerouters_with_db_role),\
            'Not enough NSM services found'

        # Validate ALBA backend configuration structure
        alba_backend_key = '/ovs/alba/backends'
        assert Configuration.dir_exists(key=alba_backend_key) is True,\
            'Configuration does not contain key {0}'.format(alba_backend_key)

        actual_config_keys = [key for key in Configuration.list(alba_backend_key)]
        expected_config_keys = ['global_gui_error_interval', alba_backend.guid, 'default_nsm_hosts']
        optional_config_keys = ['verification_factor']

        expected_keys_amount = 0
        for optional_key in optional_config_keys:
            if optional_key in actual_config_keys:
                expected_keys_amount += 1

        for expected_key in expected_config_keys:
            if not re.match(Toolbox.regex_guid, expected_key):
                expected_keys_amount += 1
            assert expected_key in actual_config_keys,\
                'Key {0} was not found in tree {1}'.format(expected_key, alba_backend_key)

        for actual_key in list(actual_config_keys):
            if re.match(Toolbox.regex_guid, actual_key):
                actual_config_keys.remove(actual_key)  # Remove all alba backend keys
        assert len(actual_config_keys) == expected_keys_amount,\
            'Another key was added to the {0} tree'.format(alba_backend_key)

        this_alba_backend_key = '{0}/{1}'.format(alba_backend_key, alba_backend.guid)
        actual_keys = [key for key in Configuration.list(this_alba_backend_key)]
        expected_keys = ['maintenance']
        assert actual_keys == expected_keys,\
            'Actual keys: {0} - Expected keys: {1}'.format(actual_keys, expected_keys)

        maintenance_key = '{0}/maintenance'.format(this_alba_backend_key)
        actual_keys = [key for key in Configuration.list(maintenance_key)]
        expected_keys = ['nr_of_agents', 'config']
        assert set(actual_keys) == set(expected_keys),\
            'Actual keys: {0} - Expected keys: {1}'.format(actual_keys, expected_keys)
        # @TODO: Add validation for config values

        # Validate ASD node configuration structure
        alba_nodes = GeneralAlba.get_alba_nodes()
        assert len(alba_nodes) > 0,\
            'Could not find any ALBA nodes in the model'
        alba_node_key = '/ovs/alba/asdnodes'
        actual_keys = [key for key in Configuration.list(alba_node_key)]
        assert len(alba_nodes) == len(actual_keys),\
            'Amount of ALBA nodes in model: {0} >< amount of ALBA nodes in configuration: {1}.'.format(len(alba_nodes),
                                                                                                       len(actual_keys))
        for alba_node in alba_nodes:
            assert alba_node.node_id in actual_keys,\
                'ALBA node with ID {0} not present in configuration'.format(alba_node.node_id)

            actual_asdnode_keys = [key for key in Configuration.list('{0}/{1}'.format(alba_node_key, alba_node.node_id))]
            expected_asdnode_keys = ['config', 'services']
            assert actual_asdnode_keys == expected_asdnode_keys,\
                'Actual keys: {0} - Expected keys: {1}'.format(actual_asdnode_keys, expected_asdnode_keys)

            actual_config_keys = [key for key in Configuration.list('{0}/{1}/config'.format(alba_node_key, alba_node.node_id))]
            expected_config_keys = ['main', 'network']
            assert set(actual_config_keys) == set(expected_config_keys),\
                'Actual keys: {0} - Expected keys: {1}'.format(actual_config_keys, expected_config_keys)
            # @TODO: Add validation for main and network values

        # Validate Arakoon configuration structure
        arakoon_abm_key = '/ovs/arakoon/{0}/config'.format(alba_backend.abm_services[0].service.name).replace('arakoon-', '')
        arakoon_nsm_key = '/ovs/arakoon/{0}/config'.format(alba_backend.nsm_services[0].service.name).replace('arakoon-', '')
        assert Configuration.exists(key=arakoon_abm_key, raw=True) is True,\
            'Configuration key {0} does not exist'.format(arakoon_abm_key)
        assert Configuration.exists(key=arakoon_nsm_key, raw=True) is True,\
            'Configuration key {0} does not exist'.format(arakoon_nsm_key)
        # @TODO: Add validation for config values

        # Validate maintenance agents
        actual_amount_agents = len([service for node_services in [alba_node.client.list_maintenance_services() for alba_node in alba_nodes] for service in node_services])
        expected_amount_agents = 1
        assert actual_amount_agents == expected_amount_agents,\
            'Amount of maintenance agents is incorrect. Found {0} - Expected {1}'.format(actual_amount_agents,
                                                                                         expected_amount_agents)

        # Validate arakoon services
        machine_ids = [sr.machine_id for sr in storagerouters_with_db_role]
        abm_service_name = alba_backend.abm_services[0].service.name
        nsm_service_name = alba_backend.nsm_services[0].service.name
        for storagerouter in storagerouters_with_db_role:
            root_client = SSHClient(endpoint=storagerouter, username='******')
            for service_name in [abm_service_name, nsm_service_name]:
                assert GeneralService.has_service(name=service_name, client=root_client) is True,\
                    'Service {0} not deployed on Storage Router {1}'.format(service_name, storagerouter.name)
                exitcode, output = GeneralService.get_service_status(name=service_name, client=root_client)
                assert exitcode is True,\
                    'Service {0} not running on Storage Router {1} - {2}'.format(service_name, storagerouter.name,
                                                                                 output)
                out, err, _ = General.execute_command('arakoon --who-master -config {0}'.format(Configuration.get_configuration_path('/ovs/arakoon/{0}/config'.format(abm_service_name.replace('arakoon-', '')))))
                assert out.strip() in machine_ids,\
                    'Arakoon master is {0}, but should be 1 of "{1}"'.format(out.strip(), ', '.join(machine_ids))