Example #1
    def restart_framework_and_memcache_services(clients, logger, offline_node_ips=None):
        """
        Restart framework and Memcached services
        :param clients: Clients on which to restart these services
        :type clients: dict
        :param logger: Logger object used for logging
        :type logger: ovs.log.log_handler.LogHandler
        :param offline_node_ips: IP addresses of offline nodes in the cluster
        :type offline_node_ips: list
        :return: None
        """
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        master_ips = [sr.ip for sr in StorageRouterList.get_masters()]
        slave_ips = [sr.ip for sr in StorageRouterList.get_slaves()]
        if offline_node_ips is None:
            offline_node_ips = []
        memcached = 'memcached'
        watcher = 'watcher-framework'
        support_agent = 'support-agent'
        for ip in master_ips + slave_ips:
            if ip not in offline_node_ips:
                if ServiceManager.has_service(watcher, clients[ip]):
                    Toolbox.change_service_state(clients[ip], watcher, 'stop', logger)
        for ip in master_ips:
            if ip not in offline_node_ips:
                Toolbox.change_service_state(clients[ip], memcached, 'restart', logger)
        for ip in master_ips + slave_ips:
            if ip not in offline_node_ips:
                if ServiceManager.has_service(watcher, clients[ip]):
                    Toolbox.change_service_state(clients[ip], watcher, 'start', logger)
                if ServiceManager.has_service(support_agent, clients[ip]):
                    Toolbox.change_service_state(clients[ip], support_agent, 'restart', logger)
        VolatileFactory.store = None
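A minimal caller sketch for the snippet above, assuming a standard OVS environment: SSHClient, StorageRouterList and LogHandler are used exactly as in the other examples in this listing, UnableToConnectException is assumed to live next to SSHClient, the logger name is made up, and the username is redacted ('******') just as it is in the source. restart_framework_and_memcache_services is called as a plain function here although it is most likely a static method on a controller class.

    from ovs.dal.lists.storagerouterlist import StorageRouterList
    from ovs.extensions.generic.sshclient import SSHClient, UnableToConnectException
    from ovs.log.log_handler import LogHandler

    logger = LogHandler.get('lib', name='example')  # hypothetical logger name
    clients = {}
    offline_node_ips = []
    # Build an {ip: SSHClient} map for every reachable node; remember the unreachable ones.
    for storagerouter in StorageRouterList.get_masters() + StorageRouterList.get_slaves():
        try:
            clients[storagerouter.ip] = SSHClient(storagerouter.ip, username='******')
        except UnableToConnectException:
            offline_node_ips.append(storagerouter.ip)
    restart_framework_and_memcache_services(clients=clients, logger=logger, offline_node_ips=offline_node_ips)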
Example #2
    def __init__(self, cinder_client):
        self.client = SSHClient('127.0.0.1', username='******')
        self.cinder_client = cinder_client

        self._NOVA_CONF = '/etc/nova/nova.conf'
        self._CINDER_CONF = '/etc/cinder/cinder.conf'
        self._is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), self.client)
        self._nova_installed = self.client.file_exists(self._NOVA_CONF)
        self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
        self._driver_location = OSManager.get_openstack_package_base_path()
        self._openstack_users = OSManager.get_openstack_users()
        self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'

        try:
            self._is_devstack = 'stack' in str(self.client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true'))
        except SystemExit:  # ssh client raises system exit 1
            self._is_devstack = False
        except Exception:
            self._is_devstack = False

        try:
            from cinder import version
            version_string = version.version_string()
            if version_string.startswith('2015.2') or version_string.startswith('7.0'):
                self._stack_version = 'liberty'
            elif version_string.startswith('2015.1'):
                self._stack_version = 'kilo'
            elif version_string.startswith('2014.2'):
                self._stack_version = 'juno'
            else:
                raise ValueError('Unsupported cinder version: {0}'.format(version_string))
        except Exception as ex:
            raise ValueError('Cannot determine cinder version: {0}'.format(ex))
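The version-detection chain above can also be written as a small lookup table. This is only an illustrative rewrite using the exact prefixes and release names from the snippet; nothing here is taken from the project itself.

    # Hypothetical table-driven variant of the cinder version mapping above.
    CINDER_RELEASES = [(('2015.2', '7.0'), 'liberty'),
                       (('2015.1',), 'kilo'),
                       (('2014.2',), 'juno')]

    def stack_version_for(version_string):
        for prefixes, release in CINDER_RELEASES:
            if version_string.startswith(prefixes):  # str.startswith accepts a tuple of prefixes
                return release
        raise ValueError('Unsupported cinder version: {0}'.format(version_string))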
Example #3
    def add_services(client, node_type, logger):
        """
        Add the services required by the OVS cluster
        :param client: Client on which to add the services
        :type client: ovs.extensions.generic.sshclient.SSHClient
        :param node_type: Type of node ('master' or 'extra')
        :type node_type: str
        :param logger: Logger object used for logging
        :type logger: ovs.log.log_handler.LogHandler
        :return: None
        """
        Toolbox.log(logger=logger, messages='Adding services')
        services = {}
        worker_queue = System.get_my_machine_id(client=client)
        if node_type == 'master':
            worker_queue += ',ovs_masters'
            services.update({'memcached': {'MEMCACHE_NODE_IP': client.ip, 'WORKER_QUEUE': worker_queue},
                             'rabbitmq-server': {'MEMCACHE_NODE_IP': client.ip, 'WORKER_QUEUE': worker_queue},
                             'scheduled-tasks': {},
                             'webapp-api': {},
                             'volumerouter-consumer': {}})
        services.update({'workers': {'WORKER_QUEUE': worker_queue},
                         'watcher-framework': {}})

        for service_name, params in services.iteritems():
            if not ServiceManager.has_service(service_name, client):
                Toolbox.log(logger=logger, messages='Adding service {0}'.format(service_name))
                ServiceManager.add_service(name=service_name, params=params, client=client)
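A hypothetical invocation of add_services on the local node, reusing the SSHClient and LogHandler constructors seen elsewhere in this listing (username redacted as in the source, logger name made up). add_services is shown as a plain call although it is most likely a static method on a setup controller.

    from ovs.extensions.generic.sshclient import SSHClient
    from ovs.log.log_handler import LogHandler

    client = SSHClient('127.0.0.1', username='******')
    logger = LogHandler.get('lib', name='example')  # hypothetical logger name
    add_services(client=client, node_type='master', logger=logger)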
Example #4
 def stop(cluster_name, client):
     """
     Stops an arakoon service
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.stop_service('arakoon-{0}'.format(cluster_name), client=client)
Example #5
 def on_demote(cluster_ip, master_ip, offline_node_ips=None):
     """
     Handles the demote for the StorageDrivers
     :param cluster_ip: IP of the node to demote
     :param master_ip: IP of the master node
     :param offline_node_ips: IPs of nodes which are offline
     """
     _ = master_ip
     if offline_node_ips is None:
         offline_node_ips = []
     client = SSHClient(
         cluster_ip,
         username='******') if cluster_ip not in offline_node_ips else None
     servicetype = ServiceTypeList.get_by_name('Arakoon')
     current_service = None
     remaining_ips = []
     for service in servicetype.services:
         if service.name == 'arakoon-voldrv':
             if service.storagerouter.ip == cluster_ip:
                 current_service = service
             elif service.storagerouter.ip not in offline_node_ips:
                 remaining_ips.append(service.storagerouter.ip)
     if current_service is not None:
         print '* Shrink StorageDriver cluster'
         ArakoonInstaller.shrink_cluster(cluster_ip, 'voldrv',
                                         offline_node_ips)
         if client is not None and ServiceManager.has_service(
                 current_service.name, client=client) is True:
             ServiceManager.stop_service(current_service.name,
                                         client=client)
             ServiceManager.remove_service(current_service.name,
                                           client=client)
         ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
         current_service.delete()
         StorageDriverController._configure_arakoon_to_volumedriver()
Example #6
    def remove_services(client, node_type, logger):
        """
        Remove all services managed by OVS
        :param client: Client on which to remove the services
        :type client: ovs.extensions.generic.sshclient.SSHClient
        :param node_type: Type of node, can be 'master' or 'extra'
        :type node_type: str
        :param logger: Logger object used for logging
        :type logger: ovs.log.log_handler.LogHandler
        :return: None
        """
        Toolbox.log(logger=logger, messages="Removing services")
        stop_only = ["rabbitmq-server", "memcached"]
        services = ["workers", "support-agent", "watcher-framework"]
        if node_type == "master":
            services += ["scheduled-tasks", "webapp-api", "volumerouter-consumer"]
            if Toolbox.is_service_internally_managed(service="rabbitmq") is True:
                services.append("rabbitmq-server")
            if Toolbox.is_service_internally_managed(service="memcached") is True:
                services.append("memcached")

        for service in services:
            if ServiceManager.has_service(service, client=client):
                Toolbox.log(
                    logger=logger,
                    messages="{0} service {1}".format("Removing" if service not in stop_only else "Stopping", service),
                )
                ServiceManager.stop_service(service, client=client)
                if service not in stop_only:
                    ServiceManager.remove_service(service, client=client)
Example #7
 def on_demote(cluster_ip, master_ip, offline_node_ips=None):
     """
     Handles the demote for the StorageDrivers
     :param cluster_ip: IP of the node to demote
     :param master_ip: IP of the master node
     :param offline_node_ips: IPs of nodes which are offline
     """
     if offline_node_ips is None:
         offline_node_ips = []
     client = SSHClient(cluster_ip, username='******') if cluster_ip not in offline_node_ips else None
     servicetype = ServiceTypeList.get_by_name('Arakoon')
     current_service = None
     remaining_ips = []
     for service in servicetype.services:
         if service.name == 'arakoon-voldrv':
             if service.storagerouter.ip == cluster_ip:
                 current_service = service
             elif service.storagerouter.ip not in offline_node_ips:
                 remaining_ips.append(service.storagerouter.ip)
     if current_service is not None:
         print '* Shrink StorageDriver cluster'
         ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, 'voldrv', offline_node_ips)
         if client is not None and ServiceManager.has_service(current_service.name, client=client) is True:
             ServiceManager.stop_service(current_service.name, client=client)
             ServiceManager.remove_service(current_service.name, client=client)
         ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
         current_service.delete()
         StorageDriverController._configure_arakoon_to_volumedriver(offline_node_ips)
Example #8
    def _setup_proxy(initial_cluster, slave_client, cluster_name, force=False):
        base_name = 'ovs-etcd-proxy'
        target_name = 'ovs-etcd-{0}'.format(cluster_name)
        if force is False and ServiceManager.has_service(target_name, slave_client) and \
            ServiceManager.get_service_status(target_name, slave_client) is True:
            logger.info('Service {0} already configured and running'.format(target_name))
            return
        EtcdInstaller.stop(cluster_name, slave_client)

        data_dir = EtcdInstaller.DATA_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
        wal_dir = EtcdInstaller.WAL_DIR.format(EtcdInstaller.DB_DIR, cluster_name)
        abs_paths = [data_dir, wal_dir]
        slave_client.dir_delete(abs_paths)
        slave_client.dir_create(data_dir)
        slave_client.dir_chmod(data_dir, 0755, recursive=True)
        slave_client.dir_chown(data_dir, 'ovs', 'ovs', recursive=True)

        ServiceManager.add_service(base_name, slave_client,
                                   params={'CLUSTER': cluster_name,
                                           'DATA_DIR': data_dir,
                                           'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1'),
                                           'INITIAL_CLUSTER': initial_cluster},
                                   target_name=target_name)
        EtcdInstaller.start(cluster_name, slave_client)
        EtcdInstaller.wait_for_cluster(cluster_name, slave_client)
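Several snippets in this listing repeat the same guard: check has_service first, then get_service_status, and only then act. A small hypothetical helper capturing that pattern could look like the sketch below; note that the newer snippets (e.g. Example #31 and #49) index the status with [0] because get_service_status returns a tuple there.

    def ensure_service_running(name, client):
        # Hypothetical helper: start a configured-but-stopped service, and skip
        # services that are not configured on this node at all.
        if not ServiceManager.has_service(name, client=client):
            return False
        if ServiceManager.get_service_status(name, client=client) is not True:
            ServiceManager.start_service(name, client=client)
        return True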
Example #9
 def on_demote(cluster_ip, master_ip):
     """
     Handles the demote for the StorageDrivers
     :param cluster_ip: IP of the node to demote
     :param master_ip: IP of the master node
     """
     client = SSHClient(cluster_ip, username='******')
     servicetype = ServiceTypeList.get_by_name('Arakoon')
     current_service = None
     remaining_ips = []
     for service in servicetype.services:
         if service.name == 'arakoon-voldrv':
             if service.storagerouter.ip == cluster_ip:
                 current_service = service
             else:
                 remaining_ips.append(service.storagerouter.ip)
     if current_service is not None:
         print '* Shrink StorageDriver cluster'
         ArakoonInstaller.shrink_cluster(master_ip, cluster_ip, 'voldrv')
         if ServiceManager.has_service(current_service.name, client=client) is True:
             ServiceManager.stop_service(current_service.name, client=client)
             ServiceManager.remove_service(current_service.name, client=client)
         ArakoonInstaller.restart_cluster_remove('voldrv', remaining_ips)
         current_service.delete()
         for storagerouter in StorageRouterList.get_storagerouters():
             ArakoonInstaller.deploy_to_slave(master_ip, storagerouter.ip, 'voldrv')
         StorageDriverController._configure_arakoon_to_volumedriver()
Example #10
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework',
                                      SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
            # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
            print 'Installing plugin into Open vStorage'
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = {}
            masters = StorageRouterList.get_masters()
            slaves = StorageRouterList.get_slaves()
            try:
                for sr in masters + slaves:
                    clients[sr] = SSHClient(sr, username='******')
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')
            memcached = 'memcached'
            watcher = 'watcher-framework'
            for sr in masters + slaves:
                if ServiceManager.has_service(watcher, clients[sr]):
                    print '- Stopping watcher on {0} ({1})'.format(
                        sr.name, sr.ip)
                    ServiceManager.stop_service(watcher, clients[sr])
            for sr in masters:
                print '- Restarting memcached on {0} ({1})'.format(
                    sr.name, sr.ip)
                ServiceManager.restart_service(memcached, clients[sr])
            for sr in masters + slaves:
                if ServiceManager.has_service(watcher, clients[sr]):
                    print '- Starting watcher on {0} ({1})'.format(
                        sr.name, sr.ip)
                    ServiceManager.start_service(watcher, clients[sr])

            print '- Execute model migrations'
            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            if len(functions) > 0:
                print '- Execute post installation scripts'
            for function in functions:
                function(ip=ip)
            print 'Installing plugin into Open vStorage: Completed'
Example #11
 def has_service(name, client):
     """
     Validate if the node has the service configured
     :param name: Name of the service
     :param client: SSHClient object
     :return: True if service is configured
     """
     return ServiceManager.has_service(name, client)
Example #12
 def remove(cluster_name, client):
     """
     Removes an etcd service
     :param client: Client on which to remove the service
     :param cluster_name: The name of the cluster service to remove
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.remove_service('etcd-{0}'.format(cluster_name), client=client)
Example #13
 def start(cluster_name, client):
     """
     Starts an arakoon cluster
     :param client: Client on which to start the service
     :param cluster_name: The name of the cluster service to start
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.start_service('arakoon-{0}'.format(cluster_name), client=client)
Example #14
 def stop(cluster_name, client):
     """
     Stops an arakoon service
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.stop_service('arakoon-{0}'.format(cluster_name),
                                     client=client)
Example #15
 def remove(cluster_name, client):
     """
     Removes an arakoon service
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name),
                                   client=client) is True:
         ServiceManager.remove_service('arakoon-{0}'.format(cluster_name),
                                       client=client)
Example #16
 def stop(cluster_name, client):
     """
     Stops an etcd service
     :param client: Client on which to stop the service
     :param cluster_name: The name of the cluster service to stop
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.stop_service('etcd-{0}'.format(cluster_name), client=client)
Example #17
 def stop(cluster_name, client):
     """
     Stops an arakoon service
     """
     if (
         ServiceManager.has_service("arakoon-{0}".format(cluster_name), client=client) is True
         and ServiceManager.get_service_status("arakoon-{0}".format(cluster_name), client=client) is True
     ):
         ServiceManager.stop_service("arakoon-{0}".format(cluster_name), client=client)
Example #18
 def start(cluster_name, client):
     """
     Starts an etcd cluster
     :param client: Client on which to start the service
     :param cluster_name: The name of the cluster service to start
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('etcd-{0}'.format(cluster_name), client=client) is False:
         ServiceManager.start_service('etcd-{0}'.format(cluster_name), client=client)
Example #19
 def is_running(cluster_name, client):
     """
     Checks if arakoon service is running
     :param client: Client on which to check the service
     :param cluster_name: The name of the cluster service to check
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client):
         return ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client)
     return False
Example #20
 def is_devstack_installed():
     """
     Check if OpenStack or DevStack is installed
     :return: True if installed
     """
     client = SSHClient('127.0.0.1', username='******')
     is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), client)
     is_devstack = 'stack' in str(client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true', allow_insecure=True))
     return is_openstack or is_devstack
Example #21
 def stop(cluster_name, client):
     """
     Stops an arakoon service
     :param client: Client on which to stop the service
     :param cluster_name: The name of the cluster service to stop
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.stop_service('arakoon-{0}'.format(cluster_name), client=client)
Example #22
 def stop(cluster_name, client):
     """
     Stops an etcd service
     :param client: Client on which to stop the service
     :param cluster_name: The name of the cluster service to stop
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('etcd-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.stop_service('etcd-{0}'.format(cluster_name),
                                     client=client)
Example #23
 def _enable_openstack_events_consumer(self):
     """
     Enable service ovs-openstack-events-consumer
     """
     from ovs.extensions.services.service import ServiceManager
     service_name = 'ovs-openstack-events-consumer'
     if not ServiceManager.has_service(service_name, self.client):
         ServiceManager.add_service(service_name, self.client)
         ServiceManager.enable_service(service_name, self.client)
         ServiceManager.start_service(service_name, self.client)
Example #24
 def start(cluster_name, client):
     """
     Starts an arakoon cluster
     :param client: Client on which to start the service
     :param cluster_name: The name of the cluster service to start
     """
     if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True and \
             ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client) is False:
         ServiceManager.start_service('arakoon-{0}'.format(cluster_name),
                                      client=client)
Example #25
 def remove(cluster_name, client):
     """
     Removes an etcd service
     :param client: Client on which to remove the service
     :param cluster_name: The name of the cluster service to remove
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name),
                                   client=client) is True:
         ServiceManager.remove_service('etcd-{0}'.format(cluster_name),
                                       client=client)
Example #26
 def _enable_openstack_events_consumer(self):
     """
     Enable service ovs-openstack-events-consumer
     """
     from ovs.extensions.services.service import ServiceManager
     service_name = 'ovs-openstack-events-consumer'
     if not ServiceManager.has_service(service_name, self.client):
         ServiceManager.add_service(service_name, self.client)
         ServiceManager.enable_service(service_name, self.client)
         ServiceManager.start_service(service_name, self.client)
Example #27
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
            # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
            print 'Installing plugin into Open vStorage'
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = {}
            masters = StorageRouterList.get_masters()
            slaves = StorageRouterList.get_slaves()
            try:
                for sr in masters + slaves:
                    clients[sr] = SSHClient(sr, username='******')
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')
            memcached = 'memcached'
            watcher = 'watcher-framework'
            for sr in masters + slaves:
                if ServiceManager.has_service(watcher, clients[sr]):
                    print '- Stopping watcher on {0} ({1})'.format(sr.name, sr.ip)
                    ServiceManager.stop_service(watcher, clients[sr])
            for sr in masters:
                print '- Restarting memcached on {0} ({1})'.format(sr.name, sr.ip)
                ServiceManager.restart_service(memcached, clients[sr])
            for sr in masters + slaves:
                if ServiceManager.has_service(watcher, clients[sr]):
                    print '- Starting watcher on {0} ({1})'.format(sr.name, sr.ip)
                    ServiceManager.start_service(watcher, clients[sr])

            print '- Execute model migrations'
            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            if len(functions) > 0:
                print '- Execute post installation scripts'
            for function in functions:
                function(ip=ip)
            print 'Installing plugin into Open vStorage: Completed'
Example #28
 def start(cluster_name, client):
     """
     Starts an etcd cluster
     :param cluster_name: The name of the cluster service to start
     :type cluster_name: str
     :param client: Client on which to start the service
     :type client: SSHClient
     :return: None
     """
     if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True:
         ServiceManager.start_service('etcd-{0}'.format(cluster_name), client=client)
Example #29
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework',
                                      SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
            # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = []
            try:
                for storagerouter in StorageRouterList.get_storagerouters():
                    clients.append(SSHClient(storagerouter, username='******'))
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')

            for client in clients:
                for service_name in ['watcher-framework', 'memcached']:
                    ServiceManager.stop_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(
                                service_name, client=client) is False:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError(
                            'Could not stop service: {0}'.format(service_name))

            for client in clients:
                for service_name in ['memcached', 'watcher-framework']:
                    ServiceManager.start_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(
                                service_name, client=client) is True:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError(
                            'Could not start service: {0}'.format(
                                service_name))

            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            for function in functions:
                function(ip=ip)
Example #30
 def stop(cluster_name, client):
     """
     Stops an arakoon service
     :param cluster_name: The name of the cluster service to stop
     :type cluster_name: str
     :param client: Client on which to stop the service
     :type client: SSHClient
     :return: None
     """
     service_name = ArakoonInstaller.get_service_name_for_cluster(cluster_name=cluster_name)
     if ServiceManager.has_service(name=service_name, client=client) is True:
         ServiceManager.stop_service(name=service_name, client=client)
Example #31
 def is_running(cluster_name, client):
     """
     Checks if arakoon service is running
     :param cluster_name: The name of the cluster service to check
     :type cluster_name: str
     :param client: Client on which to check the service
     :type client: SSHClient
     :return: True if the arakoon service is running, False otherwise
     :rtype: bool
     """
     service_name = ArakoonInstaller.get_service_name_for_cluster(cluster_name=cluster_name)
     if ServiceManager.has_service(name=service_name, client=client):
         return ServiceManager.get_service_status(name=service_name, client=client)[0]
     return False
Example #32
    def stop(cluster_name, client):
        """
        Stops an arakoon service
        :param cluster_name: The name of the cluster service to stop
        :type cluster_name: str

        :param client: Client on which to stop the service
        :type client: SSHClient

        :return: None
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.stop_service('arakoon-{0}'.format(cluster_name), client=client)
Example #33
    def remove(cluster_name, client):
        """
        Removes an arakoon service
        :param cluster_name: The name of the cluster service to remove
        :type cluster_name: str

        :param client: Client on which to remove the service
        :type client: SSHClient

        :return: None
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.remove_service('arakoon-{0}'.format(cluster_name), client=client)
Example #34
 def _restart_openstack_services(self):
     """
     Restart services on openstack
     """
     services = OSManager.get_openstack_services()
     for service_name in services:
         if ServiceManager.has_service(service_name, self.client):
             try:
                 ServiceManager.restart_service(service_name, self.client)
             except SystemExit as sex:
                 logger.debug('Failed to restart service {0}. {1}'.format(service_name, sex))
     time.sleep(3)
     return self._is_cinder_running()
Example #35
    def start(cluster_name, client):
        """
        Starts an etcd cluster
        :param cluster_name: The name of the cluster service to start
        :type cluster_name: str

        :param client: Client on which to start the service
        :type client: SSHClient

        :return: None
        """
        if ServiceManager.has_service('etcd-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.start_service('etcd-{0}'.format(cluster_name), client=client)
Example #36
    def stop(cluster_name, client):
        """
        Stops an arakoon service
        :param cluster_name: The name of the cluster service to stop
        :type cluster_name: str

        :param client: Client on which to stop the service
        :type client: SSHClient

        :return: None
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.stop_service('arakoon-{0}'.format(cluster_name), client=client)
Example #37
    def remove(cluster_name, client):
        """
        Removes an arakoon service
        :param cluster_name: The name of the cluster service to remove
        :type cluster_name: str

        :param client: Client on which to remove the service
        :type client: SSHClient

        :return: None
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client) is True:
            ServiceManager.remove_service('arakoon-{0}'.format(cluster_name), client=client)
Example #38
    def create_cluster(cluster_name, ip, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
        """
        Creates a cluster
        :param cluster_name: Name of the cluster
        :type cluster_name: str

        :param ip: IP address of the first node of the new cluster
        :type ip: str

        :param server_port: Port to be used by server
        :type server_port: int

        :param client_port: Port to be used by client
        :type client_port: int

        :return: None
        """
        EtcdInstaller._logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))

        client = SSHClient(ip, username='******')
        target_name = 'ovs-etcd-{0}'.format(cluster_name)
        if ServiceManager.has_service(target_name, client) and ServiceManager.get_service_status(target_name, client) is True:
            EtcdInstaller._logger.info('Service {0} already configured and running'.format(target_name))
            return

        node_name = System.get_my_machine_id(client)
        data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
        wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
        abs_paths = [data_dir, wal_dir]
        client.dir_delete(abs_paths)
        client.dir_create(abs_paths)
        client.dir_chmod(abs_paths, 0755, recursive=True)
        client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)

        base_name = 'ovs-etcd'
        ServiceManager.add_service(base_name, client,
                                   params={'CLUSTER': cluster_name,
                                           'NODE_ID': node_name,
                                           'DATA_DIR': data_dir,
                                           'WAL_DIR': wal_dir,
                                           'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip, server_port),
                                           'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip, client_port),
                                           'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
                                           'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip, server_port)),
                                           'INITIAL_STATE': 'new',
                                           'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip, server_port))},
                                   target_name=target_name)
        EtcdInstaller.start(cluster_name, client)
        EtcdInstaller.wait_for_cluster(cluster_name, client, client_port=client_port)

        EtcdInstaller._logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
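A hypothetical call of the method above; the IP address is a made-up placeholder and the ports simply fall back to the DEFAULT_SERVER_PORT / DEFAULT_CLIENT_PORT constants referenced in the signature.

    # Hypothetical usage; '10.100.1.1' is an example address, not taken from the project.
    EtcdInstaller.create_cluster(cluster_name='config', ip='10.100.1.1')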
Example #39
    def create_cluster(cluster_name, ip, server_port=DEFAULT_SERVER_PORT, client_port=DEFAULT_CLIENT_PORT):
        """
        Creates a cluster
        :param cluster_name: Name of the cluster
        :type cluster_name: str

        :param ip: IP address of the first node of the new cluster
        :type ip: str

        :param server_port: Port to be used by server
        :type server_port: int

        :param client_port: Port to be used by client
        :type client_port: int

        :return: None
        """
        EtcdInstaller._logger.debug('Creating cluster "{0}" on {1}'.format(cluster_name, ip))

        client = SSHClient(ip, username='******')
        target_name = 'ovs-etcd-{0}'.format(cluster_name)
        if ServiceManager.has_service(target_name, client) and ServiceManager.get_service_status(target_name, client) is True:
            EtcdInstaller._logger.info('Service {0} already configured and running'.format(target_name))
            return

        node_name = System.get_my_machine_id(client)
        data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
        wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
        abs_paths = [data_dir, wal_dir]
        client.dir_delete(abs_paths)
        client.dir_create(abs_paths)
        client.dir_chmod(abs_paths, 0755, recursive=True)
        client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)

        base_name = 'ovs-etcd'
        ServiceManager.add_service(base_name, client,
                                   params={'CLUSTER': cluster_name,
                                           'NODE_ID': node_name,
                                           'DATA_DIR': data_dir,
                                           'WAL_DIR': wal_dir,
                                           'SERVER_URL': EtcdInstaller.SERVER_URL.format(ip, server_port),
                                           'CLIENT_URL': EtcdInstaller.CLIENT_URL.format(ip, client_port),
                                           'LOCAL_CLIENT_URL': EtcdInstaller.CLIENT_URL.format('127.0.0.1', client_port),
                                           'INITIAL_CLUSTER': '{0}={1}'.format(node_name, EtcdInstaller.SERVER_URL.format(ip, server_port)),
                                           'INITIAL_STATE': 'new',
                                           'INITIAL_PEERS': '-initial-advertise-peer-urls {0}'.format(EtcdInstaller.SERVER_URL.format(ip, server_port))},
                                   target_name=target_name)
        EtcdInstaller.start(cluster_name, client)
        EtcdInstaller.wait_for_cluster(cluster_name, client, client_port=client_port)

        EtcdInstaller._logger.debug('Creating cluster "{0}" on {1} completed'.format(cluster_name, ip))
Example #40
    def is_running(cluster_name, client):
        """
        Checks if arakoon service is running
        :param cluster_name: The name of the cluster service to check
        :type cluster_name: str

        :param client: Client on which to check the service
        :type client: SSHClient

        :return: True if the arakoon service is running, False otherwise
        :rtype: bool
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client):
            return ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client)[0]
        return False
Example #41
 def _restart_openstack_services(self):
     """
     Restart services on openstack
     """
     services = OSManager.get_openstack_services()
     for service_name in services:
         if ServiceManager.has_service(service_name, self.client):
             try:
                 ServiceManager.restart_service(service_name, self.client)
             except SystemExit as sex:
                 logger.debug('Failed to restart service {0}. {1}'.format(
                     service_name, sex))
     time.sleep(3)
     return self._is_cinder_running()
Example #42
    def is_running(cluster_name, client):
        """
        Checks if arakoon service is running
        :param cluster_name: The name of the cluster service to check
        :type cluster_name: str

        :param client: Client on which to check the service
        :type client: SSHClient

        :return: True if the arakoon service is running, False otherwise
        :rtype: bool
        """
        if ServiceManager.has_service('arakoon-{0}'.format(cluster_name), client=client):
            return ServiceManager.get_service_status('arakoon-{0}'.format(cluster_name), client=client)
        return False
Example #43
    def on_demote(cluster_ip, master_ip, offline_node_ips=None):
        """
        Handles the demote for the StorageDrivers
        :param cluster_ip: IP of the node to demote
        :type cluster_ip: str

        :param master_ip: IP of the master node
        :type master_ip: str

        :param offline_node_ips: IPs of nodes which are offline
        :type offline_node_ips: list

        :return: None
        """
        _ = master_ip
        if offline_node_ips is None:
            offline_node_ips = []
        client = SSHClient(
            cluster_ip,
            username='******') if cluster_ip not in offline_node_ips else None
        servicetype = ServiceTypeList.get_by_name(
            ServiceType.SERVICE_TYPES.ARAKOON)
        current_service = None
        remaining_ips = []
        for service in servicetype.services:
            if service.name == 'arakoon-voldrv' and service.is_internal is True:  # Externally managed arakoon cluster service does not have storage router
                if service.storagerouter.ip == cluster_ip:
                    current_service = service
                elif service.storagerouter.ip not in offline_node_ips:
                    remaining_ips.append(service.storagerouter.ip)
        if current_service is not None:
            StorageDriverController._logger.debug(
                '* Shrink StorageDriver cluster')
            cluster_name = str(
                EtcdConfiguration.get(
                    '/ovs/framework/arakoon_clusters|voldrv'))
            ArakoonInstaller.shrink_cluster(deleted_node_ip=cluster_ip,
                                            cluster_name=cluster_name,
                                            offline_nodes=offline_node_ips)
            if client is not None and ServiceManager.has_service(
                    current_service.name, client=client) is True:
                ServiceManager.stop_service(current_service.name,
                                            client=client)
                ServiceManager.remove_service(current_service.name,
                                              client=client)
            ArakoonInstaller.restart_cluster_remove(cluster_name,
                                                    remaining_ips)
            current_service.delete()
            StorageDriverController._configure_arakoon_to_volumedriver(
                cluster_name=cluster_name)
Example #44
 def remove(cluster_name, client, delay_unregistration=False):
     """
     Removes an arakoon service
     :param cluster_name: The name of the cluster service to remove
     :type cluster_name: str
     :param client: Client on which to remove the service
     :type client: SSHClient
     :param delay_unregistration: Un-register the service right away or not
     :type delay_unregistration: bool
     :return: None
     """
     service_name = ArakoonInstaller.get_service_name_for_cluster(cluster_name=cluster_name)
     if ServiceManager.has_service(name=service_name, client=client) is True:
         ServiceManager.remove_service(name=service_name, client=client, delay_unregistration=delay_unregistration)
Example #45
    def install_plugins():
        """
        (Re)load plugins
        """
        if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
            # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
            # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
            from ovs.dal.lists.storagerouterlist import StorageRouterList
            clients = []
            try:
                for storagerouter in StorageRouterList.get_storagerouters():
                    clients.append(SSHClient(storagerouter, username='******'))
            except UnableToConnectException:
                raise RuntimeError('Not all StorageRouters are reachable')

            for client in clients:
                for service_name in ['watcher-framework', 'memcached']:
                    ServiceManager.stop_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is False:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not stop service: {0}'.format(service_name))

            for client in clients:
                for service_name in ['memcached', 'watcher-framework']:
                    ServiceManager.start_service(service_name, client=client)
                    wait = 30
                    while wait > 0:
                        if ServiceManager.get_service_status(service_name, client=client) is True:
                            break
                        time.sleep(1)
                        wait -= 1
                    if wait == 0:
                        raise RuntimeError('Could not start service: {0}'.format(service_name))

            from ovs.dal.helpers import Migration
            Migration.migrate()

            from ovs.lib.helpers.toolbox import Toolbox
            ip = System.get_my_storagerouter().ip
            functions = Toolbox.fetch_hooks('plugin', 'postinstall')
            for function in functions:
                function(ip=ip)
Example #46
    def check_rabbitmq_and_enable_ha_mode(client, logger):
        """
        Verify RabbitMQ is running properly and enable HA mode
        :param client: Client on which to check RabbitMQ
        :type client: ovs.extensions.generic.sshclient.SSHClient
        :param logger: Logger object used for logging
        :type logger: ovs.log.log_handler.LogHandler
        :return: None
        """
        if not ServiceManager.has_service('rabbitmq-server', client):
            raise RuntimeError('Service rabbitmq-server has not been added on node {0}'.format(client.ip))
        rabbitmq_running, same_process = ServiceManager.is_rabbitmq_running(client=client)
        if rabbitmq_running is False or same_process is False:
            Toolbox.change_service_state(client, 'rabbitmq-server', 'restart', logger)

        time.sleep(5)
        client.run(['rabbitmqctl', 'set_policy', 'ha-all', '^(volumerouter|ovs_.*)$', '{"ha-mode":"all"}'])
Example #47
 def on_remove(cluster_ip, complete_removal):
     """
     Handles the StorageDriver removal part of a node
     :param cluster_ip: IP of the node which is being removed from the cluster
     :type cluster_ip: str
     :param complete_removal: Unused for StorageDriver, used for AlbaController
     :type complete_removal: bool
     :return: None
     """
     _ = complete_removal
     service_name = 'watcher-volumedriver'
     try:
         client = SSHClient(endpoint=cluster_ip, username='******')
         if ServiceManager.has_service(name=service_name, client=client):
             ServiceManager.stop_service(name=service_name, client=client)
             ServiceManager.remove_service(name=service_name, client=client)
     except UnableToConnectException:
         pass
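The stop-then-remove pattern above, tolerant of unreachable nodes, generalises naturally. Below is a hypothetical helper written against the same SSHClient / ServiceManager calls used throughout this listing (username redacted as in the source).

    def remove_service_if_present(ip, service_name):
        # Hypothetical helper: stop and remove a service on one node,
        # silently skipping nodes that cannot be reached.
        try:
            client = SSHClient(endpoint=ip, username='******')
        except UnableToConnectException:
            return False
        if ServiceManager.has_service(name=service_name, client=client):
            ServiceManager.stop_service(name=service_name, client=client)
            ServiceManager.remove_service(name=service_name, client=client)
        return True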
Example #48
    def __init__(self, cinder_client):
        self._logger = LogHandler.get('extensions', name='openstack_mgmt')
        self.client = SSHClient('127.0.0.1', username='******')
        self.cinder_client = cinder_client

        self._NOVA_CONF = '/etc/nova/nova.conf'
        self._CINDER_CONF = '/etc/cinder/cinder.conf'
        self._is_openstack = ServiceManager.has_service(
            OSManager.get_openstack_cinder_service_name(), self.client)
        self._nova_installed = self.client.file_exists(self._NOVA_CONF)
        self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
        self._driver_location = OSManager.get_openstack_package_base_path()
        self._openstack_users = OSManager.get_openstack_users()
        self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'

        try:
            self._is_devstack = 'stack' in str(
                self.client.run(
                    'ps aux | grep SCREEN | grep stack | grep -v grep || true')
            )
        except SystemExit:  # ssh client raises system exit 1
            self._is_devstack = False
        except Exception:
            self._is_devstack = False

        try:
            from cinder import version
            version_string = version.version_string()
            if version_string.startswith('9.0'):
                self._stack_version = 'newton'
            elif version_string.startswith('8.0'):
                self._stack_version = 'mitaka'
            elif version_string.startswith(
                    '2015.2') or version_string.startswith('7.0'):
                self._stack_version = 'liberty'
            elif version_string.startswith('2015.1'):
                self._stack_version = 'kilo'
            elif version_string.startswith('2014.2'):
                self._stack_version = 'juno'
            else:
                raise ValueError(
                    'Unsupported cinder version: {0}'.format(version_string))
        except Exception as ex:
            raise ValueError('Cannot determine cinder version: {0}'.format(ex))
Example #49
    def _setup_proxy(initial_cluster,
                     slave_client,
                     cluster_name,
                     force=False,
                     client_port=DEFAULT_CLIENT_PORT):
        base_name = 'ovs-etcd-proxy'
        target_name = 'ovs-etcd-{0}'.format(cluster_name)
        if force is False and ServiceManager.has_service(
                target_name,
                slave_client) and ServiceManager.get_service_status(
                    target_name, slave_client)[0] is True:
            EtcdInstaller._logger.info(
                'Service {0} already configured and running'.format(
                    target_name))
            return
        EtcdInstaller.stop(cluster_name, slave_client)

        data_dir = EtcdInstaller.DATA_DIR.format(cluster_name)
        wal_dir = EtcdInstaller.WAL_DIR.format(cluster_name)
        abs_paths = [data_dir, wal_dir]
        slave_client.dir_delete(abs_paths)
        slave_client.dir_create(data_dir)
        slave_client.dir_chmod(data_dir, 0755, recursive=True)
        slave_client.dir_chown(data_dir, 'ovs', 'ovs', recursive=True)

        ServiceManager.add_service(base_name,
                                   slave_client,
                                   params={
                                       'CLUSTER':
                                       cluster_name,
                                       'DATA_DIR':
                                       data_dir,
                                       'LOCAL_CLIENT_URL':
                                       EtcdInstaller.CLIENT_URL.format(
                                           '127.0.0.1', client_port),
                                       'INITIAL_CLUSTER':
                                       initial_cluster
                                   },
                                   target_name=target_name)
        EtcdInstaller.start(cluster_name, slave_client)
        EtcdInstaller.wait_for_cluster(cluster_name,
                                       slave_client,
                                       client_port=client_port)
Example #50
 def _change_services_state(services, ssh_clients, action):
     """
     Stop/start services on SSH clients
     If action is start, we ignore errors and try to start other services on other nodes
     """
     if action == 'start':
          services.reverse()  # Start services again in reverse order of stopping
     for service_name in services:
         for ssh_client in ssh_clients:
             description = 'stopping' if action == 'stop' else 'starting' if action == 'start' else 'restarting'
             try:
                 if ServiceManager.has_service(service_name,
                                               client=ssh_client):
                     UpdateController._log_message(
                         '{0} service {1}'.format(description.capitalize(),
                                                  service_name),
                         ssh_client.ip)
                     Toolbox.change_service_state(
                         client=ssh_client,
                         name=service_name,
                         state=action,
                         logger=UpdateController._logger)
                     UpdateController._log_message(
                         '{0} service {1}'.format(
                             'Stopped' if action == 'stop' else 'Started'
                             if action == 'start' else 'Restarted',
                             service_name), ssh_client.ip)
             except Exception as exc:
                 UpdateController._log_message(
                     'Something went wrong {0} service {1}: {2}'.format(
                         description, service_name, exc),
                     ssh_client.ip,
                     severity='warning')
                 if action == 'stop':
                     return False
     return True
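A hypothetical caller for _change_services_state, following the stop / update / start flow its docstring implies. The UpdateController name and the abort-on-failed-stop behaviour come from the snippet itself; the service selection and the SSHClient construction (username redacted as in the source) are illustrative.

    ssh_clients = [SSHClient(sr.ip, username='******') for sr in StorageRouterList.get_storagerouters()]
    services = ['watcher-framework', 'memcached', 'rabbitmq-server']  # illustrative selection
    if UpdateController._change_services_state(services, ssh_clients, 'stop'):
        # ... perform the actual update work here, then restart in reverse order ...
        UpdateController._change_services_state(services, ssh_clients, 'start')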
Example #51
    def execute_scrub_work(queue, vpool, scrub_info, error_messages):
        """
        Executes scrub work for a given vDisk queue and vPool, based on scrub_info
        :param queue: a Queue with vDisk guids that need to be scrubbed (they should all belong to a single vPool)
        :type queue: Queue
        :param vpool: the vPool object of the vDisks
        :type vpool: VPool
        :param scrub_info: A dict containing scrub information: `scrub_path` with the path to scrub in and `storage_router` with the StorageRouter
                           that needs to do the work
        :type scrub_info: dict
        :param error_messages: A list of error messages to be filled
        :type error_messages: list
        :return: a list of error messages
        :rtype: list
        """
        def _verify_mds_config(current_vdisk):
            current_vdisk.invalidate_dynamics('info')
            vdisk_configs = current_vdisk.info['metadata_backend_config']
            if len(vdisk_configs) == 0:
                raise RuntimeError('Could not load MDS configuration')
            return vdisk_configs

        client = None
        lock_time = 5 * 60
        storagerouter = scrub_info['storage_router']
        scrub_directory = '{0}/scrub_work_{1}_{2}'.format(
            scrub_info['scrub_path'], vpool.name, storagerouter.name)
        scrub_config_key = 'ovs/vpools/{0}/proxies/scrub/scrub_config_{1}'.format(
            vpool.guid, storagerouter.guid)
        backend_config_key = 'ovs/vpools/{0}/proxies/scrub/backend_config_{1}'.format(
            vpool.guid, storagerouter.guid)
        alba_proxy_service = 'ovs-albaproxy_{0}_{1}_scrub'.format(
            vpool.name, storagerouter.name)

        # Deploy a proxy
        try:
            with file_mutex(name='ovs_albaproxy_scrub', wait=lock_time):
                ScheduledTaskController._logger.info(
                    'Scrubber - vPool {0} - StorageRouter {1} - Deploying ALBA proxy {2}'
                    .format(vpool.name, storagerouter.name,
                            alba_proxy_service))
                client = SSHClient(storagerouter, 'root')
                client.dir_create(scrub_directory)
                client.dir_chmod(
                    scrub_directory, 0777
                )  # Celery task executed by 'ovs' user and should be able to write in it
                if ServiceManager.has_service(
                        name=alba_proxy_service, client=client
                ) is True and ServiceManager.get_service_status(
                        name=alba_proxy_service, client=client) is True:
                    ScheduledTaskController._logger.info(
                        'Scrubber - vPool {0} - StorageRouter {1} - Re-using existing proxy service {2}'
                        .format(vpool.name, storagerouter.name,
                                alba_proxy_service))
                    scrub_config = Configuration.get(scrub_config_key)
                else:
                    machine_id = System.get_my_machine_id(client)
                    port_range = Configuration.get(
                        '/ovs/framework/hosts/{0}/ports|storagedriver'.format(
                            machine_id))
                    port = System.get_free_ports(selected_range=port_range,
                                                 nr=1,
                                                 client=client)[0]
                    # Scrub config
                    # {u'albamgr_cfg_url': u'arakoon://config/ovs/vpools/71e2f717-f270-4a41-bbb0-d4c8c084d43e/proxies/64759516-3471-4321-b912-fb424568fc5b/config/abm?ini=%2Fopt%2FOpenvStorage%2Fconfig%2Farakoon_cacc.ini',
                    #  u'fragment_cache': [u'none'],
                    #  u'ips': [u'127.0.0.1'],
                    #  u'log_level': u'info',
                    #  u'manifest_cache_size': 17179869184,
                    #  u'port': 0,
                    #  u'transport': u'tcp'}

                    # Backend config
                    # {u'alba_connection_host': u'10.100.193.155',
                    #  u'alba_connection_port': 26204,
                    #  u'alba_connection_preset': u'preset',
                    #  u'alba_connection_timeout': 15,
                    #  u'alba_connection_transport': u'TCP',
                    #  u'backend_interface_retries_on_error': 5,
                    #  u'backend_interface_retry_backoff_multiplier': 2.0,
                    #  u'backend_interface_retry_interval_secs': 1,
                    #  u'backend_type': u'ALBA'}
                    scrub_config = Configuration.get(
                        'ovs/vpools/{0}/proxies/scrub/generic_scrub'.format(
                            vpool.guid))
                    scrub_config['port'] = port
                    scrub_config['transport'] = 'tcp'
                    Configuration.set(scrub_config_key,
                                      json.dumps(scrub_config, indent=4),
                                      raw=True)

                    params = {
                        'VPOOL_NAME':
                        vpool.name,
                        'LOG_SINK':
                        LogHandler.get_sink_path('alba_proxy'),
                        'CONFIG_PATH':
                        Configuration.get_configuration_path(scrub_config_key)
                    }
                    ServiceManager.add_service(name='ovs-albaproxy',
                                               params=params,
                                               client=client,
                                               target_name=alba_proxy_service)
                    ServiceManager.start_service(name=alba_proxy_service,
                                                 client=client)
                    ScheduledTaskController._logger.info(
                        'Scrubber - vPool {0} - StorageRouter {1} - Deployed ALBA proxy {2}'
                        .format(vpool.name, storagerouter.name,
                                alba_proxy_service))

                backend_config = Configuration.get(
                    'ovs/vpools/{0}/hosts/{1}/config'.format(
                        vpool.guid, vpool.storagedrivers[0].storagedriver_id
                    ))['backend_connection_manager']
                backend_config['alba_connection_host'] = '127.0.0.1'
                backend_config['alba_connection_port'] = scrub_config['port']
                Configuration.set(
                    backend_config_key,
                    json.dumps({"backend_connection_manager": backend_config},
                               indent=4),
                    raw=True)
        except Exception:
            message = 'Scrubber - vPool {0} - StorageRouter {1} - An error occurred deploying ALBA proxy {2}'.format(
                vpool.name, storagerouter.name, alba_proxy_service)
            error_messages.append(message)
            ScheduledTaskController._logger.exception(message)
            if client is not None and ServiceManager.has_service(
                    name=alba_proxy_service, client=client) is True:
                if ServiceManager.get_service_status(name=alba_proxy_service,
                                                     client=client) is True:
                    ServiceManager.stop_service(name=alba_proxy_service,
                                                client=client)
                ServiceManager.remove_service(name=alba_proxy_service,
                                              client=client)
            if Configuration.exists(scrub_config_key):
                Configuration.delete(scrub_config_key)

        try:
            # Empty the queue with vDisks to scrub
            with remote(storagerouter.ip, [VDisk]) as rem:
                while True:
                    vdisk = None
                    vdisk_guid = queue.get(False)
                    try:
                        # Check MDS master is local. Trigger MDS handover if necessary
                        vdisk = rem.VDisk(vdisk_guid)
                        ScheduledTaskController._logger.info(
                            'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - Started scrubbing at location {3}'
                            .format(vpool.name, storagerouter.name, vdisk.name,
                                    scrub_directory))
                        configs = _verify_mds_config(current_vdisk=vdisk)
                        storagedriver = StorageDriverList.get_by_storagedriver_id(
                            vdisk.storagedriver_id)
                        if configs[0].get('ip') != storagedriver.storagerouter.ip:
                            ScheduledTaskController._logger.info(
                                'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - MDS master is not local, trigger handover'
                                .format(vpool.name, storagerouter.name,
                                        vdisk.name))
                            MDSServiceController.ensure_safety(
                                VDisk(vdisk_guid)
                            )  # Do not use a remote VDisk instance here
                            configs = _verify_mds_config(current_vdisk=vdisk)
                            if configs[0].get('ip') != storagedriver.storagerouter.ip:
                                ScheduledTaskController._logger.warning(
                                    'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - Skipping because master MDS still not local'
                                    .format(vpool.name, storagerouter.name,
                                            vdisk.name))
                                continue

                        # Do the actual scrubbing
                        with vdisk.storagedriver_client.make_locked_client(
                                str(vdisk.volume_id)) as locked_client:
                            ScheduledTaskController._logger.info(
                                'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - Retrieve and apply scrub work'
                                .format(vpool.name, storagerouter.name,
                                        vdisk.name))
                            work_units = locked_client.get_scrubbing_workunits()
                            for work_unit in work_units:
                                res = locked_client.scrub(
                                    work_unit=work_unit,
                                    scratch_dir=scrub_directory,
                                    log_sinks=[LogHandler.get_sink_path('scrubber', allow_override=True)],
                                    backend_config=Configuration.get_configuration_path(backend_config_key))
                                locked_client.apply_scrubbing_result(
                                    scrubbing_work_result=res)
                            if work_units:
                                ScheduledTaskController._logger.info(
                                    'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - {3} work units successfully applied'
                                    .format(vpool.name, storagerouter.name,
                                            vdisk.name, len(work_units)))
                            else:
                                ScheduledTaskController._logger.info(
                                    'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - No scrubbing required'
                                    .format(vpool.name, storagerouter.name,
                                            vdisk.name))
                    except Exception:
                        if vdisk is None:
                            message = 'Scrubber - vPool {0} - StorageRouter {1} - vDisk with guid {2} could not be found'.format(
                                vpool.name, storagerouter.name, vdisk_guid)
                        else:
                            message = 'Scrubber - vPool {0} - StorageRouter {1} - vDisk {2} - Scrubbing failed'.format(
                                vpool.name, storagerouter.name, vdisk.name)
                        error_messages.append(message)
                        ScheduledTaskController._logger.exception(message)

        except Empty:  # Raised when all items have been fetched from the queue
            ScheduledTaskController._logger.info(
                'Scrubber - vPool {0} - StorageRouter {1} - Queue completely processed'
                .format(vpool.name, storagerouter.name))
        except Exception:
            message = 'Scrubber - vPool {0} - StorageRouter {1} - Scrubbing failed'.format(
                vpool.name, storagerouter.name)
            error_messages.append(message)
            ScheduledTaskController._logger.exception(message)

        # Delete the proxy again
        try:
            with file_mutex(name='ovs_albaproxy_scrub', wait=lock_time):
                ScheduledTaskController._logger.info(
                    'Scrubber - vPool {0} - StorageRouter {1} - Removing service {2}'
                    .format(vpool.name, storagerouter.name,
                            alba_proxy_service))
                client = SSHClient(storagerouter, 'root')
                client.dir_delete(scrub_directory)
                if ServiceManager.has_service(alba_proxy_service,
                                              client=client):
                    ServiceManager.stop_service(alba_proxy_service,
                                                client=client)
                    ServiceManager.remove_service(alba_proxy_service,
                                                  client=client)
                if Configuration.exists(scrub_config_key):
                    Configuration.delete(scrub_config_key)
                ScheduledTaskController._logger.info(
                    'Scrubber - vPool {0} - StorageRouter {1} - Removed service {2}'
                    .format(vpool.name, storagerouter.name,
                            alba_proxy_service))
        except Exception:
            message = 'Scrubber - vPool {0} - StorageRouter {1} - Removing service {2} failed'.format(
                vpool.name, storagerouter.name, alba_proxy_service)
            error_messages.append(message)
            ScheduledTaskController._logger.exception(message)
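
For reference, a routine like the one above is normally run once per vPool / scrub location, with every location draining a shared queue of vDisk guids. The sketch below only illustrates that driver loop; the names _scrub_vpool, scrub_locations, vdisk_guids and the worker signature are placeholders and are not taken from the code above.

    from threading import Thread
    from Queue import Queue  # Python 2 module, matching the code base

    def _scrub_vpool(vpool, scrub_locations, vdisk_guids, error_messages, worker):
        # 'worker' is assumed to be the per-StorageRouter routine (such as the one shown
        # above) wrapped as worker(vpool, scrub_info, queue, error_messages).
        vdisk_queue = Queue()
        for vdisk_guid in vdisk_guids:
            vdisk_queue.put(vdisk_guid)

        # One worker thread per scrub location, all draining the same queue
        threads = []
        for scrub_info in scrub_locations:
            thread = Thread(target=worker,
                            args=(vpool, scrub_info, vdisk_queue, error_messages))
            thread.start()
            threads.append(thread)
        for thread in threads:
            thread.join()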
Example #52
0
    def configure_host(self, ip):
        if (self._is_devstack is False and self._is_openstack is False) or self._cinder_installed is False or self._nova_installed is False:
            self._logger.warning(
                'Configure host: No OpenStack or DevStack installation detected, or the Cinder and Nova plugins are not installed'
            )
            return

        # 1. Get Driver code
        self._logger.info('*** Configuring host with IP {0} ***'.format(ip))
        self._logger.info('  Copy driver code')
        remote_driver = "/opt/OpenvStorage/config/templates/cinder-volume-driver/{0}/openvstorage.py".format(
            self._stack_version)
        remote_version = '0.0.0'
        existing_version = '0.0.0'
        try:
            from cinder.volume.drivers import openvstorage
            if hasattr(openvstorage, 'OVSVolumeDriver'):
                existing_version = getattr(openvstorage.OVSVolumeDriver,
                                           'VERSION', '0.0.0')
        except ImportError:
            pass

        for line in self.client.file_read(remote_driver).splitlines():
            if 'VERSION = ' in line:
                remote_version = line.split('VERSION = ')[-1].strip().replace(
                    "'", "").replace('"', "")
                break

        nova_base_path = self._get_base_path('nova')
        cinder_base_path = self._get_base_path('cinder')

        if self._is_devstack is True:
            local_driver = '{0}/volume/drivers/openvstorage.py'.format(
                cinder_base_path)
        else:
            local_driver = '{0}/cinder/volume/drivers/openvstorage.py'.format(
                self._driver_location)

        if remote_version > existing_version:
            self._logger.debug(
                'Updating existing driver using {0} from version {1} to version {2}'
                .format(remote_driver, existing_version, remote_version))
            self.client.run('cp -f {0} {1}'.format(remote_driver,
                                                   local_driver))
        else:
            self._logger.debug('Using driver {0} version {1}'.format(
                local_driver, existing_version))

        # 2. Configure users and groups
        self._logger.info('  Add users to group ovs')
        users = ['libvirt-qemu', 'stack'] if self._is_devstack is True else self._openstack_users
        for user in users:
            self.client.run('usermod -a -G ovs {0}'.format(user))

        # 3. Apply patches
        self._logger.info('  Applying patches')
        if self._stack_version in ('liberty', 'mitaka', 'newton'):
            try:
                import os_brick
                cinder_brick_initiator_file = "{0}/initiator/connector.py".format(
                    os.path.dirname(os_brick.__file__))
            except ImportError:
                cinder_brick_initiator_file = ''
            if self._is_devstack is True:
                nova_volume_file = '{0}/virt/libvirt/volume/volume.py'.format(
                    nova_base_path)
            else:
                nova_volume_file = '{0}/nova/virt/libvirt/volume/volume.py'.format(
                    self._driver_location)
        else:
            cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(
                self._driver_location)
            if self._is_devstack is True:
                nova_volume_file = '{0}/virt/libvirt/volume.py'.format(
                    nova_base_path)
            else:
                nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(
                    self._driver_location)
        if self._is_devstack is True:
            nova_driver_file = '{0}/virt/libvirt/driver.py'.format(
                nova_base_path)
        else:
            nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(
                self._driver_location)

        self._logger.info('    Patching file {0}'.format(nova_volume_file))

        file_contents = self.client.file_read(nova_volume_file)
        if 'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):' not in file_contents:
            file_contents += '''
class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):
    def __init__(self, connection):
        super(LibvirtFileVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        conf = super(LibvirtFileVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']
        return conf
'''
            self.client.file_write(nova_volume_file, file_contents)

        self._logger.info('    Patching file {0}'.format(nova_driver_file))

        file_contents = self.client.file_read(nova_driver_file)
        if self._stack_version in ('liberty', 'mitaka'):
            check_line = 'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver'
            new_line = 'file=nova.virt.libvirt.volume.volume.LibvirtFileVolumeDriver'
        else:
            check_line = 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver'
            new_line = 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver'
        if new_line not in file_contents:
            for line in file_contents.splitlines():
                if check_line in line:
                    stripped_line = line.rstrip()
                    whitespaces = len(stripped_line) - len(
                        stripped_line.lstrip())
                    new_line = "{0}'{1}',\n".format(' ' * whitespaces,
                                                    new_line)
                    insert_at = file_contents.index(line)
                    fc = file_contents[:insert_at] + new_line + file_contents[insert_at:]
                    self.client.file_write(nova_driver_file, fc)
                    break

        if os.path.exists(cinder_brick_initiator_file):
            # fix brick/upload to glance
            self._logger.info(
                '    Patching file {0}'.format(cinder_brick_initiator_file))
            if self._stack_version in ('liberty', 'mitaka', 'newton'):
                self.client.run(
                    """sed -i 's/elif protocol == LOCAL:/elif protocol in [LOCAL, "FILE"]:/g' {0}"""
                    .format(cinder_brick_initiator_file))
            else:
                self.client.run(
                    """sed -i 's/elif protocol == "LOCAL":/elif protocol in ["LOCAL", "FILE"]:/g' {0}"""
                    .format(cinder_brick_initiator_file))

        # 4. Configure messaging driver
        self._logger.info('  Configuring messaging driver')
        nova_messaging_driver = 'nova.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
        cinder_messaging_driver = 'cinder.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'

        with remote(ip, [RawConfigParser, open], 'root') as rem:
            for config_file, driver in {
                    self._NOVA_CONF: nova_messaging_driver,
                    self._CINDER_CONF: cinder_messaging_driver
            }.iteritems():
                changed = False
                cfg = rem.RawConfigParser()
                cfg.read([config_file])
                if cfg.has_option("DEFAULT", "notification_driver"):
                    if cfg.get("DEFAULT", "notification_driver") != driver:
                        changed = True
                        cfg.set("DEFAULT", "notification_driver", driver)
                else:
                    changed = True
                    cfg.set("DEFAULT", "notification_driver", driver)
                if cfg.has_option("DEFAULT", "notification_topics"):
                    notification_topics = cfg.get(
                        "DEFAULT", "notification_topics").split(",")
                    if "notifications" not in notification_topics:
                        notification_topics.append("notifications")
                        changed = True
                        cfg.set("DEFAULT", "notification_topics",
                                ",".join(notification_topics))
                else:
                    changed = True
                    cfg.set("DEFAULT", "notification_topics", "notifications")

                if config_file == self._NOVA_CONF:
                    for param, value in {
                            'notify_on_any_change': 'True',
                            'notify_on_state_change': 'vm_and_task_state'
                    }.iteritems():
                        if not cfg.has_option("DEFAULT", param):
                            changed = True
                            cfg.set("DEFAULT", param, value)

                if changed is True:
                    with rem.open(config_file, "w") as fp:
                        cfg.write(fp)

        # 5. Enable events consumer
        self._logger.info('  Enabling events consumer service')
        service_name = 'openstack-events-consumer'
        if not ServiceManager.has_service(service_name, self.client):
            ServiceManager.add_service(service_name, self.client)
            ServiceManager.enable_service(service_name, self.client)
            ServiceManager.start_service(service_name, self.client)
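
The driver.py patch in step 3 only splices a single string into Nova's libvirt volume-driver registry, directly above the existing 'local=' entry and with the same indentation. The fragment below is purely illustrative (the real list in Nova contains many more entries and its exact module paths differ per release); it is not copied from Nova or from the code above.

    libvirt_volume_drivers = [
        'file=nova.virt.libvirt.volume.volume.LibvirtFileVolumeDriver',  # entry inserted by configure_host (liberty/mitaka path shown)
        'local=nova.virt.libvirt.volume.volume.LibvirtVolumeDriver',     # pre-existing entry the patch uses as its anchor
    ]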
Example #53
0
    def unconfigure_host(self, ip):
        if (self._is_devstack is False and self._is_openstack is False) or self._cinder_installed is False or self._nova_installed is False:
            self._logger.warning(
                'Unconfigure host: No OpenStack or DevStack installation detected, or the Cinder and Nova plugins are not installed'
            )
            return

        # 1. Remove driver code
        self._logger.info('*** Unconfiguring host with IP {0} ***'.format(ip))
        self._logger.info('  Removing driver code')
        if self._is_devstack is True:
            self.client.file_delete(self._devstack_driver)
        else:
            self.client.file_delete(
                '{0}/cinder/volume/drivers/openvstorage.py'.format(
                    self._driver_location))

        # 2. Removing users from group
        self._logger.info('  Removing users from group ovs')
        users = ['libvirt-qemu', 'stack'] if self._is_devstack is True else self._openstack_users
        for user in users:
            self.client.run('deluser {0} ovs'.format(user))

        # 3. Revert patches
        self._logger.info('  Reverting patches')
        nova_base_path = self._get_base_path('nova')
        cinder_base_path = self._get_base_path('cinder')
        if self._is_devstack is True:
            nova_volume_file = '{0}/virt/libvirt/volume.py'.format(
                nova_base_path)
            nova_driver_file = '{0}/virt/libvirt/driver.py'.format(
                nova_base_path)
            cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(
                cinder_base_path)
        else:
            nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(
                self._driver_location)
            nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(
                self._driver_location)
            cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(
                self._driver_location)

        self._logger.info(
            '    Reverting patched file: {0}'.format(nova_volume_file))
        new_contents = []

        skip_class = False
        for line in self.client.file_read(nova_volume_file).splitlines():
            if line.startswith(
                    'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):'):
                skip_class = True
                continue
            if line.startswith('class'):
                skip_class = False
            if skip_class is False:
                new_contents.append(line)
        self.client.file_write(nova_volume_file, "\n".join(new_contents))

        self._logger.info(
            '    Reverting patched file: {0}'.format(nova_driver_file))
        new_contents = []

        for line in self.client.file_read(nova_driver_file).splitlines():
            stripped_line = line.strip()
            # configure_host may have inserted either the 'volume.volume' (liberty/mitaka) or the plain 'volume' variant, so strip both
            if stripped_line.startswith("'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver'") or \
                    stripped_line.startswith("'file=nova.virt.libvirt.volume.volume.LibvirtFileVolumeDriver'"):
                continue
            new_contents.append(line)
        self.client.file_write(nova_driver_file, "\n".join(new_contents))

        if os.path.exists(cinder_brick_initiator_file):
            self._logger.info('    Reverting patched file: {0}'.format(
                cinder_brick_initiator_file))
            self.client.run(
                """sed -i 's/elif protocol in ["LOCAL", "FILE"]:/elif protocol == "LOCAL":/g' {0}"""
                .format(cinder_brick_initiator_file))

        # 4. Unconfigure messaging driver
        self._logger.info('  Unconfiguring messaging driver')
        nova_messaging_driver = 'nova.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
        cinder_messaging_driver = 'cinder.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'

        with remote(ip, [RawConfigParser, open], 'root') as rem:
            for config_file, driver in {
                    self._NOVA_CONF: nova_messaging_driver,
                    self._CINDER_CONF: cinder_messaging_driver
            }.iteritems():
                cfg = rem.RawConfigParser()
                cfg.read([config_file])
                if cfg.has_option("DEFAULT", "notification_driver"):
                    cfg.remove_option("DEFAULT", "notification_driver")
                if cfg.has_option("DEFAULT", "notification_topics"):
                    notification_topics = cfg.get(
                        "DEFAULT", "notification_topics").split(",")
                    if "notifications" in notification_topics:
                        notification_topics.remove("notifications")
                        cfg.set("DEFAULT", "notification_topics",
                                ",".join(notification_topics))

                if config_file == self._NOVA_CONF:
                    for param, value in {
                            'notify_on_any_change': 'True',
                            'notify_on_state_change': 'vm_and_task_state'
                    }.iteritems():
                        if cfg.has_option("DEFAULT", param):
                            cfg.remove_option("DEFAULT", param)

                with rem.open(config_file, "w") as fp:
                    cfg.write(fp)

        # 5. Disable events consumer
        self._logger.info('  Disabling events consumer')
        service_name = 'ovs-openstack-events-consumer'
        if ServiceManager.has_service(service_name, self.client):
            ServiceManager.stop_service(service_name, self.client)
            ServiceManager.disable_service(service_name, self.client)
            ServiceManager.remove_service(service_name, self.client)
Example #54
0
    def _restart_processes(self):
        """
        Restart the cinder process that uses the OVS volume driver
        - also restarts nova api and compute services
        """
        def stop_screen_process(process_name):
            out = self.client.run(
                '''su stack -c 'screen -S {0} -p {1} -Q select 1>/dev/null; echo $?' '''
                .format(screen_name, process_name))
            process_screen_exists = out == '0'
            if process_screen_exists:
                self.client.run(
                    '''su stack -c 'screen -S {0} -p {1} -X stuff \n' '''.
                    format(screen_name, process_name))
                self.client.run(
                    '''su stack -c 'screen -S {0} -p {1} -X kill' '''.format(
                        screen_name, process_name))
            return process_screen_exists

        def start_screen_process(process_name, commands):
            logfile = '{0}/{1}.log.{2}'.format(
                logdir, process_name,
                datetime.datetime.strftime(datetime.datetime.now(),
                                           '%Y-%m-%d-%H%M%S'))
            self._logger.debug(
                self.client.run(
                    '''su stack -c 'touch {0}' '''.format(logfile)))
            self._logger.debug(
                self.client.run(
                    '''su stack -c 'screen -S {0} -X screen -t {1}' '''.format(
                        screen_name, process_name)))
            self._logger.debug(
                self.client.run(
                    '''su stack -c 'screen -S {0} -p {1} -X logfile {2}' '''.
                    format(screen_name, process_name, logfile)))
            self._logger.debug(
                self.client.run(
                    '''su stack -c 'screen -S {0} -p {1} -X log on' '''.format(
                        screen_name, process_name)))
            time.sleep(1)
            self._logger.debug(
                self.client.run('rm {0}/{1}.log || true'.format(
                    logdir, process_name)))
            self._logger.debug(
                self.client.run('ln -sf {0} {1}/{2}.log'.format(
                    logfile, logdir, process_name)))
            for command in commands:
                cmd = '''su stack -c 'screen -S {0} -p {1} -X stuff "{2}\012"' '''.format(
                    screen_name, process_name, command)
                self._logger.debug(cmd)
                self._logger.debug(self.client.run(cmd))

        logdir = '/opt/stack/logs'
        screen_name = 'stack'
        if self._is_devstack is True:
            try:
                c_vol_screen_exists = stop_screen_process('c-vol')
                n_cpu_screen_exists = stop_screen_process('n-cpu')
                n_api_screen_exists = stop_screen_process('n-api')
                c_api_screen_exists = stop_screen_process('c-api')

                self.client.run('''su stack -c 'mkdir -p /opt/stack/logs' ''')

                if c_vol_screen_exists:
                    start_screen_process('c-vol', [
                        "export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                        "newgrp ovs", "newgrp stack", "umask 0002",
                        "/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf & echo \$! >/opt/stack/status/stack/c-vol.pid; fg || echo  c-vol failed to start | tee \"/opt/stack/status/stack/c-vol.failure\" "
                    ])
                time.sleep(3)
                if n_cpu_screen_exists:
                    start_screen_process('n-cpu', [
                        "newgrp ovs", "newgrp stack",
                        "sg libvirtd /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf & echo $! >/opt/stack/status/stack/n-cpu.pid; fg || echo n-cpu failed to start | tee \"/opt/stack/status/stack/n-cpu.failure\" "
                    ])
                time.sleep(3)
                if n_api_screen_exists:
                    start_screen_process('n-api', [
                        "export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                        "/usr/local/bin/nova-api & echo $! >/opt/stack/status/stack/n-api.pid; fg || echo n-api failed to start | tee \"/opt/stack/status/stack/n-api.failure\" "
                    ])
                time.sleep(3)
                if c_api_screen_exists:
                    start_screen_process('c-api', [
                        "/usr/local/bin/cinder-api --config-file /etc/cinder/cinder.conf & echo $! >/opt/stack/status/stack/c-api.pid; fg || echo c-api failed to start | tee \"/opt/stack/status/stack/c-api.failure\" "
                    ])
                time.sleep(3)
            except SystemExit as se:  # failed command or non-zero exit codes raise SystemExit
                raise RuntimeError(str(se))

        else:
            for service_name in OSManager.get_openstack_services():
                if ServiceManager.has_service(service_name, self.client):
                    try:
                        ServiceManager.restart_service(service_name,
                                                       self.client)
                    except SystemExit as sex:
                        self._logger.debug(
                            'Failed to restart service {0}. {1}'.format(
                                service_name, sex))
            time.sleep(3)
Example #55
0
    def is_host_configured(self, ip):
        if (
                self._is_devstack is False and self._is_openstack is False
        ) or self._cinder_installed is False or self._nova_installed is False:
            self._logger.warning(
                'Host configured: No OpenStack or DevStack installation detected, or the Cinder and Nova plugins are not installed'
            )
            return False

        # 1. Check driver code
        if self._is_devstack is True:
            if not self.client.file_exists(filename=self._devstack_driver):
                self._logger.info('  File "{0}" does not exist'.format(
                    self._devstack_driver))
                return False
        else:
            if not self.client.file_exists(
                    filename='{0}/cinder/volume/drivers/openvstorage.py'.
                    format(self._driver_location)):
                self._logger.info(
                    '  File "{0}/cinder/volume/drivers/openvstorage.py" does not exist'
                    .format(self._driver_location))
                return False

        # 2. Check configured users
        ovs_id = self.client.run('id -u ovs')
        if not ovs_id:
            self._logger.info('Failed to determine the OVS user group ID')
            return False

        users = ['libvirt-qemu', 'stack'] if self._is_devstack is True else self._openstack_users
        for user in users:
            if '{0}(ovs)'.format(ovs_id) not in self.client.run(
                    'id -a {0}'.format(user)):
                self._logger.info(
                    'User "{0}" is not part of the OVS user group'.format(user))
                return False

        # 3. Check patches
        nova_base_path = self._get_base_path('nova')
        cinder_base_path = self._get_base_path('cinder')
        if self._stack_version in ('liberty', 'mitaka', 'newton'):
            try:
                import os_brick
                cinder_brick_initiator_file = "{0}/initiator/connector.py".format(
                    os.path.dirname(os_brick.__file__))
            except ImportError:
                cinder_brick_initiator_file = ''
            if self._is_devstack is True:
                nova_volume_file = '{0}/virt/libvirt/volume/volume.py'.format(
                    nova_base_path)
            else:
                nova_volume_file = '{0}/nova/virt/libvirt/volume/volume.py'.format(
                    self._driver_location)
        else:
            if self._is_devstack is True:
                nova_volume_file = '{0}/virt/libvirt/volume.py'.format(
                    nova_base_path)
            else:
                nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(
                    self._driver_location)
            cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(
                cinder_base_path)

        if self._is_devstack is True:
            nova_driver_file = '{0}/virt/libvirt/driver.py'.format(
                nova_base_path)
        else:
            nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(
                self._driver_location)

        file_contents = self.client.file_read(nova_volume_file)
        if 'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):' not in file_contents:
            self._logger.info('File "{0}" is not configured properly'.format(
                nova_volume_file))
            return False

        if self._stack_version in ('liberty', 'mitaka'):
            check_line = 'file=nova.virt.libvirt.volume.volume.LibvirtFileVolumeDriver'
        else:
            check_line = 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver'

        file_contents = self.client.file_read(nova_driver_file)
        if check_line not in file_contents:
            self._logger.info('File "{0}" is not configured properly'.format(
                nova_driver_file))
            return False

        if os.path.exists(cinder_brick_initiator_file):
            file_contents = self.client.file_read(cinder_brick_initiator_file)
            if self._stack_version in ('liberty', 'mitaka', 'newton'):
                if 'elif protocol in [LOCAL, "FILE"]:' not in file_contents:
                    self._logger.info(
                        'File "{0}" is not configured properly'.format(
                            cinder_brick_initiator_file))
                    return False
            else:
                if 'elif protocol in ["LOCAL", "FILE"]:' not in file_contents:
                    self._logger.info(
                        'File "{0}" is not configured properly'.format(
                            cinder_brick_initiator_file))
                    return False

        # 4. Check messaging driver configuration
        nova_messaging_driver = 'nova.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'
        cinder_messaging_driver = 'cinder.openstack.common.notifier.rpc_notifier' if self._stack_version == 'juno' else 'messaging'

        host_configured = True
        with remote(ip, [RawConfigParser], 'root') as rem:
            for config_file, driver in {
                    self._NOVA_CONF: nova_messaging_driver,
                    self._CINDER_CONF: cinder_messaging_driver
            }.iteritems():
                cfg = rem.RawConfigParser()
                cfg.read([config_file])
                host_configured &= cfg.get("DEFAULT",
                                           "notification_driver") == driver
                host_configured &= "notifications" in cfg.get(
                    "DEFAULT", "notification_topics")

                if config_file == self._NOVA_CONF:
                    host_configured &= cfg.get(
                        "DEFAULT", "notify_on_any_change") == "True"
                    host_configured &= cfg.get(
                        "DEFAULT",
                        "notify_on_state_change") == "vm_and_task_state"

        if host_configured is False:
            self._logger.info(
                'Nova and/or Cinder configuration files are not configured properly'
            )
            return host_configured

        # 5. Check events consumer service
        service_name = 'ovs-openstack-events-consumer'
        if not (ServiceManager.has_service(service_name, self.client)
                and ServiceManager.get_service_status(service_name,
                                                      self.client) is True):
            self._logger.info(
                'Service "{0}" is not configured properly'.format(
                    service_name))
            return False

        return True
Example #56
0
    def _is_openstack(self):
        cinder_service = OSManager.get_openstack_cinder_service_name()
        return ServiceManager.has_service(cinder_service, self.client)
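
Taken together, these host-management helpers suggest a simple idempotent flow: check whether a host is already configured, configure it if not, and restart the OpenStack processes so they pick up the driver. The function below is only a sketch of that flow under the assumption that the methods above live on a single management object; it is not part of the code above.

    def ensure_host_configured(mgmt, ip):
        # 'mgmt' is assumed to expose the methods shown above
        # (is_host_configured, configure_host, _restart_processes) on one object.
        if mgmt.is_host_configured(ip):
            return
        mgmt.configure_host(ip)       # copy the driver, patch Nova/Cinder, configure notifications
        mgmt._restart_processes()     # restart cinder-volume / nova services so they load the driver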