def change_service_state(client, name, state, logger=None):
    """
    Starts/stops/restarts a service
    :param client: SSHClient on which to connect and change service state
    :param name: Name of the service
    :param state: State to put the service in ('start', 'stop' or 'restart')
    :param logger: LogHandler Object; optional - when None, debug logging is skipped
    """
    action = None
    # get_service_status returns a (status, output) tuple; only the boolean status is needed
    status, _ = ServiceManager.get_service_status(name, client=client)
    if status is False and state in ['start', 'restart']:
        if logger is not None:
            logger.debug(' {0:<15} - Starting service {1}'.format(client.ip, name))
        ServiceManager.start_service(name, client=client)
        action = 'started'
    elif status is True and state == 'stop':
        if logger is not None:
            logger.debug(' {0:<15} - Stopping service {1}'.format(client.ip, name))
        ServiceManager.stop_service(name, client=client)
        action = 'stopped'
    elif status is True and state == 'restart':
        if logger is not None:
            logger.debug(' {0:<15} - Restarting service {1}'.format(client.ip, name))
        ServiceManager.restart_service(name, client=client)
        action = 'restarted'
    if action is None:
        # Nothing changed: the service was already in the requested state
        print(' [{0}] {1} already {2}'.format(client.ip, name, 'running' if status is True else 'halted'))
    else:
        # BUG FIX: 'logger' is optional (default None), so guard the debug call
        # exactly like every branch above does - previously this crashed with
        # AttributeError when called without a logger.
        if logger is not None:
            logger.debug(' {0:<15} - Service {1} {2}'.format(client.ip, name, action))
        print(' [{0}] {1} {2}'.format(client.ip, name, action))
def restart_required_services():
    """
    Restarts the services required after the ASD manager setup.

    Currently this only restarts the avahi-daemon service through the
    `root_client` connection (presumably a module-level/outer-scope SSHClient
    with root privileges - confirm against the surrounding file).

    NOTE(review): the original docstring claimed ":returns if all services
    successfully restarted :rtype bool", but the function returns None and
    performs no success check - confirm the intended contract.
    """
    ServiceManager.restart_service('avahi-daemon', root_client)
def _restart_openstack_services(self):
    """
    Restart every OpenStack service present on this node, then report
    whether cinder is running again.
    """
    for svc in OSManager.get_openstack_services():
        # Skip services that are not installed on this host
        if not ServiceManager.has_service(svc, self.client):
            continue
        try:
            ServiceManager.restart_service(svc, self.client)
        except SystemExit as sex:
            # Best effort: log the failure and continue with the next service
            logger.debug('Failed to restart service {0}. {1}'.format(svc, sex))
    # Give the restarted services a moment to settle before checking cinder
    time.sleep(3)
    return self._is_cinder_running()
def _restart_openstack_services(self):
    """
    Restart services on openstack and return whether cinder came back up.
    """
    openstack_services = OSManager.get_openstack_services()
    for name in openstack_services:
        known_service = ServiceManager.has_service(name, self.client)
        if known_service:
            try:
                ServiceManager.restart_service(name, self.client)
            except SystemExit as sex:
                # Restart failures are logged, not fatal
                logger.debug('Failed to restart service {0}. {1}'.format(name, sex))
    # Short grace period before verifying the cinder process
    time.sleep(3)
    return self._is_cinder_running()
def install_plugins():
    """
    (Re)load plugins
    """
    local_client = SSHClient('127.0.0.1', username='******')
    if ServiceManager.has_service('ovs-watcher-framework', local_client):
        # A running watcher means 'ovs setup' already happened, so everything must
        # be restarted to load the plugin. Otherwise the plugin will be loaded
        # once 'ovs setup' is executed.
        print('Installing plugin into Open vStorage')
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        masters = StorageRouterList.get_masters()
        slaves = StorageRouterList.get_slaves()
        all_routers = masters + slaves
        clients = {}
        try:
            for router in all_routers:
                clients[router] = SSHClient(router, username='******')
        except UnableToConnectException:
            raise RuntimeError('Not all StorageRouters are reachable')
        memcached = 'memcached'
        watcher = 'watcher-framework'
        # Stop the watcher on every node before touching memcached on the masters
        for router in all_routers:
            if ServiceManager.has_service(watcher, clients[router]):
                print('- Stopping watcher on {0} ({1})'.format(router.name, router.ip))
                ServiceManager.stop_service(watcher, clients[router])
        for router in masters:
            print('- Restarting memcached on {0} ({1})'.format(router.name, router.ip))
            ServiceManager.restart_service(memcached, clients[router])
        for router in all_routers:
            if ServiceManager.has_service(watcher, clients[router]):
                print('- Starting watcher on {0} ({1})'.format(router.name, router.ip))
                ServiceManager.start_service(watcher, clients[router])
        print('- Execute model migrations')
        from ovs.dal.helpers import Migration
        Migration.migrate()
        from ovs.lib.helpers.toolbox import Toolbox
        ip = System.get_my_storagerouter().ip
        functions = Toolbox.fetch_hooks('plugin', 'postinstall')
        if len(functions) > 0:
            print('- Execute post installation scripts')
        for function in functions:
            function(ip=ip)
        print('Installing plugin into Open vStorage: Completed')
def change_service_state(client, name, state, logger=None):
    """
    Starts/stops/restarts a service
    :param client: SSHClient on which to connect and change service state
    :param name: Name of the service
    :param state: State to put the service in ('start', 'stop' or 'restart')
    :param logger: LogHandler Object; optional - when None, debug logging is skipped
    """
    action = None
    # Enable service before changing the state
    status = ServiceManager.is_enabled(name, client=client)
    if status is False:
        if logger is not None:
            logger.debug(' {0:<15} - Enabling service {1}'.format(client.ip, name))
        ServiceManager.enable_service(name, client=client)
    # NOTE(review): here the status is used directly as a boolean (unlike the
    # tuple-unpacking variant elsewhere) - confirm against the ServiceManager API
    status = ServiceManager.get_service_status(name, client=client)
    if status is False and state in ['start', 'restart']:
        if logger is not None:
            logger.debug(' {0:<15} - Starting service {1}'.format(client.ip, name))
        ServiceManager.start_service(name, client=client)
        action = 'started'
    elif status is True and state == 'stop':
        if logger is not None:
            logger.debug(' {0:<15} - Stopping service {1}'.format(client.ip, name))
        ServiceManager.stop_service(name, client=client)
        action = 'stopped'
    elif status is True and state == 'restart':
        if logger is not None:
            logger.debug(' {0:<15} - Restarting service {1}'.format(client.ip, name))
        ServiceManager.restart_service(name, client=client)
        action = 'restarted'
    if action is None:
        # Nothing changed: the service was already in the requested state
        print(' [{0}] {1} already {2}'.format(client.ip, name, 'running' if status is True else 'halted'))
    else:
        # BUG FIX: 'logger' is optional (default None), so guard the debug call
        # exactly like every branch above does - previously this crashed with
        # AttributeError when called without a logger.
        if logger is not None:
            logger.debug(' {0:<15} - Service {1} {2}'.format(client.ip, name, action))
        print(' [{0}] {1} {2}'.format(client.ip, name, action))
def install_plugins():
    """
    (Re)load plugins
    """
    if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
        # If the watcher is running, 'ovs setup' was executed and we need to
        # restart everything to load the plugin. In the other case, the plugin
        # will be loaded once 'ovs setup' is executed.
        print('Installing plugin into Open vStorage')
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        masters = StorageRouterList.get_masters()
        slaves = StorageRouterList.get_slaves()
        clients = {}
        try:
            for node in masters + slaves:
                clients[node] = SSHClient(node, username='******')
        except UnableToConnectException:
            raise RuntimeError('Not all StorageRouters are reachable')
        watcher = 'watcher-framework'
        memcached = 'memcached'
        for node in masters + slaves:
            if ServiceManager.has_service(watcher, clients[node]):
                print('- Stopping watcher on {0} ({1})'.format(node.name, node.ip))
                ServiceManager.stop_service(watcher, clients[node])
        # memcached only runs on the master nodes
        for node in masters:
            print('- Restarting memcached on {0} ({1})'.format(node.name, node.ip))
            ServiceManager.restart_service(memcached, clients[node])
        for node in masters + slaves:
            if ServiceManager.has_service(watcher, clients[node]):
                print('- Starting watcher on {0} ({1})'.format(node.name, node.ip))
                ServiceManager.start_service(watcher, clients[node])
        print('- Execute model migrations')
        from ovs.dal.helpers import Migration
        Migration.migrate()
        from ovs.lib.helpers.toolbox import Toolbox
        my_ip = System.get_my_storagerouter().ip
        hooks = Toolbox.fetch_hooks('plugin', 'postinstall')
        if len(hooks) > 0:
            print('- Execute post installation scripts')
        for hook in hooks:
            hook(ip=my_ip)
        print('Installing plugin into Open vStorage: Completed')
def _restart_processes(self):
    """
    Restart the cinder process that uses the OVS volume driver
    - also restarts nova api and compute services

    On devstack deployments the processes run as windows inside a shared GNU
    screen session named 'stack' (run as the 'stack' user); each window is
    killed and recreated with logging enabled. On non-devstack deployments
    the regular OpenStack system services are restarted via ServiceManager.
    """
    def stop_screen_process(process_name):
        # Query whether a screen window with this name exists: 'echo $?'
        # yields '0' when the '-Q select' query succeeded. Kill the window
        # only when it exists, and report back whether it did, so the caller
        # can decide whether to recreate it.
        out = self.client.run('''su stack -c 'screen -S {0} -p {1} -Q select 1>/dev/null; echo $?' '''.format(screen_name, process_name))
        process_screen_exists = out == '0'
        if process_screen_exists:
            # Send a newline into the window first, then kill the window
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X stuff \n' '''.format(screen_name, process_name))
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X kill' '''.format(screen_name, process_name))
        return process_screen_exists

    def start_screen_process(process_name, commands):
        # Create a timestamped logfile, open a new screen window logging into
        # it, refresh the stable '<process>.log' symlink and finally "type"
        # each command into the window ('\012' is an octal newline = enter).
        logfile = '{0}/{1}.log.{2}'.format(logdir, process_name, datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H%M%S'))
        self._logger.debug(self.client.run('''su stack -c 'touch {0}' '''.format(logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -X screen -t {1}' '''.format(screen_name, process_name)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X logfile {2}' '''.format(screen_name, process_name, logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X log on' '''.format(screen_name, process_name)))
        time.sleep(1)
        # Repoint the convenience symlink at the fresh logfile
        self._logger.debug(self.client.run('rm {0}/{1}.log || true'.format(logdir, process_name)))
        self._logger.debug(self.client.run('ln -sf {0} {1}/{2}.log'.format(logfile, logdir, process_name)))
        for command in commands:
            cmd = '''su stack -c 'screen -S {0} -p {1} -X stuff "{2}\012"' '''.format(screen_name, process_name, command)
            self._logger.debug(cmd)
            self._logger.debug(self.client.run(cmd))

    logdir = '/opt/stack/logs'
    screen_name = 'stack'
    if self._is_devstack is True:
        try:
            # Stop each window first and remember which ones existed; only
            # those are started again below.
            c_vol_screen_exists = stop_screen_process('c-vol')
            n_cpu_screen_exists = stop_screen_process('n-cpu')
            n_api_screen_exists = stop_screen_process('n-api')
            c_api_screen_exists = stop_screen_process('c-api')
            self.client.run('''su stack -c 'mkdir -p /opt/stack/logs' ''')
            if c_vol_screen_exists:
                start_screen_process('c-vol', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "newgrp ovs",
                                               "newgrp stack",
                                               "umask 0002",
                                               "/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf & echo \$! >/opt/stack/status/stack/c-vol.pid; fg || echo c-vol failed to start | tee \"/opt/stack/status/stack/c-vol.failure\" "])
                time.sleep(3)
            if n_cpu_screen_exists:
                start_screen_process('n-cpu', ["newgrp ovs",
                                               "newgrp stack",
                                               "sg libvirtd /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf & echo $! >/opt/stack/status/stack/n-cpu.pid; fg || echo n-cpu failed to start | tee \"/opt/stack/status/stack/n-cpu.failure\" "])
                time.sleep(3)
            if n_api_screen_exists:
                start_screen_process('n-api', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "/usr/local/bin/nova-api & echo $! >/opt/stack/status/stack/n-api.pid; fg || echo n-api failed to start | tee \"/opt/stack/status/stack/n-api.failure\" "])
                time.sleep(3)
            if c_api_screen_exists:
                start_screen_process('c-api', ["/usr/local/bin/cinder-api --config-file /etc/cinder/cinder.conf & echo $! >/opt/stack/status/stack/c-api.pid; fg || echo c-api failed to start | tee \"/opt/stack/status/stack/c-api.failure\" "])
                time.sleep(3)
        except SystemExit as se:
            # failed command or non-zero exit codes raise SystemExit
            raise RuntimeError(str(se))
    else:
        # Non-devstack: restart the regular OpenStack services, best effort
        for service_name in OSManager.get_openstack_services():
            if ServiceManager.has_service(service_name, self.client):
                try:
                    ServiceManager.restart_service(service_name, self.client)
                except SystemExit as sex:
                    self._logger.debug('Failed to restart service {0}. {1}'.format(service_name, sex))
        time.sleep(3)
def _restart_processes(self):
    """
    Restart the cinder process that uses the OVS volume driver
    - also restarts nova api and compute services

    Devstack deployments run the processes as windows of a GNU screen session
    named 'stack' (as the 'stack' user); those windows are killed and
    recreated here with logging enabled. Other deployments restart the
    regular OpenStack system services through ServiceManager instead.
    """
    def stop_screen_process(process_name):
        # '-Q select' queries for the window; 'echo $?' makes the remote
        # command print '0' when the window exists. Kill the window only in
        # that case and tell the caller whether it was present.
        out = self.client.run('''su stack -c 'screen -S {0} -p {1} -Q select 1>/dev/null; echo $?' '''.format(screen_name, process_name))
        process_screen_exists = out == '0'
        if process_screen_exists:
            # Push a newline into the window, then kill it
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X stuff \n' '''.format(screen_name, process_name))
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X kill' '''.format(screen_name, process_name))
        return process_screen_exists

    def start_screen_process(process_name, commands):
        # Open a fresh screen window that logs into a timestamped file,
        # repoint the stable '<process>.log' symlink at it, then "type" each
        # command into the window ('\012' = octal newline, i.e. press enter).
        logfile = '{0}/{1}.log.{2}'.format(logdir, process_name, datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H%M%S'))
        self._logger.debug(self.client.run('''su stack -c 'touch {0}' '''.format(logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -X screen -t {1}' '''.format(screen_name, process_name)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X logfile {2}' '''.format(screen_name, process_name, logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X log on' '''.format(screen_name, process_name)))
        time.sleep(1)
        self._logger.debug(self.client.run('rm {0}/{1}.log || true'.format(logdir, process_name)))
        self._logger.debug(self.client.run('ln -sf {0} {1}/{2}.log'.format(logfile, logdir, process_name)))
        for command in commands:
            cmd = '''su stack -c 'screen -S {0} -p {1} -X stuff "{2}\012"' '''.format(screen_name, process_name, command)
            self._logger.debug(cmd)
            self._logger.debug(self.client.run(cmd))

    logdir = '/opt/stack/logs'
    screen_name = 'stack'
    if self._is_devstack is True:
        try:
            # Stop all windows first, remembering which existed so only those
            # are recreated below.
            c_vol_screen_exists = stop_screen_process('c-vol')
            n_cpu_screen_exists = stop_screen_process('n-cpu')
            n_api_screen_exists = stop_screen_process('n-api')
            c_api_screen_exists = stop_screen_process('c-api')
            self.client.run('''su stack -c 'mkdir -p /opt/stack/logs' ''')
            if c_vol_screen_exists:
                start_screen_process('c-vol', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "newgrp ovs",
                                               "newgrp stack",
                                               "umask 0002",
                                               "/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf & echo \$! >/opt/stack/status/stack/c-vol.pid; fg || echo c-vol failed to start | tee \"/opt/stack/status/stack/c-vol.failure\" "])
                time.sleep(3)
            if n_cpu_screen_exists:
                start_screen_process('n-cpu', ["newgrp ovs",
                                               "newgrp stack",
                                               "sg libvirtd /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf & echo $! >/opt/stack/status/stack/n-cpu.pid; fg || echo n-cpu failed to start | tee \"/opt/stack/status/stack/n-cpu.failure\" "])
                time.sleep(3)
            if n_api_screen_exists:
                start_screen_process('n-api', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "/usr/local/bin/nova-api & echo $! >/opt/stack/status/stack/n-api.pid; fg || echo n-api failed to start | tee \"/opt/stack/status/stack/n-api.failure\" "])
                time.sleep(3)
            if c_api_screen_exists:
                start_screen_process('c-api', ["/usr/local/bin/cinder-api --config-file /etc/cinder/cinder.conf & echo $! >/opt/stack/status/stack/c-api.pid; fg || echo c-api failed to start | tee \"/opt/stack/status/stack/c-api.failure\" "])
                time.sleep(3)
        except SystemExit as se:
            # failed command or non-zero exit codes raise SystemExit
            raise RuntimeError(str(se))
    else:
        # Non-devstack: restart the regular OpenStack services, best effort
        for service_name in OSManager.get_openstack_services():
            if ServiceManager.has_service(service_name, self.client):
                try:
                    ServiceManager.restart_service(service_name, self.client)
                except SystemExit as sex:
                    self._logger.debug('Failed to restart service {0}. {1}'.format(service_name, sex))
        time.sleep(3)