def add_fstab(device, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint
    :param device: Device to add
    :type device: str
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    new_content = []
    with open('/etc/fstab', 'r') as fstab_file:
        lines = [line.strip() for line in fstab_file.readlines()]
    found = False
    # Compile once, outside the loop; raw string fixes the invalid '\s'
    # escape sequence (a DeprecationWarning / SyntaxWarning on Python 3).
    # The anchored, escaped pattern already implies line.startswith(device),
    # so the former extra startswith() check was redundant.
    device_regex = re.compile(r'^{0}\s+'.format(re.escape(device)))
    for line in lines:
        if device_regex.match(line):
            # Device already present: replace the entry with up-to-date values
            new_content.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))
            found = True
        else:
            new_content.append(line)
    if found is False:
        # Device not present yet: append a fresh entry
        new_content.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))
    with open('/etc/fstab', 'w') as fstab_file:
        fstab_file.write('{0}\n'.format('\n'.join(new_content)))
def add_fstab(device, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint
    :param device: Device to add
    :type device: str
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    with open('/etc/fstab', 'r') as fstab_file:
        existing_entries = [entry.strip() for entry in fstab_file.readlines()]

    pattern = '^{0}\s+'.format(re.escape(device))
    rewritten = []
    replaced = False
    for entry in existing_entries:
        # An entry for this device is swapped for a freshly generated one
        if entry.startswith(device) and re.match(pattern, entry):
            rewritten.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))
            replaced = True
        else:
            rewritten.append(entry)

    # Device was not listed yet: append a new entry at the end
    if replaced is False:
        rewritten.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))

    with open('/etc/fstab', 'w') as fstab_file:
        fstab_file.write('{0}\n'.format('\n'.join(rewritten)))
def __init__(self, cinder_client):
    """
    Initialize the OpenStack management helper.

    :param cinder_client: Cinder client used for volume operations
    """
    # SSH connection to the local node (credentials redacted in source)
    self.client = SSHClient('127.0.0.1', username='******')
    self.cinder_client = cinder_client
    # Well-known OpenStack configuration file locations
    self._NOVA_CONF = '/etc/nova/nova.conf'
    self._CINDER_CONF = '/etc/cinder/cinder.conf'
    # Package-based OpenStack is detected via the presence of the cinder service
    self._is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), self.client)
    self._nova_installed = self.client.file_exists(self._NOVA_CONF)
    self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
    self._driver_location = OSManager.get_openstack_package_base_path()
    self._openstack_users = OSManager.get_openstack_users()
    self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'
    try:
        # DevStack runs its services in a screen session owned by user 'stack'
        self._is_devstack = 'stack' in str(self.client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true'))
    except SystemExit:  # ssh client raises system exit 1
        self._is_devstack = False
    except Exception:
        self._is_devstack = False
    try:
        # Map the installed cinder version string onto an OpenStack release name
        from cinder import version
        version_string = version.version_string()
        if version_string.startswith('2015.2') or version_string.startswith('7.0'):
            self._stack_version = 'liberty'
        elif version_string.startswith('2015.1'):
            self._stack_version = 'kilo'
        elif version_string.startswith('2014.2'):
            self._stack_version = 'juno'
        else:
            raise ValueError('Unsupported cinder version: {0}'.format(version_string))
    except Exception as ex:
        # Covers both an unimportable cinder and an unsupported version
        raise ValueError('Cannot determine cinder version: {0}'.format(ex))
def add_fstab(partition_aliases, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint
    :param partition_aliases: Possible aliases of the partition to add
    :type partition_aliases: list
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    if len(partition_aliases) == 0:
        raise ValueError('No partition aliases provided')

    with open('/etc/fstab', 'r') as fstab_file:
        lines = [line.strip() for line in fstab_file.readlines()]

    used_path = None
    used_index = None
    mount_line = None
    for device_alias in partition_aliases:
        # Raw string fixes the invalid '\s' escape (Python 3 warning);
        # the anchored, escaped pattern implies startswith(device_alias)
        alias_regex = re.compile(r'^{0}\s+'.format(re.escape(device_alias)))
        for index, line in enumerate(lines):
            if line.startswith('#'):  # Skip comment lines
                continue
            if alias_regex.match(line):
                used_path = device_alias
                used_index = index
            parts = line.split()  # Split once instead of twice
            if len(parts) == 6 and parts[1] == mountpoint:
                # Example line: 'UUID=40d99523-a1e7-4374-84f2-85b5d14b516e / swap sw 0 0'
                mount_line = line
        if used_path is not None:
            break

    if used_path is None:  # Partition not yet present with any of its possible aliases
        lines.append(OSManager.get_fstab_entry(partition_aliases[0], mountpoint, filesystem))
    else:  # Partition present, update information
        replaced_line = lines[used_index]
        lines[used_index] = OSManager.get_fstab_entry(used_path, mountpoint, filesystem)
        if mount_line == replaced_line:
            # The conflicting mountpoint entry was the very line we just replaced;
            # the old pop/insert + unconditional remove raised ValueError here
            mount_line = None

    if mount_line is not None and mount_line in lines:
        # Mountpoint already in use by another device (potentially same device, but other device_path)
        lines.remove(mount_line)

    with file_mutex('ovs-fstab-lock'):
        with open('/etc/fstab', 'w') as fstab_file:
            fstab_file.write('{0}\n'.format('\n'.join(lines)))
def add_fstab(device, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint (docstring was missing entirely).
    :param device: Device to add
    :type device: str
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    new_content = []
    with open('/etc/fstab', 'r') as fstab_file:
        lines = [line.strip() for line in fstab_file.readlines()]
    found = False
    # Compile once; raw string fixes the invalid '\s' escape sequence.
    # The anchored escaped pattern already implies line.startswith(device).
    device_regex = re.compile(r'^{0}\s+'.format(re.escape(device)))
    for line in lines:
        if device_regex.match(line):
            # Existing entry for this device is replaced with the new values
            new_content.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))
            found = True
        else:
            new_content.append(line)
    if found is False:
        new_content.append(OSManager.get_fstab_entry(device, mountpoint, filesystem))
    with open('/etc/fstab', 'w') as fstab_file:
        fstab_file.write('{0}\n'.format('\n'.join(new_content)))
def _get_driver_code(self):
    """
    CP driver, compare versions, allow local code to be updated with version from current package
    """
    release = OpenStackManagement._get_version()
    packaged_driver = "/opt/OpenvStorage/config/templates/cinder-volume-driver/%s/openvstorage.py" % release
    installed_version = OpenStackManagement._get_existing_driver_version()
    packaged_version = OpenStackManagement._get_remote_driver_version(packaged_driver)

    # Resolve where the driver lives on this node
    if self.is_devstack:
        local_driver = '{0}/volume/drivers/openvstorage.py'.format(OpenStackManagement._get_base_path('cinder'))
    elif self.is_openstack:
        local_driver = '{0}/cinder/volume/drivers/openvstorage.py'.format(OSManager.get_openstack_package_base_path())
    else:
        raise ValueError('OpenStack or DevStack only')

    if packaged_version > installed_version:
        # Package ships a newer driver: copy it over the deployed one
        logger.debug('Updating existing driver using {0} from version {1} to version {2}'.format(packaged_driver, installed_version, packaged_version))
        if self.is_devstack:
            self.client.run('cp -f {0} /opt/stack/cinder/cinder/volume/drivers'.format(packaged_driver))
        elif self.is_openstack:
            self.client.run('cp -f {0} {1}'.format(packaged_driver, local_driver))
    else:
        logger.debug('Using driver {0} version {1}'.format(local_driver, installed_version))
def pulse():
    """
    Update the heartbeats for all Storage Routers
    :return: None
    """
    logger = LogHandler.get('extensions', name='heartbeat')
    current_time = int(time.time())
    machine_id = System.get_my_machine_id()
    # AMQP broker URI assembled from the etcd-backed framework configuration
    amqp = '{0}://{1}:{2}@{3}//'.format(EtcdConfiguration.get('/ovs/framework/messagequeue|protocol'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|user'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|password'),
                                        EtcdConfiguration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id)))
    celery_path = OSManager.get_path('celery')
    # Ping all celery workers over the broker; perl strips ANSI colour codes,
    # '|| true' keeps a non-zero exit (no workers) from raising CalledProcessError
    worker_states = check_output("{0} inspect ping -b {1} --timeout=5 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
    routers = StorageRouterList.get_storagerouters()
    for node in routers:
        if node.heartbeats is None:
            node.heartbeats = {}
        # Worker responded to the ping -> refresh its celery heartbeat
        if 'celery@{0}: OK'.format(node.name) in worker_states:
            node.heartbeats['celery'] = current_time
        if node.machine_id == machine_id:
            # This very process is alive on the local node
            node.heartbeats['process'] = current_time
        else:
            try:
                # check timeout of other nodes and clear arp cache
                if node.heartbeats and 'process' in node.heartbeats:
                    if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                        check_output("/usr/sbin/arp -d {0}".format(node.name), shell=True)
            except CalledProcessError:
                logger.exception('Error clearing ARP cache')
        node.save()
def _get_driver_code(self):
    """
    CP driver, compare versions, allow local code to be updated with version from current package
    """
    pkg_version = OpenStackManagement._get_version()
    source_driver = "/opt/OpenvStorage/config/templates/cinder-volume-driver/%s/openvstorage.py" % pkg_version
    deployed_version = OpenStackManagement._get_existing_driver_version()
    available_version = OpenStackManagement._get_remote_driver_version(source_driver)

    # Figure out the path of the driver that cinder actually loads
    if self.is_devstack:
        base = OpenStackManagement._get_base_path('cinder')
        deployed_driver = '{0}/volume/drivers/openvstorage.py'.format(base)
    elif self.is_openstack:
        base = OSManager.get_openstack_package_base_path()
        deployed_driver = '{0}/cinder/volume/drivers/openvstorage.py'.format(base)
    else:
        raise ValueError('OpenStack or DevStack only')

    if available_version > deployed_version:
        logger.debug('Updating existing driver using {0} from version {1} to version {2}'.format(source_driver, deployed_version, available_version))
        if self.is_devstack:
            self.client.run('cp -f {0} /opt/stack/cinder/cinder/volume/drivers'.format(source_driver))
        elif self.is_openstack:
            self.client.run('cp -f {0} {1}'.format(source_driver, deployed_driver))
    else:
        # Deployed driver is already current
        logger.debug('Using driver {0} version {1}'.format(deployed_driver, deployed_version))
def is_devstack_installed():
    """
    Check if OpenStack or DevStack is installed
    :return: True if installed
    """
    client = SSHClient('127.0.0.1', username='******')
    is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), client)
    # BUG FIX: 'allow_insecure=True' was passed to str() instead of client.run(),
    # which raises "TypeError: 'allow_insecure' is an invalid keyword argument"
    # at runtime. It belongs to the SSH client's run() call.
    is_devstack = 'stack' in str(client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true', allow_insecure=True))
    return is_openstack or is_devstack
def __init__(self, cinder_client):
    """
    Initialize the OpenStack management helper.

    :param cinder_client: Cinder client used for volume operations
    """
    self._logger = LogHandler.get('extensions', name='openstack_mgmt')
    # SSH connection to the local node (credentials redacted in source)
    self.client = SSHClient('127.0.0.1', username='******')
    self.cinder_client = cinder_client
    # Well-known OpenStack configuration file locations
    self._NOVA_CONF = '/etc/nova/nova.conf'
    self._CINDER_CONF = '/etc/cinder/cinder.conf'
    # Package-based OpenStack is detected via the presence of the cinder service
    self._is_openstack = ServiceManager.has_service(OSManager.get_openstack_cinder_service_name(), self.client)
    self._nova_installed = self.client.file_exists(self._NOVA_CONF)
    self._cinder_installed = self.client.file_exists(self._CINDER_CONF)
    self._driver_location = OSManager.get_openstack_package_base_path()
    self._openstack_users = OSManager.get_openstack_users()
    self._devstack_driver = '/opt/stack/cinder/cinder/volume/drivers/openvstorage.py'
    try:
        # DevStack runs its services in a screen session owned by user 'stack'
        self._is_devstack = 'stack' in str(self.client.run('ps aux | grep SCREEN | grep stack | grep -v grep || true'))
    except SystemExit:  # ssh client raises system exit 1
        self._is_devstack = False
    except Exception:
        self._is_devstack = False
    try:
        # Map the installed cinder version string onto an OpenStack release name
        from cinder import version
        version_string = version.version_string()
        if version_string.startswith('9.0'):
            self._stack_version = 'newton'
        elif version_string.startswith('8.0'):
            self._stack_version = 'mitaka'
        elif version_string.startswith('2015.2') or version_string.startswith('7.0'):
            self._stack_version = 'liberty'
        elif version_string.startswith('2015.1'):
            self._stack_version = 'kilo'
        elif version_string.startswith('2014.2'):
            self._stack_version = 'juno'
        else:
            raise ValueError('Unsupported cinder version: {0}'.format(version_string))
    except Exception as ex:
        # Covers both an unimportable cinder and an unsupported version
        raise ValueError('Cannot determine cinder version: {0}'.format(ex))
def add_fstab(partition_aliases, mountpoint, filesystem):
    """
    Add entry to /etc/fstab for mountpoint
    :param partition_aliases: Possible aliases of the partition to add
    :type partition_aliases: list
    :param mountpoint: Mountpoint on which device is mounted
    :type mountpoint: str
    :param filesystem: Filesystem used
    :type filesystem: str
    :return: None
    """
    if len(partition_aliases) == 0:
        raise ValueError('No partition aliases provided')

    with open('/etc/fstab', 'r') as fstab_file:
        entries = [entry.strip() for entry in fstab_file.readlines()]

    matched_alias = None
    matched_index = None
    conflicting_line = None
    for alias in partition_aliases:
        for position, entry in enumerate(entries):
            if entry.startswith('#'):  # Ignore commented-out entries
                continue
            if entry.startswith(alias) and re.match('^{0}\s+'.format(re.escape(alias)), entry):
                matched_alias = alias
                matched_index = position
            if len(entry.split()) == 6 and entry.split()[1] == mountpoint:
                # Example line: 'UUID=40d99523-a1e7-4374-84f2-85b5d14b516e / swap sw 0 0'
                conflicting_line = entry
        if matched_alias is not None:
            break

    if matched_alias is None:
        # Partition not yet present with any of its possible aliases
        entries.append(OSManager.get_fstab_entry(partition_aliases[0], mountpoint, filesystem))
    else:
        # Partition present, update information
        entries.pop(matched_index)
        entries.insert(matched_index, OSManager.get_fstab_entry(matched_alias, mountpoint, filesystem))

    if conflicting_line is not None:
        # Mountpoint already in use by another device (potentially same device, but other device_path)
        entries.remove(conflicting_line)

    with file_mutex('ovs-fstab-lock'):
        with open('/etc/fstab', 'w') as fstab_file:
            fstab_file.write('{0}\n'.format('\n'.join(entries)))
def _configure_user_groups(self):
    """
    Grant the relevant OpenStack/DevStack users membership of the 'ovs' group.
    """
    # Vpool owned by stack / cinder
    # Give access to libvirt-qemu and ovs
    if self.is_devstack:
        accounts = ['libvirt-qemu', 'stack']
    elif self.is_openstack:
        accounts = OSManager.get_openstack_users()
    else:
        accounts = []
    for account in accounts:
        self.client.run('usermod -a -G ovs {0}'.format(account))
def _restart_openstack_services(self):
    """
    Restart services on openstack
    """
    for svc_name in OSManager.get_openstack_services():
        if not ServiceManager.has_service(svc_name, self.client):
            continue
        try:
            ServiceManager.restart_service(svc_name, self.client)
        except SystemExit as sex:
            # Restart failure is logged, remaining services are still attempted
            logger.debug('Failed to restart service {0}. {1}'.format(svc_name, sex))
    time.sleep(3)  # Give the services a moment to come up before checking cinder
    return self._is_cinder_running()
def _is_cinder_running(self):
    """
    Check whether the cinder volume process/service is currently running.
    :return: True when cinder-volume is active, False otherwise
    """
    if self.is_devstack:
        # DevStack: look for a cinder-volume process in the process list
        try:
            listing = self.client.run('ps aux | grep cinder-volume | grep -v grep')
            return 'cinder-volume' in str(listing)
        except SystemExit:  # ssh client exits non-zero when nothing matches
            return False
    if self.is_openstack:
        # Packaged OpenStack: ask the service manager for the service status
        try:
            cinder_service = OSManager.get_openstack_cinder_service_name()
            return ServiceManager.get_service_status(cinder_service, self.client)
        except SystemExit:
            return False
    return False
def _restart_openstack_services(self):
    """
    Restart services on openstack
    """
    # Only services actually registered on this node are restarted
    present_services = [name for name in OSManager.get_openstack_services()
                        if ServiceManager.has_service(name, self.client)]
    for name in present_services:
        try:
            ServiceManager.restart_service(name, self.client)
        except SystemExit as sex:
            logger.debug('Failed to restart service {0}. {1}'.format(name, sex))
    # Give the services a moment to settle before probing cinder
    time.sleep(3)
    return self._is_cinder_running()
def _is_cinder_running(self):
    """
    Determine whether the cinder volume service is up on this node.
    :return: True when running, False otherwise
    """
    if not (self.is_devstack or self.is_openstack):
        return False
    try:
        if self.is_devstack:
            # DevStack: grep the process table for cinder-volume
            process_listing = self.client.run('ps aux | grep cinder-volume | grep -v grep')
            return 'cinder-volume' in str(process_listing)
        # Packaged OpenStack: query the service manager
        cinder_service = OSManager.get_openstack_cinder_service_name()
        return ServiceManager.get_service_status(cinder_service, self.client)
    except SystemExit:  # ssh client exits non-zero on failure / empty grep
        return False
def pulse():
    """
    Update the heartbeats for all Storage Routers
    :return: None
    """
    logger = LogHandler.get('extensions', name='heartbeat')
    current_time = int(time.time())
    machine_id = System.get_my_machine_id()
    # Broker URI built from the etcd-backed configuration of this cluster
    amqp = '{0}://{1}:{2}@{3}//'.format(EtcdConfiguration.get('/ovs/framework/messagequeue|protocol'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|user'),
                                        EtcdConfiguration.get('/ovs/framework/messagequeue|password'),
                                        EtcdConfiguration.get('/ovs/framework/hosts/{0}/ip'.format(machine_id)))
    celery_path = OSManager.get_path('celery')
    # Ping the celery workers; perl removes ANSI colour codes, '|| true'
    # prevents an empty/failed ping from raising CalledProcessError
    worker_states = check_output("{0} inspect ping -b {1} --timeout=5 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
    routers = StorageRouterList.get_storagerouters()
    for node in routers:
        if node.heartbeats is None:
            node.heartbeats = {}
        # A responding worker refreshes the node's celery heartbeat
        if 'celery@{0}: OK'.format(node.name) in worker_states:
            node.heartbeats['celery'] = current_time
        if node.machine_id == machine_id:
            # Local node: this process itself is the proof of life
            node.heartbeats['process'] = current_time
        else:
            try:
                # check timeout of other nodes and clear arp cache
                if node.heartbeats and 'process' in node.heartbeats:
                    if current_time - node.heartbeats['process'] >= HeartBeat.ARP_TIMEOUT:
                        check_output("/usr/sbin/arp -d {0}".format(node.name), shell=True)
            except CalledProcessError:
                logger.exception('Error clearing ARP cache')
        node.save()
# Heartbeat script: refresh celery/process heartbeats for all Storage Routers.
# NOTE(review): this chunk is TRUNCATED in this copy of the source -- the body
# of the final 'if' (and anything after it) is missing; compare against VCS.
from subprocess import check_output
from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.extensions.generic.system import System
from ovs.extensions.generic.configuration import Configuration
from ovs.extensions.os.os import OSManager

# Seconds of process silence after which a node's ARP entry is flushed
ARP_TIMEOUT = 30

# NOTE(review): 'time' is used below but not imported in this truncated chunk
current_time = int(time.time())
machine_id = System.get_my_machine_id()
# AMQP broker URI assembled from the framework configuration
amqp = '{0}://{1}:{2}@{3}//'.format(Configuration.get('ovs.core.broker.protocol'),
                                    Configuration.get('ovs.core.broker.login'),
                                    Configuration.get('ovs.core.broker.password'),
                                    Configuration.get('ovs.grid.ip'))
celery_path = OSManager.get_path('celery')
# Ping every celery worker; perl strips ANSI colour codes, '|| true' keeps a
# failed ping from raising CalledProcessError
worker_states = check_output("{0} inspect ping -b {1} 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
routers = StorageRouterList.get_storagerouters()
for node in routers:
    if node.heartbeats is None:
        node.heartbeats = {}
    if 'celery@{0}: OK'.format(node.name) in worker_states:
        node.heartbeats['celery'] = current_time
    if node.machine_id == machine_id:
        node.heartbeats['process'] = current_time
    else:
        # check timeout of other nodes and clear arp cache
        if node.heartbeats and 'process' in node.heartbeats:
def _apply_patches(self):
    """
    Patch nova and cinder sources so file-backed (OVS) volumes work:
    inserts a LibvirtFileVolumeDriver into nova's libvirt volume drivers,
    registers it in nova's driver table, and widens cinder's brick initiator
    to treat the FILE protocol like LOCAL.
    """
    nova_base_path = OpenStackManagement._get_base_path('nova')
    cinder_base_path = OpenStackManagement._get_base_path('cinder')
    version = OpenStackManagement._get_version()

    # fix "blockdev" issue
    if self.is_devstack:
        nova_volume_file = '{0}/virt/libvirt/volume.py'.format(nova_base_path)
        nova_driver_file = '{0}/virt/libvirt/driver.py'.format(nova_base_path)
        cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(cinder_base_path)
    elif self.is_openstack:
        driver_location = OSManager.get_openstack_package_base_path()
        nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(driver_location)
        nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(driver_location)
        cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(driver_location)
    else:
        raise ValueError('OpenStack or DevStack only')

    # Inline python run on the node: idempotently inserts the driver class and
    # its registration line into nova's sources.
    # NOTE(review): the embedded snippet's exact whitespace was lost in this
    # copy of the source; the layout below is reconstructed -- verify against VCS.
    self.client.run("""python -c "
import os
version = '%s'
nova_volume_file = '%s'
nova_driver_file = '%s'
with open(nova_volume_file, 'r') as f:
    file_contents = f.readlines()

new_class = '''
class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):
    def __init__(self, connection):
        super(LibvirtFileVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        conf = super(LibvirtFileVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']
        return conf
'''
patched = False
for line in file_contents:
    if 'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):' in line:
        patched = True
        break

if not patched:
    fc = None
    for line in file_contents[:]:
        if line.startswith('class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):'):
            fc = file_contents[:file_contents.index(line)] + [l+'\\n' for l in new_class.split('\\n')] + file_contents[file_contents.index(line):]
            break
    if fc is not None:
        with open(nova_volume_file, 'w') as f:
            f.writelines(fc)

with open(nova_driver_file, 'r') as f:
    file_contents = f.readlines()

patched = False
for line in file_contents:
    if 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver' in line:
        patched = True
        break

if not patched:
    fc = None
    for line in file_contents[:]:
        if 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver' in line:
            fc = file_contents[:file_contents.index(line)] + [''' 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver',\\n'''] + file_contents[file_contents.index(line):]
            break
    if fc is not None:
        with open(nova_driver_file, 'w') as f:
            f.writelines(fc)
"
""" % (version, nova_volume_file, nova_driver_file))

    # fix brick/upload to glance
    if os.path.exists(cinder_brick_initiator_file):
        self.client.run("""sed -i 's/elif protocol == "LOCAL":/elif protocol in ["LOCAL", "FILE"]:/g' %s""" % cinder_brick_initiator_file)
def _restart_processes(self):
    """
    Restart the cinder process that uses the OVS volume driver
    - also restarts nova api and compute services
    """
    def stop_screen_process(process_name):
        # Query whether the named window exists in the 'stack' screen session;
        # '-Q select' echoes exit status 0 when the window is present
        out = self.client.run('''su stack -c 'screen -S {0} -p {1} -Q select 1>/dev/null; echo $?' '''.format(screen_name, process_name))
        process_screen_exists = out == '0'
        if process_screen_exists:
            # Send a newline to the window, then kill it
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X stuff \n' '''.format(screen_name, process_name))
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X kill' '''.format(screen_name, process_name))
        return process_screen_exists

    def start_screen_process(process_name, commands):
        # Fresh timestamped logfile, with a stable symlink <process>.log next to it
        logfile = '{0}/{1}.log.{2}'.format(logdir, process_name, datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H%M%S'))
        self._logger.debug(self.client.run('''su stack -c 'touch {0}' '''.format(logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -X screen -t {1}' '''.format(screen_name, process_name)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X logfile {2}' '''.format(screen_name, process_name, logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X log on' '''.format(screen_name, process_name)))
        time.sleep(1)
        self._logger.debug(self.client.run('rm {0}/{1}.log || true'.format(logdir, process_name)))
        self._logger.debug(self.client.run('ln -sf {0} {1}/{2}.log'.format(logfile, logdir, process_name)))
        for command in commands:
            # \012 is the octal newline that screen's 'stuff' needs to press enter
            cmd = '''su stack -c 'screen -S {0} -p {1} -X stuff "{2}\012"' '''.format(screen_name, process_name, command)
            self._logger.debug(cmd)
            self._logger.debug(self.client.run(cmd))

    logdir = '/opt/stack/logs'
    screen_name = 'stack'
    if self._is_devstack is True:
        try:
            # Stop each screen window first, remembering which ones existed
            c_vol_screen_exists = stop_screen_process('c-vol')
            n_cpu_screen_exists = stop_screen_process('n-cpu')
            n_api_screen_exists = stop_screen_process('n-api')
            c_api_screen_exists = stop_screen_process('c-api')
            self.client.run('''su stack -c 'mkdir -p /opt/stack/logs' ''')
            if c_vol_screen_exists:
                start_screen_process('c-vol', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "newgrp ovs",
                                               "newgrp stack",
                                               "umask 0002",
                                               "/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf & echo \$! >/opt/stack/status/stack/c-vol.pid; fg || echo c-vol failed to start | tee \"/opt/stack/status/stack/c-vol.failure\" "])
                time.sleep(3)
            if n_cpu_screen_exists:
                start_screen_process('n-cpu', ["newgrp ovs",
                                               "newgrp stack",
                                               "sg libvirtd /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf & echo $! >/opt/stack/status/stack/n-cpu.pid; fg || echo n-cpu failed to start | tee \"/opt/stack/status/stack/n-cpu.failure\" "])
                time.sleep(3)
            if n_api_screen_exists:
                start_screen_process('n-api', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "/usr/local/bin/nova-api & echo $! >/opt/stack/status/stack/n-api.pid; fg || echo n-api failed to start | tee \"/opt/stack/status/stack/n-api.failure\" "])
                time.sleep(3)
            if c_api_screen_exists:
                start_screen_process('c-api', ["/usr/local/bin/cinder-api --config-file /etc/cinder/cinder.conf & echo $! >/opt/stack/status/stack/c-api.pid; fg || echo c-api failed to start | tee \"/opt/stack/status/stack/c-api.failure\" "])
                time.sleep(3)
        except SystemExit as se:  # failed command or non-zero exit codes raise SystemExit
            raise RuntimeError(str(se))
    else:
        # Packaged OpenStack: restart services via the service manager
        for service_name in OSManager.get_openstack_services():
            if ServiceManager.has_service(service_name, self.client):
                try:
                    ServiceManager.restart_service(service_name, self.client)
                except SystemExit as sex:
                    self._logger.debug('Failed to restart service {0}. {1}'.format(service_name, sex))
        time.sleep(3)
def _restart_processes(self):
    """
    Restart the cinder process that uses the OVS volume driver
    - also restarts nova api and compute services
    """
    def stop_screen_process(process_name):
        # '-Q select' echoes exit status 0 when the window exists in the session
        out = self.client.run('''su stack -c 'screen -S {0} -p {1} -Q select 1>/dev/null; echo $?' '''.format(screen_name, process_name))
        process_screen_exists = out == '0'
        if process_screen_exists:
            # Press enter in the window, then kill it
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X stuff \n' '''.format(screen_name, process_name))
            self.client.run('''su stack -c 'screen -S {0} -p {1} -X kill' '''.format(screen_name, process_name))
        return process_screen_exists

    def start_screen_process(process_name, commands):
        # Timestamped logfile plus a stable <process>.log symlink pointing at it
        logfile = '{0}/{1}.log.{2}'.format(logdir, process_name, datetime.datetime.strftime(datetime.datetime.now(), '%Y-%m-%d-%H%M%S'))
        self._logger.debug(self.client.run('''su stack -c 'touch {0}' '''.format(logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -X screen -t {1}' '''.format(screen_name, process_name)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X logfile {2}' '''.format(screen_name, process_name, logfile)))
        self._logger.debug(self.client.run('''su stack -c 'screen -S {0} -p {1} -X log on' '''.format(screen_name, process_name)))
        time.sleep(1)
        self._logger.debug(self.client.run('rm {0}/{1}.log || true'.format(logdir, process_name)))
        self._logger.debug(self.client.run('ln -sf {0} {1}/{2}.log'.format(logfile, logdir, process_name)))
        for command in commands:
            # \012 (octal newline) makes screen's 'stuff' submit the command
            cmd = '''su stack -c 'screen -S {0} -p {1} -X stuff "{2}\012"' '''.format(screen_name, process_name, command)
            self._logger.debug(cmd)
            self._logger.debug(self.client.run(cmd))

    logdir = '/opt/stack/logs'
    screen_name = 'stack'
    if self._is_devstack is True:
        try:
            # Stop the windows first and remember which existed, so only those restart
            c_vol_screen_exists = stop_screen_process('c-vol')
            n_cpu_screen_exists = stop_screen_process('n-cpu')
            n_api_screen_exists = stop_screen_process('n-api')
            c_api_screen_exists = stop_screen_process('c-api')
            self.client.run('''su stack -c 'mkdir -p /opt/stack/logs' ''')
            if c_vol_screen_exists:
                start_screen_process('c-vol', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "newgrp ovs",
                                               "newgrp stack",
                                               "umask 0002",
                                               "/usr/local/bin/cinder-volume --config-file /etc/cinder/cinder.conf & echo \$! >/opt/stack/status/stack/c-vol.pid; fg || echo c-vol failed to start | tee \"/opt/stack/status/stack/c-vol.failure\" "])
                time.sleep(3)
            if n_cpu_screen_exists:
                start_screen_process('n-cpu', ["newgrp ovs",
                                               "newgrp stack",
                                               "sg libvirtd /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf & echo $! >/opt/stack/status/stack/n-cpu.pid; fg || echo n-cpu failed to start | tee \"/opt/stack/status/stack/n-cpu.failure\" "])
                time.sleep(3)
            if n_api_screen_exists:
                start_screen_process('n-api', ["export PYTHONPATH=\"${PYTHONPATH}:/opt/OpenvStorage\" ",
                                               "/usr/local/bin/nova-api & echo $! >/opt/stack/status/stack/n-api.pid; fg || echo n-api failed to start | tee \"/opt/stack/status/stack/n-api.failure\" "])
                time.sleep(3)
            if c_api_screen_exists:
                start_screen_process('c-api', ["/usr/local/bin/cinder-api --config-file /etc/cinder/cinder.conf & echo $! >/opt/stack/status/stack/c-api.pid; fg || echo c-api failed to start | tee \"/opt/stack/status/stack/c-api.failure\" "])
                time.sleep(3)
        except SystemExit as se:  # failed command or non-zero exit codes raise SystemExit
            raise RuntimeError(str(se))
    else:
        # Packaged OpenStack: delegate restarts to the service manager
        for service_name in OSManager.get_openstack_services():
            if ServiceManager.has_service(service_name, self.client):
                try:
                    ServiceManager.restart_service(service_name, self.client)
                except SystemExit as sex:
                    self._logger.debug('Failed to restart service {0}. {1}'.format(service_name, sex))
        time.sleep(3)
def _is_openstack(self):
    """
    Tell whether the OpenStack cinder service is registered on this node.
    :return: True when the cinder service exists
    """
    service_name = OSManager.get_openstack_cinder_service_name()
    return ServiceManager.has_service(service_name, self.client)
# Heartbeat script: refresh celery/process heartbeats for all Storage Routers
# and flush the ARP entry of nodes that have gone silent.
import time
from subprocess import check_output
from ovs.dal.lists.storagerouterlist import StorageRouterList
from ovs.extensions.generic.system import System
from ovs.extensions.generic.configuration import Configuration
from ovs.extensions.os.os import OSManager

# Seconds of process silence after which a node's ARP entry is flushed
ARP_TIMEOUT = 30

current_time = int(time.time())
machine_id = System.get_my_machine_id()
# AMQP broker URI assembled from the framework configuration
amqp = '{0}://{1}:{2}@{3}//'.format(Configuration.get('ovs.core.broker.protocol'),
                                    Configuration.get('ovs.core.broker.login'),
                                    Configuration.get('ovs.core.broker.password'),
                                    Configuration.get('ovs.grid.ip'))
celery_path = OSManager.get_path('celery')
# Ping every celery worker over the broker; perl strips ANSI colour codes,
# '|| true' keeps a failed ping from raising CalledProcessError
worker_states = check_output("{0} inspect ping -b {1} 2> /dev/null | grep OK | perl -pe 's/\x1b\[[0-9;]*m//g' || true".format(celery_path, amqp), shell=True)
routers = StorageRouterList.get_storagerouters()
for node in routers:
    if node.heartbeats is None:
        node.heartbeats = {}
    # A responding worker refreshes the node's celery heartbeat
    if 'celery@{0}: OK'.format(node.name) in worker_states:
        node.heartbeats['celery'] = current_time
    if node.machine_id == machine_id:
        # Local node: this running script is the proof of life
        node.heartbeats['process'] = current_time
    else:
        # check timeout of other nodes and clear arp cache
        if node.heartbeats and 'process' in node.heartbeats:
            if current_time - node.heartbeats['process'] >= ARP_TIMEOUT:
                check_output("/usr/sbin/arp -d {0}".format(node.name), shell=True)
    node.save()
def _apply_patches(self):
    """
    Patch nova and cinder sources for file-backed (OVS) volumes:
    adds a LibvirtFileVolumeDriver to nova's libvirt volume drivers, registers
    it in nova's driver table, and makes cinder's brick initiator accept the
    FILE protocol wherever it accepts LOCAL.
    """
    nova_base_path = OpenStackManagement._get_base_path('nova')
    cinder_base_path = OpenStackManagement._get_base_path('cinder')
    version = OpenStackManagement._get_version()

    # fix "blockdev" issue
    if self.is_devstack:
        nova_volume_file = '{0}/virt/libvirt/volume.py'.format(nova_base_path)
        nova_driver_file = '{0}/virt/libvirt/driver.py'.format(nova_base_path)
        cinder_brick_initiator_file = '{0}/brick/initiator/connector.py'.format(cinder_base_path)
    elif self.is_openstack:
        driver_location = OSManager.get_openstack_package_base_path()
        nova_volume_file = '{0}/nova/virt/libvirt/volume.py'.format(driver_location)
        nova_driver_file = '{0}/nova/virt/libvirt/driver.py'.format(driver_location)
        cinder_brick_initiator_file = '{0}/cinder/brick/initiator/connector.py'.format(driver_location)
    else:
        raise ValueError('OpenStack or DevStack only')

    # Inline python executed on the node: inserts the driver class and its
    # registration line, skipping files that have already been patched.
    # NOTE(review): the embedded snippet's exact whitespace was lost in this
    # copy of the source; the layout below is reconstructed -- verify against VCS.
    self.client.run("""python -c "
import os
version = '%s'
nova_volume_file = '%s'
nova_driver_file = '%s'
with open(nova_volume_file, 'r') as f:
    file_contents = f.readlines()

new_class = '''
class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):
    def __init__(self, connection):
        super(LibvirtFileVolumeDriver,
              self).__init__(connection, is_block_dev=False)

    def get_config(self, connection_info, disk_info):
        conf = super(LibvirtFileVolumeDriver,
                     self).get_config(connection_info, disk_info)
        conf.source_type = 'file'
        conf.source_path = connection_info['data']['device_path']
        return conf
'''
patched = False
for line in file_contents:
    if 'class LibvirtFileVolumeDriver(LibvirtBaseVolumeDriver):' in line:
        patched = True
        break

if not patched:
    fc = None
    for line in file_contents[:]:
        if line.startswith('class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):'):
            fc = file_contents[:file_contents.index(line)] + [l+'\\n' for l in new_class.split('\\n')] + file_contents[file_contents.index(line):]
            break
    if fc is not None:
        with open(nova_volume_file, 'w') as f:
            f.writelines(fc)

with open(nova_driver_file, 'r') as f:
    file_contents = f.readlines()

patched = False
for line in file_contents:
    if 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver' in line:
        patched = True
        break

if not patched:
    fc = None
    for line in file_contents[:]:
        if 'local=nova.virt.libvirt.volume.LibvirtVolumeDriver' in line:
            fc = file_contents[:file_contents.index(line)] + [''' 'file=nova.virt.libvirt.volume.LibvirtFileVolumeDriver',\\n'''] + file_contents[file_contents.index(line):]
            break
    if fc is not None:
        with open(nova_driver_file, 'w') as f:
            f.writelines(fc)
"
""" % (version, nova_volume_file, nova_driver_file))

    # fix brick/upload to glance
    if os.path.exists(cinder_brick_initiator_file):
        self.client.run("""sed -i 's/elif protocol == "LOCAL":/elif protocol in ["LOCAL", "FILE"]:/g' %s""" % cinder_brick_initiator_file)