def _deploy(config):
    """
    Deploys a complete cluster: distributing the configuration files, creating directories and services
    """
    logger.debug('Deploying cluster {0}'.format(config.cluster_id))
    for node in config.nodes:
        logger.debug('  Deploying cluster {0} on {1}'.format(config.cluster_id, node.ip))
        ovs_client = SSHClient(node.ip)
        root_client = SSHClient(node.ip, username='******')

        # Distribute the configuration file to all nodes
        config.write_config(ovs_client)

        # Create directories as root because mountpoint /mnt/cache1 is typically owned by root
        abs_paths = [node.log_dir, node.tlog_dir, node.home]
        root_client.dir_create(abs_paths)
        root_client.dir_chmod(abs_paths, 0755, recursive=True)
        root_client.dir_chown(abs_paths, 'ovs', 'ovs', recursive=True)

        # Create services for/on all nodes in the config
        base_name = 'ovs-arakoon'
        target_name = 'ovs-arakoon-{0}'.format(config.cluster_id)
        ServiceManager.prepare_template(base_name, target_name, ovs_client)
        ServiceManager.add_service(target_name, root_client, params={'CLUSTER': config.cluster_id})
        logger.debug('  Deploying cluster {0} on {1} completed'.format(config.cluster_id, node.ip))
def extend_cluster(master_ip, new_ip, cluster_name, exclude_ports):
    """
    Extends a cluster to a given new node
    """
    logger.debug('Extending cluster {0} from {1} to {2}'.format(cluster_name, master_ip, new_ip))
    client = SSHClient(master_ip)
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(client)

    client = SSHClient(new_ip)
    base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
    port_range = client.config_read('ovs.ports.arakoon')
    ports = System.get_free_ports(port_range, exclude_ports, 2, client)
    node_name = System.get_my_machine_id(client)

    if not [node.name for node in config.nodes if node.name == node_name]:
        config.nodes.append(ArakoonNodeConfig(name=node_name,
                                              ip=new_ip,
                                              client_port=ports[0],
                                              messaging_port=ports[1],
                                              log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                              home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                              tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)))
    ArakoonInstaller._deploy(config)
    logger.debug('Extending cluster {0} from {1} to {2} completed'.format(cluster_name, master_ip, new_ip))
    return {'client_port': ports[0], 'messaging_port': ports[1]}
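# Hedged usage sketch (not from the source): how extend_cluster is typically driven,
# assuming it is exposed as a staticmethod on ArakoonInstaller. The IPs and the
# cluster name below are hypothetical; the returned dict carries the ports that were
# assigned to the new node.
def _example_extend_cluster():
    ports = ArakoonInstaller.extend_cluster(master_ip='10.100.1.1',
                                            new_ip='10.100.1.2',
                                            cluster_name='ovsdb',
                                            exclude_ports=[])
    print 'client port {0}, messaging port {1}'.format(ports['client_port'], ports['messaging_port'])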
def restart_cluster(cluster_name, master_ip):
    """
    Executes a restart sequence (executed after an arakoon and/or alba package upgrade)
    """
    logger.debug('Restart sequence for {0} via {1}'.format(cluster_name, master_ip))
    client = SSHClient(master_ip)
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(client)

    all_clients = [SSHClient(node.ip) for node in config.nodes if node.ip != master_ip] + [client]
    if len(config.nodes) <= 2:
        logger.debug('  Insufficient nodes in cluster {0}. Full restart'.format(cluster_name))
        for function in [ArakoonInstaller.stop, ArakoonInstaller.start]:
            for client in all_clients:
                function(cluster_name, client)
        ArakoonInstaller.wait_for_cluster(cluster_name)
    else:
        logger.debug('  Sufficient nodes in cluster {0}. Sequential restart'.format(cluster_name))
        for client in all_clients:
            ArakoonInstaller.stop(cluster_name, client)
            ArakoonInstaller.start(cluster_name, client)
            logger.debug('  Restarted node {0} on cluster {1}'.format(client.ip, cluster_name))
            ArakoonInstaller.wait_for_cluster(cluster_name)
    logger.debug('Restart sequence for {0} via {1} completed'.format(cluster_name, master_ip))
def restart_cluster_add(cluster_name, current_ips, new_ip):
    """
    Executes a (re)start sequence after adding a new node to a cluster.
    """
    logger.debug('Restart sequence (add) for {0}'.format(cluster_name))
    logger.debug('Current ips: {0}'.format(', '.join(current_ips)))
    logger.debug('New ip: {0}'.format(new_ip))

    logger.debug('Catching up new node {0} for cluster {1}'.format(new_ip, cluster_name))
    with Remote(new_ip, [ArakoonManagementEx], 'ovs') as remote:
        cluster = remote.ArakoonManagementEx().getCluster(cluster_name)
        cluster.catchup_node()
    logger.debug('Catching up new node {0} for cluster {1} completed'.format(new_ip, cluster_name))

    threshold = 2 if new_ip in current_ips else 1
    for ip in current_ips:
        if ip == new_ip:
            continue
        client = SSHClient(ip, username='******')
        ArakoonInstaller.stop(cluster_name, client=client)
        ArakoonInstaller.start(cluster_name, client=client)
        logger.debug('  Restarted node {0} for cluster {1}'.format(client.ip, cluster_name))
        if len(current_ips) > threshold:  # A two-node cluster needs all nodes running
            ArakoonInstaller.wait_for_cluster(cluster_name)

    new_client = SSHClient(new_ip, username='******')
    ArakoonInstaller.start(cluster_name, client=new_client)
    ArakoonInstaller.wait_for_cluster(cluster_name)
    logger.debug('Started node {0} for cluster {1}'.format(new_ip, cluster_name))
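# Hedged usage sketch (illustrative IPs, not from the source): after extending a
# cluster to a new node, restart_cluster_add first lets the new node catch up on
# the tlogs and then restarts the existing members one by one.
def _example_restart_after_extend():
    current_ips = ['10.100.1.1', '10.100.1.2']
    new_ip = '10.100.1.3'
    ArakoonInstaller.restart_cluster_add('ovsdb', current_ips, new_ip)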
def deploy_to_slave(master_ip, slave_ip, cluster_name):
    """
    Deploys the configuration file to a slave
    :param master_ip: IP of the node to deploy from
    :type master_ip: str
    :param slave_ip: IP of the slave to deploy to
    :type slave_ip: str
    :param cluster_name: Name of the cluster of which to deploy the configuration file
    :type cluster_name: str
    :return: None
    """
    EtcdInstaller._logger.debug('  Setting up proxy "{0}" from {1} to {2}'.format(cluster_name, master_ip, slave_ip))
    master_client = SSHClient(master_ip, username='******')
    slave_client = SSHClient(slave_ip, username='******')

    current_cluster = []
    for item in master_client.run('etcdctl member list').splitlines():
        info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
        current_cluster.append('{0}={1}'.format(info['name'], info['peer']))

    EtcdInstaller._setup_proxy(','.join(current_cluster), slave_client, cluster_name, force=True)
    EtcdInstaller._logger.debug('  Setting up proxy "{0}" from {1} to {2} completed'.format(cluster_name, master_ip, slave_ip))
def shrink_cluster(remaining_node_ip, deleted_node_ip, cluster_name):
    """
    Removes a node from a cluster; the old node will become a slave
    :param cluster_name: The name of the cluster to shrink
    :param deleted_node_ip: The ip of the node that should be deleted
    :param remaining_node_ip: The ip of a remaining node
    """
    logger.debug('Shrinking cluster "{0}" from {1}'.format(cluster_name, deleted_node_ip))
    current_client = SSHClient(remaining_node_ip, username='******')
    if not EtcdInstaller._is_healty(cluster_name, current_client):
        raise RuntimeError('Cluster "{0}" unhealthy, aborting shrink'.format(cluster_name))

    old_client = SSHClient(deleted_node_ip, username='******')
    node_name = System.get_my_machine_id(old_client)
    node_id = None
    for item in current_client.run('etcdctl member list').splitlines():
        info = re.search(EtcdInstaller.MEMBER_REGEX, item).groupdict()
        if info['name'] == node_name:
            node_id = info['id']
    if node_id is None:
        raise RuntimeError('Could not locate {0} in the cluster'.format(deleted_node_ip))

    current_client.run('etcdctl member remove {0}'.format(node_id))
    EtcdInstaller.deploy_to_slave(remaining_node_ip, deleted_node_ip, cluster_name)
    EtcdInstaller.wait_for_cluster(cluster_name, current_client)
    logger.debug('Shrinking cluster "{0}" from {1} completed'.format(cluster_name, deleted_node_ip))
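# Hedged usage sketch: shrinking an etcd cluster by one member. The IPs and the
# cluster name are hypothetical. The call raises RuntimeError when the cluster is
# unhealthy or the member cannot be located, so callers may want to catch that.
def _example_shrink():
    EtcdInstaller.shrink_cluster(remaining_node_ip='10.100.1.1',
                                 deleted_node_ip='10.100.1.3',
                                 cluster_name='config')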
def setUp(cls):
    for node in TestArakoonInstaller.nodes:
        client = SSHClient(node)
        root_client = SSHClient(node, username='******')
        root_client.dir_delete('/tmp/db')
        root_client.dir_create('/tmp/db')
        client.dir_delete(TestArakoonInstaller.cluster_config_path)
        client.dir_create(TestArakoonInstaller.cluster_config_path)
def deploy_to_slave(master_ip, slave_ip, cluster_name):
    """
    Deploys the configuration file to a slave
    """
    client = SSHClient(master_ip)
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(client)
    client = SSHClient(slave_ip)
    config.write_config(client)
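# Hedged usage sketch: pushing an existing arakoon cluster configuration to a slave
# node. IPs and the cluster name are hypothetical, and the assumption that this
# method lives on ArakoonInstaller mirrors the other calls in this section.
def _example_deploy_to_slave():
    ArakoonInstaller.deploy_to_slave(master_ip='10.100.1.1',
                                     slave_ip='10.100.1.4',
                                     cluster_name='ovsdb')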
def remove_from_slave(master_ip, slave_ip, cluster_name):
    """
    Removes everything related to a given cluster from the slave
    """
    client = SSHClient(master_ip)
    config = ArakoonClusterConfig(cluster_name)
    config.load_config(client)
    client = SSHClient(slave_ip)
    config.delete_config(client)
def install_plugins():
    """
    (Re)load plugins
    """
    if ServiceManager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
        # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
        # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        clients = []
        try:
            for storagerouter in StorageRouterList.get_storagerouters():
                clients.append(SSHClient(storagerouter, username='******'))
        except UnableToConnectException:
            raise RuntimeError('Not all StorageRouters are reachable')
        for client in clients:
            for service_name in ['watcher-framework', 'memcached']:
                ServiceManager.stop_service(service_name, client=client)
                wait = 30
                while wait > 0:
                    if ServiceManager.get_service_status(service_name, client=client) is False:
                        break
                    time.sleep(1)
                    wait -= 1
                if wait == 0:
                    raise RuntimeError('Could not stop service: {0}'.format(service_name))
        for client in clients:
            for service_name in ['memcached', 'watcher-framework']:
                ServiceManager.start_service(service_name, client=client)
                wait = 30
                while wait > 0:
                    if ServiceManager.get_service_status(service_name, client=client) is True:
                        break
                    time.sleep(1)
                    wait -= 1
                if wait == 0:
                    raise RuntimeError('Could not start service: {0}'.format(service_name))
        from ovs.dal.helpers import Migration
        Migration.migrate()
        from ovs.lib.helpers.toolbox import Toolbox
        ip = System.get_my_storagerouter().ip
        functions = Toolbox.fetch_hooks('plugin', 'postinstall')
        for function in functions:
            function(ip=ip)
def restart_cluster(cluster_name, master_ip, filesystem):
    """
    Execute a restart sequence (executed after an arakoon and/or alba package upgrade)
    :param cluster_name: Name of the cluster to restart
    :type cluster_name: str
    :param master_ip: IP of one of the cluster nodes
    :type master_ip: str
    :param filesystem: Indicates whether the configuration should be on the filesystem or in a configuration cluster
    :type filesystem: bool
    :return: None
    """
    ArakoonInstaller._logger.debug('Restart sequence for {0} via {1}'.format(cluster_name, master_ip))
    config = ArakoonClusterConfig(cluster_name, filesystem)
    config.load_config(master_ip)
    arakoon_client = ArakoonInstaller.build_client(config)

    root_client = SSHClient(master_ip, username='******')
    all_clients = [SSHClient(node.ip, username='******') for node in config.nodes]
    if len(config.nodes) <= 2:
        ArakoonInstaller._logger.debug('  Insufficient nodes in cluster {0}. Full restart'.format(cluster_name))
        for function in [ArakoonInstaller.stop, ArakoonInstaller.start]:
            for client in all_clients:
                function(cluster_name, client)
        ArakoonInstaller.wait_for_cluster(cluster_name, master_ip, filesystem)
    else:
        ArakoonInstaller._logger.debug('  Sufficient nodes in cluster {0}. Sequential restart'.format(cluster_name))
        for client in all_clients:
            ArakoonInstaller.stop(cluster_name, client)
            ArakoonInstaller.start(cluster_name, client)
            ArakoonInstaller._logger.debug('  Restarted node {0} on cluster {1}'.format(client.ip, cluster_name))
            ArakoonInstaller.wait_for_cluster(cluster_name, master_ip, filesystem)
    ArakoonInstaller.start(cluster_name, root_client)
    ArakoonInstaller.wait_for_cluster(cluster_name, master_ip, filesystem)
    arakoon_client.set(ArakoonInstaller.INTERNAL_CONFIG_KEY, config.export_ini())
    ArakoonInstaller._logger.debug('Restart sequence for {0} via {1} completed'.format(cluster_name, master_ip))
def install_plugins():
    """
    (Re)load plugins
    """
    manager = ServiceFactory.get_manager()
    if manager.has_service('ovs-watcher-framework', SSHClient('127.0.0.1', username='******')):
        # If the watcher is running, 'ovs setup' was executed and we need to restart everything to load
        # the plugin. In the other case, the plugin will be loaded once 'ovs setup' is executed
        print 'Installing plugin into Open vStorage'
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        clients = {}
        masters = StorageRouterList.get_masters()
        slaves = StorageRouterList.get_slaves()
        try:
            for sr in masters + slaves:
                clients[sr] = SSHClient(sr, username='******')
        except UnableToConnectException:
            raise RuntimeError('Not all StorageRouters are reachable')
        memcached = 'memcached'
        watcher = 'watcher-framework'
        for sr in masters + slaves:
            if manager.has_service(watcher, clients[sr]):
                print '- Stopping watcher on {0} ({1})'.format(sr.name, sr.ip)
                manager.stop_service(watcher, clients[sr])
        for sr in masters:
            print '- Restarting memcached on {0} ({1})'.format(sr.name, sr.ip)
            manager.restart_service(memcached, clients[sr])
        for sr in masters + slaves:
            if manager.has_service(watcher, clients[sr]):
                print '- Starting watcher on {0} ({1})'.format(sr.name, sr.ip)
                manager.start_service(watcher, clients[sr])

        print '- Execute model migrations'
        from ovs.dal.helpers import Migration
        Migration.migrate()

        from ovs.lib.helpers.toolbox import Toolbox
        ip = System.get_my_storagerouter().ip
        functions = Toolbox.fetch_hooks('plugin', 'postinstall')
        if len(functions) > 0:
            print '- Execute post installation scripts'
        for fct in functions:
            fct(ip=ip)
        print 'Installing plugin into Open vStorage: Completed'
def set_rdma_capability(storagerouter_guid):
    """
    Check if the StorageRouter has been reconfigured to be able to support RDMA
    :param storagerouter_guid: Guid of the StorageRouter to check and set
    :type storagerouter_guid: str
    :return: None
    :rtype: NoneType
    """
    storagerouter = StorageRouter(storagerouter_guid)
    client = SSHClient(storagerouter, username='******')
    rdma_capable = False
    with remote(client.ip, [os], username='******') as rem:
        for root, dirs, files in rem.os.walk('/sys/class/infiniband'):
            for directory in dirs:
                ports_dir = '/'.join([root, directory, 'ports'])
                if not rem.os.path.exists(ports_dir):
                    continue
                for sub_root, sub_dirs, _ in rem.os.walk(ports_dir):
                    if sub_root != ports_dir:
                        continue
                    for sub_directory in sub_dirs:
                        state_file = '/'.join([sub_root, sub_directory, 'state'])
                        if rem.os.path.exists(state_file):
                            if 'ACTIVE' in client.run(['cat', state_file]):
                                rdma_capable = True
    storagerouter.rdma_capable = rdma_capable
    storagerouter.save()
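# Hedged usage sketch: re-evaluating RDMA capability after a NIC reconfiguration.
# The guid is hypothetical and the owning class name (StorageRouterController) is
# an assumption; the result is persisted on the StorageRouter model by the call itself.
def _example_set_rdma():
    StorageRouterController.set_rdma_capability('00000000-0000-0000-0000-000000000000')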
def _proxy_summary(self):
    """
    Returns a summary of the proxies of this StorageDriver
    :return: summary of the proxies
    :rtype: dict
    """
    proxy_info = {'red': 0, 'orange': 0, 'green': 0}
    summary = {'proxies': proxy_info}
    try:
        service_manager = ServiceFactory.get_manager()
        client = SSHClient(self.storagerouter)
    except Exception:
        self._logger.exception('Unable to retrieve necessary clients')
    else:
        for alba_proxy in self.alba_proxies:
            try:
                service_status = service_manager.get_service_status(alba_proxy.service.name, client)
            except Exception:
                # A ValueError can occur when the services are still being deployed (the model will be updated before the actual deployment)
                self._logger.exception('Unable to retrieve the service status for service {0} of StorageDriver {1}'.format(alba_proxy.service.name, self.guid))
                proxy_info['red'] += 1
                continue
            if service_status == 'active':
                proxy_info['green'] += 1
            elif service_status == 'inactive':
                proxy_info['orange'] += 1
            else:
                proxy_info['red'] += 1
    finally:
        return summary
def get_logfiles(albanode_guid, local_storagerouter_guid):
    """
    Collects logs, moves them to a web-accessible location and returns the log tgz's filename
    :param albanode_guid: Alba Node guid to retrieve log files on
    :type albanode_guid: str
    :param local_storagerouter_guid: Guid of the StorageRouter on which the collect logs was initiated, e.g. through the GUI
    :type local_storagerouter_guid: str
    :return: Name of the tgz containing the logs
    :rtype: str
    """
    web_path = '/opt/OpenvStorage/webapps/frontend/downloads'
    alba_node = AlbaNode(albanode_guid)
    logfile_name = alba_node.client.get_logs()['filename']
    download_url = 'https://{0}:{1}@{2}:{3}/downloads/{4}'.format(alba_node.username, alba_node.password,
                                                                  alba_node.ip, alba_node.port, logfile_name)

    client = SSHClient(endpoint=StorageRouter(local_storagerouter_guid), username='******')
    client.dir_create(web_path)
    client.run(['wget', download_url, '--directory-prefix', web_path, '--no-check-certificate'])
    client.run(['chmod', '666', '{0}/{1}'.format(web_path, logfile_name)])
    return logfile_name
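# Hedged usage sketch: collecting logs from an ALBA node and locating the resulting
# tgz under the web root. Both guids are placeholders and the owning class name
# (AlbaNodeController) is an assumption.
def _example_get_logfiles(albanode_guid, storagerouter_guid):
    filename = AlbaNodeController.get_logfiles(albanode_guid, storagerouter_guid)
    print 'logs available at /opt/OpenvStorage/webapps/frontend/downloads/{0}'.format(filename)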
def get_current_memory_usage_of_process(storagerouter_ip, pid):
    """
    Get the resident memory usage of a process on a StorageRouter (through /proc/<PID>/status), e.g.:
        VmPeak: 8110620 kB
        VmSize: 3252752 kB
        VmLck:        0 kB
        VmPin:        0 kB
        VmHWM:  4959820 kB
        VmRSS:   570764 kB
        VmData: 3019468 kB
        VmStk:      136 kB
        VmExe:    12464 kB
        VmLib:    58852 kB
        VmPTE:     2644 kB
        VmPMD:       24 kB
        VmSwap:  394224 kB
    :param storagerouter_ip: ip address of an existing storagerouter
    :type storagerouter_ip: str
    :param pid: process ID of the process you want to monitor
    :type pid: int
    :return: current usage
    :rtype: str
    """
    client = SSHClient(storagerouter_ip, username='******')
    return client.run("grep Vm /proc/{0}/status | tr -s ' '".format(pid), allow_insecure=True)
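# Hedged usage sketch: the helper returns the raw 'Vm*' lines from /proc/<pid>/status
# as a single string, so the caller still has to pick out the field of interest
# (VmRSS below). The function name _example_print_rss is illustrative only.
def _example_print_rss(storagerouter_ip, pid):
    output = get_current_memory_usage_of_process(storagerouter_ip, pid)
    for line in output.splitlines():
        if line.startswith('VmRSS'):
            print 'resident memory: {0}'.format(line.split(':', 1)[1].strip())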
def check_ovs_processes(logger):
    """
    Checks the availability of processes for Open vStorage
    :param logger: logging object
    :type logger: ovs.extensions.healthcheck.result.HCResults
    :return: None
    :rtype: NoneType
    """
    logger.info('Checking local ovs services.')
    client = SSHClient(OpenvStorageHealthCheck.LOCAL_SR)
    service_manager = ServiceFactory.get_manager()
    services = [service for service in service_manager.list_services(client=client)
                if service.startswith(OpenvStorageHealthCheck.MODULE)]
    if len(services) == 0:
        logger.warning('Found no local ovs services.')
    for service_name in services:
        if service_manager.get_service_status(service_name, client) == 'active':
            logger.success('Service {0} is running!'.format(service_name))
        else:
            logger.failure('Service {0} is not running, please check this.'.format(service_name))
def archive_existing_arakoon_data(ip, directory, top_dir, cluster_name):
    """
    Copy existing arakoon data aside when setting up a new arakoon cluster
    :param ip: IP on which to check for existing data
    :param directory: Directory to check for existence
    :param top_dir: Top directory
    :param cluster_name: Name of arakoon cluster
    :return: None
    """
    new_client = SSHClient(ip)
    logger.debug('archive - check if {0} exists'.format(directory))
    if new_client.dir_exists(directory):
        logger.debug('archive - from {0}'.format(directory))
        archive_dir = '/'.join([top_dir, 'archive', cluster_name])
        if new_client.dir_exists(archive_dir + '/' + os.path.basename(directory)):
            logger.debug('archive - from existing archive {0}'.format(archive_dir))
            timestamp = time.strftime('%Y%m%d%H%M%S', time.gmtime())
            new_archive_dir = archive_dir + '-' + timestamp
            new_client.dir_create(new_archive_dir)
            new_client.run('mv {0} {1}'.format(archive_dir, new_archive_dir))
            logger.debug('archive - to new {0}'.format(new_archive_dir))
        logger.debug('create archive dir: {0}'.format(archive_dir))
        new_client.dir_create(archive_dir)
        logger.debug('archive from {0} to {1}'.format(directory, archive_dir))
        if cluster_name == os.path.basename(directory) and new_client.dir_list(directory):
            new_client.run('mv {0}/* {1}'.format(directory, archive_dir))
        else:
            new_client.run('mv {0} {1}'.format(directory, archive_dir))
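# Hedged usage sketch (illustrative paths and names, not from the source): archiving
# leftover tlog data before re-creating a cluster named 'ovsdb' on a node. Existing
# data ends up under <top_dir>/archive/<cluster_name>.
def _example_archive():
    archive_existing_arakoon_data(ip='10.100.1.1',
                                  directory='/opt/OpenvStorage/db/arakoon/ovsdb/tlogs',
                                  top_dir='/opt/OpenvStorage/db',
                                  cluster_name='ovsdb')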
def override_scheduletasks(configuration):
    """
    Override the scheduled tasks crontab with your own configuration
    :param configuration: configuration to override scheduled tasks
    :type configuration: dict
    :return: True if the override succeeded, False otherwise
    """
    service_name = 'ovs-watcher-framework'
    Configuration.set(CelerySetup.SCHEDULED_TASK_CFG, configuration)
    fetched_cfg = Configuration.get(CelerySetup.SCHEDULED_TASK_CFG, configuration)
    if cmp(fetched_cfg, configuration) == 0:
        # Restart ovs-watcher-framework on all nodes
        for sr_ip in StoragerouterHelper.get_storagerouter_ips():
            client = SSHClient(sr_ip, username='******')
            service_manager = ServiceFactory.get_manager()
            try:
                service_manager.restart_service(service_name, client)
            except Exception:
                return False
        CelerySetup.LOGGER.info("Successfully restarted all `{0}` services!".format(service_name))
        return True
    else:
        CelerySetup.LOGGER.warning("`{0}` config is `{1}` but should be `{2}`".format(CelerySetup.SCHEDULED_TASK_CFG,
                                                                                      fetched_cfg, configuration))
        return False
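# Hedged usage sketch: only the set/verify/restart flow above comes from the source;
# the task name and the celery-crontab-style dict layout below are assumptions used
# purely for illustration.
def _example_override_schedule():
    configuration = {'ovs.generic.execute_scrub': {'minute': '0', 'hour': '3'}}
    if CelerySetup.override_scheduletasks(configuration) is False:
        raise RuntimeError('Overriding the scheduled tasks failed')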
def create_cluster(cluster_name, ip, exclude_ports, plugins=None):
    """
    Creates a cluster
    """
    logger.debug('Creating cluster {0} on {1}'.format(cluster_name, ip))
    client = SSHClient(ip)
    base_dir = client.config_read('ovs.arakoon.location').rstrip('/')
    port_range = client.config_read('ovs.ports.arakoon')
    ports = System.get_free_ports(port_range, exclude_ports, 2, client)
    node_name = System.get_my_machine_id(client)

    config = ArakoonClusterConfig(cluster_name, plugins)
    if not [node.name for node in config.nodes if node.name == node_name]:
        config.nodes.append(ArakoonNodeConfig(name=node_name,
                                              ip=ip,
                                              client_port=ports[0],
                                              messaging_port=ports[1],
                                              log_dir=ArakoonInstaller.ARAKOON_LOG_DIR.format(cluster_name),
                                              home=ArakoonInstaller.ARAKOON_HOME_DIR.format(base_dir, cluster_name),
                                              tlog_dir=ArakoonInstaller.ARAKOON_TLOG_DIR.format(base_dir, cluster_name)))
    ArakoonInstaller._deploy(config)
    logger.debug('Creating cluster {0} on {1} completed'.format(cluster_name, ip))
    return {'client_port': ports[0], 'messaging_port': ports[1]}
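# Hedged usage sketch: creating a fresh single-node cluster, assuming the method is
# a staticmethod on ArakoonInstaller. The IP and cluster name are hypothetical; the
# returned ports can later be passed as exclude_ports when creating further clusters.
def _example_create_cluster():
    ports = ArakoonInstaller.create_cluster('ovsdb', '10.100.1.1', exclude_ports=[])
    print 'cluster listens on {0} (client) and {1} (messaging)'.format(ports['client_port'], ports['messaging_port'])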
def add_suitable_nodes(local_failure_domain, local_safety):
    """
    Adds nodes which are suited to serve the MDS
    :param local_failure_domain: Failure domain to take into account
    :param local_safety: Safety which needs to be met
    :return: Nodes which can be used, MDS services to use
    """
    if len(nodes) < local_safety:
        for local_load in sorted(failure_domain_load_dict[local_failure_domain]):
            for local_service in failure_domain_load_dict[local_failure_domain][local_load]:
                if len(nodes) < local_safety and local_service.storagerouter.ip not in nodes:
                    try:
                        SSHClient(local_service.storagerouter)
                        new_services.append(local_service)
                        nodes.add(local_service.storagerouter.ip)
                    except UnableToConnectException:
                        logger.debug('MDS safety: vDisk {0}: Skipping storagerouter with IP {1} as it is unreachable'.format(vdisk.guid, local_service.storagerouter.ip))
    return nodes, new_services
def check_alba_processes(result_handler):
    """
    Checks the availability of processes for Alba
    :param result_handler: logging object
    :type result_handler: ovs.extensions.healthcheck.result.HCResults
    :return: None
    :rtype: NoneType
    """
    result_handler.info('Checking LOCAL ALBA services: ', add_to_result=False)
    client = SSHClient(AlbaHealthCheck.LOCAL_SR)
    service_manager = ServiceFactory.get_manager()
    services = [service for service in service_manager.list_services(client=client)
                if service.startswith(AlbaHealthCheck.MODULE)]
    if len(services) == 0:
        result_handler.skip('Found no LOCAL ALBA services.')
        return
    for service_name in services:
        if service_manager.get_service_status(service_name, client) == 'active':
            result_handler.success('Service {0} is running!'.format(service_name))
        else:
            result_handler.failure('Service {0} is NOT running!'.format(service_name))
def restart_cluster(cluster_name, master_ip):
    """
    Execute a restart sequence (executed after an arakoon and/or alba package upgrade)
    :param cluster_name: Name of the cluster to restart
    :type cluster_name: str
    :param master_ip: IP of one of the cluster nodes
    :type master_ip: str
    :return: None
    """
    ArakoonInstaller._logger.debug('Restart sequence for {0} via {1}'.format(cluster_name, master_ip))
    config = ArakoonClusterConfig(cluster_name)
    config.load_config()

    all_clients = [SSHClient(node.ip, username='******') for node in config.nodes]
    if len(config.nodes) <= 2:
        ArakoonInstaller._logger.debug('  Insufficient nodes in cluster {0}. Full restart'.format(cluster_name))
        for function in [ArakoonInstaller.stop, ArakoonInstaller.start]:
            for client in all_clients:
                function(cluster_name, client)
        ArakoonInstaller.wait_for_cluster(cluster_name, all_clients[0])
    else:
        ArakoonInstaller._logger.debug('  Sufficient nodes in cluster {0}. Sequential restart'.format(cluster_name))
        for client in all_clients:
            ArakoonInstaller.stop(cluster_name, client)
            ArakoonInstaller.start(cluster_name, client)
            ArakoonInstaller._logger.debug('  Restarted node {0} on cluster {1}'.format(client.ip, cluster_name))
            ArakoonInstaller.wait_for_cluster(cluster_name, client)
    ArakoonInstaller._logger.debug('Restart sequence for {0} via {1} completed'.format(cluster_name, master_ip))
def test_single_node(self):
    node = sorted(TestArakoonInstaller.nodes.keys())[0]
    result = ArakoonInstaller.create_cluster(TestArakoonInstaller.cluster_name, node, '/tmp/db')
    contents = SSHClient(node).file_read(TestArakoonInstaller.cluster_config_file)
    expected = TestArakoonInstaller.expected_global.format(TestArakoonInstaller.nodes[node],
                                                           TestArakoonInstaller.cluster_name)
    expected += TestArakoonInstaller.expected_base.format(TestArakoonInstaller.nodes[node],
                                                          result['client_port'],
                                                          result['messaging_port'],
                                                          TestArakoonInstaller.cluster_name,
                                                          node)
    self.assertEqual(contents.strip(), expected.strip())
def check_ovs_packages(result_handler):
    """
    Checks the availability of packages for Open vStorage
    :param result_handler: logging object
    :type result_handler: ovs.extensions.healthcheck.result.HCResults
    :return: None
    :rtype: NoneType
    """
    result_handler.info('Checking OVS packages: ', add_to_result=False)
    client = SSHClient(OpenvStorageHealthCheck.LOCAL_SR)
    package_manager = PackageFactory.get_manager()

    # Get all base packages
    base_packages = set()
    for names in package_manager.package_info['names'].itervalues():
        base_packages = base_packages.union(names)
    base_packages = list(base_packages)
    extra_packages = Helper.packages

    installed = package_manager.get_installed_versions(client=client, package_names=base_packages)
    installed.update(package_manager.get_installed_versions(client=client, package_names=Helper.packages))
    for package in base_packages + extra_packages:
        version = installed.get(package)
        if version:
            version = str(version)
            result_handler.success('Package {0} is installed with version {1}'.format(package, version),
                                   code=ErrorCodes.package_required)
        else:
            if package in package_manager.package_info['mutually_exclusive']:
                # Mutually exclusive package, so ignore
                continue
            if package in base_packages:
                result_handler.warning('Package {0} is not installed.'.format(package),
                                       code=ErrorCodes.package_required)
            elif package in extra_packages:
                result_handler.skip('Package {0} is not installed.'.format(package))
def _features(self):
    """
    Returns information about installed/available features
    :return: Dictionary containing edition and available features per component
    """
    try:
        enterprise = 'enterprise'
        community = 'community'
        client = SSHClient(self, username='******')
        enterprise_regex = re.compile('^(?P<edition>ee-)?(?P<version>.*)$')

        version = client.run("volumedriver_fs --version | grep version: | awk '{print $2}'",
                             allow_insecure=True, allow_nonzero=True)
        volumedriver_version = enterprise_regex.match(version).groupdict()
        volumedriver_edition = enterprise if volumedriver_version['edition'] == 'ee-' else community
        volumedriver_version_lv = LooseVersion(volumedriver_version['version'])
        volumedriver_features = [feature for feature, version in {'directory_unlink': ('6.15.0', None)}.iteritems()
                                 if volumedriver_version_lv >= LooseVersion(version[0])
                                 and (version[1] is None or version[1] == volumedriver_edition)]

        version = client.run("alba version --terse", allow_insecure=True, allow_nonzero=True)
        alba_version = enterprise_regex.match(version).groupdict()
        alba_edition = enterprise if alba_version['edition'] == 'ee-' else community
        alba_version_lv = LooseVersion(alba_version['version'])
        alba_features = [feature for feature, version in {'cache-quota': ('1.4.4', enterprise),
                                                          'block-cache': ('1.4.0', enterprise)}.iteritems()
                         if alba_version_lv >= LooseVersion(version[0])
                         and (version[1] is None or version[1] == alba_edition)]

        return {'volumedriver': {'edition': volumedriver_edition,
                                 'features': volumedriver_features},
                'alba': {'edition': alba_edition,
                         'features': alba_features}}
    except UnableToConnectException:
        pass
    except Exception:
        StorageRouter._logger.exception('Could not load feature information')
    return None
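# Hedged usage sketch: consuming the feature dictionary to gate optional behaviour.
# The dict layout follows the code above; the assumption that _features is exposed
# as a dynamic property named 'features' on the StorageRouter model is ours, and
# 'storagerouter' is a hypothetical instance. The value can be None when the node
# is unreachable, so that case is checked first.
def _example_check_block_cache(storagerouter):
    features = storagerouter.features
    if features is not None and 'block-cache' in features['alba']['features']:
        print 'block cache is available ({0} edition)'.format(features['alba']['edition'])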
def setUp(cls):
    for node in TestArakoonInstaller.nodes:
        client = SSHClient(node)
        client.dir_delete('/tmp/db')
        client.dir_delete('/tmp/cfg')
        client.dir_create('/tmp/db')
        client.dir_create('/tmp/cfg')
def __init__(self, host='127.0.0.1', login='******', passwd=None):
    logger.debug('Init libvirt')
    self.states = {libvirt.VIR_DOMAIN_NOSTATE: 'NO STATE',
                   libvirt.VIR_DOMAIN_RUNNING: 'RUNNING',
                   libvirt.VIR_DOMAIN_BLOCKED: 'BLOCKED',
                   libvirt.VIR_DOMAIN_PAUSED: 'PAUSED',
                   libvirt.VIR_DOMAIN_SHUTDOWN: 'SHUTDOWN',
                   libvirt.VIR_DOMAIN_SHUTOFF: 'TURNEDOFF',
                   libvirt.VIR_DOMAIN_CRASHED: 'CRASHED'}

    pattern = re.compile(r"^(?<!\S)((\d|[1-9]\d|1\d\d|2[0-4]\d|25[0-5])\b|\.\b){7}(?!\S)$")
    if pattern.match(host):
        self.host = host
    else:
        raise ValueError("{0} is not a valid ip.".format(host))

    self.login = login
    self.streams = {}
    self.ssh_client = SSHClient(host, username=login, password=passwd)
    self._conn = self.connect(login, host)
    # Enable event registering
    libvirt.virEventRegisterDefaultImpl()
    logger.debug('Init complete')
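# Hedged usage sketch: instantiating the hypervisor SDK against a remote libvirt
# host. The class name 'Sdk', the host and the credentials are assumptions; the
# constructor validates the IP, opens an SSH client and connects to libvirt.
def _example_connect():
    sdk = Sdk(host='10.100.1.10', login='root', passwd=None)
    print sorted(sdk.states.values())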
def remove(license_guid):
    """
    Removes a license
    """
    clients = {}
    storagerouters = StorageRouterList.get_storagerouters()
    try:
        for storagerouter in storagerouters:
            clients[storagerouter] = SSHClient(storagerouter.ip)
    except UnableToConnectException:
        raise RuntimeError('Not all StorageRouters are reachable')
    lic = License(license_guid)
    if lic.can_remove is True:
        remove_functions = Toolbox.fetch_hooks('license', '{0}.remove'.format(lic.component))
        result = remove_functions[0](component=lic.component,
                                     data=lic.data,
                                     valid_until=lic.valid_until,
                                     signature=lic.signature)
        if result is True:
            lic.delete()
            license_contents = []
            for lic in LicenseList.get_licenses():
                license_contents.append(lic.hash)
            for storagerouter in storagerouters:
                client = clients[storagerouter]
                client.file_write('/opt/OpenvStorage/config/licenses',
                                  '{0}\n'.format('\n'.join(license_contents)))
        return result
    return None
def test_multi_node(self):
    base_port = Configuration.get('ovs.ports.arakoon')[0]
    cluster = 'one'
    node_ids = sorted(TestArakoonInstaller.nodes.keys())
    nodes = dict((node, SSHClient(node)) for node in node_ids)
    first_node = node_ids[0]
    ArakoonInstaller.create_cluster(cluster, first_node, [])
    for node in node_ids[1:]:
        ArakoonInstaller.extend_cluster(first_node, node, cluster, [])
    expected = TestArakoonInstaller.expected_global.format(cluster, ','.join(TestArakoonInstaller.nodes[node] for node in node_ids))
    for node in node_ids:
        expected += TestArakoonInstaller.expected_base.format(TestArakoonInstaller.nodes[node], node, base_port, base_port + 1)
    expected = expected.strip()
    for node, client in nodes.iteritems():
        contents = client.file_read(self._get_config_path(cluster))
        self.assertEqual(contents.strip(), expected.strip())
    ArakoonInstaller.shrink_cluster(node_ids[1], first_node, cluster)
    expected = TestArakoonInstaller.expected_global.format(cluster, ','.join(TestArakoonInstaller.nodes[node] for node in node_ids[1:]))
    for node in node_ids[1:]:
        expected += TestArakoonInstaller.expected_base.format(TestArakoonInstaller.nodes[node], node, base_port, base_port + 1)
    expected = expected.strip()
    for node, client in nodes.iteritems():
        if node == first_node:
            continue
        contents = client.file_read(self._get_config_path(cluster))
        self.assertEqual(contents.strip(), expected.strip())