def sync_disk_with_reality(cls, guid=None, ip=None, timeout=None, *args, **kwargs):
    """
    Trigger a disk rescan on a storagerouter and wait for it to complete.
    Either `guid` or `ip` must be supplied; when both are given, `guid` wins.
    :param guid: guid of the storagerouter
    :type guid: str
    :param ip: ip of the storagerouter
    :type ip: str
    :param timeout: timeout time in seconds
    :type timeout: int
    """
    # Guard clause: at least one identifier is mandatory
    if guid is None and ip is None:
        raise ValueError('No guid or ip passed.')
    if guid is not None:
        if ip is not None:
            # Both were supplied: the guid takes precedence, only warn about the ip
            Logger.warning('Both storagerouter guid and ip passed, using guid for sync.')
        storagerouter_guid = guid
    else:
        # Resolve the storagerouter through its ip
        storagerouter_guid = StoragerouterHelper.get_storagerouter_by_ip(ip).guid
    task_id = cls.api.post(api='/storagerouters/{0}/rescan_disks/'.format(storagerouter_guid),
                           data=None)
    return cls.api.wait_for_task(task_id=task_id, timeout=timeout)
class StorageDriverConfiguration(object):
    """
    StorageDriver configuration class.
    Wraps the configuration of a single StorageDriver stored in the configuration
    management under /ovs/vpools/<vpool_guid>/hosts/<storagedriver_id>/config and
    applies changes to the running volumedriver through LocalStorageRouterClient.
    """
    CACHE_BLOCK = 'block_cache'
    CACHE_FRAGMENT = 'fragment_cache'

    def __init__(self, vpool_guid, storagedriver_id):
        """
        Initializes the class
        :param vpool_guid: guid of the vPool this StorageDriver belongs to
        :param storagedriver_id: identifier of the StorageDriver
        """
        _log_level = LOG_LEVEL_MAPPING[OVSLogger('extensions').getEffectiveLevel()]
        # noinspection PyCallByClass,PyTypeChecker
        storagerouterclient.Logger.setupLogging(OVSLogger.load_path('storagerouterclient'), _log_level)
        # noinspection PyArgumentList
        storagerouterclient.Logger.enableLogging()

        self._key = '/ovs/vpools/{0}/hosts/{1}/config'.format(vpool_guid, storagedriver_id)
        self._logger = OVSLogger('extensions')
        # Names of parameters changed locally but not yet applied to the volumedriver
        self._dirty_entries = []
        self.remote_path = Configuration.get_configuration_path(self._key).strip('/')

        # Load configuration
        if Configuration.exists(self._key):
            self.configuration = Configuration.get(self._key)
            self.config_missing = False
        else:
            self.configuration = {}
            self.config_missing = True
            self._logger.debug('Could not find config {0}, a new one will be created'.format(self._key))

    def save(self, client=None, force_reload=False):
        """
        Saves the configuration to a given file, optionally a remote one
        :param client: If provided, save remote configuration
        :type client: ovs_extensions.generic.sshclient.SSHClient
        :param force_reload: Make sure the 'update_configuration' gets triggered.
                             Should be used when configuration changes have been applied from 'outside'
        :type force_reload: bool
        :return: Changes to the configuration
        :rtype: list
        """
        changes = []
        # Always persist the local view first
        Configuration.set(self._key, self.configuration)

        # No changes detected in the configuration management
        if len(self._dirty_entries) == 0 and force_reload is False:
            self._logger.debug('No need to apply changes, nothing changed')
            self.config_missing = False
            return changes

        # Retrieve the changes from volumedriver
        self._logger.info('Applying local storagedriver configuration changes{0}'.format('' if client is None else ' on {0}'.format(client.ip)))
        reloaded = False
        try:
            if client is None:
                changes = LocalStorageRouterClient(self.remote_path).update_configuration(self.remote_path)
            else:
                with remote(client.ip, [LocalStorageRouterClient]) as rem:
                    # deepcopy: detach the result from the remote's proxy objects
                    changes = copy.deepcopy(rem.LocalStorageRouterClient(self.remote_path).update_configuration(self.remote_path))
            reloaded = True
        except Exception as ex:
            # An unreachable cluster is tolerated (best-effort apply); anything else is fatal
            if 'ClusterNotReachableException' not in str(ex):
                raise

        # No changes
        if len(changes) == 0:
            if reloaded is True:
                if len(self._dirty_entries) > 0:
                    self._logger.warning('Following changes were not applied: {0}'.format(', '.join(self._dirty_entries)))
            else:
                self._logger.warning('Changes were not applied since StorageDriver is unavailable')
            self.config_missing = False
            self._dirty_entries = []
            return changes

        # Verify the output of the changes and log them
        for change in changes:
            if not isinstance(change, dict):
                raise RuntimeError('Unexpected update_configuration output')
            if 'param_name' not in change or 'old_value' not in change or 'new_value' not in change:
                raise RuntimeError('Unexpected update_configuration output. Expected different keys, but got {0}'.format(', '.join(change.keys())))

            param_name = change['param_name']
            if force_reload is False:
                if param_name not in self._dirty_entries:
                    raise RuntimeError('Unexpected configuration change: {0}'.format(param_name))
                self._dirty_entries.remove(param_name)
            self._logger.info('Changed {0} from "{1}" to "{2}"'.format(param_name, change['old_value'], change['new_value']))
        self._logger.info('Changes applied')
        if len(self._dirty_entries) > 0:
            self._logger.warning('Following changes were not applied: {0}'.format(', '.join(self._dirty_entries)))
        self.config_missing = False
        self._dirty_entries = []
        return changes

    def __getattr__(self, item):
        """
        Dynamic accessors: configure_<section>(**kwargs) stages settings for a
        section, clear_<section>() removes a section.
        """
        from ovs_extensions.generic.toolbox import ExtensionsToolbox

        if item.startswith('configure_'):
            section = ExtensionsToolbox.remove_prefix(item, 'configure_')
            return lambda **kwargs: self._add(section, **kwargs)
        if item.startswith('clear_'):
            section = ExtensionsToolbox.remove_prefix(item, 'clear_')
            return lambda: self._delete(section)
        # Previously this fell through and returned None for any unknown attribute,
        # which hides typos and breaks hasattr()/getattr() semantics
        raise AttributeError(item)

    def _add(self, section, **kwargs):
        """
        Configures a section: stores the values and marks changed parameters dirty
        """
        # items() instead of iteritems(): works on both Python 2 and Python 3
        for item, value in kwargs.items():
            if section not in self.configuration:
                self.configuration[section] = {}
            if item not in self.configuration[section] or self.configuration[section][item] != value:
                self._dirty_entries.append(item)
            self.configuration[section][item] = value

    def _delete(self, section):
        """
        Removes a section from the configuration (no-op when absent)
        """
        if section in self.configuration:
            del self.configuration[section]
def run(command, config=None, named_params=None, extra_params=None, client=None, debug=False, to_json=True):
    """
    Executes a command on ALBA
    When --to-json is NOT passed:
      * An error occurs --> exitcode != 0
      * It worked --> exitcode == 0
    When --to-json is passed:
      * An error occurs during verification of parameters passed -> exitcode != 0
      * An error occurs while executing the command --> exitcode == 0 (error in json output)
      * It worked --> exitcode == 0
    :param command: The command to execute, eg: 'list-namespaces'
    :type command: str
    :param config: The configuration location to be used, eg: 'arakoon://config/ovs/arakoon/ovsdb/config?ini=%2Fopt%2FOpenvStorage%2Fconfig%2Farakoon_cacc.ini'
    :type config: str
    :param named_params: Additional parameters to be given to the command, eg: {'long-id': ','.join(asd_ids)}
    :type named_params: dict
    :param extra_params: Additional parameters to be given to the command, eg: [name]
    :type extra_params: list
    :param client: A client on which to execute the command
    :type client: ovs_extensions.generic.sshclient.SSHClient
    :param debug: Log additional output
    :type debug: bool
    :param to_json: Request a JSON response from Alba
    :type to_json: bool
    :return: The output of the command
    :rtype: dict
    """
    if named_params is None:
        named_params = {}
    if extra_params is None:
        extra_params = []

    logger = Logger('extensions-plugins')
    if os.environ.get('RUNNING_UNITTESTS') == 'True':
        # For the unittest, all commands are passed to a mocked Alba
        from ovs.extensions.plugins.tests.alba_mockups import VirtualAlbaBackend
        # Copy before updating so the caller's dict is not mutated as a side effect
        named_params = dict(named_params)
        named_params.update({'config': config,
                             'extra_params': extra_params})
        return getattr(VirtualAlbaBackend, command.replace('-', '_'))(**named_params)

    debug_log = []
    try:
        extra_options = ['--to-json'] if to_json is True else []
        cmd_list = ['/usr/bin/alba', command] + extra_options
        if config is not None:
            cmd_list.append('--config={0}'.format(config))
        # items() instead of iteritems(): works on both Python 2 and Python 3
        for key, value in named_params.items():
            cmd_list.append('--{0}={1}'.format(key, value))
        cmd_list.extend(extra_params)
        cmd_string = ' '.join(cmd_list)
        debug_log.append('Command: {0}'.format(cmd_string))

        start = time.time()
        try:
            if debug is True:
                logger.debug('Command: {0}'.format(cmd_string))
            if client is None:
                try:
                    if not hasattr(select, 'poll'):
                        import subprocess
                        subprocess._has_poll = False  # Damn 'monkey patching'
                    channel = Popen(cmd_list, stdout=PIPE, stderr=PIPE, universal_newlines=True)
                except OSError as ose:
                    raise CalledProcessError(1, cmd_string, str(ose))
                output, stderr = channel.communicate()
                # Strip non-ASCII bytes so JSON decoding below does not choke
                output = re.sub(r'[^\x00-\x7F]+', '', output)
                stderr_debug = 'stderr: {0}'.format(stderr)
                stdout_debug = 'stdout: {0}'.format(output)
                if debug is True:
                    logger.debug(stderr_debug)
                    logger.debug(stdout_debug)
                debug_log.append(stderr_debug)
                debug_log.append(stdout_debug)
                exit_code = channel.returncode
                if exit_code != 0:  # Raise same error as check_output
                    raise CalledProcessError(exit_code, cmd_string, output)
            else:
                if debug is True:
                    output, stderr = client.run(cmd_list, debug=True, return_stderr=True)
                    debug_log.append('stderr: {0}'.format(stderr))
                else:
                    output = client.run(cmd_list).strip()
                debug_log.append('stdout: {0}'.format(output))

            if to_json is True:
                output = json.loads(output)
            else:
                # Raw-output mode: return immediately, no success-envelope to unwrap
                return output
            duration = time.time() - start
            if duration > 0.5:
                logger.warning('AlbaCLI call {0} took {1}s'.format(command, round(duration, 2)))
        except CalledProcessError as cpe:
            # With --to-json, Alba reports errors via JSON on a 0/non-0 exit code;
            # try to decode it, otherwise surface the raw output
            try:
                output = json.loads(cpe.output)
            except Exception:
                raise RuntimeError('Executing command {0} failed with output {1}'.format(cmd_string, cpe.output))

        if output['success'] is True:
            return output['result']
        raise AlbaError(output['error']['message'],
                        output['error']['exception_code'],
                        output['error']['exception_type'])

    except Exception as ex:
        logger.exception('Error: {0}'.format(ex))
        # In case there's an exception, we always log
        for debug_line in debug_log:
            logger.debug(debug_line)
        raise