def get_my_linklocal(interface):
    try:
        if_str = execute('ip', '-f', 'inet6', '-o', 'addr', 'show', interface)
        condition = r'\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link'
        links = [re.search(condition, x) for x in if_str[0].split('\n')]
        address = [w.group(1) for w in links if w is not None]
        if address:
            return address[0]
        else:
            raise exception.Error(_('Link Local address is not found: %s') %
                                  if_str)
    except Exception as ex:
        raise exception.Error(_("Couldn't get Link Local IP of %(interface)s"
                                ": %(ex)s") % locals())
def get_from_path(items, path):
    """Returns a list of items matching the specified path.

    Takes an XPath-like expression e.g. prop1/prop2/prop3, and for each item
    in items, looks up items[prop1][prop2][prop3].  Like XPath, if any of the
    intermediate results are lists it will treat each list item individually.
    A 'None' in items or any child expressions will be ignored; this function
    will not throw because of None (anywhere) in items.  The returned list
    will contain no None values.

    """
    if path is None:
        raise exception.Error('Invalid mini_xpath')

    (first_token, sep, remainder) = path.partition('/')

    if first_token == '':
        raise exception.Error('Invalid mini_xpath')

    results = []

    if items is None:
        return results

    if not isinstance(items, list):
        # Wrap single objects in a list
        items = [items]

    for item in items:
        if item is None:
            continue
        get_method = getattr(item, 'get', None)
        if get_method is None:
            continue
        child = get_method(first_token)
        if child is None:
            continue
        if isinstance(child, list):
            # Flatten intermediate lists
            for x in child:
                results.append(x)
        else:
            results.append(child)

    if not sep:
        # No more tokens
        return results
    else:
        return get_from_path(results, remainder)
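# Illustrative usage of get_from_path (a non-executed sketch; the sample data
# below is hypothetical and not part of this module):
#
#   servers = [{'server': {'id': 1}}, {'server': {'id': 2}}, None]
#   get_from_path(servers, 'server/id')    # -> [1, 2]
#   get_from_path(servers, 'server/name')  # -> [] (missing keys are skipped)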
def register(self, ext):
    # Do nothing if the extension doesn't check out
    if not self._check_extension(ext):
        return

    alias = ext.alias
    LOG.audit(_('Loaded extension: %s'), alias)

    if alias in self.extensions:
        raise exception.Error("Found duplicate extension: %s" % alias)
    self.extensions[alias] = ext
def db_sync(version=None):
    if version is not None:
        try:
            version = int(version)
        except ValueError:
            raise exception.Error(_("version should be an integer"))

    current_version = db_version()
    repository = _find_migrate_repo()
    if version is None or version > current_version:
        return versioning_api.upgrade(get_engine(), repository, version)
    else:
        return versioning_api.downgrade(get_engine(), repository, version)
def ssh_execute(ssh, cmd, process_input=None,
                addl_env=None, check_exit_code=True):
    LOG.debug(_('Running cmd (SSH): %s'), cmd)
    if addl_env:
        raise exception.Error(_('Environment not supported over SSH'))

    if process_input:
        # This is (probably) fixable if we need it...
        raise exception.Error(_('process_input not supported over SSH'))

    stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
    channel = stdout_stream.channel

    #stdin.write('process_input would go here')
    #stdin.flush()

    # NOTE(justinsb): This seems suspicious...
    # ...other SSH clients have buffering issues with this approach
    stdout = stdout_stream.read()
    stderr = stderr_stream.read()
    stdin_stream.close()
    stdout_stream.close()
    stderr_stream.close()

    exit_status = channel.recv_exit_status()

    # exit_status == -1 if no exit code was returned
    if exit_status != -1:
        LOG.debug(_('Result was %s') % exit_status)
        if check_exit_code and exit_status != 0:
            raise exception.ProcessExecutionError(exit_code=exit_status,
                                                  stdout=stdout,
                                                  stderr=stderr,
                                                  cmd=cmd)
    channel.close()
    return (stdout, stderr)
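# Illustrative usage of ssh_execute (a non-executed sketch; it assumes the ssh
# argument is a connected paramiko.SSHClient, which matches the exec_command()
# stream triple used above, and the host/user values are hypothetical):
#
#   client = paramiko.SSHClient()
#   client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
#   client.connect('192.0.2.10', username='stack')
#   out, err = ssh_execute(client, 'uptime')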
def db_version():
    repository = _find_migrate_repo()
    try:
        return versioning_api.db_version(get_engine(), repository)
    except versioning_exceptions.DatabaseNotControlledError:
        # If we aren't version controlled we may already have the database
        # in the state from before we started version control, check for that
        # and set up version_control appropriately
        meta = sqlalchemy.MetaData()
        engine = get_engine()
        meta.reflect(bind=engine)
        tables = meta.tables
        if len(tables) == 0:
            db_version_control(migration.INIT_VERSION)
            return versioning_api.db_version(get_engine(), repository)
        else:
            raise exception.Error(_("Upgrade DB using Essex release first."))
def __get_backend(self):
    if not self.__backend:
        backend_name = FLAGS[self.__pivot]
        if backend_name not in self.__backends:
            raise exception.Error(_('Invalid backend: %s') % backend_name)

        backend = self.__backends[backend_name]
        if isinstance(backend, tuple):
            name = backend[0]
            fromlist = backend[1]
        else:
            name = backend
            fromlist = backend

        self.__backend = __import__(name, None, None, fromlist)
        LOG.debug(_('backend %s'), self.__backend)
    return self.__backend
def execute(*cmd, **kwargs):
    """Helper method to execute command with optional retry.

    If you add a run_as_root=True command, don't forget to add the
    corresponding filter to etc/monitor/rootwrap.d!

    :param cmd:             Passed to subprocess.Popen.
    :param process_input:   Send to opened process.
    :param check_exit_code: Single bool, int, or list of allowed exit codes.
                            Defaults to [0].  Raise
                            exception.ProcessExecutionError unless the
                            program exits with one of these codes.
    :param delay_on_retry:  True | False. Defaults to True. If set to True,
                            wait a short amount of time before retrying.
    :param attempts:        How many times to retry cmd.
    :param run_as_root:     True | False. Defaults to False. If set to True,
                            the command is prefixed by the command specified
                            in the root_helper FLAG.

    :raises exception.Error: on receiving unknown arguments
    :raises exception.ProcessExecutionError:

    :returns: a tuple, (stdout, stderr) from the spawned process, or None if
              the command fails.
    """
    process_input = kwargs.pop('process_input', None)
    check_exit_code = kwargs.pop('check_exit_code', [0])
    ignore_exit_code = False
    if isinstance(check_exit_code, bool):
        ignore_exit_code = not check_exit_code
        check_exit_code = [0]
    elif isinstance(check_exit_code, int):
        check_exit_code = [check_exit_code]
    delay_on_retry = kwargs.pop('delay_on_retry', True)
    attempts = kwargs.pop('attempts', 1)
    run_as_root = kwargs.pop('run_as_root', False)
    shell = kwargs.pop('shell', False)

    if len(kwargs):
        raise exception.Error(_('Got unknown keyword args '
                                'to utils.execute: %r') % kwargs)

    if run_as_root:
        if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
            LOG.deprecated(_('The root_helper option (which lets you specify '
                             'a root wrapper different from monitor-rootwrap, '
                             'and defaults to using sudo) is now deprecated. '
                             'You should use the rootwrap_config option '
                             'instead.'))
        if FLAGS.rootwrap_config is not None:
            cmd = ['sudo', 'monitor-rootwrap',
                   FLAGS.rootwrap_config] + list(cmd)
        else:
            cmd = shlex.split(FLAGS.root_helper) + list(cmd)
    cmd = map(str, cmd)

    while attempts > 0:
        attempts -= 1
        try:
            LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
            _PIPE = subprocess.PIPE  # pylint: disable=E1101
            obj = subprocess.Popen(cmd,
                                   stdin=_PIPE,
                                   stdout=_PIPE,
                                   stderr=_PIPE,
                                   close_fds=True,
                                   preexec_fn=_subprocess_setup,
                                   shell=shell)
            result = None
            if process_input is not None:
                result = obj.communicate(process_input)
            else:
                result = obj.communicate()
            obj.stdin.close()  # pylint: disable=E1101
            _returncode = obj.returncode  # pylint: disable=E1101
            if _returncode:
                LOG.debug(_('Result was %s') % _returncode)
                if not ignore_exit_code \
                        and _returncode not in check_exit_code:
                    (stdout, stderr) = result
                    raise exception.ProcessExecutionError(
                        exit_code=_returncode,
                        stdout=stdout,
                        stderr=stderr,
                        cmd=' '.join(cmd))
            return result
        except exception.ProcessExecutionError:
            if not attempts:
                raise
            else:
                LOG.debug(_('%r failed. Retrying.'), cmd)
                if delay_on_retry:
                    greenthread.sleep(random.randint(20, 200) / 100.0)
        finally:
            # NOTE(termie): this appears to be necessary to let the subprocess
            #               call clean something up in between calls; without
            #               it, two execute calls in a row hang the second one
            greenthread.sleep(0)
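# Illustrative usage of execute (a non-executed sketch; the commands, paths
# and rootwrap filters shown are hypothetical and deployment-specific):
#
#   out, err = execute('cat', '/etc/hostname')
#   out, err = execute('mount', '/dev/sdb1', '/mnt',
#                      run_as_root=True, attempts=3)
#   # check_exit_code=[0, 1] accepts either exit code without raising
#   out, err = execute('grep', 'pattern', '/var/log/syslog',
#                      check_exit_code=[0, 1])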