def copy_data(self, path):
    """Recursively copy every non-ignored entry under *path* to the
    destination tree, validating each file and logging progress.

    Honors ``self.cancelled`` between entries so a copy can be aborted.
    """
    if self.cancelled:
        return
    listing, _ = utils.execute(
        "ls", "-pA1", "--group-directories-first", path, run_as_root=True)
    for entry in listing.split('\n'):
        if self.cancelled:
            return
        if not entry:
            continue
        source = os.path.join(path, entry)
        # Map the source path into the destination tree.
        target = source.replace(self.src, self.dest)
        is_dir = entry.endswith('/')
        name = entry[:-1] if is_dir else entry
        if name in self.ignore_list:
            continue
        if is_dir:
            utils.execute("mkdir", "-p", target, run_as_root=True)
            self.copy_data(source)
        else:
            size, _ = utils.execute("stat", "-c", "%s", source,
                                    run_as_root=True)
            self.current_copy = {'file_path': target, 'size': int(size)}
            self._copy_and_validate(source, target)
            self.current_size += int(size)
            LOG.info(six.text_type(self.get_progress()))
def _change_file_mode(self, filepath):
    """Make *filepath* world-readable/writable (mode 666).

    :param filepath: path of the file whose mode is changed.
    :raises Exception: re-raises whatever ``utils.execute`` raised.
    """
    try:
        utils.execute("chmod", "666", filepath, run_as_root=True)
    except Exception as err:
        LOG.error(_LE("Bad response from change file: %s.") % err)
        # Bare ``raise`` re-raises with the original traceback intact;
        # ``raise err`` would re-raise from this frame instead.
        raise
def _publish_local_config(configpath, pre_lines, exports):
    """Write the ganesha export configuration to *configpath*.

    The config is built in a temp file (copied with mode 666 so it can be
    edited without root) and then moved over the live file.

    :raises exception.GPFSGaneshaException: on any command failure.
    """
    tmp_path = '%s.tmp.%s' % (configpath, time.time())
    LOG.debug("tmp_path = %s", tmp_path)

    def _run_or_abort(cmd):
        # Shared failure path for all root commands in this function.
        try:
            utils.execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed while publishing ganesha config locally. '
                     'Error: %s.') % six.text_type(e))
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)

    # Copy the live config to a world-writable temp file.
    _run_or_abort(['install', '-m', '666', configpath, tmp_path])
    with open(tmp_path, 'w+') as conf_file:
        for line in pre_lines:
            conf_file.write('%s\n' % line)
        for export in exports:
            conf_file.write('EXPORT\n{\n')
            for attr in exports[export]:
                conf_file.write('%s = %s ;\n' % (attr, exports[export][attr]))
            conf_file.write('}\n')
    # Replace the live config with the freshly written file.
    _run_or_abort(['mv', tmp_path, configpath])
    LOG.info(_LI('Ganesha config %s published locally.'), configpath)
def reload_ganesha_config(servers, sshlogin, service='ganesha.nfsd'):
    """Request ganesha server reload updated config.

    :param servers: iterable of server IPs/hostnames to restart.
    :param sshlogin: ssh user for non-local servers.
    :param service: service name to restart.
    :raises exception.GPFSGaneshaException: if a restart fails.
    """
    # Note: dynamic reload of ganesha config is not enabled
    # in ganesha v2.0. Therefore, the code uses the ganesha service restart
    # option to make sure the config changes are reloaded
    # The local address list is loop-invariant: resolve it once instead of
    # performing a DNS/hosts lookup for every server.
    localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
    for server in servers:
        # Until reload is fully implemented and if the reload returns a bad
        # status revert to service restart instead
        LOG.info(_LI('Restart service %(service)s on %(server)s to force a '
                     'config file reload'),
                 {'service': service, 'server': server})
        run_local = True
        reload_cmd = ['service', service, 'restart']
        if server not in localserver_iplist:
            remote_login = sshlogin + '@' + server
            reload_cmd = ['ssh', remote_login] + reload_cmd
            run_local = False
        try:
            utils.execute(*reload_cmd, run_as_root=run_local)
        except exception.ProcessExecutionError as e:
            msg = (_('Could not restart service %(service)s on '
                     '%(server)s: %(excmsg)s')
                   % {'service': service, 'server': server,
                      'excmsg': six.text_type(e)})
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)
def _change_file_mode(self, filepath):
    """Make *filepath* world-readable/writable (mode 666).

    :param filepath: path of the file whose mode is changed.
    :raises Exception: re-raises whatever ``utils.execute`` raised.
    """
    try:
        utils.execute('chmod', '666', filepath, run_as_root=True)
    except Exception as err:
        LOG.error(_LE('Bad response from change file: %s.') % err)
        # Bare ``raise`` keeps the original traceback; ``raise err``
        # would re-raise from this frame.
        raise
def _validate_item(src_item, dest_item):
    """Verify that *dest_item* is a faithful copy of *src_item*.

    Compares SHA-256 digests of both files.

    :raises exception.ShareDataCopyFailed: if the digests differ.
    """
    # Pass the paths directly; the original wrapped them in a redundant
    # ``"%s" %`` format. The unused stderr is discarded as ``_``.
    src_sum, _ = utils.execute("sha256sum", src_item, run_as_root=True)
    dest_sum, _ = utils.execute("sha256sum", dest_item, run_as_root=True)
    # sha256sum prints "<digest>  <path>"; compare only the digest field.
    if src_sum.split()[0] != dest_sum.split()[0]:
        msg = _("Data corrupted while copying. Aborting data copy.")
        raise exception.ShareDataCopyFailed(reason=msg)
def cleanup_unmount_temp_folder(self, instance, migration_info):
    """Best-effort unmount of the instance's temporary migration folder.

    Failures are logged but never propagated.
    """
    try:
        utils.execute(*migration_info['umount'], run_as_root=True)
    except Exception as exc:
        LOG.exception(six.text_type(exc))
        LOG.error(_LE("Could not unmount folder of instance"
                      " %(instance_id)s for migration of "
                      "share %(share_id)s") % {
                          'instance_id': instance['id'],
                          'share_id': self.share['id']})
def mount_share_instance(self, mount_template, mount_path, share_instance_id):
    """Mount a share instance at ``mount_path/share_instance_id``.

    Creates the mount directory if needed and verifies it exists before
    running the templated mount command.
    """
    path = os.path.join(mount_path, share_instance_id)
    if not os.path.exists(path):
        os.makedirs(path)
    self._check_dir_exists(path)
    command = mount_template % {'path': path}
    utils.execute(*command.split(), run_as_root=True)
def cleanup_temp_folder(self, instance, mount_path):
    """Best-effort removal of the instance's temporary migration folder.

    Failures are logged but never propagated.
    """
    try:
        # NOTE(review): mount_path is concatenated without a separator —
        # presumably it already ends with '/'; confirm against callers.
        utils.execute('rmdir', mount_path + instance['id'],
                      check_exit_code=False)
    except Exception as exc:
        LOG.exception(six.text_type(exc))
        LOG.error(_LE("Could not cleanup instance %(instance_id)s "
                      "temporary folders for migration of "
                      "share %(share_id)s") % {
                          'instance_id': instance['id'],
                          'share_id': self.share['id']})
def _ovs_add_port(self, bridge, device_name, port_id, mac_address,
                  internal=True):
    """Add *device_name* to the OVS *bridge* and tag it with Neutron
    external-ids (iface-id, iface-status, attached-mac).
    """
    cmd = ['ovs-vsctl', '--', '--may-exist', 'add-port', bridge, device_name]
    if internal:
        cmd.extend(['--', 'set', 'Interface', device_name, 'type=internal'])
    cmd.extend([
        '--', 'set', 'Interface', device_name,
        'external-ids:iface-id=%s' % port_id,
        '--', 'set', 'Interface', device_name,
        'external-ids:iface-status=active',
        '--', 'set', 'Interface', device_name,
        'external-ids:attached-mac=%s' % mac_address,
    ])
    utils.execute(*cmd, run_as_root=True)
def _publish_remote_config(server, sshlogin, sshkey, configpath):
    """Copy the local ganesha config to the same path on *server* via scp.

    :raises exception.GPFSGaneshaException: if the copy fails.
    """
    destination = '%s@%s:%s' % (sshlogin, server, configpath)
    try:
        utils.execute('scp', '-i', sshkey, configpath, destination,
                      run_as_root=False)
    except exception.ProcessExecutionError as e:
        msg = (_('Failed while publishing ganesha config on remote server. '
                 'Error: %s.') % six.text_type(e))
        LOG.error(msg)
        raise exception.GPFSGaneshaException(msg)
    LOG.info(_LI('Ganesha config %(path)s published to %(server)s.'),
             {'path': configpath, 'server': server})
def unmount_share_instance(self, unmount_template, mount_path,
                           share_instance_id):
    """Unmount a share instance and try to remove its mount directory."""
    path = os.path.join(mount_path, share_instance_id)
    command = unmount_template % {'path': path}
    utils.execute(*command.split(), run_as_root=True)
    try:
        if os.path.exists(path):
            os.rmdir(path)
        self._check_dir_not_exists(path)
    except Exception:
        # Directory removal is best-effort; leftovers only warrant a warning.
        LOG.warning(_LW("Folder %s could not be removed."), path)
def _publish_access(self, *cmd):
    """Run *cmd* on every configured GPFS NFS server.

    The command runs locally (as root) when the server resolves to a
    local IP, otherwise over ssh as the configured login.

    :raises exception.ProcessExecutionError: if any execution fails.
    """
    # Loop-invariant: resolve the local IP list once.
    localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
    for server in self.configuration.gpfs_nfs_server_list:
        if server in localserver_iplist:
            run_command = cmd
            run_local = True
        else:
            # BUG FIX: the original rebound ``cmd`` here, so every later
            # remote server accumulated another ``ssh user@host`` prefix.
            # Build a per-server command instead (matches the fixed
            # sibling implementation of this helper).
            sshlogin = self.configuration.gpfs_ssh_login
            remote_login = sshlogin + '@' + server
            run_command = ['ssh', remote_login] + list(cmd)
            run_local = False
        # A ``try/except ProcessExecutionError: raise`` wrapper here would
        # be a no-op; simply let the exception propagate.
        utils.execute(*run_command, run_as_root=run_local,
                      check_exit_code=True)
def run_vsctl(self, args):
    """Run ovs-vsctl with a 2-second timeout and return its output.

    On failure the error is logged and None is returned implicitly.
    """
    vsctl_cmd = ["ovs-vsctl", "--timeout=2"] + args
    try:
        return utils.execute(*vsctl_cmd, run_as_root=True)
    except Exception as exc:
        LOG.error(_("Unable to execute %(cmd)s. Exception: %(exception)s"),
                  {'cmd': vsctl_cmd, 'exception': exc})
def _execute(self, *cmd, **kwargs):
    """Run *cmd* against the first reachable host, rotating hosts on failure.

    Tries each entry of ``self.hosts`` in order: local hosts run the
    command directly (re-wrapped via ``_as_user``), remote hosts run it
    over ssh. The first host that succeeds is moved to the front of
    ``self.hosts`` so later calls try it first.

    :returns: (stdout, stderr) of the successful execution.
    :raises exception.ProcessExecutionError: when every host fails, or
        immediately when ``_check_error`` classifies the error as fatal.
    """
    for x in range(0, len(self.hosts)):
        try:
            # NOTE(review): pop() means only the first iteration sees an
            # explicit check_exit_code kwarg; later retries use True.
            check_exit_code = kwargs.pop('check_exit_code', True)
            host = self.hosts[x]
            if host in self.local_hosts:
                # NOTE(review): ``cmd`` is rebound here, so a later remote
                # retry would send the user-wrapped command — confirm this
                # is intended.
                cmd = self._as_user(cmd, self.configuration.maprfs_ssh_name)
                out, err = utils.execute(*cmd,
                                         check_exit_code=check_exit_code)
            else:
                out, err = self._run_ssh(host, cmd, check_exit_code)
            # move available cldb host to the beginning
            if x > 0:
                self.hosts[0], self.hosts[x] = self.hosts[x], self.hosts[0]
            return out, err
        except exception.ProcessExecutionError as e:
            # Fatal errors propagate immediately; otherwise fall through to
            # the next host, or re-raise when this was the last one.
            if self._check_error(e):
                raise
            elif x < len(self.hosts) - 1:
                msg = ('Error running SSH command. Trying another host')
                LOG.error(msg)
            else:
                raise
        except Exception as e:
            # Any other failure: retry on the next host, or wrap the last
            # error in a ProcessExecutionError for the caller.
            if x < len(self.hosts) - 1:
                msg = ('Error running SSH command. Trying another host')
                LOG.error(msg)
            else:
                raise exception.ProcessExecutionError(six.text_type(e))
def _mount_for_migration(migration_info):
    """Run the mount command from *migration_info*; on failure, log,
    clean up both migration access rules, then re-raise.

    NOTE(review): ``share_instance``, ``new_share_instance``, ``helper``,
    ``src_access_ref``/``src_access`` and ``dest_access_ref``/``dest_access``
    are free names — this function is a closure inside a larger migration
    routine; confirm against the enclosing scope.
    """
    try:
        utils.execute(*migration_info['mount'], run_as_root=True)
    except Exception:
        LOG.error(_LE("Failed to mount temporary folder for "
                      "migration of share instance "
                      "%(share_instance_id)s "
                      "to %(new_share_instance_id)s") % {
                          'share_instance_id': share_instance['id'],
                          'new_share_instance_id': new_share_instance['id']})
        # Undo the temporary access rules on both sides before
        # propagating the mount failure.
        helper.cleanup_migration_access(
            src_access_ref, src_access)
        helper.cleanup_migration_access(
            dest_access_ref, dest_access)
        raise
def get_progress(self):
    """Return a dict describing overall and current-file copy progress.

    When no file is being copied, the copy is considered complete.
    """
    if self.current_copy is None:
        return {'total_progress': 100}
    try:
        out, _ = utils.execute("stat", "-c", "%s",
                               self.current_copy['file_path'],
                               run_as_root=True)
        size = int(out)
    except utils.processutils.ProcessExecutionError:
        # File may not exist yet at the destination.
        size = 0
    total_progress = 0
    if self.total_size > 0:
        total_progress = self.current_size * 100 / self.total_size
    current_file_progress = 0
    if self.current_copy['size'] > 0:
        current_file_progress = size * 100 / self.current_copy['size']
    return {
        'total_progress': total_progress,
        'current_file_path': self.current_copy['file_path'],
        'current_file_progress': current_file_progress,
    }
def run_vsctl(self, args):
    """Run ovs-vsctl with a 2-second timeout and return its output.

    On failure the exception is logged (with traceback) and None is
    returned implicitly.
    """
    vsctl_cmd = ["ovs-vsctl", "--timeout=2"] + args
    try:
        return utils.execute(*vsctl_cmd, run_as_root=True)
    except Exception:
        LOG.exception(_LE("Unable to execute %(cmd)s."),
                      {'cmd': vsctl_cmd})
def _execute(cls, options, command, args, namespace=None, as_root=False):
    """Run an ip(8) command, optionally inside a network namespace.

    :returns: stdout of the executed command.
    """
    base = ['ip', 'netns', 'exec', namespace, 'ip'] if namespace else ['ip']
    flags = ['-%s' % option for option in options]
    full_cmd = base + flags + [command] + list(args)
    return utils.execute(*full_cmd, run_as_root=as_root)[0]
def _execute(cls, options, command, args, namespace=None, as_root=False):
    """Run an ip(8) command, optionally inside a network namespace.

    :returns: stdout of the executed command.
    """
    if namespace:
        prefix = ["ip", "netns", "exec", namespace, "ip"]
    else:
        prefix = ["ip"]
    option_flags = ["-%s" % opt for opt in options]
    return utils.execute(
        *(prefix + option_flags + [command] + list(args)),
        run_as_root=as_root)[0]
def _gpfs_local_execute(self, *cmd, **kwargs):
    """Run a command locally, defaulting to root.

    ``ignore_exit_code`` may list extra exit codes to treat as success;
    0 is always included.
    """
    kwargs.setdefault('run_as_root', True)
    if 'ignore_exit_code' in kwargs:
        allowed_codes = kwargs.pop('ignore_exit_code')
        allowed_codes.append(0)
        kwargs['check_exit_code'] = allowed_codes
    return utils.execute(*cmd, **kwargs)
def _ipv6_configured():
    """Return True if the kernel exposes a non-empty /proc/net/if_inet6."""
    try:
        out, err = utils.execute('cat', '/proc/net/if_inet6')
    except exception.ProcessExecutionError:
        # File missing or unreadable: IPv6 is not configured.
        return False
    return bool(out)
def mount_share_instance(self, mount_template, mount_path, share_instance):
    """Mount a share instance under *mount_path*, applying any configured
    per-protocol mount options.
    """
    path = os.path.join(mount_path, share_instance['id'])
    # Normalize configured option keys to lower case, then pick the entry
    # matching this share's protocol (empty string when none configured).
    configured = {key.lower(): value
                  for key, value in CONF.data_node_mount_options.items()}
    proto_options = configured.get(
        share_instance['share_proto'].lower()) or ''
    if not os.path.exists(path):
        os.makedirs(path)
    self._check_dir_exists(path)
    command = mount_template % {'path': path, 'options': proto_options}
    utils.execute(*command.split(), run_as_root=True)
def execute(self, cmds, addl_env=None, check_exit_code=True):
    """Run *cmds* inside the parent's network namespace.

    :param cmds: command and arguments as a sequence.
    :param addl_env: optional mapping of extra environment variables to
        prepend via ``env``.
    :param check_exit_code: forwarded to ``utils.execute``.
    :raises Exception: if the parent has no namespace defined.
    """
    # BUG FIX: the original used ``addl_env={}`` — a mutable default
    # shared across calls. Use None and create a fresh dict per call.
    if addl_env is None:
        addl_env = {}
    if not self._parent.namespace:
        raise Exception(_('No namespace defined for parent'))
    env_params = []
    if addl_env:
        env_params = (['env'] +
                      ['%s=%s' % pair for pair in addl_env.items()])
    total_cmd = (['ip', 'netns', 'exec', self._parent.namespace] +
                 env_params + list(cmds))
    return utils.execute(*total_cmd, run_as_root=True,
                         check_exit_code=check_exit_code)
def execute(self, cmds, addl_env=None, check_exit_code=True):
    """Run *cmds* inside the parent's network namespace, prepending any
    extra environment variables (sorted for determinism) via ``env``.
    """
    if addl_env is None:
        addl_env = {}
    if not self._parent.namespace:
        raise Exception(_("No namespace defined for parent"))
    env_params = []
    if addl_env:
        env_params = ["env"] + ["%s=%s" % item
                                for item in sorted(addl_env.items())]
    full_cmd = (["ip", "netns", "exec", self._parent.namespace] +
                env_params + list(cmds))
    return utils.execute(*full_cmd, run_as_root=True,
                         check_exit_code=check_exit_code)
def get_total_size(self, path):
    """Recursively accumulate sizes of all non-ignored files under *path*
    into ``self.total_size``; honors ``self.cancelled``.
    """
    if self.cancelled:
        return
    listing, _ = utils.execute(
        "ls", "-pA1", "--group-directories-first", path, run_as_root=True)
    for entry in listing.split('\n'):
        if self.cancelled:
            return
        if not entry:
            continue
        full_path = os.path.join(path, entry)
        if entry.endswith('/'):
            # Directories: recurse unless ignored (trailing '/' stripped).
            if entry[:-1] not in self.ignore_list:
                self.get_total_size(full_path)
        elif entry not in self.ignore_list:
            size, _ = utils.execute("stat", "-c", "%s", full_path,
                                    run_as_root=True)
            self.total_size += int(size)
def test_no_retry_on_success(self):
    """Verify execute() does not retry a command that succeeds.

    The helper script fails if run a second time (it marks its first run
    in a scratch file), so a spurious retry after success would raise.
    """
    fd, tmpfilename = tempfile.mkstemp()
    _, tmpfilename2 = tempfile.mkstemp()
    try:
        fp = os.fdopen(fd, 'w+')
        # NOTE(review): newlines inside this script were reconstructed
        # from a whitespace-mangled source — confirm against upstream.
        fp.write('''#!/bin/sh
# If we've already run, bail out.
grep -q foo "$1" && exit 1
# Mark that we've run before.
echo foo > "$1"
# Check that stdin gets passed correctly.
grep foo
''')
        fp.close()
        os.chmod(tmpfilename, 0o755)
        # attempts=2 would allow one retry; success must not trigger it.
        utils.execute(tmpfilename, tmpfilename2,
                      process_input='foo', attempts=2)
    finally:
        os.unlink(tmpfilename)
        os.unlink(tmpfilename2)
def execute(self, cmds, addl_env=None, check_exit_code=True):
    """Run *cmds* inside the parent's network namespace, prepending any
    extra environment variables (sorted for determinism) via ``env``.
    """
    if addl_env is None:
        addl_env = {}
    if not self._parent.namespace:
        raise Exception(_('No namespace defined for parent'))
    env_params = []
    if addl_env:
        env_params = (['env'] +
                      ['%s=%s' % item for item in sorted(addl_env.items())])
    full_cmd = (['ip', 'netns', 'exec', self._parent.namespace] +
                env_params + list(cmds))
    return utils.execute(*full_cmd, run_as_root=True,
                         check_exit_code=check_exit_code)
def _publish_access(self, *cmd, **kwargs):
    """Run *cmd* on every configured GPFS NFS server and collect output.

    :param check_exit_code: keyword-only; forwarded to ``utils.execute``.
    :returns: list with one execute() result per server.
    :raises exception.ProcessExecutionError: if any execution fails.
    """
    check_exit_code = kwargs.get('check_exit_code', True)
    outs = []
    localserver_iplist = socket.gethostbyname_ex(socket.gethostname())[2]
    for server in self.configuration.gpfs_nfs_server_list:
        if server in localserver_iplist:
            run_command = cmd
            run_local = True
        else:
            sshlogin = self.configuration.gpfs_ssh_login
            remote_login = sshlogin + '@' + server
            run_command = ['ssh', remote_login] + list(cmd)
            run_local = False
        # The original wrapped this call in a no-op
        # ``try/except ProcessExecutionError: raise``; let it propagate.
        out = utils.execute(*run_command, run_as_root=run_local,
                            check_exit_code=check_exit_code)
        outs.append(out)
    return outs
def get_progress(self):
    """Return a dict with total and current-file copy progress."""
    # Empty share or empty contents
    if self.completed and self.total_size == 0:
        return {'total_progress': 100}
    if not self.initialized or self.current_copy is None:
        return {'total_progress': 0}
    file_path = self.current_copy['file_path']
    try:
        out, _ = utils.execute("stat", "-c", "%s", file_path,
                               run_as_root=True)
        copied = int(out)
    except utils.processutils.ProcessExecutionError:
        copied = 0
    file_progress = 0
    if self.current_copy['size'] > 0:
        file_progress = copied * 100 / self.current_copy['size']
    total_progress = 0
    if self.total_size > 0:
        if file_progress == 100:
            # A fully-copied file is already counted in current_size.
            copied = 0
        total_progress = int(
            (self.current_size + copied) * 100 / self.total_size)
    return {
        'total_progress': total_progress,
        'current_file_path': file_path,
        'current_file_progress': file_progress,
    }
def get_progress(self):
    """Return a dict with total and current-file copy progress."""
    # Empty share or empty contents
    if self.completed and self.total_size == 0:
        return {'total_progress': 100}
    if not self.initialized or self.current_copy is None:
        return {'total_progress': 0}
    current_path = self.current_copy['file_path']
    current_total = self.current_copy['size']
    try:
        stat_out, _ = utils.execute("stat", "-c", "%s", current_path,
                                    run_as_root=True)
        bytes_done = int(stat_out)
    except utils.processutils.ProcessExecutionError:
        bytes_done = 0
    pct_file = bytes_done * 100 / current_total if current_total > 0 else 0
    pct_total = 0
    if self.total_size > 0:
        if pct_file == 100:
            # Already accounted for in current_size.
            bytes_done = 0
        pct_total = int((self.current_size + bytes_done) * 100 /
                        self.total_size)
    return {
        'total_progress': pct_total,
        'current_file_path': current_path,
        'current_file_progress': pct_file,
    }
def copy_data(self, path):
    """Recursively copy every non-ignored entry under *path* to the
    destination tree, preserving attributes and logging progress.

    Honors ``self.cancelled`` between entries so a copy can be aborted.
    """
    if self.cancelled:
        return
    listing, _ = utils.execute("ls", "-pA1", "--group-directories-first",
                               path, run_as_root=True)
    for entry in listing.split('\n'):
        if self.cancelled:
            return
        if not entry:
            continue
        source = os.path.join(path, entry)
        target = source.replace(self.src, self.dest)
        is_dir = entry.endswith('/')
        name = entry[:-1] if is_dir else entry
        if name in self.ignore_list:
            continue
        if is_dir:
            utils.execute("mkdir", "-p", target, run_as_root=True)
            self.copy_data(source)
        else:
            size, _ = utils.execute("stat", "-c", "%s", source,
                                    run_as_root=True)
            self.current_copy = {'file_path': target, 'size': int(size)}
            utils.execute("cp", "-P", "--preserve=all", source, target,
                          run_as_root=True)
            self.current_size += int(size)
            LOG.info(six.text_type(self.get_progress()))
def _publish_local_config(configpath, pre_lines, exports):
    """Write the ganesha export configuration to *configpath*.

    The config is built in a temp copy (made world-writable so it can be
    edited without root) and then moved over the live file.

    :raises exception.GPFSGaneshaException: on any command failure.
    """
    tmp_path = '%s.tmp.%s' % (configpath, time.time())
    LOG.debug("tmp_path = %s", tmp_path)

    def _run_or_abort(cmd):
        # Shared failure path for all root commands in this function.
        try:
            utils.execute(*cmd, run_as_root=True)
        except exception.ProcessExecutionError as e:
            msg = (_('Failed while publishing ganesha config locally. '
                     'Error: %s.') % six.text_type(e))
            LOG.error(msg)
            raise exception.GPFSGaneshaException(msg)

    _run_or_abort(['cp', configpath, tmp_path])
    # change permission of the tmp file, so that it can be edited
    # by a non-root user
    _run_or_abort(['chmod', 'o+w', tmp_path])
    with open(tmp_path, 'w+') as conf_file:
        for line in pre_lines:
            conf_file.write('%s\n' % line)
        for export in exports:
            conf_file.write('EXPORT\n{\n')
            for attr in exports[export]:
                conf_file.write('%s = %s ;\n' % (attr, exports[export][attr]))
            conf_file.write('}\n')
    _run_or_abort(['mv', tmp_path, configpath])
    LOG.info(_LI('Ganesha config %s published locally.'), configpath)
def copy_stats(self, path):
    """Recursively re-apply mode, timestamps and ownership from the source
    directories to their copies in the destination tree.
    """
    if self.cancelled:
        return
    listing, _ = utils.execute(
        "ls", "-pA1", "--group-directories-first", path, run_as_root=True)
    for entry in listing.split('\n'):
        if self.cancelled:
            return
        if not entry:
            continue
        # NOTE(ganso): Should re-apply attributes for folders.
        if not entry.endswith('/'):
            continue
        if entry[:-1] in self.ignore_list:
            continue
        src_dir = os.path.join(path, entry)
        dest_dir = src_dir.replace(self.src, self.dest)
        self.copy_stats(src_dir)
        # Same order as before: mode, then timestamps, then ownership.
        for tool in ("chmod", "touch", "chown"):
            utils.execute(tool, "--reference=%s" % src_dir, dest_dir,
                          run_as_root=True)
def run_vsctl(self, args):
    """Run ovs-vsctl with a 2-second timeout and return its output.

    On failure the exception is logged (with traceback) and None is
    returned implicitly.
    """
    vsctl_cmd = ["ovs-vsctl", "--timeout=2"] + args
    try:
        return utils.execute(*vsctl_cmd, run_as_root=True)
    except Exception:
        LOG.exception("Unable to execute %(cmd)s.", {'cmd': vsctl_cmd})
def copy_share_data(self, context, helper, share, share_instance,
                    share_server, new_share_instance, new_share_server,
                    migration_info_src, migration_info_dest):
    """Copy all data from the source share instance to the new instance.

    Workflow: clear conflicting access rules, allow temporary access to
    both instances, mount both under a temporary location, copy the data,
    then unconditionally unmount, remove temp folders and revoke the
    temporary access. Raises ShareMigrationFailed if any step fails or the
    copy does not reach 100%.
    """
    # NOTE(ganso): This method is here because it is debatable if it can
    # be overridden by a driver or not. Personally I think it should not,
    # else it would be possible to lose compatibility with generic
    # migration between backends, but allows the driver to use it on its
    # own implementation if it wants to.
    migrated = False
    mount_path = self.configuration.safe_get('migration_tmp_location')
    src_access = migration_info_src['access']
    dest_access = migration_info_dest['access']
    if None in (src_access['access_to'], dest_access['access_to']):
        msg = _("Access rules not appropriate for mounting share instances"
                " for migration of share %(share_id)s,"
                " source share access: %(src_ip)s, destination share"
                " access: %(dest_ip)s. Aborting.") % {
            'src_ip': src_access['access_to'],
            'dest_ip': dest_access['access_to'],
            'share_id': share['id']}
        raise exception.ShareMigrationFailed(reason=msg)
    # NOTE(ganso): Removing any previously conflicting access rules, which
    # would cause the following access_allow to fail for one instance.
    helper.deny_migration_access(None, src_access, False)
    helper.deny_migration_access(None, dest_access, False)
    # NOTE(ganso): I would rather allow access to instances separately,
    # but I require an access_id since it is a new access rule and
    # destination manager must receive an access_id. I can either move
    # this code to manager code so I can create the rule in DB manually,
    # or ignore duplicate access rule errors for some specific scenarios.
    try:
        src_access_ref = helper.allow_migration_access(src_access)
    except Exception as e:
        LOG.error(
            _LE("Share migration failed attempting to allow "
                "access of %(access_to)s to share "
                "instance %(instance_id)s.") % {
                'access_to': src_access['access_to'],
                'instance_id': share_instance['id']})
        msg = six.text_type(e)
        LOG.exception(msg)
        raise exception.ShareMigrationFailed(reason=msg)
    try:
        dest_access_ref = helper.allow_migration_access(dest_access)
    except Exception as e:
        LOG.error(
            _LE("Share migration failed attempting to allow "
                "access of %(access_to)s to share "
                "instance %(instance_id)s.") % {
                'access_to': dest_access['access_to'],
                'instance_id': new_share_instance['id']})
        msg = six.text_type(e)
        LOG.exception(msg)
        # Roll back the source access rule created just above.
        helper.cleanup_migration_access(src_access_ref, src_access)
        raise exception.ShareMigrationFailed(reason=msg)
    # NOTE(ganso): From here we have the possibility of not cleaning
    # anything when facing an error. At this moment, we have the
    # destination instance in "inactive" state, while we are performing
    # operations on the source instance. I think it is best to not clean
    # the instance, leave it in "inactive" state, but try to clean
    # temporary access rules, mounts, folders, etc, since no additional
    # harm is done.

    def _mount_for_migration(migration_info):
        # Mounts one side; on failure, revokes both temporary access
        # rules and re-raises so the caller can clean up folders.
        try:
            utils.execute(*migration_info['mount'], run_as_root=True)
        except Exception:
            LOG.error(
                _LE("Failed to mount temporary folder for "
                    "migration of share instance "
                    "%(share_instance_id)s "
                    "to %(new_share_instance_id)s") % {
                    'share_instance_id': share_instance['id'],
                    'new_share_instance_id': new_share_instance['id']})
            helper.cleanup_migration_access(src_access_ref, src_access)
            helper.cleanup_migration_access(dest_access_ref, dest_access)
            raise

    # NOTE(review): mount_path is concatenated without a separator —
    # presumably it ends with '/'; confirm against configuration docs.
    utils.execute('mkdir', '-p', ''.join(
        (mount_path, share_instance['id'])))
    utils.execute('mkdir', '-p', ''.join(
        (mount_path, new_share_instance['id'])))
    # NOTE(ganso): mkdir command sometimes returns faster than it
    # actually runs, so we better sleep for 1 second.
    time.sleep(1)
    try:
        _mount_for_migration(migration_info_src)
    except Exception as e:
        LOG.error(
            _LE("Share migration failed attempting to mount "
                "share instance %s.") % share_instance['id'])
        msg = six.text_type(e)
        LOG.exception(msg)
        helper.cleanup_temp_folder(share_instance, mount_path)
        helper.cleanup_temp_folder(new_share_instance, mount_path)
        raise exception.ShareMigrationFailed(reason=msg)
    try:
        _mount_for_migration(migration_info_dest)
    except Exception as e:
        LOG.error(
            _LE("Share migration failed attempting to mount "
                "share instance %s.") % new_share_instance['id'])
        msg = six.text_type(e)
        LOG.exception(msg)
        # Source side is already mounted; unmount it before removing
        # the temporary folders.
        helper.cleanup_unmount_temp_folder(share_instance,
                                           migration_info_src)
        helper.cleanup_temp_folder(share_instance, mount_path)
        helper.cleanup_temp_folder(new_share_instance, mount_path)
        raise exception.ShareMigrationFailed(reason=msg)
    try:
        ignore_list = self.configuration.safe_get('migration_ignore_files')
        copy = share_utils.Copy(mount_path + share_instance['id'],
                                mount_path + new_share_instance['id'],
                                ignore_list)
        copy.run()
        # Only a fully completed copy counts as a successful migration.
        if copy.get_progress()['total_progress'] == 100:
            migrated = True
    except Exception as e:
        LOG.exception(six.text_type(e))
        LOG.error(
            _LE("Failed to copy files for "
                "migration of share instance "
                "%(share_instance_id)s "
                "to %(new_share_instance_id)s") % {
                'share_instance_id': share_instance['id'],
                'new_share_instance_id': new_share_instance['id']})
    # NOTE(ganso): For some reason I frequently get AMQP errors after
    # copying finishes, which seems like is the service taking too long to
    # copy while not replying heartbeat messages, so AMQP closes the
    # socket. There is no impact, it just shows a big trace and AMQP
    # reconnects after, although I would like to prevent this situation
    # without the use of additional threads. Suggestions welcome.
    utils.execute(*migration_info_src['umount'], run_as_root=True)
    utils.execute(*migration_info_dest['umount'], run_as_root=True)
    utils.execute('rmdir', ''.join((mount_path, share_instance['id'])),
                  check_exit_code=False)
    utils.execute('rmdir', ''.join((mount_path,
                                    new_share_instance['id'])),
                  check_exit_code=False)
    helper.deny_migration_access(src_access_ref, src_access)
    helper.deny_migration_access(dest_access_ref, dest_access)
    if not migrated:
        msg = ("Copying from share instance %(instance_id)s "
               "to %(new_instance_id)s did not succeed." % {
                   'instance_id': share_instance['id'],
                   'new_instance_id': new_share_instance['id']})
        raise exception.ShareMigrationFailed(reason=msg)
    LOG.debug("Copying completed in migration for share %s." % share['id'])
def iproute_arg_supported(command, arg):
    """Return True if the ip-route help output mentions *arg*.

    :param command: base command as a list (e.g. ``['ip', 'route']``).
    :param arg: argument name to look for in the help text.
    """
    # BUG FIX: ``command += ['help']`` mutated the caller's list in place;
    # build a new list instead.
    help_cmd = command + ['help']
    # 'help' prints usage on stderr and exits non-zero, so the exit code
    # is ignored and stderr is scanned.
    stdout, stderr = utils.execute(help_cmd, check_exit_code=False,
                                   return_stderr=True)
    return any(arg in line for line in stderr.split('\n'))
def _copy_and_validate(self, src_item, dest_item):
    """Copy a single file preserving all attributes, then verify the
    copy's checksum when hash checking is enabled.
    """
    utils.execute("cp", "-P", "--preserve=all", src_item, dest_item,
                  run_as_root=True)
    if self.check_hash:
        _validate_item(src_item, dest_item)
def _hdfs_local_execute(self, *cmd, **kwargs):
    """Run a command locally; defaults to non-root execution."""
    kwargs.setdefault('run_as_root', False)
    return utils.execute(*cmd, **kwargs)
def setup(app):
    """Sphinx extension hook: regenerate the autodoc index before building."""
    print("**Autodocumenting from %s" % os.path.abspath(os.curdir))
    output = utils.execute('./doc/generate_autodoc_index.sh')
    print(output[0])