def _run(self, command, timeout, ignore_status, stdout, stderr,
         connect_timeout, env, options, stdin, args):
    """Helper function for run().

    Builds the full ssh command line (exporting any env vars and
    appending sh-escaped positional args to the command), executes it
    via utils.run, and translates ssh-level failures into autoserv
    exceptions.

    @param command: the command to run on the remote host.
    @param timeout: seconds to allow the command to run.
    @param ignore_status: if True, a non-zero exit status does not raise.
    @param stdout, stderr: tee targets passed through to utils.run.
    @param connect_timeout: ssh connection timeout, in seconds.
    @param env: whitespace-separated VAR=value pairs to export remotely.
    @param options: extra ssh options string.
    @param stdin: stdin to feed the remote command.
    @param args: extra arguments, each sh-escaped and appended to command.

    @return: the CmdResult from utils.run.
    @raises AutoservSSHTimeout: ssh connection timed out.
    @raises AutoservSshPermissionDeniedError: ssh authentication failed.
    @raises AutoservRunError: command failed and ignore_status is False.
    """
    ssh_cmd = self.ssh_command(connect_timeout, options)
    # An empty/whitespace-only env means "no exports"; otherwise the
    # remote command is prefixed with an export statement.
    if not env.strip():
        env = ""
    else:
        env = "export %s;" % env
    for arg in args:
        command += ' "%s"' % utils.sh_escape(arg)
    full_cmd = '%s "%s %s"' % (ssh_cmd, env, utils.sh_escape(command))
    result = utils.run(full_cmd, timeout, True, stdout, stderr,
                       verbose=False, stdin=stdin,
                       stderr_is_expected=ignore_status)

    # The error messages will show up in band (indistinguishable
    # from stuff sent through the SSH connection), so we have the
    # remote computer echo the message "Connected." before running
    # any command.  Since the following 2 errors have to do with
    # connecting, it's safe to do these checks.
    if result.exit_status == 255:
        if re.search(r'^ssh: connect to host .* port .*: '
                     r'Connection timed out\r$', result.stderr):
            raise error.AutoservSSHTimeout("ssh timed out", result)
        if "Permission denied." in result.stderr:
            msg = "ssh permission denied"
            raise error.AutoservSshPermissionDeniedError(msg, result)

    if not ignore_status and result.exit_status > 0:
        raise error.AutoservRunError("command execution error", result)

    return result
def refresh_guests(self):
    """
    Refresh the list of guests addresses.

    The is_used status will be updated according to the presence
    of the process specified in the pid file that was written when
    the virtual machine was started.

    TODO(poirier): there are a lot of race conditions in this code
    because the process might terminate on its own anywhere in
    between
    """
    # The qemu binary path is the same for every guest; compute it once.
    qemu_binary = utils.sh_escape(os.path.join(
        self.build_dir, "qemu/x86_64-softmmu/qemu-system-x86_64"))
    for entry in self.addresses:
        if not entry["is_used"]:
            continue
        ip = entry["ip"]
        script_args = {
            "pid_file_name": utils.sh_escape(os.path.join(
                self.pid_dir, "vhost%s_pid" % (ip,))),
            "monitor_file_name": utils.sh_escape(os.path.join(
                self.pid_dir, "vhost%s_monitor" % (ip,))),
            "qemu_binary": qemu_binary,
        }
        result = self.host.run(_check_process_script % script_args)
        # Mark the slot free when the remote check does not report the
        # qemu process as still alive.
        if result.stdout.strip() != "process present":
            entry["is_used"] = False
def install(self, host, **kwargs):
    """
    Install a kernel on the remote host.

    This will also invoke the guest's bootloader to set this kernel
    as the default kernel.

    Args:
        host: the host on which to install the kernel
        [kwargs]: remaining keyword arguments will be passed
            to Bootloader.add_kernel()

    Raises:
        AutoservError: no package has yet been obtained. Call
            DEBKernel.get() with a .deb package.
    """
    if self.source_material is None:
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    # Stage the .deb on the remote host, then install it.
    tmpdir = host.get_tmp_dir()
    deb_path = os.path.join(tmpdir,
                            os.path.basename(self.source_material))
    host.send_file(self.source_material, deb_path)
    host.run('dpkg -i "%s"' % (utils.sh_escape(deb_path),))
    # Build an initramfs matching the freshly installed kernel.
    host.run('mkinitramfs -o "%s" "%s"' % (
        utils.sh_escape(self.get_initrd_name()),
        utils.sh_escape(self.get_version()),
    ))

    host.bootloader.add_kernel(self.get_image_name(),
                               initrd=self.get_initrd_name(), **kwargs)
def get_file(self, source, dest, delete_dest=False, preserve_perm=True, preserve_symlinks=False): """ Copy files from the remote host to a local path. Directories will be copied recursively. If a source component is a directory with a trailing slash, the content of the directory will be copied, otherwise, the directory itself and its content will be copied. This behavior is similar to that of the program 'rsync'. Args: source: either 1) a single file or directory, as a string 2) a list of one or more (possibly mixed) files or directories dest: a file or a directory (if source contains a directory or more than one element, you must supply a directory dest) delete_dest: if this is true, the command will also clear out any old files at dest that are not in the source preserve_perm: tells get_file() to try to preserve the sources permissions on files and dirs preserve_symlinks: try to preserve symlinks instead of transforming them into files/dirs on copy Raises: AutoservRunError: the scp command failed """ if isinstance(source, basestring): source = [source] dest = os.path.abspath(dest) try: remote_source = self._encode_remote_paths(source) local_dest = utils.sh_escape(dest) rsync = self._make_rsync_cmd([remote_source], local_dest, delete_dest, preserve_symlinks) utils.run(rsync) except error.CmdError, e: # logging.warn("warning: rsync failed with: %s", e) logging.info("attempting to copy with scp instead") # scp has no equivalent to --delete, just drop the entire dest dir if delete_dest and os.path.isdir(dest): shutil.rmtree(dest) os.mkdir(dest) remote_source = self._make_rsync_compatible_source(source, False) if remote_source: # _make_rsync_compatible_source() already did the escaping remote_source = self._encode_remote_paths(remote_source, escape=False) local_dest = utils.sh_escape(dest) scp = self._make_scp_cmd([remote_source], local_dest) try: utils.run(scp) except error.CmdError, e: raise error.AutoservRunError(e.args[0], e.args[1])
def extract(self, host):
    """Extract the kernel package.

    This function is only useful to access the content of the
    package (for example the kernel image) without
    installing it. It is not necessary to run this function to
    install the kernel.

    Args:
        host: the host on which to extract the kernel package.

    Returns:
        The full path to the temporary directory on host where
        the package was extracted.

    Raises:
        AutoservError: no package has yet been obtained. Call
            DEBKernel.get() with a .deb package.
    """
    if self.source_material is None:
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    # Stage the .deb in a remote temp dir, then unpack it there.
    tmpdir = host.get_tmp_dir()
    deb_path = os.path.join(tmpdir,
                            os.path.basename(self.source_material))
    host.send_file(self.source_material, deb_path)

    content_dir = os.path.join(tmpdir, "contents")
    host.run('dpkg -x "%s" "%s"' % (utils.sh_escape(deb_path),
                                    utils.sh_escape(content_dir),))
    return content_dir
def extract(self, host):
    """Extract the kernel package.

    This function is only useful to access the content of the
    package (for example the kernel image) without installing it.
    It is not necessary to run this function to install the kernel.

    Args:
        host: the host on which to extract the kernel package.

    Returns:
        The full path to the temporary directory on host where
        the package was extracted.

    Raises:
        AutoservError: no package has yet been obtained. Call
            DEBKernel.get() with a .deb package.
    """
    if self.source_material is None:
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    remote_tmpdir = host.get_tmp_dir()
    remote_filename = os.path.join(
        remote_tmpdir, os.path.basename(self.source_material))
    host.send_file(self.source_material, remote_filename)

    # dpkg -x unpacks without registering the package with dpkg.
    content_dir = os.path.join(remote_tmpdir, "contents")
    extract_cmd = 'dpkg -x "%s" "%s"' % (
        utils.sh_escape(remote_filename),
        utils.sh_escape(content_dir),
    )
    host.run(extract_cmd)
    return content_dir
def test_install(self): self.common_code() # record self.host.run.expect_call('dpkg -i "%s"' % (utils.sh_escape(self.remote_filename))) result = common_utils.CmdResult() result.stdout = "1" utils.run.expect_call('dpkg-deb -f "%s" version' % utils.sh_escape(self.kernel.source_material)).and_return(result) utils.run.expect_call('dpkg-deb -f "%s" version' % utils.sh_escape(self.kernel.source_material)).and_return(result) self.host.run.expect_call('mkinitramfs -o "/boot/initrd.img-1" "1"') utils.run.expect_call('dpkg-deb -f "%s" version' % utils.sh_escape(self.kernel.source_material)).and_return(result) utils.run.expect_call('dpkg-deb -f "%s" version' % utils.sh_escape(self.kernel.source_material)).and_return(result) self.host.bootloader.add_kernel.expect_call('/boot/vmlinuz-1', initrd='/boot/initrd.img-1') # run and check self.kernel.install(self.host) self.god.check_playback()
def install(self, host, **kwargs):
    """
    Install a kernel on the remote host.

    This will also invoke the guest's bootloader to set this kernel
    as the default kernel.

    Args:
        host: the host on which to install the kernel
        [kwargs]: remaining keyword arguments will be passed
            to Bootloader.add_kernel()

    Raises:
        AutoservError: no package has yet been obtained. Call
            DEBKernel.get() with a .deb package.
    """
    if self.source_material is None:
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    remote_tmpdir = host.get_tmp_dir()
    remote_filename = os.path.join(
        remote_tmpdir, os.path.basename(self.source_material))
    host.send_file(self.source_material, remote_filename)

    # Install the package, then generate a matching initramfs.
    install_cmd = 'dpkg -i "%s"' % (utils.sh_escape(remote_filename),)
    host.run(install_cmd)
    mkinitramfs_cmd = 'mkinitramfs -o "%s" "%s"' % (
        utils.sh_escape(self.get_initrd_name()),
        utils.sh_escape(self.get_version()),)
    host.run(mkinitramfs_cmd)

    host.bootloader.add_kernel(self.get_image_name(),
                               initrd=self.get_initrd_name(),
                               **kwargs)
def _get_autodir(host):
    """Return the autotest client directory to use on |host|.

    Prefers an autodir already recorded on the host, then a path with a
    preinstalled client binary, then the first configured path that can
    be created.

    @raises AutotestRunError: no usable directory could be determined.
    """
    autodir = host.get_autodir()
    if autodir:
        logging.debug('Using existing host autodir: %s', autodir)
        return autodir

    candidate_paths = global_config.global_config.get_config_value(
            'AUTOSERV', 'client_autodir_paths', type=list)

    # Look for a preinstalled autotest binary
    for path in candidate_paths:
        autotest_binary = os.path.join(path, 'bin', 'autotest')
        try:
            host.run('test -x %s' % utils.sh_escape(autotest_binary))
            logging.debug('Found autotest binary at %s', path)
            return path
        except error.AutoservRunError:
            logging.debug('%s does not exist on %s', path, host.hostname)

    # Look for a location to create the autotest folder
    for path in candidate_paths:
        try:
            host.run('mkdir -p %s' % utils.sh_escape(path))
            logging.debug('Created autotest folder at %s', path)
            return path
        except error.AutoservRunError:
            logging.debug('Unable to create %s on %s', path, host.hostname)

    raise error.AutotestRunError('Cannot figure out autotest directory')
def reset_guest(self, guest_hostname):
    """
    Perform a hard reset on a virtual machine.

    Args:
        guest_hostname: the ip (as it was specified in the
            address list given to install()) of the guest
            to terminate.

    Raises:
        AutoservVirtError: the guest_hostname argument is
            invalid
    """
    for address in self.addresses:
        # Bug fix: the original compared with "is", which tests object
        # identity and only matched when both strings happened to be
        # the same interned object; "==" compares the ip values.
        if address["ip"] == guest_hostname:
            if address["is_used"]:
                break
            else:
                raise error.AutoservVirtError("guest "
                                              "hostname not in use")
    else:
        raise error.AutoservVirtError("Unknown guest hostname")

    monitor_file_name = utils.sh_escape(os.path.join(
        self.pid_dir, "vhost%s_monitor" % (address["ip"],)))
    # Drive the qemu monitor through the helper script to force the reset.
    self.host.run('python -c "%s"' % (utils.sh_escape(
        _hard_reset_script % {
            "monitor_file_name": monitor_file_name,}),))
def run_async(self, command, stdout_tee=None, stderr_tee=None, args=(),
              connect_timeout=30, options='', verbose=True,
              stderr_level=utils.DEFAULT_STDERR_LEVEL,
              cmd_outside_subshell=''):
    """
    Run a command on the remote host. Returns an AsyncJob object to
    interact with the remote process.

    This is mostly copied from SSHHost.run and SSHHost._run
    """
    if verbose:
        logging.debug("Running (async ssh) '%s'" % command)

    # Start a master SSH connection if necessary.
    self.start_master_ssh()

    # Ship the helper that wraps the remote process so it can be
    # controlled (e.g. killed) from this side.
    self.send_file(
        os.path.join(self.job.clientdir, "common_lib", "hosts",
                     "scripts", "run_helper.py"),
        os.path.join(self.job.tmpdir, "run_helper.py"))

    env = " ".join("=".join(pair) for pair in self.env.iteritems())
    ssh_cmd = self.ssh_command(connect_timeout, options)
    # An empty/whitespace-only env means "no exports".
    if not env.strip():
        env = ""
    else:
        env = "export %s;" % env
    for arg in args:
        command += ' "%s"' % utils.sh_escape(arg)
    # |command| is escaped twice: once as the helper's quoted argument,
    # and again when the whole helper invocation is embedded in the
    # ssh-spawned shell command line.
    full_cmd = '{ssh_cmd} "{env} {cmd}"'.format(
        ssh_cmd=ssh_cmd, env=env,
        cmd=utils.sh_escape(
            "%s (%s '%s')" % (
                cmd_outside_subshell,
                os.path.join(self.job.tmpdir, "run_helper.py"),
                utils.sh_escape(command))))

    job = utils.AsyncJob(full_cmd, stdout_tee=stdout_tee,
                         stderr_tee=stderr_tee, verbose=verbose,
                         stderr_level=stderr_level,
                         stdin=subprocess.PIPE)

    def kill_func():
        # this triggers the remote kill
        utils.nuke_subprocess(job.sp)

    job.kill_func = kill_func
    return job
def _install(self, host=None, autodir=None, lightweight=False):
    """
    Install autotest.  If get() was not called previously, an
    attempt will be made to install from the autotest svn
    repository.

    Args:
        host: a Host instance on which autotest will be installed
        autodir: location on the remote host to install to
        lightweight: exclude tests, deps and profilers, if possible

    Raises:
        AutoservError: if a tarball was not specified and
            the target host does not have svn installed in its path
    """
    if not host:
        host = self.host
    if not self.got:
        self.get()
    host.wait_up(timeout=30)
    host.setup()
    logging.info("Installing autotest on %s", host.hostname)

    # set up the autotest directory on the remote machine
    if not autodir:
        autodir = self._get_install_dir(host)
    host.set_autodir(autodir)
    host.run('mkdir -p %s' % utils.sh_escape(autodir))

    # make sure there are no files in $AUTODIR/results
    results_path = os.path.join(autodir, 'results')
    host.run('rm -rf %s/*' % utils.sh_escape(results_path),
             ignore_status=True)

    # Fetch the autotest client from the nearest repository
    try:
        c = global_config.global_config
        repos = c.get_config_value("PACKAGES", 'fetch_location',
                                   type=list)
        pkgmgr = packages.PackageManager(
            autodir, hostname=host.hostname, repo_urls=repos,
            do_locking=False, run_function=host.run,
            run_function_dargs=dict(timeout=600))
        # The packages dir is used to store all the packages that
        # are fetched on that client. (for the tests,deps etc.
        # too apart from the client)
        pkg_dir = os.path.join(autodir, 'packages')
        # clean up the autodir except for the packages directory
        host.run('cd %s && ls | grep -v "^packages$"'
                 ' | xargs rm -rf && rm -rf .[^.]*' % autodir)
        pkgmgr.install_pkg('autotest', 'client', pkg_dir, autodir,
                           preserve_install_dir=True)
        self.installed = True
        self.lightweight = lightweight
        return
    except global_config.ConfigError, e:
        # Packaging config missing/broken; callers presumably fall back
        # to another install method after this returns -- note the
        # function ends here without re-raising.
        logging.error("Could not install autotest using the packaging "
                      "system: %s", e)
def add_args(self, kernel, args):
    """
    Add cmdline arguments for the specified kernel.

    @param kernel: can be a position number (index) or title
    @param args: argument to be added to the current list of args
    """
    update_opt = '--update-kernel=%s' % utils.sh_escape(str(kernel))
    args_opt = '--args=%s' % utils.sh_escape(args)
    return self._run_boottool_exit_status(update_opt, args_opt)
def remove_args(self, kernel, args):
    """
    Removes specified cmdline arguments.

    @param kernel: can be a position number (index) or title
    @param args: argument to be removed of the current list of args
    """
    update_opt = '--update-kernel=%s' % utils.sh_escape(str(kernel))
    remove_opt = '--remove-args=%s' % utils.sh_escape(args)
    return self._run_boottool_exit_status(update_opt, remove_opt)
def _find_installable_dir(cls, host):
    """Return the first configured autodir path that is creatable
    and writable on |host|.

    @raises AutoservInstallError: no candidate path worked.
    """
    candidates = cls.get_client_autodir_paths(host)
    for path in candidates:
        escaped = utils.sh_escape(path)
        try:
            host.run('mkdir -p %s' % escaped)
            host.run('test -w %s' % escaped)
            return path
        except error.AutoservRunError:
            logging.debug('Failed to create %s', path)
    raise error.AutoservInstallError(
            'Unable to find a place to install Autotest; tried %s' %
            ', '.join(candidates))
def test_extract(self):
    # setup
    self.common_code()
    content_dir = os.path.join(self.remote_tmpdir, "contents")

    # record: extract() should unpack the .deb into the contents dir
    expected_cmd = 'dpkg -x "%s" "%s"' % (
        utils.sh_escape(self.remote_filename),
        utils.sh_escape(content_dir))
    self.host.run.expect_call(expected_cmd)

    # run and test
    self.kernel.extract(self.host)
    self.god.check_playback()
def test_extract(self):
    # setup
    self.common_code()
    expected_dir = os.path.join(self.remote_tmpdir, "contents")

    # record the dpkg extraction command extract() must issue
    extract_cmd = 'dpkg -x "%s" "%s"' % (
        utils.sh_escape(self.remote_filename),
        utils.sh_escape(expected_dir))
    self.host.run.expect_call(extract_cmd)

    # run and test
    self.kernel.extract(self.host)
    self.god.check_playback()
def _install(self, host=None, autodir=None, use_autoserv=True,
             use_packaging=True):
    """
    Install autotest.  If get() was not called previously, an
    attempt will be made to install from the autotest svn
    repository.

    @param host A Host instance on which autotest will be installed
    @param autodir Location on the remote host to install to
    @param use_autoserv Enable install modes that depend on the client
        running with the autoserv harness
    @param use_packaging Enable install modes that use the packaging system

    @exception AutoservError if a tarball was not specified and
        the target host does not have svn installed in its path
    """
    if not host:
        host = self.host
    if not self.got:
        self.get()
    host.wait_up(timeout=30)
    host.setup()
    logging.info("Installing autotest on %s", host.hostname)

    # set up the autotest directory on the remote machine
    if not autodir:
        autodir = self.get_install_dir(host)
    logging.info('Using installation dir %s', autodir)
    host.set_autodir(autodir)
    host.run('mkdir -p %s' % utils.sh_escape(autodir))

    # make sure there are no files in $AUTODIR/results
    results_path = os.path.join(autodir, 'results')
    host.run('rm -rf %s/*' % utils.sh_escape(results_path),
             ignore_status=True)

    # Fetch the autotest client from the nearest repository
    if use_packaging:
        try:
            self._install_using_packaging(host, autodir)
            logging.info("Installation of autotest completed using the "
                         "packaging system.")
            return
        except (error.PackageInstallError, error.AutoservRunError,
                global_config.ConfigError), e:
            # Packaging install failed; presumably other install
            # methods follow this block (not visible in this chunk).
            logging.info(
                    "Could not install autotest using the packaging "
                    "system: %s. Trying other methods", e)
def _find_installable_dir(cls, host):
    """Pick the first configured autodir path on |host| that can be
    created and is writable; report a metric and raise otherwise.

    @raises AutoservInstallError: no candidate path worked.
    """
    tried_paths = cls.get_client_autodir_paths(host)
    for candidate in tried_paths:
        escaped = utils.sh_escape(candidate)
        try:
            host.run('mkdir -p %s' % escaped)
            host.run('test -w %s' % escaped)
            return candidate
        except error.AutoservRunError:
            logging.debug('Failed to create %s', candidate)
    # Record the failure for monitoring before giving up.
    metrics.Counter('chromeos/autotest/errors/no_autotest_install_path'
                    ).increment(fields={'dut_host_name': host.hostname})
    raise error.AutoservInstallError(
            'Unable to find a place to install Autotest; tried %s' %
            ', '.join(tried_paths))
def _find_installable_dir(cls, host):
    """Return the first client autodir path that |host| can create and
    write to, bumping a stats counter when none is found.

    @raises AutoservInstallError: no candidate path worked.
    """
    candidate_paths = cls.get_client_autodir_paths(host)
    for candidate in candidate_paths:
        escaped = utils.sh_escape(candidate)
        try:
            host.run('mkdir -p %s' % escaped)
            host.run('test -w %s' % escaped)
            return candidate
        except error.AutoservRunError:
            logging.debug('Failed to create %s', candidate)
    # Record the failure before raising so dashboards can track it.
    metadata = {'_type': 'AutoservInstallError',
                'hostname': host.hostname}
    autotest_stats.Counter('AutoservInstallError',
                           metadata=metadata).increment()
    raise error.AutoservInstallError(
            'Unable to find a place to install Autotest; tried %s' %
            ', '.join(candidate_paths))
def run_cmd_on_host(hostname, cmd, stdin, stdout, stderr):
    """Spawn |cmd| on |hostname| over ssh and return the Popen object.

    @param hostname: remote host to run on.
    @param cmd: shell command to execute remotely (sh-escaped here).
    @param stdin, stdout, stderr: passed through to subprocess.Popen.
    """
    ssh_base = abstract_ssh.make_ssh_command()
    full_cmd = "%s %s \"%s\"" % (ssh_base, hostname,
                                 server_utils.sh_escape(cmd))
    return subprocess.Popen(full_cmd, stdin=stdin, stdout=stdout,
                            stderr=stderr, shell=True)
def _verbose_logger_command(self, command):
    """
    Prepend the command for the client with information about the ssh
    command to be executed and the server stack state.

    @param command: the ssh command to be executed.
    @return: the wrapped shell command string.
    """
    stack_frames = inspect.stack()
    stack = ''
    # The last 2 frames on the stack are boring. Print 5-2=3 stack frames.
    count = min(5, len(stack_frames))
    if count >= 3:
        # Frame [2] is the innermost interesting caller; prepend its
        # callers so the final string reads outer|...|inner.
        stack = inspect.getframeinfo(stack_frames[2][0]).function
        for frame in stack_frames[3:count]:
            function_name = inspect.getframeinfo(frame[0]).function
            stack = '%s|%s' % (function_name, stack)
    # Frame objects hold references to locals; drop them promptly to
    # avoid reference cycles (standard inspect.stack() hygiene).
    del stack_frames

    # If "logger" executable exists on the DUT use it to respew |command|.
    # Then regardless of "logger" run |command| as usual.
    command = (
        'if type "logger" > /dev/null 2>&1; then'
        ' logger -tag "autotest" "server[stack::%s] -> ssh_run(%s)";'
        'fi; '
        '%s' % (stack, utils.sh_escape(command), command))
    return command
def boot_once(self, title):
    """Arrange for |title| to be booted on the next reboot only."""
    if self._host().job:
        # Remember the one-shot boot target on the job.
        self._host().job.last_boot_tag = title

    return self._run_boottool_exit_status(
            '--boot-once', '--title=%s' % utils.sh_escape(title))
def _make_rsync_compatible_globs(self, path, is_local):
    """
    Given an rsync-style path, returns a list of globbed paths
    that will hopefully provide equivalent behaviour for scp. Does not
    support the full range of rsync pattern matching behaviour, only that
    exposed in the get/send_file interface (trailing slashes).

    The is_local param is flag indicating if the paths should be
    interpreted as local or remote paths.
    """
    # non-trailing slash paths should just work
    if len(path) == 0 or path[-1] != "/":
        return [path]

    # make a function to test if a pattern matches any files
    if is_local:
        def glob_matches_files(path):
            return len(glob.glob(path)) > 0
    else:
        def glob_matches_files(path):
            result = self.run("ls \"%s\"" % utils.sh_escape(path),
                              ignore_status=True)
            return result.exit_status == 0

    # take a set of globs that cover all files, and see which are needed
    patterns = [pattern for pattern in ["*", ".[!.]*"]
                if glob_matches_files(path + pattern)]

    # convert them into a set of paths suitable for the commandline
    escaped_path = utils.sh_escape(path)
    if is_local:
        return ['"%s"%s' % (escaped_path, pattern)
                for pattern in patterns]
    return ['"%s"' % (escaped_path + pattern) for pattern in patterns]
def run_async(self, command, stdout_tee=None, stderr_tee=None, args=(),
              connect_timeout=30, options='', verbose=True,
              stderr_level=utils.DEFAULT_STDERR_LEVEL,
              cmd_outside_subshell=''):
    """
    Run a command on the remote host. Returns an AsyncJob object to
    interact with the remote process.

    This is mostly copied from SSHHost.run and SSHHost._run
    """
    if verbose:
        logging.debug("Running (async ssh) '%s'" % command)

    # Start a master SSH connection if necessary.
    self.start_master_ssh()

    # Copy the run_helper wrapper to the remote side so the spawned
    # process can be managed (killed) from the server.
    self.send_file(os.path.join(self.job.clientdir, "common_lib",
                                "hosts", "scripts", "run_helper.py"),
                   os.path.join(self.job.tmpdir, "run_helper.py"))

    env = " ".join("=".join(pair) for pair in self.env.iteritems())
    ssh_cmd = self.ssh_command(connect_timeout, options)
    # Only emit an export statement when there are env vars to export.
    if not env.strip():
        env = ""
    else:
        env = "export %s;" % env
    for arg in args:
        command += ' "%s"' % utils.sh_escape(arg)
    # Note the double sh_escape of |command|: once for the helper's
    # single-quoted argument, once for the outer ssh shell string.
    full_cmd = '{ssh_cmd} "{env} {cmd}"'.format(
        ssh_cmd=ssh_cmd, env=env,
        cmd=utils.sh_escape("%s (%s '%s')" % (
            cmd_outside_subshell,
            os.path.join(self.job.tmpdir, "run_helper.py"),
            utils.sh_escape(command))))

    job = utils.AsyncJob(full_cmd, stdout_tee=stdout_tee,
                         stderr_tee=stderr_tee, verbose=verbose,
                         stderr_level=stderr_level,
                         stdin=subprocess.PIPE)

    def kill_func():
        # this triggers the remote kill
        utils.nuke_subprocess(job.sp)

    job.kill_func = kill_func
    return job
def delete_tmp_dir(self, tmpdir):
    """
    Remove a temporary directory previously created on the remote host.

    @param tmpdir The directory to delete.
    """
    # Best-effort removal on the remote side, then stop tracking it.
    removal_cmd = 'rm -rf "%s"' % utils.sh_escape(tmpdir)
    self.run(removal_cmd, ignore_status=True)
    self.tmp_dirs.remove(tmpdir)
def send_file(self, source, dest, delete_dest=False,
              preserve_symlinks=False):
    """
    Copy files from a local path to the remote host.

    Directories will be copied recursively.
    If a source component is a directory with a trailing slash,
    the content of the directory will be copied, otherwise, the
    directory itself and its content will be copied. This
    behavior is similar to that of the program 'rsync'.

    Args:
        source: either
            1) a single file or directory, as a string
            2) a list of one or more (possibly mixed)
               files or directories
        dest: a file or a directory (if source contains a
            directory or more than one element, you must
            supply a directory dest)
        delete_dest: if this is true, the command will also clear
            out any old files at dest that are not in the source
        preserve_symlinks: controls if symlinks on the source
            will be copied as such on the destination or
            transformed into the referenced file/directory

    Raises:
        AutoservRunError: the scp command failed
    """
    logging.debug('send_file. source: %s, dest: %s, delete_dest: %s,'
                  'preserve_symlinks:%s', source, dest, delete_dest,
                  preserve_symlinks)
    # Start a master SSH connection if necessary.
    self.start_master_ssh()

    if isinstance(source, basestring):
        source = [source]

    remote_dest = self._encode_remote_paths([dest])
    local_sources = [utils.sh_escape(path) for path in source]
    if not local_sources:
        raise error.TestError('source |%s| yielded an empty list' % (
            source))

    # NUL bytes cannot survive the shell command line; reject them
    # early rather than silently mangling the copy.
    if any([local_source.find('\x00') != -1
            for local_source in local_sources]):
        raise error.TestError('one or more sources include NUL char')

    # If rsync is disabled or fails, try scp.
    try_scp = True
    if self.use_rsync():
        logging.debug('Using Rsync.')
        try:
            rsync = self._make_rsync_cmd(local_sources, remote_dest,
                                         delete_dest, preserve_symlinks)
            utils.run(rsync)
            try_scp = False
        except error.CmdError, e:
            # Fall through to the scp path below (scp fallback code
            # presumably follows this chunk).
            logging.warning("trying scp, rsync failed: %s", e)
def send_file(self, source, dest, delete_dest=False,
              preserve_symlinks=False):
    """
    Copy files from a local path to the remote host.

    Directories will be copied recursively.
    If a source component is a directory with a trailing slash,
    the content of the directory will be copied, otherwise, the
    directory itself and its content will be copied. This
    behavior is similar to that of the program 'rsync'.

    Args:
        source: either
            1) a single file or directory, as a string
            2) a list of one or more (possibly mixed)
               files or directories
        dest: a file or a directory (if source contains a
            directory or more than one element, you must
            supply a directory dest)
        delete_dest: if this is true, the command will also clear
            out any old files at dest that are not in the source
        preserve_symlinks: controls if symlinks on the source
            will be copied as such on the destination or
            transformed into the referenced file/directory

    Raises:
        AutoservRunError: the scp command failed
    """
    if isinstance(source, basestring):
        source = [source]
    remote_dest = self._encode_remote_paths([dest])

    try:
        local_sources = [utils.sh_escape(path) for path in source]
        rsync = self._make_rsync_cmd(local_sources, remote_dest,
                                     delete_dest, preserve_symlinks)
        utils.run(rsync)
    except error.CmdError, e:
        logging.warn("Command rsync failed with: %s", e)
        logging.info("Attempting to copy with scp instead")

        # scp has no equivalent to --delete, just drop the entire dest dir
        if delete_dest:
            is_dir = self.run("ls -d %s/" % dest,
                              ignore_status=True).exit_status == 0
            if is_dir:
                cmd = "rm -rf %s && mkdir %s"
                cmd %= (remote_dest, remote_dest)
                self.run(cmd)

        local_sources = self._make_rsync_compatible_source(source, True)
        if local_sources:
            scp = self._make_scp_cmd(local_sources, remote_dest)
            try:
                utils.run(scp)
            except error.CmdError, e:
                raise error.AutoservRunError(e.args[0], e.args[1])
def _install(self, host=None, autodir=None, use_autoserv=True,
             use_packaging=True):
    """
    Install autotest.  If get() was not called previously, an
    attempt will be made to install from the autotest svn
    repository.

    @param host A Host instance on which autotest will be installed
    @param autodir Location on the remote host to install to
    @param use_autoserv Enable install modes that depend on the client
        running with the autoserv harness
    @param use_packaging Enable install modes that use the packaging system

    @exception AutoservError If it wasn't possible to install the client
        after trying all available methods
    """
    if not host:
        host = self.host
    if not self.got:
        self.get()
    host.wait_up(timeout=30)
    host.setup()
    logging.info("Installing autotest on %s", host.hostname)

    # set up the autotest directory on the remote machine
    if not autodir:
        autodir = self.get_install_dir(host)
    logging.info('Using installation dir %s', autodir)
    host.set_autodir(autodir)
    host.run('mkdir -p %s' % utils.sh_escape(autodir))

    # make sure there are no files in $AUTODIR/results
    results_path = os.path.join(autodir, 'results')
    host.run('rm -rf %s/*' % utils.sh_escape(results_path),
             ignore_status=True)

    # Fetch the autotest client from the nearest repository
    if use_packaging:
        try:
            self._install_using_packaging(host, autodir)
            return
        except (error.PackageInstallError, error.AutoservRunError,
                global_config.ConfigError), e:
            # Fall through to the non-packaging install methods that
            # presumably follow this block (not visible in this chunk).
            logging.info("Could not install autotest using the packaging "
                         "system: %s. Trying other methods", e)
def add_kernel(self, path, title='autoserv', root=None, args=None,
               initrd=None, xen_hypervisor=None, default=True):
    """
    If an entry with the same title is already present, it will be
    replaced.
    """
    # Replace any existing entry carrying the same title.
    if title in self.get_titles():
        self._run_boottool('--remove-kernel "%s"' % (
                utils.sh_escape(title),))

    # Assemble the boottool parameter string piecewise.
    pieces = ['--add-kernel "%s" --title "%s"' % (
            utils.sh_escape(path), utils.sh_escape(title),)]

    if root:
        pieces.append(' --root "%s"' % (utils.sh_escape(root),))
    if args:
        pieces.append(' --args "%s"' % (utils.sh_escape(args),))
    # add an initrd now or forever hold your peace
    if initrd:
        pieces.append(' --initrd "%s"' % (utils.sh_escape(initrd),))
    if default:
        pieces.append(' --make-default')
    # add parameter if this is a Xen entry
    if self.xen_mode:
        pieces.append(' --xen')
        if xen_hypervisor:
            pieces.append(' --xenhyper "%s"' % (
                    utils.sh_escape(xen_hypervisor),))

    self._run_boottool(''.join(pieces))
def _encode_local_paths(self, paths, escape=True): """ Given a list of file paths, encodes it as a single local path. escape: add \\ to protect special characters. """ if escape: paths = [utils.sh_escape(path) for path in paths] return " ".join('"%s"' % p for p in paths)
def _run_boottool_cmd(self, *options):
    '''
    Run a boottool command, sh-escaping every parameter.
    '''
    # FIXME: add unsafe options strings sequence to host.run() parameters
    escaped_opts = [' "%s"' % utils.sh_escape(opt) for opt in options]
    cmd = self._get_boottool_path() + ''.join(escaped_opts)
    return self._host().run(cmd)
def close(self):
    """Close the host, stopping loggers and removing remote temp dirs."""
    super(RemoteHost, self).close()
    self.stop_loggers()

    # tmp_dirs may be absent if __init__ never got far enough to set it.
    for tmp_dir in getattr(self, 'tmp_dirs', []):
        try:
            self.run('rm -rf "%s"' % (utils.sh_escape(tmp_dir)))
        except error.AutoservRunError:
            # Best-effort cleanup; the host may already be unreachable.
            pass
def _make_ssh_cmd(self, cmd):
    """
    Create a base ssh command string for the host
    which can be used to run commands directly on the machine
    """
    ssh_base = self.make_ssh_command(user=self.user, port=self.port,
                                     opts=self.master_ssh_option,
                                     hosts_file=self.known_hosts_file)
    escaped_cmd = utils.sh_escape(cmd)
    return '%s %s "%s"' % (ssh_base, self.hostname, escaped_cmd)
def _make_ssh_cmd(self, cmd):
    """
    Create a base ssh command string for the host
    which can be used to run commands directly on the machine
    """
    # Note: uses the module-level make_ssh_command and the
    # known_hosts_fd attribute (unlike the method-based variant).
    ssh_base = make_ssh_command(user=self.user, port=self.port,
                                opts=self.master_ssh_option,
                                hosts_file=self.known_hosts_fd)
    escaped_cmd = utils.sh_escape(cmd)
    return '%s %s "%s"' % (ssh_base, self.hostname, escaped_cmd)
def get_file(self, source, dest, delete_dest=False, preserve_perm=True, preserve_symlinks=False): """ Copy files from the remote host to a local path. Directories will be copied recursively. If a source component is a directory with a trailing slash, the content of the directory will be copied, otherwise, the directory itself and its content will be copied. This behavior is similar to that of the program 'rsync'. Args: source: either 1) a single file or directory, as a string 2) a list of one or more (possibly mixed) files or directories dest: a file or a directory (if source contains a directory or more than one element, you must supply a directory dest) delete_dest: if this is true, the command will also clear out any old files at dest that are not in the source preserve_perm: tells get_file() to try to preserve the sources permissions on files and dirs preserve_symlinks: try to preserve symlinks instead of transforming them into files/dirs on copy Raises: AutoservRunError: the scp command failed """ # Start a master SSH connection if necessary. self.start_master_ssh() if isinstance(source, basestring): source = [source] dest = os.path.abspath(dest) # If rsync is disabled or fails, try scp. try_scp = True if self.use_rsync(): try: remote_source = self._encode_remote_paths(source) local_dest = utils.sh_escape(dest) rsync = self._make_rsync_cmd([remote_source], local_dest, delete_dest, preserve_symlinks) utils.run(rsync) try_scp = False except error.CmdError, e: logging.warn("trying scp, rsync failed: %s" % e)
def get_installed_autodir(cls, host):
    """
    Find where the Autotest client is installed on the host.

    @returns an absolute path to an installed Autotest client root.
    @raises AutodirNotFoundError if no Autotest installation
        can be found.
    """
    autodir = host.get_autodir()
    if autodir:
        logging.debug("Using existing host autodir: %s", autodir)
        return autodir

    for candidate in Autotest.get_client_autodir_paths(host):
        autotest_binary = os.path.join(candidate, "bin", "autotest")
        try:
            # Require both an executable client and a writable root.
            host.run("test -x %s" % utils.sh_escape(autotest_binary))
            host.run("test -w %s" % utils.sh_escape(candidate))
            logging.debug("Found existing autodir at %s", candidate)
            return candidate
        except error.AutoservRunError:
            logging.debug("%s does not exist on %s", autotest_binary,
                          host.hostname)
    raise AutodirNotFoundError
def __init__(self, repodir, giturl, weburl):
    """
    Set up paths and git command strings for a git repository object.

    @param repodir: local directory that holds (or will hold) the clone.
    @param giturl: git URL the repository is fetched from.
    @param weburl: http URL used for web access to the repository.
    @raises ValueError: if any of the three arguments is None.
    """
    # NOTE(review): super() is passed installable_object.InstallableObject
    # itself rather than this subclass, which skips
    # InstallableObject.__init__ in the MRO — confirm this is intentional.
    super(installable_object.InstallableObject, self).__init__()
    if repodir is None:
        e_msg = 'You must provide a directory to hold the git repository'
        raise ValueError(e_msg)
    # Escaped once here; every later shell command can embed it directly.
    self.repodir = utils.sh_escape(repodir)
    if giturl is None:
        raise ValueError('You must provide a git URL to the repository')
    self.giturl = giturl
    if weburl is None:
        raise ValueError('You must provide a http URL to the repository')
    self.weburl = weburl

    # path to .git dir
    self.gitpath = utils.sh_escape(os.path.join(self.repodir, '.git'))

    # base git command , pointing to gitpath git dir
    self.gitcmdbase = 'git --git-dir=%s' % self.gitpath

    # default to same remote path as local
    self.__build = os.path.dirname(self.repodir)
def set_default_by_index(self, index):
    '''
    Sets the given entry number to be the default on every next boot

    To set a default only for the next boot, use boot_once() instead.

    @param index: entry index number to set as the default.
    '''
    # Changing the permanent default invalidates any pending one-shot
    # boot tag recorded on the job.
    job = self._host().job
    if job:
        job.last_boot_tag = None
    escaped_index = utils.sh_escape(str(index))
    return self._run_boottool_exit_status('--set-default=%s'
                                          % escaped_index)
def test_get_version(self):
    """Verify get_version() returns the rpm-reported kernel version."""
    # record: the mocked utils.run must be called with exactly this
    # rpm query and hand back a canned CmdResult.
    expected = common_utils.CmdResult()
    expected.exit_status = 0
    expected.stdout = "image"
    query = ('rpm -qpi %s | grep Version | awk \'{print($3);}\''
             % (utils.sh_escape("source.rpm")))
    utils.run.expect_call(query).and_return(expected)

    # run and test
    self.assertEquals(self.kernel.get_version(), expected.stdout)
    self.god.check_playback()
def get_installed_autodir(cls, host):
    """
    Find where the Autotest client is installed on the host.

    @returns an absolute path to an installed Autotest client root.
    @raises AutodirNotFoundError if no Autotest installation can be found.
    """
    autodir = host.get_autodir()
    if autodir:
        logging.debug('Using existing host autodir: %s', autodir)
        return autodir

    for client_path in Autotest.get_client_autodir_paths(host):
        binary = os.path.join(client_path, 'bin', 'autotest')
        # Both probes must pass: executable client and writable root.
        probes = ('test -x %s' % utils.sh_escape(binary),
                  'test -w %s' % utils.sh_escape(client_path))
        try:
            for probe in probes:
                host.run(probe)
            logging.debug('Found existing autodir at %s', client_path)
            return client_path
        except error.AutoservRunError:
            logging.debug('%s does not exist on %s', binary,
                          host.hostname)
    raise AutodirNotFoundError
def _insert_modules(self):
    """
    Insert the kvm modules into the kernel.

    The modules inserted are the ones from the build directory, NOT
    the ones from the kernel. This function should only be called
    after install(). It will check that the modules are not already
    loaded before attempting to insert them.

    @raises AutoservVirtError: if the CPU reports neither the vmx
        (Intel) nor the svm (AMD) virtualization flag.
    """
    def _insmod_if_absent(module_name, module_rel_path):
        # insmod the module from the build dir only when /proc/modules
        # does not already list it; produces the same shell command the
        # previous hand-written variants did.
        self.host.run(
            'if ! $(grep -q "^%s " /proc/modules); '
            'then insmod "%s"; fi' %
            (module_name,
             utils.sh_escape(os.path.join(self.build_dir,
                                          module_rel_path))))

    cpu_flags = self.host.run(
        'cat /proc/cpuinfo | '
        'grep -e "^flags" | head -1 | cut -d " " -f 2-').stdout.strip()

    # vmx -> Intel VT-x, svm -> AMD-V.
    if 'vmx' in cpu_flags:
        module_type = "intel"
    elif 'svm' in cpu_flags:
        module_type = "amd"
    else:
        # Fixed typo in the user-facing message ("harware" -> "hardware").
        raise error.AutoservVirtError("No hardware "
                                      "virtualization extensions found, "
                                      "KVM cannot run")

    _insmod_if_absent("kvm", "kernel/kvm.ko")
    if module_type == "intel":
        _insmod_if_absent("kvm_intel", "kernel/kvm-intel.ko")
    elif module_type == "amd":
        _insmod_if_absent("kvm_amd", "kernel/kvm-amd.ko")
def _run(self, command, timeout, ignore_status, stdout, stderr,
         connect_timeout, env, options, stdin, args):
    """Helper function for run().

    Builds a single shell-escaped ssh invocation from the command, its
    extra args, and optional environment exports, runs it locally, and
    translates ssh-level failures into Autoserv exceptions.
    """
    ssh_cmd = self.ssh_command(connect_timeout, options)
    if not env.strip():
        env = ""
    else:
        # Prefix the remote command with the variable exports.
        env = "export %s;" % env
    for arg in args:
        # Each extra arg is escaped and appended to the remote command.
        command += ' "%s"' % utils.sh_escape(arg)
    full_cmd = '%s "%s %s"' % (ssh_cmd, env, utils.sh_escape(command))
    result = utils.run(full_cmd, timeout, True, stdout, stderr,
                       verbose=False, stdin=stdin,
                       stderr_is_expected=ignore_status)

    # The error messages will show up in band (indistinguishable
    # from stuff sent through the SSH connection), so we have the
    # remote computer echo the message "Connected." before running
    # any command.  Since the following 2 errors have to do with
    # connecting, it's safe to do these checks.
    # Exit status 255 is how the ssh client itself reports failure.
    if result.exit_status == 255:
        if re.search(r'^ssh: connect to host .* port .*: '
                     r'Connection timed out\r$', result.stderr):
            raise error.AutoservSSHTimeout("ssh timed out", result)
        if "Permission denied." in result.stderr:
            msg = "ssh permission denied"
            raise error.AutoservSshPermissionDeniedError(msg, result)
    if not ignore_status and result.exit_status > 0:
        raise error.AutoservRunError("command execution error", result)

    return result
def _insert_modules(self):
    """
    Insert the kvm modules into the kernel.

    The modules inserted are the ones from the build directory, NOT
    the ones from the kernel. This function should only be called
    after install(). It will check that the modules are not already
    loaded before attempting to insert them.
    """
    flags_cmd = ('cat /proc/cpuinfo | '
                 'grep -e "^flags" | head -1 | cut -d " " -f 2-')
    cpu_flags = self.host.run(flags_cmd).stdout.strip()

    # vmx flag means Intel VT-x, svm means AMD-V.
    if 'vmx' in cpu_flags:
        module_type = "intel"
    elif 'svm' in cpu_flags:
        module_type = "amd"
    else:
        raise error.AutoservVirtError("No harware "
                                      "virtualization extensions found, "
                                      "KVM cannot run")

    kvm_ko = utils.sh_escape(os.path.join(self.build_dir,
                                          "kernel/kvm.ko"))
    self.host.run('if ! $(grep -q "^kvm " /proc/modules); '
                  'then insmod "%s"; fi' % (kvm_ko,))

    if module_type == "intel":
        intel_ko = utils.sh_escape(os.path.join(self.build_dir,
                                                "kernel/kvm-intel.ko"))
        self.host.run('if ! $(grep -q "^kvm_intel " '
                      '/proc/modules); then insmod "%s"; fi'
                      % (intel_ko,))
    elif module_type == "amd":
        amd_ko = utils.sh_escape(os.path.join(self.build_dir,
                                              "kernel/kvm-amd.ko"))
        self.host.run('if ! $(grep -q "^kvm_amd " '
                      '/proc/modules); then insmod "%s"; fi'
                      % (amd_ko,))
def send_file(self, source, dest, delete_dest=False, preserve_symlinks=False): """ Copy files from a local path to the remote host. Directories will be copied recursively. If a source component is a directory with a trailing slash, the content of the directory will be copied, otherwise, the directory itself and its content will be copied. This behavior is similar to that of the program 'rsync'. Args: source: either 1) a single file or directory, as a string 2) a list of one or more (possibly mixed) files or directories dest: a file or a directory (if source contains a directory or more than one element, you must supply a directory dest) delete_dest: if this is true, the command will also clear out any old files at dest that are not in the source preserve_symlinks: controls if symlinks on the source will be copied as such on the destination or transformed into the referenced file/directory Raises: AutoservRunError: the scp command failed """ # Start a master SSH connection if necessary. self.start_master_ssh() if isinstance(source, basestring): source_is_dir = os.path.isdir(source) source = [source] remote_dest = self._encode_remote_paths([dest]) # If rsync is disabled or fails, try scp. try_scp = True if self.use_rsync(): try: local_sources = [utils.sh_escape(path) for path in source] rsync = self._make_rsync_cmd(local_sources, remote_dest, delete_dest, preserve_symlinks) utils.run(rsync) try_scp = False except error.CmdError, e: logging.warn("trying scp, rsync failed: %s" % e)
def _verbose_logger_command(self, command):
    """
    Prepend the command for the client with information about the ssh
    command to be executed and the server stack state.

    @param command: the ssh command to be executed.
    """
    # The last 3 frames on the stack are boring. Print 6-3=3 stack frames.
    stack = self._get_server_stack_state(lowest_frames=3, highest_frames=6)

    # If "logger" executable exists on the DUT use it to respew |command|.
    # Then regardless of "logger" run |command| as usual.
    logger_prefix = ('if type "logger" > /dev/null 2>&1; then'
                     ' logger -tag "autotest" "server[stack::%s] -> ssh_run(%s)";'
                     'fi; ' % (stack, utils.sh_escape(command)))
    return logger_prefix + command
def _install_using_send_file(self, host, autodir):
    """
    Push a lightweight client install: everything except the heavy
    directories, which are recreated empty on the host.
    """
    dirs_to_exclude = set(["tests", "site_tests", "deps", "profilers"])
    light_files = []
    for entry in os.listdir(self.source_material):
        if entry not in dirs_to_exclude:
            light_files.append(os.path.join(self.source_material, entry))
    host.send_file(light_files, autodir, delete_dest=True)

    # create empty dirs for all the stuff we excluded
    commands = []
    for excluded in dirs_to_exclude:
        escaped = utils.sh_escape(os.path.join(autodir, excluded))
        commands.append("mkdir -p '%s'" % escaped)
        commands.append("touch '%s'/__init__.py" % escaped)
    host.run(';'.join(commands))
def get_version(self):
    """Get the version of the kernel to be installed.

    Returns:
        The version string, as would be returned
        by 'make kernelrelease'.

    Raises:
        AutoservError: no package has yet been obtained. Call
            RPMKernel.get() with a .rpm package.
    """
    if self.source_material is None:
        # Use adjacent string literals instead of a backslash-newline
        # continuation *inside* the literal, which embedded source-layout
        # whitespace in the message; now consistent with
        # DEBKernel.get_version().
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    # Ask rpm for the Version field of the (not yet installed) package.
    query = ('rpm -qpi %s | grep Version | awk \'{print($3);}\''
             % utils.sh_escape(self.source_material))
    retval = utils.run(query)
    return retval.stdout.strip()
def _make_rsync_compatible_globs(self, path, is_local): """ Given an rsync-style path, returns a list of globbed paths that will hopefully provide equivalent behaviour for scp. Does not support the full range of rsync pattern matching behaviour, only that exposed in the get/send_file interface (trailing slashes). The is_local param is flag indicating if the paths should be interpreted as local or remote paths. """ # non-trailing slash paths should just work if len(path) == 0 or path[-1] != "/": return [path] # make a function to test if a pattern matches any files if is_local: def glob_matches_files(path, pattern): return len(glob.glob(path + pattern)) > 0 else: def glob_matches_files(path, pattern): result = self.run("ls \"%s\"%s" % (utils.sh_escape(path), pattern), stdout_tee=None, ignore_status=True) return result.exit_status == 0 # take a set of globs that cover all files, and see which are needed patterns = ["*", ".[!.]*"] patterns = [p for p in patterns if glob_matches_files(path, p)] # convert them into a set of paths suitable for the commandline if is_local: return [ "\"%s\"%s" % (utils.sh_escape(path), pattern) for pattern in patterns ] else: return [ utils.scp_remote_escape(path) + pattern for pattern in patterns ]
def uninstall(self, host=None):
    """
    Uninstall (i.e. delete) autotest. Removes the autotest client install
    from the specified host.

    @params host a Host instance from which the client will be removed
    """
    # Nothing to do when no install was recorded.
    if not self.installed:
        return
    target = host if host else self.host
    autodir = target.get_autodir()
    if not autodir:
        return

    # perform the actual uninstall
    target.run("rm -rf %s" % utils.sh_escape(autodir), ignore_status=True)
    target.set_autodir(None)
    self.installed = False
def get_version(self):
    """Get the version of the kernel to be installed.

    Returns:
        The version string, as would be returned
        by 'make kernelrelease'.

    Raises:
        AutoservError: no package has yet been obtained. Call
            DEBKernel.get() with a .deb package.
    """
    if self.source_material is None:
        raise error.AutoservError("A kernel must first be "
                                  "specified via get()")

    # dpkg-deb reads the version field straight from the package file.
    query = ('dpkg-deb -f "%s" version'
             % utils.sh_escape(self.source_material))
    return utils.run(query).stdout.strip()
def new_guest(self, qemu_options):
    """
    Start a new guest ("virtual machine").

    Returns:
        The ip that was picked from the list supplied to
        install() and assigned to this guest.

    Raises:
        AutoservVirtError: no more addresses are available.
    """
    # for/else: the else arm runs only when the loop found no free
    # address (no break happened).
    for address in self.addresses:
        if not address["is_used"]:
            break
    else:
        raise error.AutoservVirtError("No more addresses available")

    # Launch qemu daemonized, writing its pid and exposing a unix-socket
    # monitor, both named after the guest's ip.
    retval = self.host.run(
        '%s'
        # this is the line of options that can be modified
        ' %s '
        '-pidfile "%s" -daemonize -nographic '
        #~ '-serial telnet::4444,server '
        '-monitor unix:"%s",server,nowait '
        '-net nic,macaddr="%s" -net tap,script="%s" -L "%s"' % (
            utils.sh_escape(os.path.join(
                self.build_dir,
                "qemu/x86_64-softmmu/qemu-system-x86_64")),
            qemu_options,
            utils.sh_escape(os.path.join(
                self.pid_dir,
                "vhost%s_pid" % (address["ip"],))),
            utils.sh_escape(os.path.join(
                self.pid_dir,
                "vhost%s_monitor" % (address["ip"],))),
            utils.sh_escape(address["mac"]),
            utils.sh_escape(os.path.join(self.support_dir,
                                         "qemu-ifup.sh")),
            utils.sh_escape(os.path.join(self.build_dir,
                                         "qemu/pc-bios")),))

    # Only mark the address used after the guest actually started.
    address["is_used"] = True
    return address["ip"]
def _run(self, command, timeout, ignore_status, stdout, stderr,
         connect_timeout, env, options, stdin, args, ignore_timeout):
    """Helper function for run().

    Builds a shell-escaped ssh invocation, runs it with a one-shot retry
    on transient DNS failures, and translates ssh-level errors into
    Autoserv exceptions.
    """
    ssh_cmd = self.ssh_command(connect_timeout, options)
    if not env.strip():
        env = ""
    else:
        # Prefix the remote command with the variable exports.
        env = "export %s;" % env
    for arg in args:
        command += ' "%s"' % utils.sh_escape(arg)
    full_cmd = '%s "%s %s"' % (ssh_cmd, env, utils.sh_escape(command))

    # TODO(jrbarnette):  crbug.com/484726 - When we're in an SSP
    # container, sometimes shortly after reboot we will see DNS
    # resolution errors on ssh commands; the problem never
    # occurs more than once in a row.  This especially affects
    # the autoupdate_Rollback test, but other cases have been
    # affected, too.
    #
    # We work around it by detecting the first DNS resolution error
    # and retrying exactly one time.
    dns_retry_count = 2
    while True:
        result = utils.run(full_cmd, timeout, True, stdout, stderr,
                           verbose=False, stdin=stdin,
                           stderr_is_expected=ignore_status,
                           ignore_timeout=ignore_timeout)
        dns_retry_count -= 1
        # ssh exits 255 on client-side failure; the stderr pattern
        # identifies DNS resolution errors specifically.
        if (result and result.exit_status == 255 and re.search(
                r'^ssh: .*: Name or service not known', result.stderr)):
            if dns_retry_count:
                logging.debug('Retrying because of DNS failure')
                continue
            logging.debug('Retry failed.')
            autotest_stats.Counter('dns_retry_hack.fail').increment()
        elif not dns_retry_count:
            # Second pass without a DNS error: the retry worked.
            logging.debug('Retry succeeded.')
            autotest_stats.Counter('dns_retry_hack.pass').increment()
        break

    # With ignore_timeout, utils.run returns None on timeout.
    if ignore_timeout and not result:
        return None

    # The error messages will show up in band (indistinguishable
    # from stuff sent through the SSH connection), so we have the
    # remote computer echo the message "Connected." before running
    # any command.  Since the following 2 errors have to do with
    # connecting, it's safe to do these checks.
    if result.exit_status == 255:
        if re.search(r'^ssh: connect to host .* port .*: '
                     r'Connection timed out\r$', result.stderr):
            raise error.AutoservSSHTimeout("ssh timed out", result)
        if "Permission denied." in result.stderr:
            msg = "ssh permission denied"
            raise error.AutoservSshPermissionDeniedError(msg, result)
    if not ignore_status and result.exit_status > 0:
        raise error.AutoservRunError("command execution error", result)

    return result
class AbstractSSHHost(remote.RemoteHost):
    """
    This class represents a generic implementation of most of the
    framework necessary for controlling a host via ssh. It implements
    almost all of the abstract Host methods, except for the core
    Host.run method.
    """
    VERSION_PREFIX = ''

    def _initialize(self, hostname, user="******", port=22, password="",
                    is_client_install_supported=True, host_attributes={},
                    *args, **dargs):
        """Set up connection parameters and lazy state for an SSH host.

        @param hostname: name/address of the remote host.
        @param user: ssh login user.
        @param port: ssh port.
        @param password: ssh password, if any.
        @param is_client_install_supported: whether autotest client
            installs are supported on this host.
        @param host_attributes: extra attributes attached to the host.
            NOTE(review): mutable default argument ({}) is shared across
            calls — confirm no caller mutates it.
        """
        super(AbstractSSHHost, self)._initialize(hostname=hostname,
                                                 *args, **dargs)
        # IP address is retrieved only on demand. Otherwise the host
        # initialization will fail for host is not online.
        self._ip = None
        self.user = user
        self.port = port
        self.password = password
        self._is_client_install_supported = is_client_install_supported
        self._use_rsync = None
        # Temporary known-hosts file private to this host object.
        self.known_hosts_file = tempfile.mkstemp()[1]
        self._rpc_server_tracker = rpc_server_tracker.RpcServerTracker(self);

        """
        Master SSH connection background job, socket temp directory and socket
        control path option. If master-SSH is enabled, these fields will be
        initialized by start_master_ssh when a new SSH connection is initiated.
        """
        self.master_ssh_job = None
        self.master_ssh_tempdir = None
        self.master_ssh_option = ''

        # Create a Lock to protect against race conditions.
        self._lock = Lock()

        self.host_attributes = host_attributes

    @property
    def ip(self):
        """@return IP address of the host.
        """
        if not self._ip:
            # Resolved lazily and cached on first access.
            self._ip = socket.getaddrinfo(self.hostname, None)[0][4][0]
        return self._ip

    @property
    def is_client_install_supported(self):
        """
        Returns True if the host supports autotest client installs, False
        otherwise.
        """
        return self._is_client_install_supported

    @property
    def rpc_server_tracker(self):
        """
        @return The RPC server tracker associated with this host.
        """
        return self._rpc_server_tracker

    def make_ssh_command(self, user="******", port=22, opts='',
                         hosts_file='/dev/null',
                         connect_timeout=30, alive_interval=300):
        """Build the base ssh command line used for all remote access."""
        base_command = ("/usr/bin/ssh -a -x %s -o StrictHostKeyChecking=no "
                        "-o UserKnownHostsFile=%s -o BatchMode=yes "
                        "-o ConnectTimeout=%d -o ServerAliveInterval=%d "
                        "-l %s -p %d")
        assert isinstance(connect_timeout, (int, long))
        assert connect_timeout > 0  # can't disable the timeout
        return base_command % (opts, hosts_file, connect_timeout,
                               alive_interval, user, port)

    def use_rsync(self):
        """Return (and cache) whether rsync can be used with this host."""
        if self._use_rsync is not None:
            return self._use_rsync

        # Check if rsync is available on the remote host. If it's not,
        # don't try to use it for any future file transfers.
        self._use_rsync = self._check_rsync()
        if not self._use_rsync:
            logging.warning("rsync not available on remote host %s -- disabled",
                            self.hostname)
        return self._use_rsync

    def _check_rsync(self):
        """
        Check if rsync is available on the remote host.
        """
        try:
            self.run("rsync --version", stdout_tee=None, stderr_tee=None)
        except error.AutoservRunError:
            return False
        return True

    def _encode_remote_paths(self, paths, escape=True):
        """
        Given a list of file paths, encodes it as a single remote path, in
        the style used by rsync and scp.
        """
        if escape:
            paths = [utils.scp_remote_escape(path) for path in paths]

        remote = self.hostname

        # rsync and scp require IPv6 brackets, even when there isn't any
        # trailing port number (ssh doesn't support IPv6 brackets).
        # In the Python >= 3.3 future, 'import ipaddress' will parse addresses.
        if re.search(r':.*:', remote):
            remote = '[%s]' % remote

        return '%s@%s:"%s"' % (self.user, remote, " ".join(paths))

    def _make_rsync_cmd(self, sources, dest, delete_dest, preserve_symlinks):
        """
        Given a list of source paths and a destination path, produces the
        appropriate rsync command for copying them. Remote paths must be
        pre-encoded.
        """
        ssh_cmd = self.make_ssh_command(user=self.user, port=self.port,
                                        opts=self.master_ssh_option,
                                        hosts_file=self.known_hosts_file)
        if delete_dest:
            delete_flag = "--delete"
        else:
            delete_flag = ""
        if preserve_symlinks:
            symlink_flag = ""
        else:
            # -L dereferences symlinks, turning them into real files/dirs.
            symlink_flag = "-L"
        command = ("rsync %s %s --timeout=1800 --rsh='%s' -az --no-o --no-g "
                   "%s \"%s\"")
        return command % (symlink_flag, delete_flag, ssh_cmd,
                          " ".join(['"%s"' % p for p in sources]), dest)

    def _make_ssh_cmd(self, cmd):
        """
        Create a base ssh command string for the host which can be used
        to run commands directly on the machine
        """
        base_cmd = self.make_ssh_command(user=self.user, port=self.port,
                                         opts=self.master_ssh_option,
                                         hosts_file=self.known_hosts_file)

        return '%s %s "%s"' % (base_cmd, self.hostname, utils.sh_escape(cmd))

    def _make_scp_cmd(self, sources, dest):
        """
        Given a list of source paths and a destination path, produces the
        appropriate scp command for encoding it. Remote paths must be
        pre-encoded.
        """
        command = ("scp -rq %s -o StrictHostKeyChecking=no "
                   "-o UserKnownHostsFile=%s -P %d %s '%s'")
        return command % (self.master_ssh_option, self.known_hosts_file,
                          self.port, " ".join(sources), dest)

    def _make_rsync_compatible_globs(self, path, is_local):
        """
        Given an rsync-style path, returns a list of globbed paths
        that will hopefully provide equivalent behaviour for scp. Does not
        support the full range of rsync pattern matching behaviour, only that
        exposed in the get/send_file interface (trailing slashes).

        The is_local param is flag indicating if the paths should be
        interpreted as local or remote paths.
        """
        # non-trailing slash paths should just work
        if len(path) == 0 or path[-1] != "/":
            return [path]

        # make a function to test if a pattern matches any files
        if is_local:
            def glob_matches_files(path, pattern):
                return len(glob.glob(path + pattern)) > 0
        else:
            def glob_matches_files(path, pattern):
                result = self.run("ls \"%s\"%s" % (utils.sh_escape(path),
                                                   pattern),
                                  stdout_tee=None, ignore_status=True)
                return result.exit_status == 0

        # take a set of globs that cover all files, and see which are needed
        patterns = ["*", ".[!.]*"]
        patterns = [p for p in patterns if glob_matches_files(path, p)]

        # convert them into a set of paths suitable for the commandline
        if is_local:
            return ["\"%s\"%s" % (utils.sh_escape(path), pattern)
                    for pattern in patterns]
        else:
            return [utils.scp_remote_escape(path) + pattern
                    for pattern in patterns]

    def _make_rsync_compatible_source(self, source, is_local):
        """
        Applies the same logic as _make_rsync_compatible_globs, but
        applies it to an entire list of sources, producing a new list of
        sources, properly quoted.
        """
        return sum((self._make_rsync_compatible_globs(path, is_local)
                    for path in source), [])

    def _set_umask_perms(self, dest):
        """
        Given a destination file/dir (recursively) set the permissions on
        all the files and directories to the max allowed by running umask.
        """
        # now this looks strange but I haven't found a way in Python to _just_
        # get the umask, apparently the only option is to try to set it
        umask = os.umask(0)
        os.umask(umask)

        max_privs = 0777 & ~umask

        def set_file_privs(filename):
            """Sets mode of |filename|.  Assumes |filename| exists."""
            file_stat = os.stat(filename)

            file_privs = max_privs
            # if the original file permissions do not have at least one
            # executable bit then do not set it anywhere
            if not file_stat.st_mode & 0111:
                file_privs &= ~0111

            os.chmod(filename, file_privs)

        # try a bottom-up walk so changes on directory permissions won't cut
        # our access to the files/directories inside it
        for root, dirs, files in os.walk(dest, topdown=False):
            # when setting the privileges we emulate the chmod "X" behaviour
            # that sets to execute only if it is a directory or any of the
            # owner/group/other already has execute right
            for dirname in dirs:
                os.chmod(os.path.join(root, dirname), max_privs)

            # Filter out broken symlinks as we go.
            for filename in filter(os.path.exists, files):
                set_file_privs(os.path.join(root, filename))

        # now set privs for the dest itself
        if os.path.isdir(dest):
            os.chmod(dest, max_privs)
        else:
            set_file_privs(dest)

    def get_file(self, source, dest, delete_dest=False, preserve_perm=True,
                 preserve_symlinks=False):
        """
        Copy files from the remote host to a local path.

        Directories will be copied recursively.
        If a source component is a directory with a trailing slash,
        the content of the directory will be copied, otherwise, the
        directory itself and its content will be copied. This
        behavior is similar to that of the program 'rsync'.

        Args:
            source: either
                1) a single file or directory, as a string
                2) a list of one or more (possibly mixed)
                   files or directories
            dest: a file or a directory (if source contains a
                  directory or more than one element, you must
                  supply a directory dest)
            delete_dest: if this is true, the command will also clear
                         out any old files at dest that are not in the
                         source
            preserve_perm: tells get_file() to try to preserve the sources
                           permissions on files and dirs
            preserve_symlinks: try to preserve symlinks instead of
                               transforming them into files/dirs on copy

        Raises:
            AutoservRunError: the scp command failed
        """
        logging.debug('get_file. source: %s, dest: %s, delete_dest: %s,'
                      'preserve_perm: %s, preserve_symlinks:%s', source, dest,
                      delete_dest, preserve_perm, preserve_symlinks)

        # Start a master SSH connection if necessary.
        self.start_master_ssh()

        if isinstance(source, basestring):
            source = [source]
        dest = os.path.abspath(dest)

        # If rsync is disabled or fails, try scp.
        try_scp = True
        if self.use_rsync():
            logging.debug('Using Rsync.')
            try:
                remote_source = self._encode_remote_paths(source)
                local_dest = utils.sh_escape(dest)
                rsync = self._make_rsync_cmd([remote_source], local_dest,
                                             delete_dest, preserve_symlinks)
                utils.run(rsync)
                try_scp = False
            except error.CmdError, e:
                logging.warning("trying scp, rsync failed: %s", e)

        if try_scp:
            logging.debug('Trying scp.')
            # scp has no equivalent to --delete, just drop the entire dest dir
            if delete_dest and os.path.isdir(dest):
                shutil.rmtree(dest)
                os.mkdir(dest)

            remote_source = self._make_rsync_compatible_source(source, False)
            if remote_source:
                # _make_rsync_compatible_source() already did the escaping
                remote_source = self._encode_remote_paths(remote_source,
                                                          escape=False)
                local_dest = utils.sh_escape(dest)
                scp = self._make_scp_cmd([remote_source], local_dest)
                try:
                    utils.run(scp)
                except error.CmdError, e:
                    logging.debug('scp failed: %s', e)
                    raise error.AutoservRunError(e.args[0], e.args[1])
def _run_boottool(self, *options):
    """Run boottool on the host with the given options; return stdout."""
    boottool_path = self._get_boottool_path()
    # FIXME: add unsafe options strings sequence to host.run() parameters
    escaped_opts = [' "%s"' % utils.sh_escape(opt) for opt in options]
    cmd = boottool_path + ''.join(escaped_opts)
    return self._host().run(cmd).stdout
def glob_matches_files(path, pattern):
    # Remote probe: `ls` exits 0 iff the glob expands to at least one
    # existing path on the host.
    ls_cmd = "ls \"%s\"%s" % (utils.sh_escape(path), pattern)
    outcome = self.run(ls_cmd, stdout_tee=None, ignore_status=True)
    return outcome.exit_status == 0