def get_package_management(self):
    """
    Determine the supported package management systems present on the
    system. If more than one package management system is installed, try
    to find the best supported system.
    """
    list_supported = []
    for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
        try:
            os_dep.command(high_level_pm)
            list_supported.append(high_level_pm)
        except ValueError:
            pass

    pm_supported = None
    if len(list_supported) == 0:
        pm_supported = None
    if len(list_supported) == 1:
        pm_supported = list_supported[0]
    elif len(list_supported) > 1:
        if "apt-get" in list_supported and self.distro in ("debian", "ubuntu"):
            pm_supported = "apt-get"
        elif "yum" in list_supported and self.distro in ("redhat", "fedora"):
            pm_supported = "yum"
        else:
            pm_supported = list_supported[0]

    return pm_supported
def __init__(self, path):
    super(DistroPkgInfoLoaderDeb, self).__init__(path)
    try:
        os_dep.command('dpkg-deb')
        self.capable = True
    except ValueError:
        self.capable = False
def __init__(self, params, root_dir="/tmp"):
    os_dep.command("iscsiadm")
    self.target = params.get("target")
    self.export_flag = False
    if params.get("portal_ip"):
        self.portal_ip = params.get("portal_ip")
    else:
        self.portal_ip = utils.system_output("hostname")
    if params.get("iscsi_thread_id"):
        self.id = params.get("iscsi_thread_id")
    else:
        self.id = utils.generate_random_string(4)
    self.initiator = params.get("initiator")
    if params.get("emulated_image"):
        self.initiator = None
        os_dep.command("tgtadm")
        emulated_image = params.get("emulated_image")
        self.emulated_image = os.path.join(root_dir, emulated_image)
        self.emulated_id = ""
        self.emulated_size = params.get("image_size")
        self.unit = self.emulated_size[-1].upper()
        self.emulated_size = self.emulated_size[:-1]
        # maps K,M,G,T => (count, bs)
        emulated_size = {'K': (1, 1),
                         'M': (1, 1024),
                         'G': (1024, 1024),
                         'T': (1024, 1048576),
                         }
        if self.unit in emulated_size:
            block_size = emulated_size[self.unit][1]
            size = int(self.emulated_size) * emulated_size[self.unit][0]
            self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
                               % (self.emulated_image, size, block_size))
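
# Hedged illustration (not part of the original module): a standalone helper
# mirroring the K/M/G/T mapping used in __init__ above, to show how an
# image_size string such as "1G" becomes the dd command that __init__ builds.
# The helper name and the example values are assumptions added for clarity.
def _example_dd_command(emulated_image, image_size):
    unit = image_size[-1].upper()
    count = image_size[:-1]
    # same (count multiplier, dd block size in KiB) table as __init__ above
    emulated_size = {'K': (1, 1), 'M': (1, 1024),
                     'G': (1024, 1024), 'T': (1024, 1048576)}
    multiplier, block_size = emulated_size[unit]
    size = int(count) * multiplier
    # e.g. ("emulated.img", "1G") returns:
    #   "dd if=/dev/zero of=emulated.img count=1024 bs=1024K"
    return ("dd if=/dev/zero of=%s count=%s bs=%sK"
            % (emulated_image, size, block_size))
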
def get_package_management(self):
    """
    Determine the supported package management systems present on the
    system. If more than one package management system is installed, try
    to find the best supported system.
    """
    list_supported = []
    for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
        try:
            os_dep.command(high_level_pm)
            list_supported.append(high_level_pm)
        except Exception:
            pass

    pm_supported = None
    if len(list_supported) == 0:
        pm_supported = None
    if len(list_supported) == 1:
        pm_supported = list_supported[0]
    elif len(list_supported) > 1:
        if 'apt-get' in list_supported and self.distro in ['Debian', 'Ubuntu']:
            pm_supported = 'apt-get'
        elif 'yum' in list_supported and self.distro == 'Fedora':
            pm_supported = 'yum'
        else:
            pm_supported = list_supported[0]

    logging.debug('Package Manager backend: %s' % pm_supported)
    return pm_supported
def convert(package, destination_format):
    """\
    Convert packages with the 'alien' utility. If alien is not installed,
    error.TestError is raised.

    returns: filename of the package generated.
    """
    try:
        os_dep.command('alien')
    except Exception:
        e_msg = 'Cannot convert to %s, alien not installed' % destination_format
        raise error.TestError(e_msg)

    # alien supports converting to many formats, but it is only interesting to
    # map conversions for the implemented package types.
    if destination_format == 'dpkg':
        deb_pattern = re.compile('[A-Za-z0-9_.-]*[.][d][e][b]')
        conv_output = utils.system_output('alien --to-deb %s 2>/dev/null'
                                          % package)
        converted_package = re.findall(deb_pattern, conv_output)[0]
    elif destination_format == 'rpm':
        rpm_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
        conv_output = utils.system_output('alien --to-rpm %s 2>/dev/null'
                                          % package)
        converted_package = re.findall(rpm_pattern, conv_output)[0]
    else:
        e_msg = 'Conversion to format %s not implemented' % destination_format
        raise NotImplementedError(e_msg)

    print 'Package %s successfully converted to %s' % \
        (os.path.basename(package), os.path.basename(converted_package))
    return os.path.abspath(converted_package)
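
# Hedged usage sketch (illustrative, not part of the original module): a thin
# wrapper showing how convert() above might be driven from a test, choosing
# the destination format from the host's package tooling. The helper name and
# the host_pm values are assumptions; only 'rpm' and 'dpkg' destinations are
# implemented by convert().
def _example_convert_for_host(package, host_pm):
    # hosts using rpm/yum get an RPM, everything else gets a .deb
    destination = 'rpm' if host_pm in ('rpm', 'yum') else 'dpkg'
    return convert(package, destination)
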
def _server_system_wide_install():
    for path in SYSTEM_WIDE_PATHS:
        try:
            os_dep.command(path)
        except ValueError:
            return False
    return True
def _get_service_cmds(self):
    """
    Figure out the commands used to control the NFS service.
    """
    error.context("Finding out appropriate commands to handle NFS service")
    service = os_dep.command("service")
    try:
        systemctl = os_dep.command("systemctl")
    except ValueError:
        systemctl = None

    if systemctl is not None:
        init_script = "/etc/init.d/nfs"
        service_file = "/lib/systemd/system/nfs-server.service"
        if os.path.isfile(init_script):
            service_name = "nfs"
        elif os.path.isfile(service_file):
            service_name = "nfs-server"
        else:
            raise error.TestError("Files %s and %s absent, don't know "
                                  "how to set up NFS for this host" %
                                  (init_script, service_file))
        start_cmd = "%s start %s.service" % (systemctl, service_name)
        stop_cmd = "%s stop %s.service" % (systemctl, service_name)
        restart_cmd = "%s restart %s.service" % (systemctl, service_name)
        status_cmd = "%s status %s.service" % (systemctl, service_name)
    else:
        start_cmd = "%s nfs start" % service
        stop_cmd = "%s nfs stop" % service
        restart_cmd = "%s nfs restart" % service
        status_cmd = "%s nfs status" % service

    return [start_cmd, stop_cmd, restart_cmd, status_cmd]
def mkinitrd(self, version, image, system_map, initrd):
    """Build kernel initrd image.
    Try to use distro specific way to build initrd image.

    Parameters:
        version: new kernel version
        image: new kernel image file
        system_map: System.map file
        initrd: initrd image file to build
    """
    vendor = utils.get_os_vendor()

    if os.path.isfile(initrd):
        print "Existing %s file, will remove it." % initrd
        os.remove(initrd)

    args = self.job.config_get('kernel.mkinitrd_extra_args')

    # don't leak 'None' into mkinitrd command
    if not args:
        args = ''

    # It is important to match the version with a real directory inside
    # /lib/modules
    real_version_list = glob.glob('/lib/modules/%s*' % version)
    rl = len(real_version_list)
    if rl == 0:
        logging.error("No directory %s found under /lib/modules. Initramfs "
                      "creation will most likely fail and your new kernel "
                      "will fail to build", version)
    else:
        if rl > 1:
            logging.warning("Found more than one possible match for "
                            "kernel version %s under /lib/modules", version)
        version = os.path.basename(real_version_list[0])

    if vendor in ['Red Hat', 'Fedora']:
        try:
            cmd = os_dep.command('dracut')
            full_cmd = '%s -f %s %s' % (cmd, initrd, version)
        except ValueError:
            cmd = os_dep.command('mkinitrd')
            full_cmd = '%s %s %s %s' % (cmd, args, initrd, version)
        utils.system(full_cmd)
    elif vendor in ['SUSE']:
        utils.system('mkinitrd %s -k %s -i %s -M %s' %
                     (args, image, initrd, system_map))
    elif vendor in ['Debian', 'Ubuntu']:
        if os.path.isfile('/usr/sbin/mkinitrd'):
            cmd = '/usr/sbin/mkinitrd'
        elif os.path.isfile('/usr/sbin/mkinitramfs'):
            cmd = '/usr/sbin/mkinitramfs'
        else:
            raise error.TestError('No Debian initrd builder')
        utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
    else:
        raise error.TestError('Unsupported vendor %s' % vendor)
def get_package_management(self):
    """
    Determine the supported package management systems present on the
    system. If more than one package management system is installed, try
    to find the best supported system.
    """
    list_supported = []
    for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
        try:
            os_dep.command(high_level_pm)
            list_supported.append(high_level_pm)
        except Exception:
            pass

    pm_supported = None
    if len(list_supported) == 0:
        pm_supported = None
    if len(list_supported) == 1:
        pm_supported = list_supported[0]
    elif len(list_supported) > 1:
        if 'apt-get' in list_supported and self.distro in ['Debian', 'Ubuntu']:
            pm_supported = 'apt-get'
        elif 'yum' in list_supported and self.distro == 'Fedora':
            pm_supported = 'yum'
        else:
            pm_supported = list_supported[0]

    logging.debug('Package Manager backend: %s' % pm_supported)
    return pm_supported
def has_pbzip2():
    '''Check if parallel bzip2 is available on this system.'''
    try:
        os_dep.command('pbzip2')
    except ValueError:
        return False
    return True
def configure_crash_handler(self):
    """
    Configure the crash handler by:
     * Setting up core size to unlimited
     * Putting an appropriate crash handler on /proc/sys/kernel/core_pattern
     * Creating files that the crash handler will use to figure out which
       tests are active at a given moment

    The crash handler will pick up the core file and write it to
    self.debugdir, and perform analysis on it to generate a report. The
    program also outputs some results to syslog.

    If multiple tests are running, we attempt to verify whether the old PID
    is still on the system process table, to determine whether it is a
    parent of the current test execution. If we can't determine it, the
    core file and the report file will be copied to all test debug dirs.
    """
    self.crash_handling_enabled = False

    # make sure this script will run with a new enough python to work
    cmd = ("python3 -c 'import sys; "
           "print(sys.version_info[0], sys.version_info[1])'")
    result = utils.run(cmd, ignore_status=True, verbose=False)
    if result.exit_status != 0:
        logging.warning('System python is too old, crash handling disabled')
        return

    major, minor = [int(x) for x in result.stdout.strip().split()]
    if (major, minor) < (2, 4):
        logging.warning('System python is too old, crash handling disabled')
        return

    if not settings.settings.get_value('COMMON', 'crash_handling_enabled',
                                       type=bool):
        return

    self.pattern_file = '/proc/sys/kernel/core_pattern'

    try:
        # Enable core dumps
        resource.setrlimit(resource.RLIMIT_CORE, (-1, -1))
        # Trying to backup core pattern and register our script
        self.core_pattern_backup = open(self.pattern_file, 'r').read()
        pattern_file = open(self.pattern_file, 'w')
        tools_dir = os.path.join(self.autodir, 'tools')
        crash_handler_path = os.path.join(tools_dir, 'crash_handler.py')
        pattern_file.write('|' + crash_handler_path + ' %p %t %u %s %h %e')
        # Writing the files that the crash handler is going to use
        self.debugdir_tmp_file = ('/tmp/autotest_results_dir.%s' %
                                  os.getpid())
        utils.open_write_close(self.debugdir_tmp_file, self.debugdir + "\n")
    except Exception as e:
        logging.warning('Crash handling disabled: %s', e)
    else:
        self.crash_handling_enabled = True

    try:
        os_dep.command('gdb')
    except ValueError:
        logging.warning('Could not find GDB installed. Crash handling '
                        'will operate with limited functionality')
def mkinitrd(self, version, image, system_map, initrd):
    """Build kernel initrd image.
    Try to use distro specific way to build initrd image.

    Parameters:
        version: new kernel version
        image: new kernel image file
        system_map: System.map file
        initrd: initrd image file to build
    """
    d = distro.detect()

    if os.path.isfile(initrd):
        print "Existing %s file, will remove it." % initrd
        os.remove(initrd)

    args = self.job.config_get('kernel.mkinitrd_extra_args')

    # don't leak 'None' into mkinitrd command
    if not args:
        args = ''

    # It is important to match the version with a real directory inside
    # /lib/modules
    real_version_list = glob.glob('/lib/modules/%s*' % version)
    rl = len(real_version_list)
    if rl == 0:
        logging.error("No directory %s found under /lib/modules. Initramfs "
                      "creation will most likely fail and your new kernel "
                      "will fail to build", version)
    else:
        if rl > 1:
            logging.warning("Found more than one possible match for "
                            "kernel version %s under /lib/modules", version)
        version = os.path.basename(real_version_list[0])

    if d.name in ['redhat', 'fedora']:
        try:
            cmd = os_dep.command('dracut')
            full_cmd = '%s -f %s %s' % (cmd, initrd, version)
        except ValueError:
            cmd = os_dep.command('mkinitrd')
            full_cmd = '%s %s %s %s' % (cmd, args, initrd, version)
        utils.system(full_cmd)
    elif d.name in ['sles']:
        utils.system('mkinitrd %s -k %s -i %s -M %s' %
                     (args, image, initrd, system_map))
    elif d.name in ['debian', 'ubuntu']:
        if os.path.isfile('/usr/sbin/mkinitrd'):
            cmd = '/usr/sbin/mkinitrd'
        elif os.path.isfile('/usr/sbin/mkinitramfs'):
            cmd = '/usr/sbin/mkinitramfs'
        else:
            raise error.TestError('No Debian initrd builder')
        utils.system('%s %s -o %s %s' % (cmd, args, initrd, version))
    else:
        raise error.TestError('Unsupported distro %s' % d.name)
def get_package_management(self):
    """
    Determine the supported package management systems present on the
    system. If more than one package management system is installed, try
    to find the best supported system.
    """
    list_supported = []
    for high_level_pm in SUPPORTED_PACKAGE_MANAGERS:
        try:
            os_dep.command(high_level_pm)
            list_supported.append(high_level_pm)
        except ValueError:
            pass

    pm_supported = None
    if len(list_supported) == 0:
        pm_supported = None
    if len(list_supported) == 1:
        pm_supported = list_supported[0]
    elif len(list_supported) > 1:
        if ('apt-get' in list_supported and
                self.distro in ('debian', 'ubuntu')):
            pm_supported = 'apt-get'
        elif ('yum' in list_supported and
                self.distro in ('redhat', 'fedora')):
            pm_supported = 'yum'
        else:
            pm_supported = list_supported[0]

    return pm_supported
def __init__(self, params):
    os_dep.command("lvm")
    self.params = self.__format_params(params)
    self.pvs = self.__reload_pvs()
    self.vgs = self.__reload_vgs()
    self.lvs = self.__reload_lvs()
    self.trash = []
def __init__(self, params, root_dir="/tmp"):
    os_dep.command("iscsiadm")
    self.target = params.get("target")
    self.export_flag = False
    if params.get("portal_ip"):
        self.portal_ip = params.get("portal_ip")
    else:
        self.portal_ip = utils.system_output("hostname")
    if params.get("iscsi_thread_id"):
        self.id = params.get("iscsi_thread_id")
    else:
        self.id = utils.generate_random_string(4)
    self.initiator = params.get("initiator")
    if params.get("emulated_image"):
        self.initiator = None
        os_dep.command("tgtadm")
        emulated_image = params.get("emulated_image")
        self.emulated_image = os.path.join(root_dir, emulated_image)
        self.emulated_id = ""
        self.emulated_size = params.get("image_size")
        self.unit = self.emulated_size[-1].upper()
        self.emulated_size = self.emulated_size[:-1]
        # maps K,M,G,T => (count, bs)
        emulated_size = {'K': (1, 1),
                         'M': (1, 1024),
                         'G': (1024, 1024),
                         'T': (1024, 1048576),
                         }
        if emulated_size.has_key(self.unit):
            block_size = emulated_size[self.unit][1]
            size = int(self.emulated_size) * emulated_size[self.unit][0]
            self.create_cmd = ("dd if=/dev/zero of=%s count=%s bs=%sK"
                               % (self.emulated_image, size, block_size))
def run(test, params, env):
    """
    Test steps:
    1) Check the environment and get the params from params.
    2) while (loop_time < timeout): run the ttcp command.
    3) Clean up.
    """
    # Find the ttcp command.
    try:
        os_dep.command("ttcp")
    except ValueError:
        raise error.TestNAError("Could not find ttcp command on host.")

    # Get VM.
    vms = env.get_all_vms()
    for vm in vms:
        session = vm.wait_for_login()
        status, _ = session.cmd_status_output("which ttcp")
        if status:
            raise error.TestNAError("Could not find ttcp command on guest.")

    # Get parameters from params.
    timeout = int(params.get("LB_ttcp_timeout", "300"))
    ttcp_server_command = params.get("LB_ttcp_server_command",
                                     "ttcp -s -r -v -D -p5015")
    ttcp_client_command = params.get("LB_ttcp_client_command",
                                     "ttcp -s -t -v -D -p5015 -b65536 "
                                     "-l65536 -n1000 -f K")

    host_session = aexpect.ShellSession("sh")
    try:
        current_time = int(time.time())
        end_time = current_time + timeout
        # Start the loop from current_time to end_time.
        while current_time < end_time:
            for vm in vms:
                session = vm.wait_for_login()
                host_session.sendline(ttcp_server_command)
                cmd = ("%s %s" % (ttcp_client_command,
                                  utils_net.get_host_ip_address(params)))

                def _ttcp_good():
                    status, output = session.cmd_status_output(cmd)
                    logging.debug(output)
                    if status:
                        return False
                    return True

                if not utils_misc.wait_for(_ttcp_good, timeout=60):
                    status, output = session.cmd_status_output(cmd)
                    if status:
                        raise error.TestFail("Failed to run ttcp command on guest.\n"
                                             "Detail: %s." % output)
                remote.handle_prompts(host_session, None, None, r"[\#\$]\s*$")
            current_time = int(time.time())
    finally:
        # Clean up.
        host_session.close()
        session.close()
def run(test, params, env):
    """
    Test steps:
    1) Check the environment and get the params from params.
    2) while (loop_time < timeout): run the ttcp command.
    3) Clean up.
    """
    # Find the ttcp command.
    try:
        os_dep.command("ttcp")
    except ValueError:
        raise error.TestNAError("Could not find ttcp command on host.")

    # Get VM.
    vms = env.get_all_vms()
    for vm in vms:
        session = vm.wait_for_login()
        status, _ = session.cmd_status_output("which ttcp")
        if status:
            raise error.TestNAError("Could not find ttcp command on guest.")

    # Get parameters from params.
    timeout = int(params.get("LB_ttcp_timeout", "600"))
    ttcp_server_command = params.get("LB_ttcp_server_command",
                                     "ttcp -s -r -v -D -p5015")
    ttcp_client_command = params.get("LB_ttcp_client_command",
                                     "ttcp -s -t -v -D -p5015 -b65536 "
                                     "-l65536 -n1000 -f K")

    host_session = aexpect.ShellSession("sh")
    try:
        current_time = int(time.time())
        end_time = current_time + timeout
        # Start the loop from current_time to end_time.
        while current_time < end_time:
            for vm in vms:
                session = vm.wait_for_login()
                host_session.sendline(ttcp_server_command)
                cmd = ("%s %s" % (ttcp_client_command,
                                  utils_net.get_host_ip_address(params)))

                def _ttcp_good():
                    status, output = session.cmd_status_output(cmd)
                    logging.debug(output)
                    if status:
                        return False
                    return True

                if not utils_misc.wait_for(_ttcp_good, timeout=5):
                    status, output = session.cmd_status_output(cmd)
                    if status:
                        raise error.TestFail("Failed to run ttcp command on guest.\n"
                                             "Detail: %s." % output)
                remote.handle_prompts(host_session, None, None, r"[\#\$]\s*$")
            current_time = int(time.time())
    finally:
        # Clean up.
        host_session.close()
        session.close()
def test_lgf_cmd(self):
    cmd = "libguestfs-test-tool"
    try:
        os_dep.command(cmd)
        self.assertEqual(lgf.lgf_command(cmd).exit_status, 0)
    except ValueError:
        logging.warning("Command %s not installed, skipping unittest...",
                        cmd)
def setup(self, tarball='bonnie++-1.96.tgz'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    os_dep.command('g++')
    utils.configure()
    utils.make()
def compute_checksum(self, pkg_path):
    '''
    Compute the MD5 checksum for the package file and return it.

    pkg_path : The complete path for the package file
    '''
    os_dep.command("md5sum")
    md5sum_output = self._run_command("md5sum %s " % pkg_path).stdout
    return md5sum_output.split()[0]
def setup(self, tarball='bonnie++-1.03a.tgz'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    os_dep.command('g++')
    utils.system('patch -p1 < ../bonnie++-1.03a-gcc43.patch')
    utils.configure()
    utils.make()
def setup(self, tarball='bonnie++-1.03a.tgz'):
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    os_dep.command('g++')
    utils.system('patch -p1 < ../bonnie++-1.03a-gcc43.patch')
    utils.configure()
    utils.make()
def test_lgf_cmd_check(self):
    cmds = ['virt-ls', 'virt-cat']
    for cmd in cmds:
        try:
            os_dep.command(cmd)
            self.assertTrue(lgf.lgf_cmd_check(cmd))
        except ValueError:
            logging.warning("Command %s not installed, skipping "
                            "unittest...", cmd)
def init_db(self):
    utils.run(os_dep.command("ovsdb-tool"), timeout=10,
              args=["create", self.db_path, self.dbschema])
    utils.run(os_dep.command("ovsdb-server"), timeout=10,
              args=["--remote=punix:%s" % (self.db_socket),
                    "--remote=db:Open_vSwitch,manager_options",
                    "--pidfile=%s" % (self.db_pidfile),
                    "--detach"])
    self.ovs_vsctl(["--no-wait", "init"])
def test_lgf_cmd_check(self):
    cmds = ['virt-ls', 'virt-cat']
    for cmd in cmds:
        try:
            os_dep.command(cmd)
            self.assertTrue(lgf.lgf_cmd_check(cmd))
        except ValueError:
            logging.warning("Command %s not installed, skipping "
                            "unittest...", cmd)
def initialize(self, qemu_path=''):
    if qemu_path:
        # Prepending the path at the beginning of $PATH will make the
        # version found on qemu_path be preferred over other ones.
        os.environ['PATH'] = qemu_path + ":" + os.environ['PATH']
    try:
        self.qemu_img_path = os_dep.command('qemu-img')
        self.qemu_io_path = os_dep.command('qemu-io')
    except ValueError, e:
        raise error.TestNAError('Commands qemu-img or qemu-io missing')
def install_pkg_post(self, filename, fetch_dir, install_dir,
                     preserve_install_dir=False):
    os_dep.command("tar")
    filename, _ = self.pkgmgr.parse_tarball_name(filename)
    install_path = re.sub(filename, "", install_dir)
    for suffix in ['', '.tar', '.tar.bz2']:
        pkg_name = "%s%s" % (suffix, re.sub("/", "_", filename))
        fetch_path = os.path.join(fetch_dir, pkg_name)
        if os.path.exists(fetch_path):
            self.pkgmgr._run_command('tar -xf %s -C %s' %
                                     (fetch_path, install_path))
def _dpkg_info(dpkg_package):
    """\
    Private function that returns a dictionary with information about a
    dpkg package file

    - type: Package management program that handles the file
    - system_support: If the package management program is installed on the
      system or not
    - source: If it is a source (True) or binary (False) package
    - version: The package version (or name), that is used to check against
      the package manager if the package is installed
    - arch: The architecture for which a binary package was built
    - installed: Whether the package is installed (True) on the system or
      not (False)
    """
    # We will make good use of what the file command has to tell us about the
    # package :)
    file_result = utils.system_output('file ' + dpkg_package)
    package_info = {}
    package_info['type'] = 'dpkg'
    # There's no single debian source package as is the case
    # with RPM
    package_info['source'] = False
    try:
        os_dep.command('dpkg')
        # Build the command strings that will be used to get package info
        # a_cmd - Command to determine package architecture
        # v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
        a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
        v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
        i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>&1'

        package_info['system_support'] = True
        package_info['version'] = utils.system_output(v_cmd)
        package_info['arch'] = utils.system_output(a_cmd)
        # Checking if package is installed
        package_status = utils.system_output(i_cmd, ignore_status=True)
        not_inst_pattern = re.compile('not[ -]installed', re.IGNORECASE)
        dpkg_not_installed = re.search(not_inst_pattern, package_status)
        if dpkg_not_installed:
            package_info['installed'] = False
        else:
            package_info['installed'] = True
    except Exception:
        package_info['system_support'] = False
        package_info['installed'] = False
        # The output of file is not as generous for dpkg files as
        # it is with rpm files
        package_info['arch'] = 'Not Available'
        package_info['version'] = 'Not Available'

    return package_info
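
# Hedged usage sketch (illustrative, not part of the original module): how the
# dictionary returned by _dpkg_info() above is typically consumed. The helper
# name and the .deb path are assumptions added for illustration.
def _example_report_deb(dpkg_package='/tmp/foo_1.0-1_all.deb'):
    info = _dpkg_info(dpkg_package)
    if not info['system_support']:
        return 'dpkg not available on this system'
    state = 'installed' if info['installed'] else 'not installed'
    return '%s %s (%s): %s' % (info['version'], info['arch'],
                               info['type'], state)
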
def _dpkg_info(dpkg_package):
    """\
    Private function that returns a dictionary with information about a
    dpkg package file

    - type: Package management program that handles the file
    - system_support: If the package management program is installed on the
      system or not
    - source: If it is a source (True) or binary (False) package
    - version: The package version (or name), that is used to check against
      the package manager if the package is installed
    - arch: The architecture for which a binary package was built
    - installed: Whether the package is installed (True) on the system or
      not (False)
    """
    # We will make good use of what the file command has to tell us about the
    # package :)
    file_result = utils.system_output('file ' + dpkg_package)
    package_info = {}
    package_info['type'] = 'dpkg'
    # There's no single debian source package as is the case
    # with RPM
    package_info['source'] = False
    try:
        os_dep.command('dpkg')
        # Build the command strings that will be used to get package info
        # a_cmd - Command to determine package architecture
        # v_cmd - Command to determine package version
        # i_cmd - Command to determine if package is installed
        a_cmd = 'dpkg -f ' + dpkg_package + ' Architecture 2>/dev/null'
        v_cmd = 'dpkg -f ' + dpkg_package + ' Package 2>/dev/null'
        i_cmd = 'dpkg -s ' + utils.system_output(v_cmd) + ' 2>/dev/null'

        package_info['system_support'] = True
        package_info['version'] = utils.system_output(v_cmd)
        package_info['arch'] = utils.system_output(a_cmd)
        # Checking if package is installed
        package_status = utils.system_output(i_cmd, ignore_status=True)
        not_inst_pattern = re.compile('not-installed', re.IGNORECASE)
        dpkg_not_installed = re.search(not_inst_pattern, package_status)
        if dpkg_not_installed:
            package_info['installed'] = False
        else:
            package_info['installed'] = True
    except Exception:
        package_info['system_support'] = False
        package_info['installed'] = False
        # The output of file is not as generous for dpkg files as
        # it is with rpm files
        package_info['arch'] = 'Not Available'
        package_info['version'] = 'Not Available'

    return package_info
def setup(self, tarball='ffsb-6.0-rc2.tar.bz2'):
    """
    Uncompress the FFSB tarball and compile it.

    @param tarball: FFSB tarball. Could be either a path relative to
            self.srcdir or a URL.
    """
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    os_dep.command('gcc')
    utils.configure()
    utils.make()
def initialize(self):
    try:
        import pexpect
    except ImportError:
        raise error.TestError("Missing python library pexpect. You have to "
                              "install the package python-pexpect or the "
                              "equivalent for your distro")
    try:
        os_dep.command("nmap")
    except ValueError:
        raise error.TestError("Missing required command nmap. You have to "
                              "install the package nmap or the equivalent "
                              "for your distro")
def set_install_params(self, test, params):
    super(KojiInstaller, self).set_install_params(test, params)
    os_dep.command("rpm")
    os_dep.command("yum")

    self.tag = params.get("%s_tag" % self.param_key_prefix, None)
    self.koji_cmd = params.get("%s_cmd" % self.param_key_prefix, None)
    if self.tag is not None:
        utils_koji.set_default_koji_tag(self.tag)
    self.koji_pkgs = params.get("%s_pkgs" % self.param_key_prefix,
                                "").split()
    self.koji_scratch_pkgs = params.get("%s_scratch_pkgs" %
                                        self.param_key_prefix, "").split()
    self.koji_yumrepo_baseurl = params.get("%s_yumrepo_baseurl" %
                                           self.param_key_prefix, None)
    if self.install_debug_info:
        self._expand_koji_pkgs_with_debuginfo()
def _install_using_send_file(self, host, autodir):
    system_wide = True
    try:
        autotest_local = os_dep.command('autotest-local')
        autotest_local_streamhandler = os_dep.command(
            'autotest-local-streamhandler')
        autotest_daemon = os_dep.command('autotest-daemon')
        autotest_daemon_monitor = os_dep.command('autotest-daemon-monitor')
    except:
        system_wide = False

    dirs_to_exclude = set(["tests", "site_tests", "deps", "profilers"])
    light_files = [os.path.join(self.source_material, f)
                   for f in os.listdir(self.source_material)
                   if f not in dirs_to_exclude]
    if system_wide:
        light_files.append(autotest_local)
        light_files.append(autotest_local_streamhandler)
        light_files.append(autotest_daemon)
        light_files.append(autotest_daemon_monitor)

    # there should be one and only one grubby tarball
    grubby_glob = os.path.join(self.source_material,
                               "deps/grubby/grubby-*.tar.bz2")
    grubby_tarball_paths = glob.glob(grubby_glob)
    if grubby_tarball_paths:
        grubby_tarball_path = grubby_tarball_paths[0]
        if os.path.exists(grubby_tarball_path):
            light_files.append(grubby_tarball_path)

    host.send_file(light_files, autodir, delete_dest=True)

    profilers_autodir = os.path.join(autodir, 'profilers')
    profilers_init = os.path.join(self.source_material, 'profilers',
                                  '__init__.py')
    host.run("mkdir -p %s" % profilers_autodir)
    host.send_file(profilers_init, profilers_autodir, delete_dest=True)
    dirs_to_exclude.discard("profilers")

    # create empty dirs for all the stuff we excluded
    commands = []
    for path in dirs_to_exclude:
        abs_path = os.path.join(autodir, path)
        abs_path = utils.sh_escape(abs_path)
        commands.append("mkdir -p '%s'" % abs_path)
        commands.append("touch '%s'/__init__.py" % abs_path)
    host.run(';'.join(commands))
def set_install_params(self, test, params):
    super(KojiInstaller, self).set_install_params(test, params)
    os_dep.command("rpm")
    os_dep.command("yum")

    self.tag = params.get("%s_tag" % self.param_key_prefix, None)
    self.koji_cmd = params.get("%s_cmd" % self.param_key_prefix, None)
    if self.tag is not None:
        virt_utils.set_default_koji_tag(self.tag)
    self.koji_pkgs = params.get("%s_pkgs" % self.param_key_prefix,
                                "").split()
    self.koji_scratch_pkgs = params.get("%s_scratch_pkgs" %
                                        self.param_key_prefix, "").split()
    if self.install_debug_info:
        self._expand_koji_pkgs_with_debuginfo()
def service_avail(cmd):
    """
    Check the availability of the given init system (one of three supported).

    :param cmd: service name. Can be initctl, systemctl or initscripts
    :return: True if the init system is available, False if not.
    """
    if cmd in ['initctl', 'systemctl']:
        try:
            os_dep.command(cmd)
            return True
        except ValueError:
            return False
    elif cmd == 'initscripts':
        return os.path.exists('/etc/rc.d/init.d/libvirtd')
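
# Hedged usage sketch (illustrative, not part of the original module): picking
# an init system with service_avail() above. The helper name and the
# preference order are assumptions, not something the original code prescribes.
def _example_detect_init_system():
    for candidate in ('systemctl', 'initctl', 'initscripts'):
        if service_avail(candidate):
            return candidate
    return None
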
def __init__(self, *args, **dargs):
    """
    Initialization of SSH connection.

    (1). Call __init__ of class ConnectionBase.
    (2). Initialize the tools that will be used in connection setup.
    """
    init_dict = dict(*args, **dargs)
    init_dict['ssh_rsa_pub_path'] = init_dict.get('ssh_rsa_pub_path',
                                                  '/root/.ssh/id_rsa.pub')
    init_dict['ssh_id_rsa_path'] = init_dict.get('ssh_id_rsa_path',
                                                 '/root/.ssh/id_rsa')
    super(SSHConnection, self).__init__(init_dict)
    # set the tools for ssh setup.
    tool_dict = {'SSH_KEYGEN': 'ssh-keygen',
                 'SSH_ADD': 'ssh-add',
                 'SSH_COPY_ID': 'ssh-copy-id',
                 'SSH_AGENT': 'ssh-agent',
                 'SHELL': 'sh',
                 'SSH': 'ssh'}

    for key in tool_dict:
        toolName = tool_dict[key]
        try:
            tool = os_dep.command(toolName)
        except ValueError:
            logging.debug("%s executable not set or found on path, "
                          "some functions of the connection will fail.",
                          toolName)
            tool = '/bin/true'
        self.__dict_set__(key, tool)
def lgf_cmd_check(cmd):
    """
    Check whether the cmd is supported on this host.

    @param cmd: the cmd to use a libguestfs tool.
    @return: None if the cmd does not exist, otherwise return its path.
    """
    libguestfs_cmds = ['libguestfs-test-tool', 'guestfish', 'guestmount',
                       'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
                       'virt-copy-out', 'virt-df', 'virt-edit',
                       'virt-filesystems', 'virt-format', 'virt-inspector',
                       'virt-list-filesystems', 'virt-list-partitions',
                       'virt-ls', 'virt-make-fs', 'virt-rescue',
                       'virt-resize', 'virt-sparsify', 'virt-sysprep',
                       'virt-tar', 'virt-tar-in', 'virt-tar-out',
                       'virt-win-reg']

    if cmd not in libguestfs_cmds:
        raise LibguestfsCmdError(
            "Command %s is not supported by libguestfs yet." % cmd)

    try:
        return os_dep.command(cmd)
    except ValueError:
        logging.warning("You have not installed %s on this host.", cmd)
        return None
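
# Hedged usage sketch (illustrative, not part of the original module): probing
# which libguestfs tools are present with lgf_cmd_check() above before
# deciding what to test. The helper name is an assumption; the candidate tool
# names come from the list inside lgf_cmd_check().
def _example_available_lgf_tools(candidates=('virt-ls', 'virt-cat', 'virt-df')):
    # lgf_cmd_check() returns the tool path or None, so filtering on truth
    # keeps only the installed tools
    return [cmd for cmd in candidates if lgf_cmd_check(cmd)]
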
def setup(self, tarball='ffsb-6.0-rc2.tar.bz2'):
    """
    Uncompress the FFSB tarball and compile it.

    @param tarball: FFSB tarball. Could be either a path relative to
            self.srcdir or a URL.
    """
    profile_src = os.path.join(self.bindir, 'profile.cfg.sample')
    profile_dst = os.path.join(os.path.dirname(self.srcdir), 'profile.cfg')
    shutil.copyfile(profile_src, profile_dst)
    tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
    utils.extract_tarball_to_dir(tarball, self.srcdir)
    os.chdir(self.srcdir)
    os_dep.command('gcc')
    utils.configure()
    utils.make()
def untar_pkg(self, tarball_path, dest_dir):
    '''
    Untar the package present in the tarball_path and put a ".checksum" file
    in the dest_dir containing the checksum of the tarball. This method
    assumes that the package to be untarred is of the form <name>.tar.bz2
    '''
    os_dep.command("tar")
    self._run_command('tar xjf %s -C %s' % (tarball_path, dest_dir))
    # Put the .checksum file in the install_dir to note
    # where the package came from
    pkg_checksum = self.compute_checksum(tarball_path)
    pkg_checksum_path = os.path.join(dest_dir, '.checksum')
    self._run_command('echo "%s" > %s ' % (pkg_checksum, pkg_checksum_path))
def initialize(self, **dargs):
    """
    Gets path of kvm_stat and verifies if debugfs needs to be mounted.
    """
    self.is_enabled = False
    kvm_stat_installed = False
    try:
        self.stat_path = os_dep.command('kvm_stat')
        kvm_stat_installed = True
    except ValueError:
        logging.error('Command kvm_stat not present')

    if kvm_stat_installed:
        try:
            utils.run("%s --batch" % self.stat_path)
            self.is_enabled = True
        except error.CmdError, e:
            if 'debugfs' in str(e):
                try:
                    utils.run('mount -t debugfs debugfs /sys/kernel/debug')
                except error.CmdError, e:
                    logging.error('Failed to mount debugfs:\n%s', str(e))
            else:
                logging.error('Failed to execute kvm_stat:\n%s', str(e))
def __init__(self, uri, branch='master', lbranch=None, commit=None,
             destination_dir=None, base_uri=None):
    '''
    Instantiates a new GitRepoHelper

    :type uri: string
    :param uri: git repository url
    :type branch: string
    :param branch: git remote branch
    :type destination_dir: string
    :param destination_dir: path of a dir where to save downloaded code
    :type commit: string
    :param commit: specific commit to download
    :type lbranch: string
    :param lbranch: git local branch name, if different from remote
    :type base_uri: string
    :param base_uri: a closer, usually local, git repository url from where
                     to fetch content first
    '''
    self.uri = uri
    self.base_uri = base_uri
    self.branch = branch
    self.commit = commit

    if destination_dir is None:
        uri_basename = uri.split("/")[-1]
        self.destination_dir = os.path.join("/tmp", uri_basename)
    else:
        self.destination_dir = destination_dir

    if lbranch is None:
        self.lbranch = branch
    else:
        self.lbranch = lbranch

    self.cmd = os_dep.command('git')
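
# Hedged usage sketch (illustrative, not part of the original class): a
# typical construction of GitRepoHelper per the constructor above. Only the
# constructor is exercised; the URL, destination and helper name are example
# values, and git must be installed since __init__ resolves it via os_dep.
def _example_git_repo_helper():
    return GitRepoHelper(uri='git://github.com/example/example.git',
                         branch='master',
                         destination_dir='/tmp/example')
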
def lgf_cmd_check(cmd):
    """
    Check whether the cmd is supported on this host.

    @param cmd: the cmd to use a libguestfs tool.
    @return: None if the cmd does not exist, otherwise return its path.
    """
    libguestfs_cmds = ['libguestfs-test-tool', 'guestfish', 'guestmount',
                       'virt-alignment-scan', 'virt-cat', 'virt-copy-in',
                       'virt-copy-out', 'virt-df', 'virt-edit',
                       'virt-filesystems', 'virt-format', 'virt-inspector',
                       'virt-list-filesystems', 'virt-list-partitions',
                       'virt-ls', 'virt-make-fs', 'virt-rescue',
                       'virt-resize', 'virt-sparsify', 'virt-sysprep',
                       'virt-tar', 'virt-tar-in', 'virt-tar-out',
                       'virt-win-reg']

    if cmd not in libguestfs_cmds:
        raise LibguestfsCmdError(
            "Command %s is not supported by libguestfs yet." % cmd)

    try:
        return os_dep.command(cmd)
    except ValueError:
        logging.warning("You have not installed %s on this host.", cmd)
        return None
def run(test, params, env):
    """
    Test for virt-xml-validate
    """
    # Get the full path of the virt-xml-validate command.
    VIRT_XML_VALIDATE = os_dep.command("virt-xml-validate")

    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)

    schema = params.get("schema", "domain")
    output = params.get("output_file", "output")
    output_path = os.path.join(data_dir.get_tmp_dir(), output)

    if schema == "domain":
        virsh.dumpxml(vm_name, to_file=output_path)
    # TODO: add more cases for other schemas.

    cmd = "%s %s %s" % (VIRT_XML_VALIDATE, output_path, schema)
    cmd_result = utils.run(cmd, ignore_status=True)
    if cmd_result.exit_status:
        raise error.TestFail("virt-xml-validate command failed.\n"
                             "Detail: %s." % cmd_result)

    if cmd_result.stdout.count("fail"):
        raise error.TestFail("The XML failed to validate.\n"
                             "Detail: %s." % cmd_result)