Beispiel #1
0
    def run_once(self, test='antlr', config='./dacapo.cfg', jvm='default'):
        """
        Run a single dacapo benchmark.

        :param test: Name of the dacapo benchmark to run.
        :param config: Path to the dacapo configuration file.
        :param jvm: JVM to use. 'default' keeps the system JVM; any other
                value is looked up as a section in the config file and the
                matching JVM package is downloaded and installed.
        :raise error.TestError: If the benchmark command fails.
        """
        cfg = config_loader(cfg=config, tmpdir=self.tmpdir, raise_errors=True)
        self.test = test
        cachedir = os.path.join(os.path.dirname(self.srcdir), 'cache')
        if not os.path.isdir(cachedir):
            os.makedirs(cachedir)

        dacapo_url = cfg.get('dacapo', 'tarball_url')
        dacapo_md5 = cfg.get('dacapo', 'package_md5')
        dacapo_pkg = utils.unmap_url_cache(cachedir, dacapo_url, dacapo_md5)

        if jvm != 'default':
            # Get, install and configure the non-default JVM package.
            jvm_pkg_url = cfg.get(jvm, 'jvm_pkg_url')
            jvm_pkg_md5 = cfg.get(jvm, 'package_md5')
            jvm_pkg = utils.unmap_url_cache(cachedir, jvm_pkg_url, jvm_pkg_md5)
            swman = software_manager.SoftwareManager()
            swman.install(jvm_pkg)
            # Basic Java environment variables setup
            java_root = cfg.get(jvm, 'java_root')
            self.set_java_environment(jvm, java_root)

        if cfg.get('global', 'use_global') == 'yes':
            iterations = cfg.get('global', 'iterations')
            workload = cfg.get('global', 'workload')
        else:
            iterations = cfg.get(test, 'iterations')
            workload = cfg.get(test, 'workload')

        verbose = '-v '
        workload = '-s %s ' % workload
        iterations = '-n %s ' % iterations
        self.scratch = os.path.join(self.resultsdir, test)
        scratch = '--scratch-directory %s ' % self.scratch
        args = verbose + workload + scratch + iterations + test

        self.raw_result_file = os.path.join(self.resultsdir,
                                            'raw_output_%s' % self.iteration)

        logging.info('Running dacapo benchmark %s', test)
        cmd = 'java -jar %s %s' % (dacapo_pkg, args)
        raw_result = open(self.raw_result_file, 'w')
        try:
            results = utils.run(command=cmd,
                                stdout_tee=raw_result,
                                stderr_tee=raw_result)
            self.results = results.stderr
        except error.CmdError as e:
            # 'except ... as' is valid in Python 2.6+ and Python 3
            # (the original 'except X, e' form is Python 2 only).
            raise error.TestError('Dacapo benchmark %s has failed: %s' %
                                  (test, e))
        finally:
            # Close the file even when utils.run() raises; the original
            # leaked the handle on failure.
            raw_result.close()
Beispiel #2
0
    def run_once(self, test='antlr', config='./dacapo.cfg', jvm='default'):
        """
        Run a single dacapo benchmark.

        :param test: Name of the dacapo benchmark to run.
        :param config: Path to the dacapo configuration file.
        :param jvm: JVM to use. 'default' keeps the system JVM; any other
                value selects a config-file section describing the JVM
                package to download and install.
        :raise error.TestError: If the benchmark command fails.
        """
        cfg = config_loader(cfg=config, tmpdir=self.tmpdir, raise_errors=True)
        self.test = test
        cachedir = os.path.join(os.path.dirname(self.srcdir), 'cache')
        if not os.path.isdir(cachedir):
            os.makedirs(cachedir)

        dacapo_url = cfg.get('dacapo', 'tarball_url')
        dacapo_md5 = cfg.get('dacapo', 'package_md5')
        dacapo_pkg = utils.unmap_url_cache(cachedir, dacapo_url, dacapo_md5)

        if jvm != 'default':
            # Get, install and configure the non-default JVM package.
            jvm_pkg_url = cfg.get(jvm, 'jvm_pkg_url')
            jvm_pkg_md5 = cfg.get(jvm, 'package_md5')
            jvm_pkg = utils.unmap_url_cache(cachedir, jvm_pkg_url, jvm_pkg_md5)
            swman = software_manager.SoftwareManager()
            swman.install(jvm_pkg)
            # Basic Java environment variables setup
            java_root = cfg.get(jvm, 'java_root')
            self.set_java_environment(jvm, java_root)

        if cfg.get('global', 'use_global') == 'yes':
            iterations = cfg.get('global', 'iterations')
            workload = cfg.get('global', 'workload')
        else:
            iterations = cfg.get(test, 'iterations')
            workload = cfg.get(test, 'workload')

        verbose = '-v '
        workload = '-s %s ' % workload
        iterations = '-n %s ' % iterations
        self.scratch = os.path.join(self.resultsdir, test)
        scratch = '--scratch-directory %s ' % self.scratch
        args = verbose + workload + scratch + iterations + test

        self.raw_result_file = os.path.join(self.resultsdir,
                                            'raw_output_%s' % self.iteration)

        logging.info('Running dacapo benchmark %s', test)
        cmd = 'java -jar %s %s' % (dacapo_pkg, args)
        raw_result = open(self.raw_result_file, 'w')
        try:
            results = utils.run(command=cmd, stdout_tee=raw_result,
                                stderr_tee=raw_result)
            self.results = results.stderr
        except error.CmdError as e:
            # Python 3 compatible except syntax (was 'except X, e').
            raise error.TestError('Dacapo benchmark %s has failed: %s' %
                                  (test, e))
        finally:
            # Ensure the raw output file is closed even on failure; the
            # original only closed it on the success path.
            raw_result.close()
Beispiel #3
0
    def setup(self, tarball_base='linux-2.6.18.tar.bz2', parallel=True):
        """
        Downloads a copy of the linux kernel, calculate an estimated size of
        the uncompressed tarball, use this value to calculate the number of
        copies of the linux kernel that will be uncompressed.

            :param tarball_base: Name of the kernel tarball location that will
            be looked up on the kernel.org mirrors.
            :param parallel: If we are going to uncompress the copies of the
            kernel in parallel or not
        """
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        self.parallel = parallel

        kernel_repo = 'http://www.kernel.org/pub/linux/kernel/v2.6'
        tarball_url = os.path.join(kernel_repo, tarball_base)
        tarball_md5 = '296a6d150d260144639c3664d127d174'
        logging.info('Downloading linux kernel tarball')
        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
                                             tarball_md5)
        size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        # Lazy %-args: the message is only formatted if INFO is enabled.
        logging.info('Source file: %s', tarball_base)
        logging.info('Megabytes per copy: %s', size_tarball)
        logging.info('Compress ratio: %s', compress_ratio)
        logging.info('Estimated size after uncompression: %s', est_size)
        logging.info('Number of copies: %s', self.sim_cps)
        logging.info('Parallel: %s', parallel)
Beispiel #4
0
    def setup(self, tarball_base='linux-2.6.18.tar.bz2', parallel=True):
        """
        Fetch a linux kernel tarball and work out how many copies of the
        uncompressed tree will be used.

            :param tarball_base: Kernel tarball file name, resolved against
            the kernel.org mirror tree.
            :param parallel: Whether the kernel copies are to be
            uncompressed in parallel.
        """
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        self.parallel = parallel

        mirror = 'http://www.kernel.org/pub/linux/kernel/v2.6'
        url = os.path.join(mirror, tarball_base)
        md5 = '296a6d150d260144639c3664d127d174'
        logging.info('Downloading linux kernel tarball')
        self.tarball = utils.unmap_url_cache(self.cachedir, url, md5)
        megabytes = os.path.getsize(self.tarball) / 1024 / 1024
        # Assume the uncompressed tree is roughly 5x the tarball size.
        ratio = 5
        estimated = megabytes * ratio
        self.sim_cps = self.get_sim_cps(estimated)
        for label, value in (('Source file', tarball_base),
                             ('Megabytes per copy', megabytes),
                             ('Compress ratio', ratio),
                             ('Estimated size after uncompression', estimated),
                             ('Number of copies', self.sim_cps),
                             ('Parallel', parallel)):
            logging.info('%s: %s' % (label, value))
    def setup(self, tarball_base='linux-2.6.18.tar.bz2', parallel=True):
        """
        Downloads a fixed Ubuntu linux kernel tarball, calculates an
        estimated size of the uncompressed tree and uses this value to
        calculate the number of copies of the kernel to uncompress.

            :param tarball_base: Historical kernel.org tarball name.
            NOTE(review): currently unused for the download itself -- a
            fixed Ubuntu tarball is fetched instead; confirm this is
            intentional before removing the parameter.
            :param parallel: If we are going to uncompress the copies of the
            kernel in parallel or not
        """
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        self.parallel = parallel

        # The kernel.org URL previously built from tarball_base was dead
        # code: it was unconditionally overwritten by this fixed URL.
        tarball_url = 'https://launchpad.net/ubuntu/+archive/primary/+files/linux_2.6.38.orig.tar.gz'
        tarball_md5 = 'cf0b587742611328f095da4b329e9fc7'
        logging.info('Downloading linux kernel tarball')
        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
                                             tarball_md5)
        size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        # Lazy %-args avoid formatting when INFO is disabled.
        logging.info('Source file: %s', tarball_base)
        logging.info('Megabytes per copy: %s', size_tarball)
        logging.info('Compress ratio: %s', compress_ratio)
        logging.info('Estimated size after uncompression: %s', est_size)
        logging.info('Number of copies: %s', self.sim_cps)
        logging.info('Parallel: %s', parallel)
    def setup(self, tarball_base='linux-2.6.18.tar.bz2', parallel=True):
        """
        Downloads a fixed Ubuntu linux kernel tarball, calculates an
        estimated size of the uncompressed tree and uses this value to
        calculate the number of copies of the kernel to uncompress.

            :param tarball_base: Historical kernel.org tarball name.
            NOTE(review): currently unused for the download itself -- a
            fixed Ubuntu tarball is fetched instead; confirm this is
            intentional before removing the parameter.
            :param parallel: If we are going to uncompress the copies of the
            kernel in parallel or not
        """
        if not os.path.isdir(self.cachedir):
            os.makedirs(self.cachedir)
        self.parallel = parallel

        # The kernel.org URL previously built from tarball_base was dead
        # code: it was unconditionally overwritten by this fixed URL.
        tarball_url = 'https://launchpad.net/ubuntu/+archive/primary/+files/linux_2.6.38.orig.tar.gz'
        tarball_md5 = 'cf0b587742611328f095da4b329e9fc7'
        logging.info('Downloading linux kernel tarball')
        self.tarball = utils.unmap_url_cache(self.cachedir, tarball_url,
                                             tarball_md5)
        size_tarball = os.path.getsize(self.tarball) / 1024 / 1024
        # Estimation of the tarball size after uncompression
        compress_ratio = 5
        est_size = size_tarball * compress_ratio
        self.sim_cps = self.get_sim_cps(est_size)
        # Lazy %-args avoid formatting when INFO is disabled.
        logging.info('Source file: %s', tarball_base)
        logging.info('Megabytes per copy: %s', size_tarball)
        logging.info('Compress ratio: %s', compress_ratio)
        logging.info('Estimated size after uncompression: %s', est_size)
        logging.info('Number of copies: %s', self.sim_cps)
        logging.info('Parallel: %s', parallel)
Beispiel #7
0
    def __init__(self, address, netperf_path, md5sum="", local_path="",
                 client="ssh", port="22", username="******", password="******",
                 check_command=None):
        """
        Class NetperfPackage just represent the netperf package
        Init NetperfPackage class.

        :param address: Remote host or guest address
        :param netperf_path: Remote netperf path
        :param md5sum: Local netperf package md5sum
        :param local_path: Local netperf (path or link) path
        :param client: The client to use ('ssh', 'telnet' or 'nc')
        :param port: Port to connect to
        :param username: Username (if required)
        :param password: Password (if required)
        :param check_command: Command used to check whether netperf is
                already available remotely; when it succeeds, the package
                is not built/pushed again.
        """
        super(NetperfPackage, self).__init__(address, client, username,
                                             password, port, netperf_path)

        self.local_netperf = local_path
        self.pack_suffix = ""
        if client == "nc":
            # Windows-style prompt and line separator.
            self.prompt = r"^\w:\\.*>\s*$"
            self.linesep = "\r\n"
        else:
            self.prompt = r"^\[.*\][\#\$]\s*$"
            self.linesep = "\n"
            if self.remote_path.endswith("tar.bz2"):
                self.pack_suffix = ".tar.bz2"
                self.decomp_cmd = "tar jxvf"
            elif self.remote_path.endswith("tar.gz"):
                self.pack_suffix = ".tar.gz"
                self.decomp_cmd = "tar zxvf"

            # str.rstrip() removes a character *set*, not a suffix, so the
            # original 'rstrip(self.pack_suffix)' could corrupt archive
            # names; slice the suffix off instead.
            if self.pack_suffix:
                self.netperf_dir = self.remote_path[:-len(self.pack_suffix)]
            else:
                self.netperf_dir = self.remote_path
            self.netperf_base_dir = os.path.dirname(self.remote_path)
            self.netperf_exec = os.path.basename(self.remote_path)

        logging.debug("Create remote session")
        self.session = remote.remote_login(self.client, self.address,
                                           self.port, self.username,
                                           self.password, self.prompt,
                                           self.linesep, timeout=360)

        self.build_tool = True
        if check_command:
            netperf_status = self.session.cmd_status("which %s" %
                                                     check_command)
            if netperf_status == 0:
                self.build_tool = False

        if self.build_tool:
            if utils.is_url(local_path):
                logging.debug("Download URL file to local path")
                tmp_dir = data_dir.get_download_dir()
                self.local_netperf = utils.unmap_url_cache(tmp_dir, local_path,
                                                           md5sum)
            self.push_file(self.local_netperf)
Beispiel #8
0
    def install_stress_app(self):
        """
        Install the stress application on the host unless the usage banner
        shows it is already present.

        :raise StressError: If the package download fails.
        """
        error.context("install stress app on host")
        output = utils.run(self.app_check_cmd, ignore_status=True).stdout
        installed = re.search("Usage:", output)
        if installed:
            logging.debug("Stress has been installed.")
            return

        try:
            pkg = utils.unmap_url_cache(self.tmp_dir, self.link, self.md5sum)
        except Exception as detail:
            # Python 3 compatible except syntax (was 'except X, detail').
            raise StressError(str(detail))
    def install_stress_app(self):
        """
        Install the stress application on the host, skipping the step when
        the usage banner shows it is already installed.

        :raise StressError: If the package cannot be downloaded.
        """
        error.context("install stress app on host")
        output = utils.run(self.app_check_cmd, ignore_status=True).stdout
        installed = re.search("Usage:", output)
        if installed:
            logging.debug("Stress has been installed.")
            return

        try:
            pkg = utils.unmap_url_cache(self.tmp_dir, self.link, self.md5sum)
        except Exception as detail:
            # Python 3 compatible except syntax (was 'except X, detail').
            raise StressError(str(detail))
    def pull_file(self, netperf_source=None):
        """
        Copy file from remote to local.
        """

        if not utils.is_url(netperf_source):
            self.netperf_source = netperf_source
        else:
            logging.debug("Download URL file to local path")
            download_dir = data_dir.get_download_dir()
            self.netperf_source = utils.unmap_url_cache(download_dir,
                                                        netperf_source,
                                                        self.md5sum)
        return self.netperf_source
Beispiel #11
0
    def install_stress_app(self):
        """
        Install the stress application in the guest unless the check
        command reports it is already present.

        :raise StressError: If the package download fails.
        """
        error.context("install stress app in guest")
        session = self.get_session()
        installed = session.cmd_status(self.params.get("app_check_cmd")) == 0
        if installed:
            logging.debug("Stress has been installed.")
            return

        try:
            pkg = utils.unmap_url_cache(data_dir.get_download_dir(),
                                        self.link, self.md5sum)
        except Exception as detail:
            # Python 3 compatible except syntax (was 'except X, detail').
            raise StressError(str(detail))
Beispiel #12
0
    def pull_file(self, netperf_source=None):
        """
        Copy file from remote to local.
        """

        if utils.is_url(netperf_source):
            logging.debug("Download URL file to local path")
            cache_dir = data_dir.get_download_dir()
            fetched = utils.unmap_url_cache(cache_dir, netperf_source,
                                            self.md5sum)
            self.netperf_source = fetched
            return fetched
        self.netperf_source = netperf_source
        return self.netperf_source
Beispiel #13
0
    def install_stress_app(self):
        """
        Install the stress application in the guest unless the usage
        banner shows it is already installed.

        :raise StressError: If the package download fails.
        """
        error.context("install stress app in guest")
        session = self.get_session()
        _, output = session.cmd_status_output(self.app_check_cmd)
        installed = re.search("Usage:", output)
        if installed:
            logging.debug("Stress has been installed.")
            return

        try:
            pkg = utils.unmap_url_cache(data_dir.get_download_dir(), self.link,
                                        self.md5sum)
        except Exception as detail:
            # Python 3 compatible except syntax (was 'except X, detail').
            raise StressError(str(detail))
    def install_stress_app(self):
        """
        Install the stress application in the guest, skipping the step
        when the usage banner shows it is already installed.

        :raise StressError: If the package download fails.
        """
        error.context("install stress app in guest")
        session = self.get_session()
        _, output = session.cmd_status_output(self.app_check_cmd)
        installed = re.search("Usage:", output)
        if installed:
            logging.debug("Stress has been installed.")
            return

        try:
            pkg = utils.unmap_url_cache(data_dir.get_download_dir(),
                                        self.link, self.md5sum)
        except Exception as detail:
            # Python 3 compatible except syntax (was 'except X, detail').
            raise StressError(str(detail))
Beispiel #15
0
    def __init__(self, address, netperf_path, md5sum="", local_path="",
                 client="ssh", port="22", username="******", password="******"):
        """
        Class NetperfPackage just represent the netperf package
        Init NetperfPackage class.

        :param address: Remote host or guest address
        :param netperf_path: Remote netperf path
        :param md5sum: Local netperf package md5sum
        :param local_path: Local netperf (path or link) path
        :param client: The client to use ('ssh', 'telnet' or 'nc')
        :param port: Port to connect to
        :param username: Username (if required)
        :param password: Password (if required)
        """
        super(NetperfPackage, self).__init__(address, client, username,
                                             password, port, netperf_path)

        self.local_netperf = local_path
        self.pack_suffix = ""
        if client == "nc":
            # Windows-style prompt and line separator.
            self.prompt = r"^\w:\\.*>\s*$"
            self.linesep = "\r\n"
        else:
            self.prompt = r"^\[.*\][\#\$]\s*$"
            self.linesep = "\n"
            if self.remote_path.endswith("tar.bz2"):
                self.pack_suffix = ".tar.bz2"
                self.decomp_cmd = "tar jxvf"
            elif self.remote_path.endswith("tar.gz"):
                self.pack_suffix = ".tar.gz"
                self.decomp_cmd = "tar zxvf"

            # str.rstrip() removes a character *set*, not a suffix, so the
            # original 'rstrip(self.pack_suffix)' could corrupt archive
            # names; slice the suffix off instead.
            if self.pack_suffix:
                self.netperf_dir = self.remote_path[:-len(self.pack_suffix)]
            else:
                self.netperf_dir = self.remote_path
            self.netperf_base_dir = os.path.dirname(self.remote_path)
            self.netperf_exec = os.path.basename(self.remote_path)

        if utils.is_url(local_path):
            logging.debug("Download URL file to local path")
            tmp_dir = data_dir.get_download_dir()
            self.local_netperf = utils.unmap_url_cache(tmp_dir, local_path,
                                                       md5sum)
        self.push_file(self.local_netperf)

        logging.debug("Create remote session")
        self.session = remote.remote_login(self.client, self.address,
                                           self.port, self.username,
                                           self.password, self.prompt,
                                           self.linesep, timeout=360)
Beispiel #16
0
    def env_setup(session, ip, user, port, password):
        """
        Prepare the environment on the machine at ``ip``: flush/stop the
        firewall, tweak ARP behaviour, and deploy the netperf package plus
        the agent script.

        NOTE(review): ``user`` and ``port`` are unused; the body relies on
        ``username``/``shell_port`` (and ``params``, ``test``) from the
        enclosing scope -- confirm this is intentional.
        """
        error.context("Setup env for %s" % ip)
        # '; true' keeps ssh_cmd from failing where iptables is absent.
        ssh_cmd(session, "iptables -F; true")
        ssh_cmd(session, "service iptables stop; true")
        ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")

        download_link = params.get("netperf_download_link")
        download_dir = data_dir.get_download_dir()
        md5sum = params.get("pkg_md5sum")
        # Fetch (and cache) the netperf package locally, then copy it over.
        pkg = utils.unmap_url_cache(download_dir, download_link, md5sum)
        remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
        ssh_cmd(session, params.get("setup_cmd"))

        # Also push the netperf agent helper script into the machine.
        agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
        remote.scp_to_remote(ip, shell_port, username, password, agent_path, "/tmp")
Beispiel #17
0
    def env_setup(session, ip, user, port, password):
        """
        Prepare the environment on the machine at ``ip``: flush/stop the
        firewall, tweak ARP behaviour, and deploy the netperf package plus
        the agent script.

        NOTE(review): ``user`` and ``port`` are unused; the body relies on
        ``username``/``shell_port`` (and ``params``, ``test``) from the
        enclosing scope -- confirm this is intentional.
        """
        error.context("Setup env for %s" % ip)
        # '; true' keeps ssh_cmd from failing where iptables is absent.
        ssh_cmd(session, "iptables -F; true")
        ssh_cmd(session, "service iptables stop; true")
        ssh_cmd(session, "echo 1 > /proc/sys/net/ipv4/conf/all/arp_ignore")

        download_link = params.get("netperf_download_link")
        download_dir = data_dir.get_download_dir()
        md5sum = params.get("pkg_md5sum")
        # Fetch (and cache) the netperf package locally, then copy it over.
        pkg = utils.unmap_url_cache(download_dir, download_link, md5sum)
        remote.scp_to_remote(ip, shell_port, username, password, pkg, "/tmp")
        ssh_cmd(session, params.get("setup_cmd"))

        # Also push the netperf agent helper script into the machine.
        agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
        remote.scp_to_remote(ip, shell_port, username, password,
                             agent_path, "/tmp")
Beispiel #18
0
 def env_setup(session, ip_addr, username, shell_port, password):
     """
     Test env setup: stop the firewall on the remote side and deploy every
     configured netperf package there.

     NOTE(review): relies on ``params`` from the enclosing scope.
     """
     error.context("Setup env for %s" % ip_addr)
     # '; true' keeps the command from failing where iptables is absent.
     ssh_cmd(session, "service iptables stop; true")
     netperf_links = params["netperf_links"].split()
     remote_dir = params.get("remote_dir", "/var/tmp")
     for netperf_link in netperf_links:
         if utils.is_url(netperf_link):
             # URLs are downloaded (and cached) locally first.
             download_dir = data_dir.get_download_dir()
             md5sum = params.get("pkg_md5sum")
             netperf_dir = utils.unmap_url_cache(download_dir, netperf_link,
                                                 md5sum)
         elif netperf_link:
             # Plain names are resolved inside the shared data directory.
             netperf_dir = os.path.join(data_dir.get_root_dir(),
                                        "shared/%s" % netperf_link)
         remote.scp_to_remote(ip_addr, shell_port, username, password,
                              netperf_dir, remote_dir)
     ssh_cmd(session, params.get("setup_cmd"))
 def install_stress_app(self):
     """
     Download the stress package, copy it into the guest, then install and
     configure it there.

     :raise error.TestError: If the install or configure step fails.
     """
     error.context("install stress app in guest")
     params = self.parser_test_args()
     session = self.get_session()
     installed = session.cmd_status(params.get("app_check_cmd")) == 0
     if installed:
         return
     link = params.get("download_link")
     md5sum = params.get("md5sum")
     tmp_dir = params.get("tmp_dir")
     install_cmd = params.get("install_cmd") % tmp_dir
     config_cmd = params.get("config_cmd")
     pkg = utils.unmap_url_cache(self.test.tmpdir, link, md5sum)
     self.vm.copy_files_to(pkg, tmp_dir)
     s, o = session.cmd_status_output(install_cmd, timeout=300)
     if s != 0:
         raise error.TestError("Fail to install stress app(%s)" % o)
     s, o = session.cmd_status_output(config_cmd, timeout=300)
     if s != 0:
         # Message typo fixed ('conifg' -> 'config').
         raise error.TestError("Fail to config stress app(%s)" % o)
 def env_setup(session, ip_addr, username, shell_port, password):
     """
     Test env setup: stop the firewall on the remote side and deploy every
     configured netperf package there.

     NOTE(review): relies on ``params`` from the enclosing scope.
     """
     error.context("Setup env for %s" % ip_addr)
     # '; true' keeps the command from failing where iptables is absent.
     ssh_cmd(session, "service iptables stop; true")
     netperf_links = params["netperf_links"].split()
     remote_dir = params.get("remote_dir", "/var/tmp")
     for netperf_link in netperf_links:
         if utils.is_url(netperf_link):
             # URLs are downloaded (and cached) locally first.
             download_dir = data_dir.get_download_dir()
             md5sum = params.get("pkg_md5sum")
             netperf_dir = utils.unmap_url_cache(download_dir,
                                                 netperf_link, md5sum)
         elif netperf_link:
             # Plain names are resolved inside the shared data directory.
             netperf_dir = os.path.join(data_dir.get_root_dir(),
                                        "shared/%s" % netperf_link)
         remote.scp_to_remote(ip_addr, shell_port, username, password,
                              netperf_dir, remote_dir)
     ssh_cmd(session, params.get("setup_cmd"))
Beispiel #21
0
 def install_stress_app(self):
     """
     Fetch, install and configure the stress application in the guest.

     :return: True when the app is already installed, otherwise None.
     :raise error.TestError: If the install or configure command fails.
     """
     params = self.parser_test_args()
     session = self.get_session()
     if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
         return True
     error.context("install stress app in guest", logging.info)
     link = params.get("download_link")
     md5sum = params.get("pkg_md5sum")
     tmp_dir = params.get("tmp_dir")
     install_cmd = params.get("install_cmd")
     config_cmd = params.get("config_cmd")
     # Lazy %-args: formatted only when INFO is enabled.
     logging.info("Fetch package: %s", link)
     pkg = utils.unmap_url_cache(self.test.tmpdir, link, md5sum)
     self.vm.copy_files_to(pkg, tmp_dir)
     logging.info("Install app: %s", install_cmd)
     s, o = session.cmd_status_output(install_cmd, timeout=300)
     if s != 0:
         raise error.TestError("Fail to install stress app(%s)" % o)
     logging.info("Configure app: %s", config_cmd)
     s, o = session.cmd_status_output(config_cmd, timeout=300)
     if s != 0:
         # Message typo fixed ('conifg' -> 'config').
         raise error.TestError("Fail to config stress app(%s)" % o)
Beispiel #22
0
 def install_stress_app(self):
     """
     Fetch, install and configure the stress application in the guest.

     :return: True when the app is already installed, otherwise None.
     :raise error.TestError: If the install or configure command fails.
     """
     params = self.parser_test_args()
     session = self.get_session()
     if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
         return True
     error.context("install stress app in guest", logging.info)
     link = params.get("download_link")
     md5sum = params.get("pkg_md5sum")
     tmp_dir = params.get("tmp_dir")
     install_cmd = params.get("install_cmd")
     config_cmd = params.get("config_cmd")
     # Lazy %-args: formatted only when INFO is enabled.
     logging.info("Fetch package: %s", link)
     pkg = utils.unmap_url_cache(self.test.tmpdir, link, md5sum)
     self.vm.copy_files_to(pkg, tmp_dir)
     logging.info("Install app: %s", install_cmd)
     s, o = session.cmd_status_output(install_cmd, timeout=300)
     if s != 0:
         raise error.TestError("Fail to install stress app(%s)" % o)
     logging.info("Configure app: %s", config_cmd)
     s, o = session.cmd_status_output(config_cmd, timeout=300)
     if s != 0:
         # Message typo fixed ('conifg' -> 'config').
         raise error.TestError("Fail to config stress app(%s)" % o)
Beispiel #23
0
    def install_stress_app(session):
        """
        Install stress app in guest, skipping the step when it is already
        present.

        NOTE(review): relies on ``params``, ``test`` and ``vm`` from the
        enclosing scope.

        :raise error.TestError: If the install command fails.
        """
        if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
            logging.info("Stress app already installed in guest.")
            return

        link = params.get("download_link")
        md5sum = params.get("pkg_md5sum")
        tmp_dir = params.get("tmp_dir", "/var/tmp")
        install_cmd = params.get("install_cmd")

        # Lazy %-args: formatted only when INFO is enabled.
        logging.info("Fetch package: '%s'", link)
        pkg = utils.unmap_url_cache(test.tmpdir, link, md5sum)
        vm.copy_files_to(pkg, tmp_dir)

        logging.info("Install app: '%s' in guest.", install_cmd)
        s, o = session.cmd_status_output(install_cmd, timeout=300)
        if s != 0:
            raise error.TestError("Fail to install stress app(%s)" % o)

        # Message typo fixed ('successed' -> 'succeeded').
        logging.info("Install app succeeded")
Beispiel #24
0
    def install_stress_app(session):
        """
        Install stress app in guest, skipping the step when it is already
        present.

        NOTE(review): relies on ``params``, ``test`` and ``vm`` from the
        enclosing scope.

        :raise error.TestError: If the install command fails.
        """
        if session.cmd_status(params.get("app_check_cmd", "true")) == 0:
            logging.info("Stress app already installed in guest.")
            return

        link = params.get("download_link")
        md5sum = params.get("pkg_md5sum")
        tmp_dir = params.get("tmp_dir", "/var/tmp")
        install_cmd = params.get("install_cmd")

        # Lazy %-args: formatted only when INFO is enabled.
        logging.info("Fetch package: '%s'", link)
        pkg = utils.unmap_url_cache(test.tmpdir, link, md5sum)
        vm.copy_files_to(pkg, tmp_dir)

        logging.info("Install app: '%s' in guest.", install_cmd)
        s, o = session.cmd_status_output(install_cmd, timeout=300)
        if s != 0:
            raise error.TestError("Fail to install stress app(%s)" % o)

        # Message typo fixed ('successed' -> 'succeeded').
        logging.info("Install app succeeded")
Beispiel #25
0
def run_performance(test, params, env):
    """
    KVM performance test:

    The idea is similar to 'client/tests/kvm/tests/autotest.py',
    but we can implement some special requests for performance
    testing.

    @param test: QEMU test object
    @param params: Dictionary with the test parameters
    @param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # Test knobs, all taken from the params dictionary.
    test_timeout = int(params.get("test_timeout", 240))
    monitor_cmd = params["monitor_cmd"]
    login_timeout = int(params.get("login_timeout", 360))
    test_cmd = params["test_cmd"]
    guest_path = params.get("result_path", "/tmp/guest_result")
    test_src = params["test_src"]
    test_patch = params.get("test_patch")

    # Prepare test environment in guest
    session = vm.wait_for_login(timeout=login_timeout)

    # NOTE(review): 'prefix' is computed but never used below -- confirm
    # whether it can be dropped.
    prefix = test.outputdir.split(".performance.")[0]
    summary_results = params.get("summary_results")
    guest_ver = session.cmd_output("uname -r").strip()

    # Summary-only mode: aggregate previously collected results and return
    # without running anything in the guest.
    if summary_results:
        result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
        result_sum(result_dir, params, guest_ver, test.resultsdir, test)
        session.close()
        return

    # Push the command-runner helper and the test tarball into the guest.
    guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py")
    vm.copy_files_to(guest_launcher, "/tmp")
    md5value = params.get("md5value")

    tarball = utils.unmap_url_cache(test.tmpdir, test_src, md5value)
    test_src = re.split("/", test_src)[-1]
    vm.copy_files_to(tarball, "/tmp")

    # Unpack the tarball into a scratch directory inside the guest.
    session.cmd("rm -rf /tmp/src*")
    session.cmd("mkdir -p /tmp/src_tmp")
    session.cmd("tar -xf /tmp/%s -C %s" % (test_src, "/tmp/src_tmp"))

    # Find the newest file in src tmp directory
    cmd = "ls -rt /tmp/src_tmp"
    s, o = session.cmd_status_output(cmd)
    if len(o) > 0:
        new_file = re.findall("(.*)\n", o)[-1]
    else:
        raise error.TestError("Can not decompress test file in guest")
    session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)

    # Optionally patch and compile the test sources inside the guest.
    if test_patch:
        test_patch_path = os.path.join(data_dir.get_root_dir(), 'shared',
                                       'deps', 'performance', test_patch)
        vm.copy_files_to(test_patch_path, "/tmp/src")
        session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)

    compile_cmd = params.get("compile_cmd")
    if compile_cmd:
        session.cmd("cd /tmp/src && %s" % compile_cmd)

    prepare_cmd = params.get("prepare_cmd")
    if prepare_cmd:
        s, o = session.cmd_status_output(prepare_cmd, test_timeout)
        if s != 0:
            raise error.TestError("Fail to prepare test env in guest")

    # Build the cmd_runner.py invocation: runs the monitor command and the
    # test command side by side, each redirected into its own result file.
    cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd
    cmd += "/tmp/guest_result_monitor\"  \"/tmp/src/%s" % test_cmd
    cmd += " &> %s \" \"/tmp/guest_result\""
    cmd += " %s" % int(test_timeout)

    test_cmd = cmd
    # Run guest test with monitor
    tag = utils_test.cmd_runner_monitor(vm,
                                        monitor_cmd,
                                        test_cmd,
                                        guest_path,
                                        timeout=test_timeout)

    # Result collecting
    result_list = [
        "/tmp/guest_result_%s" % tag,
        "/tmp/host_monitor_result_%s" % tag,
        "/tmp/guest_monitor_result_%s" % tag
    ]
    guest_results_dir = os.path.join(test.outputdir, "guest_results")
    if not os.path.exists(guest_results_dir):
        os.mkdir(guest_results_dir)
    ignore_pattern = params.get("ignore_pattern")
    head_pattern = params.get("head_pattern")
    row_pattern = params.get("row_pattern")
    for i in result_list:
        # Monitor result files additionally get a tab-separated '.sum'
        # per-column summary written next to them.
        if re.findall("monitor_result", i):
            result = utils_test.summary_up_result(i, ignore_pattern,
                                                  head_pattern, row_pattern)
            fd = open("%s.sum" % i, "w")
            sum_info = {}
            head_line = ""
            for keys in result:
                head_line += "\t%s" % keys
                for col in result[keys]:
                    col_sum = "line %s" % col
                    if col_sum in sum_info:
                        sum_info[col_sum] += "\t%s" % result[keys][col]
                    else:
                        sum_info[col_sum] = "%s\t%s" % (col, result[keys][col])
            fd.write("%s\n" % head_line)
            for keys in sum_info:
                fd.write("%s\n" % sum_info[keys])
            fd.close()
            shutil.copy("%s.sum" % i, guest_results_dir)
        shutil.copy(i, guest_results_dir)

    # Clean up scratch files in the guest before closing the session.
    session.cmd("rm -rf /tmp/src")
    session.cmd("rm -rf guest_test*")
    session.cmd("rm -rf pid_file*")
    session.close()
Beispiel #26
0
def run(test, params, env):
    """
    KVM guest stop test:
    1) Log into a guest
    2) Check is HeavyLoad.exe installed , download and
       install it if not installed.
    3) Start Heavyload to make guest in heavyload
    4) Check vm is alive
    5) Stop heavyload process and clean temp file.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """

    def loop_session_cmd(session, cmd):
        """
        Run cmd in the guest, retrying up to 3 times on shell errors.

        :return: True if cmd exited 0, False if it exited non-zero,
                 None if every attempt raised a shell status/timeout error.
        """
        def session_cmd(session, cmd):
            try:
                return session.cmd_status(cmd) == 0
            except (aexpect.ShellStatusError, aexpect.ShellTimeoutError):
                pass

        count = 0
        while count < 3:
            ret = session_cmd(session, cmd)
            if ret is not None:
                return ret
            count += 1
        return None

    def add_option(cmd, key, val):
        """
        Append a /key option to cmd, or replace its value if already present.
        """
        # BUGFIX: interpolate key into the pattern.  The original matched the
        # literal text "/%s", so an existing option was never detected and
        # options were blindly appended.
        if re.match(r".*/%s.*" % key, cmd, re.I):
            if val:
                rex = r"/%s\b+\S+\b+" % key
                val = "/%s %s " % (key, val)
                # BUGFIX: re.sub's 4th positional argument is 'count', not
                # 'flags'; the original passed re.I (== 2) as a repl count.
                cmd = re.sub(rex, val, cmd, flags=re.I)
        else:
            cmd += " /%s %s " % (key, val)
        return cmd

    tmp_dir = data_dir.get_tmp_dir()
    install_path = params["install_path"].rstrip("\\")
    # Raw string: "\h" is an invalid escape sequence in a normal string.
    heavyload_bin = r'"%s\heavyload.exe"' % install_path
    start_cmd = "%s /CPU /MEMORY /FILE " % heavyload_bin
    stop_cmd = "taskkill /T /F /IM heavyload.exe"
    stop_cmd = params.get("stop_cmd", stop_cmd)
    start_cmd = params.get("start_cmd", start_cmd)
    check_running_cmd = "tasklist|findstr /I heavyload"
    check_running_cmd = params.get("check_running_cmd", check_running_cmd)
    test_installed_cmd = "dir '%s'|findstr /I heavyload" % install_path
    test_installed_cmd = params.get("check_installed_cmd", test_installed_cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    # Pre-initialize the names referenced by the finally block so that an
    # early failure inside the try cannot raise NameError during cleanup
    # and mask the original exception.
    installed = False
    download_url = params.get("download_url")
    dst = None
    try:
        installed = session.cmd_status(test_installed_cmd) == 0
        if not installed:
            if download_url:
                dst = r"c:\\"
                pkg_md5sum = params["pkg_md5sum"]
                error.context("Download HeavyLoadSetup.exe", logging.info)
                pkg = utils.unmap_url_cache(tmp_dir,
                                            download_url, pkg_md5sum)
                vm.copy_files_to(pkg, dst)
            else:
                # No URL configured: install from the winutils volume.
                dst = r"%s:\\" % utils_misc.get_winutils_vol(session)

            error.context("Install HeavyLoad in guest", logging.info)
            install_cmd = params["install_cmd"]
            install_cmd = re.sub(r"DRIVE:\\+", dst, install_cmd)
            session.cmd(install_cmd)
            config_cmd = params.get("config_cmd")
            if config_cmd:
                session.cmd(config_cmd)

        error.context("Start heavyload in guest", logging.info)
        # Generate the heavyload command automatically if requested.
        if params.get("autostress") == "yes":
            free_mem = utils_misc.get_free_mem(session, "windows")
            free_disk = utils_misc.get_free_disk(session, "C:")
            start_cmd = r'"%s\heavyload.exe"' % params["install_path"]
            start_cmd = add_option(start_cmd, 'CPU', params["smp"])
            start_cmd = add_option(start_cmd, 'MEMORY', free_mem)
            start_cmd = add_option(start_cmd, 'FILE', free_disk)
        else:
            start_cmd = params["start_cmd"]
        # Reformat command to ensure heavyload is started as expected.
        test_timeout = int(params.get("timeout", "60"))
        steping = 60
        if test_timeout < 60:
            logging.warn("Heavyload use minis as unit of timeout,"
                         "values is too small, use default: 60s")
            test_timeout = 60
            steping = 30
        # HeavyLoad's /DURATION takes minutes; floor division keeps the
        # result an int on both Python 2 and 3.
        test_timeout = test_timeout // 60
        start_cmd = add_option(start_cmd, 'DURATION', test_timeout)
        start_cmd = add_option(start_cmd, 'START', '')
        start_cmd = add_option(start_cmd, 'AUTOEXIT', '')
        logging.info("heavyload cmd: %s" % start_cmd)
        session.sendline(start_cmd)
        if not loop_session_cmd(session, check_running_cmd):
            raise error.TestError("heavyload process is not started")

        error.context("Verify vm is alive", logging.info)
        utils_misc.wait_for(vm.verify_alive,
                            timeout=test_timeout, step=steping)
    finally:
        error.context("Stop load and clean tmp files", logging.info)
        if not installed and download_url:
            utils.system("rm -f %s/HeavyLoad*.exe" % tmp_dir)
            # dst is None if we failed before choosing a destination; there
            # is nothing to delete in the guest in that case.
            if dst is not None:
                session.cmd("del /f /s %sHeavyLoad*.exe" % dst)
        if loop_session_cmd(session, check_running_cmd):
            if not loop_session_cmd(session, stop_cmd):
                raise error.TestFail("Unable to terminate heavyload process")
        if session:
            session.close()
Beispiel #27
0
    def netload_kill_problem(session_serial):
        """
        Generate heavy host<=>guest network load with netperf, then verify
        the VM can still be killed while the load runs (via kill_and_check).

        Uses ``params``, ``vm``, and ``kill_and_check`` from the enclosing
        scope.

        :param session_serial: serial session to the guest.
        """
        setup_cmd = params.get("setup_cmd")
        clean_cmd = params.get("clean_cmd")
        firewall_flush = params.get("firewall_flush", "service iptables stop")
        error.context("Stop firewall in guest and host.", logging.info)
        # A firewall could block netperf traffic; failing to stop it is only
        # logged (it may simply not be running).
        try:
            utils.run(firewall_flush)
        except Exception:
            logging.warning("Could not stop firewall in host")

        try:
            session_serial.cmd(firewall_flush)
        except Exception:
            logging.warning("Could not stop firewall in guest")

        netperf_links = params["netperf_links"].split()
        remote_dir = params.get("remote_dir", "/var/tmp")
        # netperf_links supports multiple entries, in case we need to apply
        # patches to netperf or copy extra files along with it.
        for netperf_link in netperf_links:
            if utils.is_url(netperf_link):
                download_dir = data_dir.get_download_dir()
                netperf_link = utils.unmap_url_cache(download_dir,
                                                     netperf_link)
                netperf_dir = download_dir
            elif netperf_link:
                # Non-URL entries are resolved relative to the deps dir.
                netperf_link = utils_misc.get_path(data_dir.get_deps_dir(),
                                                   netperf_link)
            # Copy the file into the guest and into the host's remote_dir.
            vm.copy_files_to(netperf_link, remote_dir)
            utils.force_copy(netperf_link, remote_dir)

        guest_ip = vm.get_address(0)
        server_ip = utils_net.get_correspond_ip(guest_ip)

        error.context("Setup and run netperf server in host and guest",
                      logging.info)
        session_serial.cmd(setup_cmd % remote_dir, timeout=200)
        utils.run(setup_cmd % remote_dir, timeout=200)

        # Best-effort cleanup of any leftover netserver before starting.
        try:
            session_serial.cmd(clean_cmd)
        except Exception:
            pass
        session_serial.cmd(params.get("netserver_cmd") % remote_dir)

        utils.run(clean_cmd, ignore_status=True)
        utils.run(params.get("netserver_cmd") % remote_dir)
        p_size = params.get("packet_size", "1500")
        # Host-side client targets the guest; guest-side client targets the
        # host (server_ip), producing load in both directions.
        host_netperf_cmd = params.get("netperf_cmd") % (remote_dir,
                                                        "TCP_STREAM",
                                                        guest_ip,
                                                        p_size)
        guest_netperf_cmd = params.get("netperf_cmd") % (remote_dir,
                                                         "TCP_STREAM",
                                                         server_ip,
                                                         p_size)
        try:
            error.context("Start heavy network load host <=> guest.",
                          logging.info)
            session_serial.sendline(guest_netperf_cmd)
            utils.BgJob(host_netperf_cmd)

            # Wait for create big network usage.
            time.sleep(10)
            msg = "During netperf running, Check that we can kill VM with signal 0"
            error.context(msg, logging.info)
            kill_and_check(vm)

        finally:
            error.context("Clean up netperf server in host and guest.",
                          logging.info)
            utils.run(clean_cmd, ignore_status=True)
            # Guest cleanup is best-effort; the session may already be gone.
            try:
                session_serial.cmd(clean_cmd)
            except Exception:
                pass
Beispiel #28
0
def run_netperf_udp(test, params, env):
    """
    Run netperf on server and client side; we need to run this case on two
    machines. If dsthost is not set, a netperf server is started on the
    local host and an error message is logged:
    1) Start one vm guest os as client.
    2) Start a reference machine (dsthost) as server.
    3) Setup netperf on guest and reference machine (dsthost).
    4) Run netserver on server using control.server.
    5) Run netperf client command in guest several time with different
       message size.
    6) Compare UDP performance to make sure it is acceptable.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def get_remote_host_session():
        """Try to open an ssh session to dsthost; return None on failure."""
        dsthostssh = remote.remote_login("ssh",
                                         dsthost,
                                         22,
                                         "root",
                                         passwd,
                                         "#",
                                         timeout=30)
        if dsthostssh:
            dsthostssh.set_status_test_command("echo $?")
            return dsthostssh
        else:
            return None

    def scp_to_remote(local_path="", remote_path=""):
        """Copy local_path to both the reference machine and the guest."""
        remote.scp_to_remote(dsthost, 22, "root", passwd, local_path,
                             remote_path)
        vm.copy_files_to(local_path, remote_path)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    dsthost = params.get("dsthost")
    if not dsthost:
        dsthost = utils_net.get_ip_address_by_interface(params.get("netdst"))
        logging.error("dsthost is not set, use localhost ip %s" % dsthost)
    else:
        logging.info("Dest host is %s" % dsthost)
    passwd = params.get("hostpasswd")
    test_timeout = float(params.get("test_timeout", "1200"))

    error.context("Create session connection to remote machine")
    dsthostssh = utils_misc.wait_for(get_remote_host_session, 120, 0, 2)
    if not dsthostssh:
        raise error.TestError("Could not login into remote host %s " % dsthost)

    # Get range of message size: "<start> <end> <step>".
    message_size_range = params.get("message_size_range")
    message_size = message_size_range.split()
    start_size = int(message_size[0])
    end_size = int(message_size[1])
    step = int(message_size[2])
    m_size = start_size

    error.context("Copy netperf to dsthost and guest vm.")
    download_link = params.get("netperf_download_link")
    download_dir = data_dir.get_download_dir()
    md5sum = params.get("pkg_md5sum")
    host_netperf_dir = utils.unmap_url_cache(download_dir, download_link,
                                             md5sum)
    remote_dir = params.get("tmp_dir", "/tmp")
    scp_to_remote(host_netperf_dir, remote_dir)

    # Setup netperf on both sides.
    error.context("Set up netperf on reference machine.", logging.info)
    cmd = params.get("setup_cmd")
    (status, output) = dsthostssh.cmd_status_output(cmd % remote_dir,
                                                    timeout=test_timeout)
    if status != 0:
        raise error.TestError("Fail to setup netperf on reference machine.")
    error.context("Setup netperf on guest os.", logging.info)
    (status, output) = session.cmd_status_output(cmd % remote_dir,
                                                 timeout=test_timeout)
    if status != 0:
        raise error.TestError("Fail to setup netperf on guest os.")

    # Start netperf server in dsthost, killing any stale instance first.
    cmd = "killall netserver"
    dsthostssh.cmd_status_output(cmd)
    cmd = params.get("netserver_cmd")
    txt = "Run netserver on server (dsthost) using control.server."
    error.context(txt, logging.info)
    (status, output) = dsthostssh.cmd_status_output(cmd)
    if status != 0:
        txt = "Fail to start netperf server on remote machine."
        txt += " Command output: %s" % output
        raise error.TestError(txt)

    throughput = []

    # Run netperf with message size defined in range.
    msg = "Detail result for netperf udp test with different message size.\n"
    while (m_size <= end_size):
        test_protocol = params.get("test_protocol", "UDP_STREAM")
        cmd = params.get("netperf_cmd") % (dsthost, test_protocol, m_size)
        txt = "Run netperf client command in guest: %s" % cmd
        error.context(txt, logging.info)
        (status, output) = session.cmd_status_output(cmd)
        if status != 0:
            txt = "Fail to execute netperf client side command in guest."
            txt += " Command output: %s" % output
            raise error.TestError(txt)
        # The line carrying the throughput figure depends on the protocol.
        if test_protocol == "UDP_STREAM":
            speed_index = 6
        elif test_protocol == "UDP_RR":
            speed_index = 7
        else:
            # BUGFIX: the original built this exception without raising it,
            # so an unsupported protocol fell through to the parsing below.
            raise error.TestNAError("Protocol %s is not support"
                                    % test_protocol)

        line_tokens = output.splitlines()[speed_index].split()
        if not line_tokens:
            raise error.TestError("Output format is not expected")
        throughput.append(float(line_tokens[5]))

        msg += output
        m_size += step
    # Use open() rather than the Python-2-only file() builtin, and close
    # the handle deterministically instead of leaking it.
    with open(os.path.join(test.debugdir, "udp_results"), "w") as result_file:
        result_file.write(msg)

    failratio = float(params.get("failratio", 0.3))
    error.context("Compare UDP performance.", logging.info)
    # Throughput of adjacent message sizes must not diverge by more than
    # failratio, relative to the smaller size's throughput.
    for i in range(len(throughput) - 1):
        if abs(throughput[i] - throughput[i + 1]) > throughput[i] * failratio:
            txt = "The gap between adjacent throughput is greater than"
            txt += "%f." % failratio
            txt += "Please refer to log file for details:\n %s" % msg
            raise error.TestFail(txt)
    logging.info("The UDP performance as measured via netperf is ok.")
    logging.info("Throughput of netperf command: %s" % throughput)
    logging.debug("Output of netperf command:\n %s" % msg)
    error.context("Kill netperf server on server (dsthost).")

    # Best-effort cleanup of netperf files and sessions on both machines.
    try:
        remote_files = "%s/netperf*" % remote_dir
        dsthostssh.cmd("killall -9 netserver", ignore_all_errors=True)
        dsthostssh.cmd("rm -rf %s" % remote_files, ignore_all_errors=True)
        session.cmd("rm -rf %s" % remote_files, ignore_all_errors=True)
        utils.system("rm -rf %s" % host_netperf_dir, ignore_status=True)
        session.close()
        dsthostssh.close()
    except Exception:
        pass
Beispiel #29
0
def run(test, params, env):
    """
    KVM performance test:

    The idea is similar to 'client/tests/kvm/tests/autotest.py',
    but we can implement some special requests for performance
    testing.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    # Test configuration: timeouts, the monitoring command run alongside
    # the benchmark, the benchmark command and result location in guest.
    test_timeout = int(params.get("test_timeout", 240))
    monitor_cmd = params["monitor_cmd"]
    login_timeout = int(params.get("login_timeout", 360))
    test_cmd = params["test_cmd"]
    guest_path = params.get("result_path", "/tmp/guest_result")
    test_src = params["test_src"]
    test_patch = params.get("test_patch")

    # Prepare test environment in guest
    session = vm.wait_for_login(timeout=login_timeout)

    # NOTE(review): 'prefix' appears unused in this function — confirm
    # before removing.
    prefix = test.outputdir.split(".performance.")[0]
    summary_results = params.get("summary_results")
    guest_ver = session.cmd_output("uname -r").strip()

    # Summary-only mode: aggregate existing results and return without
    # running the benchmark.
    if summary_results:
        result_dir = params.get("result_dir", os.path.dirname(test.outputdir))
        result_sum(result_dir, params, guest_ver, test.resultsdir, test)
        session.close()
        return

    # Push the command runner helper script into the guest.
    guest_launcher = os.path.join(test.virtdir, "scripts/cmd_runner.py")
    vm.copy_files_to(guest_launcher, "/tmp")
    md5value = params.get("md5value")

    # Download (or fetch from cache) the test source tarball and copy it
    # into the guest.
    tarball = utils.unmap_url_cache(test.tmpdir, test_src, md5value)
    test_src = re.split("/", test_src)[-1]
    vm.copy_files_to(tarball, "/tmp")

    session.cmd("rm -rf /tmp/src*")
    session.cmd("mkdir -p /tmp/src_tmp")
    session.cmd("tar -xf /tmp/%s -C %s" % (test_src, "/tmp/src_tmp"))

    # Find the newest file in src tmp directory
    cmd = "ls -rt /tmp/src_tmp"
    s, o = session.cmd_status_output(cmd)
    if len(o) > 0:
        new_file = re.findall("(.*)\n", o)[-1]
    else:
        raise error.TestError("Can not decompress test file in guest")
    session.cmd("mv /tmp/src_tmp/%s /tmp/src" % new_file)

    # Optionally apply a patch shipped under shared/deps/performance.
    if test_patch:
        test_patch_path = os.path.join(data_dir.get_root_dir(), 'shared',
                                       'deps', 'performance', test_patch)
        vm.copy_files_to(test_patch_path, "/tmp/src")
        session.cmd("cd /tmp/src && patch -p1 < /tmp/src/%s" % test_patch)

    compile_cmd = params.get("compile_cmd")
    if compile_cmd:
        session.cmd("cd /tmp/src && %s" % compile_cmd)

    prepare_cmd = params.get("prepare_cmd")
    if prepare_cmd:
        s, o = session.cmd_status_output(prepare_cmd, test_timeout)
        if s != 0:
            raise error.TestError("Fail to prepare test env in guest")

    # Build the cmd_runner.py invocation: run monitor_cmd and the test
    # command in the guest, redirecting each to its result file.
    cmd = "cd /tmp/src && python /tmp/cmd_runner.py \"%s &> " % monitor_cmd
    cmd += "/tmp/guest_result_monitor\"  \"/tmp/src/%s" % test_cmd
    cmd += " &> %s \" \"/tmp/guest_result\""
    cmd += " %s" % int(test_timeout)

    test_cmd = cmd
    # Run guest test with monitor
    tag = cmd_runner_monitor(vm, monitor_cmd, test_cmd,
                             guest_path, timeout=test_timeout)

    # Result collecting: copy the tag-suffixed result files into the test
    # output directory, summarizing the monitor results on the way.
    result_list = ["/tmp/guest_result_%s" % tag,
                   "/tmp/host_monitor_result_%s" % tag,
                   "/tmp/guest_monitor_result_%s" % tag]
    guest_results_dir = os.path.join(test.outputdir, "guest_results")
    if not os.path.exists(guest_results_dir):
        os.mkdir(guest_results_dir)
    ignore_pattern = params.get("ignore_pattern")
    head_pattern = params.get("head_pattern")
    row_pattern = params.get("row_pattern")
    for i in result_list:
        if re.findall("monitor_result", i):
            # Build "<file>.sum": a header line with the result keys, then
            # one tab-joined line per row accumulated across all keys.
            result = utils_test.summary_up_result(i, ignore_pattern,
                                                  head_pattern, row_pattern)
            fd = open("%s.sum" % i, "w")
            sum_info = {}
            head_line = ""
            for keys in result:
                head_line += "\t%s" % keys
                for col in result[keys]:
                    col_sum = "line %s" % col
                    if col_sum in sum_info:
                        sum_info[col_sum] += "\t%s" % result[keys][col]
                    else:
                        sum_info[col_sum] = "%s\t%s" % (col, result[keys][col])
            fd.write("%s\n" % head_line)
            for keys in sum_info:
                fd.write("%s\n" % sum_info[keys])
            fd.close()
            shutil.copy("%s.sum" % i, guest_results_dir)
        shutil.copy(i, guest_results_dir)

    # Remove guest-side temporaries and close the session.
    session.cmd("rm -rf /tmp/src")
    session.cmd("rm -rf guest_test*")
    session.cmd("rm -rf pid_file*")
    session.close()
Beispiel #30
0
    def install_lsb_packages(self):
        """
        Download, cache and install the LSB DTK manager and LSB test
        packages for the current architecture.

        Package URLs and md5 sums come from self.config; tarballs are
        cached under self.cachedir and extracted into self.srcdir.  The
        host distro (RPM- vs dpkg-based) decides whether the RPMs are
        installed directly or converted to dpkg first.  Sets
        self.packages_installed on success; no-op if already set.

        :raise error.TestError: if URLs or md5 sums are missing from the
               configuration file.
        :raise IOError: if the extracted tarball lacks 'inst-config'.
        :raise EnvironmentError: if the host distro cannot handle LSB
               package installation.
        """
        if not self.packages_installed:
            # First, we download the LSB DTK manager package, worry about
            # installing it later
            dtk_manager_arch = self.config.get('dtk-manager', 'arch-%s' % self.arch)
            dtk_manager_url = self.config.get('dtk-manager',
                                         'tarball_url') % dtk_manager_arch
            if not dtk_manager_url:
                raise error.TestError('Could not get DTK manager URL from'
                                      ' configuration file')

            dtk_md5 = self.config.get('dtk-manager', 'md5-%s' % self.arch)
            if dtk_md5:
                logging.info('Caching LSB DTK manager RPM')
                dtk_manager_pkg = utils.unmap_url_cache(self.cachedir,
                                                        dtk_manager_url,
                                                        dtk_md5)
            else:
                raise error.TestError('Could not find DTK manager package md5,'
                                      ' cannot cache DTK manager tarball')

            # Get LSB tarball, cache it and uncompress under autotest srcdir
            if self.config.get('lsb', 'override_default_url') == 'no':
                lsb_url = self.config.get('lsb', 'tarball_url') % self.arch
            else:
                lsb_url = self.config.get('lsb', 'tarball_url_alt') % self.arch
            if not lsb_url:
                raise error.TestError('Could not get LSB URL from configuration'
                                      ' file')
            md5_key = 'md5-%s' % self.arch
            lsb_md5 = self.config.get('lsb', md5_key)
            if lsb_md5:
                logging.info('Caching LSB tarball')
                lsb_pkg = utils.unmap_url_cache(self.cachedir, lsb_url, lsb_md5)
            else:
                raise error.TestError('Could not find LSB package md5, cannot'
                                      ' cache LSB tarball')

            utils.extract_tarball_to_dir(lsb_pkg, self.srcdir)

            # Load the file that lists the RPMs shipped in the tarball.
            os.chdir(self.srcdir)
            if not os.path.isfile('inst-config'):
                raise IOError('Could not find file with package info,'
                              ' inst-config')
            pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
            lsb_pkg_list = []
            # Context manager closes the handle (the original leaked it).
            with open('inst-config', 'r') as rpm_file_list:
                for line in rpm_file_list:
                    try:
                        # lsb-dtk-manager is installed separately, so drop it
                        # from the list of packages.
                        if 'lsb-dtk-manager' not in line:
                            line = re.findall(pkg_pattern, line)[0]
                            lsb_pkg_list.append(line)
                    except IndexError:
                        # No RPM name on this line, no problem.
                        pass

            # Lets figure out the host distro
            distro_pkg_support = package.os_support()
            if os.path.isfile('/etc/debian_version') and \
            distro_pkg_support['dpkg']:
                logging.debug('Debian based distro detected')
                if distro_pkg_support['conversion']:
                    logging.debug('Package conversion supported')
                    distro_type = 'debian-based'
                else:
                    raise EnvironmentError('Package conversion not supported.'
                                           'Cannot handle LSB package'
                                           ' installation')
            elif distro_pkg_support['rpm']:
                logging.debug('Red Hat based distro detected')
                distro_type = 'redhat-based'
            else:
                logging.error('OS does not seem to be red hat or debian based')
                raise EnvironmentError('Cannot handle LSB package installation')

            # According to the host distro detection, we can install the packages
            # using the list previously assembled
            if distro_type == 'redhat-based':
                logging.info('Installing LSB RPM packages')
                package.install(dtk_manager_pkg)
                for lsb_rpm in lsb_pkg_list:
                    package.install(lsb_rpm, nodeps=True)
            elif distro_type == 'debian-based':
                logging.info('Remember that you must have the following lsb'
                             ' compliance packages installed:')
                logging.info('lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4'
                             ' lsb-languages lsb-multimedia lsb-printing')
                logging.info('Converting and installing LSB packages')
                dtk_manager_dpkg = package.convert(dtk_manager_pkg, 'dpkg')
                package.install(dtk_manager_dpkg)
                for lsb_rpm in lsb_pkg_list:
                    lsb_dpkg = package.convert(lsb_rpm, 'dpkg')
                    package.install(lsb_dpkg, nodeps=True)

            self.packages_installed = True
Beispiel #31
0
    def install_lsb_packages(self):
        """
        Download, cache and install the LSB DTK manager and LSB test
        packages for the current architecture.

        Package URLs and md5 sums come from self.config; tarballs are
        cached under self.cachedir and extracted into self.srcdir.  The
        host distro (RPM- vs dpkg-based) decides whether the RPMs are
        installed directly or converted to dpkg first.  Sets
        self.packages_installed on success; no-op if already set.

        :raise error.TestError: if URLs or md5 sums are missing from the
               configuration file.
        :raise IOError: if the extracted tarball lacks 'inst-config'.
        :raise EnvironmentError: if the host distro cannot handle LSB
               package installation.
        """
        if not self.packages_installed:
            # First, we download the LSB DTK manager package, worry about
            # installing it later
            dtk_manager_arch = self.config.get('dtk-manager',
                                               'arch-%s' % self.arch)
            dtk_manager_url = self.config.get('dtk-manager',
                                              'tarball_url') % dtk_manager_arch
            if not dtk_manager_url:
                raise error.TestError('Could not get DTK manager URL from'
                                      ' configuration file')

            dtk_md5 = self.config.get('dtk-manager', 'md5-%s' % self.arch)
            if dtk_md5:
                logging.info('Caching LSB DTK manager RPM')
                dtk_manager_pkg = utils.unmap_url_cache(
                    self.cachedir, dtk_manager_url, dtk_md5)
            else:
                raise error.TestError('Could not find DTK manager package md5,'
                                      ' cannot cache DTK manager tarball')

            # Get LSB tarball, cache it and uncompress under autotest srcdir
            if self.config.get('lsb', 'override_default_url') == 'no':
                lsb_url = self.config.get('lsb', 'tarball_url') % self.arch
            else:
                lsb_url = self.config.get('lsb', 'tarball_url_alt') % self.arch
            if not lsb_url:
                raise error.TestError(
                    'Could not get LSB URL from configuration'
                    ' file')
            md5_key = 'md5-%s' % self.arch
            lsb_md5 = self.config.get('lsb', md5_key)
            if lsb_md5:
                logging.info('Caching LSB tarball')
                lsb_pkg = utils.unmap_url_cache(self.cachedir, lsb_url,
                                                lsb_md5)
            else:
                raise error.TestError('Could not find LSB package md5, cannot'
                                      ' cache LSB tarball')

            utils.extract_tarball_to_dir(lsb_pkg, self.srcdir)

            # Load the file that lists the RPMs shipped in the tarball.
            os.chdir(self.srcdir)
            if not os.path.isfile('inst-config'):
                raise IOError('Could not find file with package info,'
                              ' inst-config')
            pkg_pattern = re.compile('[A-Za-z0-9_.-]*[.][r][p][m]')
            lsb_pkg_list = []
            # Context manager closes the handle (the original leaked it).
            with open('inst-config', 'r') as rpm_file_list:
                for line in rpm_file_list:
                    try:
                        # lsb-dtk-manager is installed separately, so drop it
                        # from the list of packages.
                        if 'lsb-dtk-manager' not in line:
                            line = re.findall(pkg_pattern, line)[0]
                            lsb_pkg_list.append(line)
                    except IndexError:
                        # No RPM name on this line, no problem.
                        pass

            # Lets figure out the host distro
            distro_pkg_support = package.os_support()
            if os.path.isfile('/etc/debian_version') and \
            distro_pkg_support['dpkg']:
                logging.debug('Debian based distro detected')
                if distro_pkg_support['conversion']:
                    logging.debug('Package conversion supported')
                    distro_type = 'debian-based'
                else:
                    raise EnvironmentError('Package conversion not supported.'
                                           'Cannot handle LSB package'
                                           ' installation')
            elif distro_pkg_support['rpm']:
                logging.debug('Red Hat based distro detected')
                distro_type = 'redhat-based'
            else:
                logging.error('OS does not seem to be red hat or debian based')
                raise EnvironmentError(
                    'Cannot handle LSB package installation')

            # According to the host distro detection, we can install the packages
            # using the list previously assembled
            if distro_type == 'redhat-based':
                logging.info('Installing LSB RPM packages')
                package.install(dtk_manager_pkg)
                for lsb_rpm in lsb_pkg_list:
                    package.install(lsb_rpm, nodeps=True)
            elif distro_type == 'debian-based':
                logging.info('Remember that you must have the following lsb'
                             ' compliance packages installed:')
                logging.info(
                    'lsb-core lsb-cxx lsb-graphics lsb-desktop lsb-qt4'
                    ' lsb-languages lsb-multimedia lsb-printing')
                logging.info('Converting and installing LSB packages')
                dtk_manager_dpkg = package.convert(dtk_manager_pkg, 'dpkg')
                package.install(dtk_manager_dpkg)
                for lsb_rpm in lsb_pkg_list:
                    lsb_dpkg = package.convert(lsb_rpm, 'dpkg')
                    package.install(lsb_dpkg, nodeps=True)

            self.packages_installed = True
    def netload_kill_problem(session_serial):
        """
        Generate heavy host<=>guest network load with netperf, then verify
        the VM can still be killed while the load runs (via kill_and_check).

        Uses ``params``, ``vm``, and ``kill_and_check`` from the enclosing
        scope.

        :param session_serial: serial session to the guest.
        """
        setup_cmd = params.get("setup_cmd")
        clean_cmd = params.get("clean_cmd")
        firewall_flush = params.get("firewall_flush", "service iptables stop")
        error.context("Stop firewall in guest and host.", logging.info)
        # A firewall could block netperf traffic; failing to stop it is only
        # logged (it may simply not be running).
        try:
            utils.run(firewall_flush)
        except Exception:
            logging.warning("Could not stop firewall in host")

        try:
            session_serial.cmd(firewall_flush)
        except Exception:
            logging.warning("Could not stop firewall in guest")

        netperf_links = params["netperf_links"].split()
        remote_dir = params.get("remote_dir", "/var/tmp")
        # netperf_links supports multiple entries, in case we need to apply
        # patches to netperf or copy extra files along with it.
        for netperf_link in netperf_links:
            if utils.is_url(netperf_link):
                download_dir = data_dir.get_download_dir()
                netperf_link = utils.unmap_url_cache(download_dir,
                                                     netperf_link)
                netperf_dir = download_dir
            elif netperf_link:
                # Non-URL entries are resolved relative to the deps dir.
                netperf_link = utils_misc.get_path(data_dir.get_deps_dir(),
                                                   netperf_link)
            # Copy the file into the guest and into the host's remote_dir.
            vm.copy_files_to(netperf_link, remote_dir)
            utils.force_copy(netperf_link, remote_dir)

        guest_ip = vm.get_address(0)
        server_ip = utils_net.get_correspond_ip(guest_ip)

        error.context("Setup and run netperf server in host and guest",
                      logging.info)
        session_serial.cmd(setup_cmd % remote_dir, timeout=200)
        utils.run(setup_cmd % remote_dir, timeout=200)

        # Best-effort cleanup of any leftover netserver before starting.
        try:
            session_serial.cmd(clean_cmd)
        except Exception:
            pass
        session_serial.cmd(params.get("netserver_cmd") % remote_dir)

        utils.run(clean_cmd, ignore_status=True)
        utils.run(params.get("netserver_cmd") % remote_dir)
        p_size = params.get("packet_size", "1500")
        # Host-side client targets the guest; guest-side client targets the
        # host (server_ip), producing load in both directions.
        host_netperf_cmd = params.get("netperf_cmd") % (
            remote_dir, "TCP_STREAM", guest_ip, p_size)
        guest_netperf_cmd = params.get("netperf_cmd") % (
            remote_dir, "TCP_STREAM", server_ip, p_size)
        try:
            error.context("Start heavy network load host <=> guest.",
                          logging.info)
            session_serial.sendline(guest_netperf_cmd)
            utils.BgJob(host_netperf_cmd)

            # Wait for create big network usage.
            time.sleep(10)
            msg = "During netperf running, Check that we can kill VM with signal 0"
            error.context(msg, logging.info)
            kill_and_check(vm)

        finally:
            error.context("Clean up netperf server in host and guest.",
                          logging.info)
            utils.run(clean_cmd, ignore_status=True)
            # Guest cleanup is best-effort; the session may already be gone.
            try:
                session_serial.cmd(clean_cmd)
            except Exception:
                pass
Beispiel #33
0
def run_netperf_udp(test, params, env):
    """
    Run netperf on server and client side, we need run this case on two
    machines. If dsthost is not set, the netperf server is started on the
    local host and an error message is logged:
    1) Start one vm guest os as client.
    2) Start a reference machine (dsthost) as server.
    3) Setup netperf on guest and reference machine (dsthost).
    4) Run netserver on server using control.server.
    5) Run netperf client command in guest several time with different
       message size.
    6) Compare UDP performance to make sure it is acceptable.

    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    :raise error.TestError: on setup/login/netperf failures.
    :raise error.TestNAError: when test_protocol is unsupported.
    :raise error.TestFail: when adjacent throughputs differ too much.
    """

    def get_remote_host_session():
        # Return None on login failure so utils_misc.wait_for() retries.
        dsthostssh = remote.remote_login("ssh", dsthost, 22, "root",
                                         passwd, "#", timeout=30)
        if dsthostssh:
            dsthostssh.set_status_test_command("echo $?")
            return dsthostssh
        else:
            return None

    def scp_to_remote(local_path="", remote_path=""):
        # Copy the payload both to the reference machine and to the guest.
        remote.scp_to_remote(dsthost, 22, "root", passwd, local_path,
                             remote_path)
        vm.copy_files_to(local_path, remote_path)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()

    session = vm.wait_for_login(timeout=int(params.get("login_timeout", 360)))

    dsthost = params.get("dsthost")
    if not dsthost:
        dsthost = utils_net.get_ip_address_by_interface(params.get("netdst"))
        logging.error("dsthost is not set, use localhost ip %s", dsthost)
    else:
        logging.info("Dest host is %s", dsthost)
    passwd = params.get("hostpasswd")
    test_timeout = float(params.get("test_timeout", "1200"))

    error.context("Create session connection to remote machine")
    dsthostssh = utils_misc.wait_for(get_remote_host_session, 120, 0, 2)
    if not dsthostssh:
        raise error.TestError("Could not login into remote host %s " % dsthost)

    # Message-size sweep parameters: "<start> <end> <step>".
    message_size_range = params.get("message_size_range")
    message_size = message_size_range.split()
    start_size = int(message_size[0])
    end_size = int(message_size[1])
    step = int(message_size[2])
    m_size = start_size

    error.context("Copy netperf to dsthost and guest vm.")
    netperf_links = params["netperf_links"].split()
    remote_dir = params.get("remote_dir", "/var/tmp")
    for netperf_link in netperf_links:
        if utils.is_url(netperf_link):
            download_dir = data_dir.get_download_dir()
            md5sum = params.get("pkg_md5sum")
            netperf_dir = utils.unmap_url_cache(download_dir,
                                                netperf_link, md5sum)
        elif netperf_link:
            netperf_dir = os.path.join(test.virtdir, netperf_link)
        scp_to_remote(netperf_dir, remote_dir)

    # Setup netperf.
    error.context("Set up netperf on reference machine.", logging.info)
    cmd = params.get("setup_cmd")
    (status, output) = dsthostssh.cmd_status_output(cmd % remote_dir,
                                                    timeout=test_timeout)
    if status != 0:
        raise error.TestError("Fail to setup netperf on reference machine.")
    error.context("Setup netperf on guest os.", logging.info)
    (status, output) = session.cmd_status_output(cmd % remote_dir,
                                                 timeout=test_timeout)
    if status != 0:
        raise error.TestError("Fail to setup netperf on guest os.")

    # Start netperf server in dsthost (kill any stale netserver first;
    # failure of the kill is ignored on purpose).
    cmd = "killall netserver"
    dsthostssh.cmd_status_output(cmd)
    cmd = params.get("netserver_cmd")
    txt = "Run netserver on server (dsthost) using control.server."
    error.context(txt, logging.info)
    (status, output) = dsthostssh.cmd_status_output(cmd)
    if status != 0:
        txt = "Fail to start netperf server on remote machine."
        txt += " Command output: %s" % output
        raise error.TestError(txt)

    throughput = []

    # Run netperf with message size defined in range.
    msg = "Detail result for netperf udp test with different message size.\n"
    while m_size <= end_size:
        test_protocol = params.get("test_protocol", "UDP_STREAM")
        cmd = params.get("netperf_cmd") % (dsthost, test_protocol, m_size)
        txt = "Run netperf client command in guest: %s" % cmd
        error.context(txt, logging.info)
        (status, output) = session.cmd_status_output(cmd)
        if status != 0:
            txt = "Fail to execute netperf client side command in guest."
            txt += " Command output: %s" % output
            raise error.TestError(txt)
        # The output line carrying the throughput number differs per
        # netperf protocol.
        if test_protocol == "UDP_STREAM":
            speed_index = 6
        elif test_protocol == "UDP_RR":
            speed_index = 7
        else:
            # BUG FIX: the original constructed this exception without
            # raising it, so unsupported protocols fell through with
            # speed_index unset (NameError below).
            raise error.TestNAError("Protocol %s is not support"
                                    % test_protocol)

        line_tokens = output.splitlines()[speed_index].split()
        if not line_tokens:
            raise error.TestError("Output format is not expected")
        throughput.append(float(line_tokens[5]))

        msg += output
        m_size += step
    # BUG FIX: use open() instead of the Python-2-only file() builtin and
    # close the handle deterministically.
    with open(os.path.join(test.debugdir, "udp_results"), "w") as result_file:
        result_file.write(msg)

    failratio = float(params.get("failratio", 0.3))
    error.context("Compare UDP performance.", logging.info)
    for i in range(len(throughput) - 1):
        if abs(throughput[i] - throughput[i + 1]) > throughput[i] * failratio:
            txt = "The gap between adjacent throughput is greater than"
            txt += "%f." % failratio
            txt += "Please refer to log file for details:\n %s" % msg
            raise error.TestFail(txt)
    logging.info("The UDP performance as measured via netperf is ok.")
    logging.info("Throughput of netperf command: %s", throughput)
    logging.debug("Output of netperf command:\n %s", msg)
    error.context("Kill netperf server on server (dsthost).")

    # Best-effort cleanup: never let cleanup failures mask the result.
    try:
        remote_files = "%s/netperf*" % remote_dir
        dsthostssh.cmd("killall -9 netserver", ignore_all_errors=True)
        dsthostssh.cmd("rm -rf %s" % remote_files, ignore_all_errors=True)
        session.cmd("rm -rf %s" % remote_files, ignore_all_errors=True)
        session.close()
        dsthostssh.close()
    except Exception:
        pass
Beispiel #34
0
def run_multi_queues_test(test, params, env):
    """
    Enable MULTI_QUEUE feature in guest

    1) Boot up VM(s)
    2) Login guests one by one
    3) Enable MQ for all virtio nics by ethtool -L
    4) Run netperf on guest
    5) check vhost threads on host, if vhost is enable
    6) check cpu affinity if smp == queues

    @param test: QEMU test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    def run_netperf(vm_session, n_instance, host_ip, client_path, test_time,
                    ext_args, taskset_cpu=None):
        """
        Start n_instance netperf clients in the guest in the background.

        BUG FIX: the default for taskset_cpu was a shared mutable list
        ([]); use a None sentinel instead (falsy, so behavior is the same).
        """
        cmd = ""
        if taskset_cpu:
            cmd += "taskset -c %s " % " ".join(taskset_cpu)
        cmd += "/home/netperf_agent.py %d " % n_instance
        cmd += "%s -D 1 -H %s -l %s %s" % (client_path, host_ip,
                                           int(test_time) * 1.5, ext_args)
        cmd += " > /home/netperf_log &"
        # BUG FIX: run on the session passed in; the original ignored
        # vm_session and used the enclosing scope's 'session' by accident
        # (the caller passes that same session, so behavior is unchanged).
        vm_session.cmd(cmd, timeout=120)

    def netperf_env_setup(session, host_path):
        # Copy the agent script and the netperf package into the guest,
        # relax the firewall and build netperf there.
        tmp_dir = params.get("tmp_dir")
        agent_path = os.path.join(test.virtdir, "scripts/netperf_agent.py")
        guest_root_path = params.get("tmp_dir", "/home")
        vm.copy_files_to(agent_path, guest_root_path)
        vm.copy_files_to(host_path, guest_root_path, timeout=transfer_timeout)
        error.context("Setup env in linux guest")
        session.cmd("service iptables stop; true")
        session.cmd("iptables -F; true")
        session.cmd(setup_cmd % tmp_dir)

    def get_virtio_queues_irq(session):
        """
        Return multi queues input irq list
        """
        guest_irq_info = session.cmd_output("cat /proc/interrupts")
        return re.findall(r"(\d+):.*virtio\d+-input.\d", guest_irq_info)

    def get_cpu_affinity_hint(session, irq_number):
        """
        Return the cpu affinity_hint of irq_number
        """
        cmd_get_cpu_affinity = r"cat /proc/irq/%s/affinity_hint" % irq_number
        return session.cmd_output(cmd_get_cpu_affinity).strip()

    def get_cpu_index(cpu_id):
        """
        Transfer cpu_id (an affinity bitmask) to a list of cpu indexes.
        """
        cpu_used_index = []
        for cpu_index in range(int(vm.cpuinfo.smp)):
            if int(cpu_id) & (0b1 << cpu_index) != 0:
                cpu_used_index.append(cpu_index)
        return cpu_used_index

    def set_cpu_affinity(session):
        """
        Set cpu affinity of every virtio input irq to its affinity_hint.
        """
        cmd_set_cpu_affinity = r"echo $(cat /proc/irq/%s/affinity_hint)"
        cmd_set_cpu_affinity += " > /proc/irq/%s/smp_affinity"
        irq_list = get_virtio_queues_irq(session)
        for irq in irq_list:
            session.cmd(cmd_set_cpu_affinity % (irq, irq))

    def get_cpu_irq_statistics(session, irq_number, cpu_id=None):
        """
        Get guest interrupts statistics.

        Returns per-cpu counts as a list, a single count when cpu_id is
        given, or [] when the irq line is absent.
        """
        cmd = r"cat /proc/interrupts | sed -n '/^\s\+%s:/p'" % irq_number
        irq_statics = session.cmd_output(cmd)
        # BUG FIX: materialize as a list so indexing works on Python 3,
        # where map() returns an iterator.
        irq_statics_list = [int(cnt) for cnt in irq_statics.split()[1:-2]]
        if irq_statics_list:
            if cpu_id and cpu_id < len(irq_statics_list):
                return irq_statics_list[cpu_id]
            if not cpu_id:
                return irq_statics_list
        return []

    login_timeout = int(params.get("login_timeout", 360))
    transfer_timeout = int(params.get("transfer_timeout", 360))
    queues = int(params.get("queues", 1))
    setup_cmd = params.get("setup_cmd")
    vms = params.get("vms").split()
    if queues == 1:
        logging.info("No need to enable MQ feature for single queue")
        return
    for vm in vms:
        vm = env.get_vm(vm)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=login_timeout)
        for i, nic in enumerate(vm.virtnet):
            if "virtio" in nic['nic_model']:
                ifname = utils_net.get_linux_ifname(session,
                                                    vm.get_mac_address(0))
                session.cmd_output("ethtool -L %s combined %d" % (ifname,
                                                                  queues))
                # ethtool -l prints the value twice (max + current);
                # both must show the requested queue count.
                o = session.cmd_output("ethtool -l %s" % ifname)
                if len(re.findall(r"Combined:\s+%d\s" % queues, o)) != 2:
                    raise error.TestError("Fail to enable MQ feature of (%s)"
                                          % nic.nic_name)
                logging.info("MQ feature of (%s) is enabled", nic.nic_name)

        # Run netperf under ground
        error.context("Set netperf test env on host", logging.info)
        download_dir = data_dir.get_download_dir()
        download_link = params.get("netperf_download_link")
        md5sum = params.get("pkg_md5sum")
        pkg = utils.unmap_url_cache(download_dir, download_link, md5sum)
        utils.system(setup_cmd % download_dir)

        os_type = params.get("os_type")
        if os_type == "linux":
            netperf_env_setup(session, pkg)
        else:
            # BUG FIX: a bare 'raise' here had no active exception and
            # would itself blow up; raise a meaningful skip instead.
            raise error.TestNAError("Test only supports linux guests, "
                                    "got os_type '%s'" % os_type)

        error.context("Run netperf server on the host")
        netserver_path = "%s/netperf-2.6.0/src/netserver" % download_dir
        utils.system("pidof netserver || %s" % netserver_path)

        host_ip = utils_net.get_host_ip_address(params)
        n_instance = int(params.get("instance", queues))
        client_path = "%s/netperf-2.6.0/src/netperf" % "/home"

        error.context("Run %s netperf on the guest" % int(queues))
        ext_args = params.get("ext_args", "")
        test_time = params.get("test_time", 60)
        taskset_cpu = params.get("netperf_taskset_cpu", [])
        check_cpu_affinity = params.get("check_cpu_affinity", 'yes')

        if check_cpu_affinity == 'yes' and (vm.cpuinfo.smp == queues):
            utils.system("systemctl stop irqbalance.service")
            set_cpu_affinity(session)

        netperf_thread = utils.InterruptedThread(run_netperf, (session,
                                                               n_instance,
                                                               host_ip,
                                                               client_path,
                                                               test_time,
                                                               ext_args,
                                                               taskset_cpu))

        netperf_thread.start()

        def all_clients_works():
            # netperf prints one "MIGRATE"-containing banner per client.
            try:
                content = session.cmd("grep -c MIGRATE /home/netperf_log")
                if int(n_instance) == int(content):
                    return True
            except Exception:
                content = 0
            return False

        if utils_misc.wait_for(all_clients_works, 120, 5, 5,
                               "Wait until all netperf clients start to work"):
            logging.debug("All netperf clients start to work.")
        else:
            raise error.TestNAError("Error, not all netperf clients at work")

        if params.get("vhost") == 'vhost=on':
            error.context("Check vhost threads on host", logging.info)
            vhost_thread_pattern = params.get("vhost_thread_pattern",
                                              r"\w+\s+(\d+)\s.*\[vhost-%s\]")
            vhost_threads = vm.get_vhost_threads(vhost_thread_pattern)
            time.sleep(10)

            top_cmd = r"top -n 1 -p %s -b" % ",".join(map(str, vhost_threads))
            top_info = utils.system_output(top_cmd)
            logging.info("%s", top_info)
            # BUG FIX: compile with re.I; the original passed re.I as the
            # 'pos' argument of Pattern.findall(), which skipped the first
            # two characters instead of ignoring case.
            vhost_re = re.compile(r"S(\s+0.0+){2}.*vhost-\d+[\d|+]", re.I)
            sleep_vhost_thread = len(vhost_re.findall(top_info))
            running_threads = len(vhost_threads) - int(sleep_vhost_thread)

            n_instance = min(n_instance, int(queues), int(vm.cpuinfo.smp))
            if (running_threads != n_instance):
                err_msg = "Run %s netperf session, but %s queues works"
                raise error.TestError(err_msg % (n_instance, running_threads))

        # check cpu affinity
        error.context("Check cpu affinity", logging.info)
        if check_cpu_affinity == 'yes' and (vm.cpuinfo.smp == queues):
            vectors = params.get("vectors", None)
            expect_vectors = 2 * int(queues) + 1
            if (not vectors) and (params.get("enable_msix_vectors") == "yes"):
                vectors = expect_vectors
            if vectors and (vectors >= expect_vectors) and taskset_cpu:
                cpu_irq_affinity = {}
                for irq in get_virtio_queues_irq(session):
                    cpu_id = get_cpu_affinity_hint(session, irq)
                    cpu_index = get_cpu_index(cpu_id)
                    if cpu_index:
                        for cpu in cpu_index:
                            cpu_irq_affinity["%s" % cpu] = irq
                    else:
                        raise error.TestError("Can not get the cpu")

                irq_number = cpu_irq_affinity[taskset_cpu]
                irq_ori = get_cpu_irq_statistics(session, irq_number)
                logging.info("Cpu irq info: %s", irq_ori)
                time.sleep(10)
                irq_cur = get_cpu_irq_statistics(session, irq_number)
                logging.info("After 10s, cpu irq info: %s", irq_cur)

                # BUG FIX: build a list (map() is lazy on Python 3 and has
                # no .index()); the busiest irq counter should belong to
                # the cpu we pinned netperf to.
                irq_change_list = [cur - ori
                                   for cur, ori in zip(irq_cur, irq_ori)]
                cpu_affinity = irq_change_list.index(max(irq_change_list))
                if cpu_affinity != int(taskset_cpu):
                    err_msg = "Error, taskset on cpu %s, but queues use cpu %s"
                    raise error.TestError(err_msg % (taskset_cpu,
                                                     cpu_affinity))

        netperf_thread.join()
        session.cmd("rm -rf /home/netperf*")
        session.close()
Beispiel #35
0
def run(test, params, env):
    """
    KVM guest stop test:
    1) Log into a guest
    2) Check is HeavyLoad.exe installed , download and
       install it if not installed.
    3) Start Heavyload to make guest in heavyload
    4) Check vm is alive
    5) Stop heavyload process and clean temp file.

    :param test: kvm test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    def loop_session_cmd(session, cmd):
        """Run cmd up to 3 times; return its success flag, or None if it
        kept timing out."""
        def session_cmd(session, cmd):
            try:
                return session.cmd_status(cmd) == 0
            except (aexpect.ShellStatusError, aexpect.ShellTimeoutError):
                pass

        count = 0
        while count < 3:
            ret = session_cmd(session, cmd)
            if ret is not None:
                return ret
            count += 1
        return None

    def add_option(cmd, key, val):
        """
        Append a "/key val" option to cmd, or update it if already present.
        """
        # BUG FIX: the original matched the literal text ".*/%s.*" (key was
        # never substituted, so the branch almost never fired) and passed
        # re.I as re.sub's positional 'count' argument instead of 'flags'.
        if re.match(r".*/%s.*" % key, cmd, re.I):
            if val:
                rex = r"/%s\b+\S+\b+" % key
                val = "/%s %s " % (key, val)
                cmd = re.sub(rex, val, cmd, flags=re.I)
        else:
            cmd += " /%s %s " % (key, val)
        return cmd

    tmp_dir = data_dir.get_tmp_dir()
    install_path = params["install_path"].rstrip("\\")
    # Raw string avoids the ambiguous '\h' escape in the Windows path.
    heavyload_bin = r'"%s\heavyload.exe"' % install_path
    start_cmd = "%s /CPU /MEMORY /FILE " % heavyload_bin
    stop_cmd = "taskkill /T /F /IM heavyload.exe"
    stop_cmd = params.get("stop_cmd", stop_cmd)
    start_cmd = params.get("start_cmd", start_cmd)
    check_running_cmd = "tasklist|findstr /I heavyload"
    check_running_cmd = params.get("check_running_cmd", check_running_cmd)
    test_installed_cmd = "dir '%s'|findstr /I heavyload" % install_path
    test_installed_cmd = params.get("check_installed_cmd", test_installed_cmd)

    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    timeout = float(params.get("login_timeout", 240))
    session = vm.wait_for_login(timeout=timeout)
    # BUG FIX: preset the names the finally block reads so an early failure
    # inside the try does not turn into a NameError during cleanup.
    installed = False
    download_url = params.get("download_url")
    dst = r"c:\\"
    try:
        installed = session.cmd_status(test_installed_cmd) == 0
        if not installed:
            if download_url:
                pkg_md5sum = params["pkg_md5sum"]
                error.context("Download HeavyLoadSetup.exe", logging.info)
                pkg = utils.unmap_url_cache(tmp_dir, download_url, pkg_md5sum)
                vm.copy_files_to(pkg, dst)
            else:
                dst = r"%s:\\" % utils_misc.get_winutils_vol(session)

            error.context("Install HeavyLoad in guest", logging.info)
            install_cmd = params["install_cmd"]
            install_cmd = re.sub(r"DRIVE:\\+", dst, install_cmd)
            session.cmd(install_cmd)
            config_cmd = params.get("config_cmd")
            if config_cmd:
                session.cmd(config_cmd)

        error.context("Start heavyload in guest", logging.info)
        # Generate the heavyload command automatically.
        if params.get("autostress") == "yes":
            free_mem = utils_misc.get_free_mem(session, "windows")
            free_disk = utils_misc.get_free_disk(session, "C:")
            start_cmd = r'"%s\heavyload.exe"' % params["install_path"]
            start_cmd = add_option(start_cmd, 'CPU', params["smp"])
            start_cmd = add_option(start_cmd, 'MEMORY', free_mem)
            start_cmd = add_option(start_cmd, 'FILE', free_disk)
        else:
            start_cmd = params["start_cmd"]
        # Reformat command to ensure heavyload is started as expected.
        test_timeout = int(params.get("timeout", "60"))
        stepping = 60
        if test_timeout < 60:
            logging.warn("Heavyload uses minutes as timeout unit, "
                         "value is too small, use default: 60s")
            test_timeout = 60
            stepping = 30
        # Floor division keeps integer semantics on Python 3 as well;
        # heavyload's /DURATION takes minutes.
        test_timeout = test_timeout // 60
        start_cmd = add_option(start_cmd, 'DURATION', test_timeout)
        start_cmd = add_option(start_cmd, 'START', '')
        start_cmd = add_option(start_cmd, 'AUTOEXIT', '')
        logging.info("heavyload cmd: %s", start_cmd)
        session.sendline(start_cmd)
        if not loop_session_cmd(session, check_running_cmd):
            raise error.TestError("heavyload process is not started")

        sleep_before_migration = int(params.get("sleep_before_migration", "0"))
        time.sleep(sleep_before_migration)

        error.context("Verify vm is alive", logging.info)
        utils_misc.wait_for(vm.verify_alive,
                            timeout=test_timeout,
                            step=stepping)
    finally:
        # In migration tests there is no need to stop heavyload on the
        # source host.
        cleanup_in_the_end = params.get("unload_stress_in_the_end", "yes")
        if cleanup_in_the_end == "yes":
            error.context("Stop load and clean tmp files", logging.info)
            if not installed and download_url:
                utils.system("rm -f %s/HeavyLoad*.exe" % tmp_dir)
                session.cmd("del /f /s %sHeavyLoad*.exe" % dst)
            if loop_session_cmd(session, check_running_cmd):
                if not loop_session_cmd(session, stop_cmd):
                    raise error.TestFail("Unable to terminate heavyload "
                                         "process")
        if session:
            session.close()