Example #1
File: mysql.py Project: autotest/autotest
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.45.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client'
                    % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    #
    # MySQL doesn't create this directory on its own.
    # This is where database logs and files are created.
    #
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass
    #
    # Initialize the database.
    #
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #2
    def build(self, make_opts="", logfile="", extraversion="autotest"):
        """build the kernel

        make_opts
                additional options to make, if any
        """
        os_dep.commands("gcc", "make")
        if logfile == "":
            logfile = os.path.join(self.log_dir, "kernel_build")
        os.chdir(self.build_dir)
        if extraversion:
            self.extraversion(extraversion)
        self.set_cross_cc()
        # setup_config_file(config_file, config_overrides)

        # Not needed on 2.6, but hard to tell -- handle failure
        utils.system("make dep", ignore_status=True)
        threads = 2 * utils.count_cpus()
        build_string = "make -j %d %s %s" % (threads, make_opts, self.build_target)
        # e.g. make bzImage, or make zImage
        print build_string
        utils.system(build_string)
        if kernel_config.modules_needed(".config"):
            utils.system("make -j %d %s modules" % (threads, make_opts))

        kernel_version = self.get_kernel_build_ver()
        kernel_version = re.sub("-autotest", "", kernel_version)
        self.logfile.write("BUILD VERSION: %s\n" % kernel_version)

        utils.force_copy(self.build_dir + "/System.map", self.results_dir)
Example #3
 def reboot_setup(self):
     # save the partition list and mount points, as well as the cpu count
     partition_list = partition_lib.get_partition_list(self,
                                                       exclude_swap=False)
     mount_info = partition_lib.get_mount_info(partition_list)
     self._state.set('client', 'mount_info', mount_info)
     self._state.set('client', 'cpu_count', utils.count_cpus())
Example #4
    def setup(self,
              tarball='oprofile-0.9.4.tar.bz2',
              local=None,
              *args,
              **dargs):
        if local == True:
            return

        try:
            self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
            utils.extract_tarball_to_dir(self.tarball, self.srcdir)
            os.chdir(self.srcdir)

            patch = os.path.join(self.bindir, "oprofile-69455.patch")
            utils.system('patch -p1 < %s' % patch)
            utils.configure('--with-kernel-support --prefix=' + \
                                                    self.srcdir)
            utils.make('-j %d' % utils.count_cpus())
            utils.make('install')
        except Exception:
            # Build from source failed.
            # But maybe we can still use the local copy
            local_opcontrol = os.path.exists('/usr/bin/opcontrol')
            local_opreport = os.path.exists('/usr/bin/opreport')
            if local == False or not local_opcontrol or not local_opreport:
                raise error.AutotestError('No oprofile available')
        else:
            # if we managed to build, try again to pick binaries
            self._pick_binaries(True)
Example #5
    def setup(self, tarball='oprofile-0.9.4.tar.bz2', local=None,
              *args, **dargs):
        if local is True:
            return

        try:
            self.tarball = utils.unmap_url(self.bindir, tarball,
                                           self.tmpdir)
            utils.extract_tarball_to_dir(self.tarball, self.srcdir)
            os.chdir(self.srcdir)

            patch = os.path.join(self.bindir, "oprofile-69455.patch")
            utils.system('patch -p1 < %s' % patch)
            utils.configure('--with-kernel-support --prefix=' +
                            self.srcdir)
            utils.make('-j %d' % utils.count_cpus())
            utils.make('install')
        except Exception:
            # Build from source failed.
            # But maybe we can still use the local copy
            local_opcontrol = os.path.exists('/usr/bin/opcontrol')
            local_opreport = os.path.exists('/usr/bin/opreport')
            if local is False or not local_opcontrol or not local_opreport:
                raise error.AutotestError('No oprofile available')
        else:
            # if we managed to build, try again to pick binaries
            self._pick_binaries(True)
Example #6
    def build(self, make_opts='', logfile='', extraversion='autotest'):
        """build the kernel

        make_opts
                additional options to make, if any
        """
        os_dep.commands('gcc', 'make')
        if logfile == '':
            logfile = os.path.join(self.log_dir, 'kernel_build')
        os.chdir(self.build_dir)
        if extraversion:
            self.extraversion(extraversion)
        self.set_cross_cc()
        # setup_config_file(config_file, config_overrides)

        # Not needed on 2.6, but hard to tell -- handle failure
        utils.system('make dep', ignore_status=True)
        threads = 2 * utils.count_cpus()
        build_string = 'make -j %d %s %s' % (threads, make_opts,
                                             self.build_target)
        # e.g. make bzImage, or make zImage
        print build_string
        utils.system(build_string)
        if kernel_config.modules_needed('.config'):
            utils.system('make -j %d %s modules' % (threads, make_opts))

        kernel_version = self.get_kernel_build_ver()
        kernel_version = re.sub('-autotest', '', kernel_version)
        self.logfile.write('BUILD VERSION: %s\n' % kernel_version)

        utils.force_copy(self.build_dir + '/System.map', self.results_dir)
Example #7
File: mysql.py Project: wkf31156/autotest
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'http://downloads.mysql.com/archives/mysql-5.0/mysql-5.0.45.tar.gz',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--prefix=%s/mysql --enable-thread-safe-client' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    #
    # MySQL doesn't create this directory on its own.
    # This is where database logs and files are created.
    #
    try:
        os.mkdir(topdir + '/mysql/var')
    except Exception:
        pass
    #
    # Initialize the database.
    #
    utils.system('%s/mysql/bin/mysql_install_db' % topdir)

    os.chdir(topdir)
Example #8
    def run_once(self, db_type='pgsql', build=1,
                 num_threads=utils.count_cpus(), max_time=60,
                 read_only=0, args=''):
        plib = os.path.join(self.autodir, 'deps/pgsql/pgsql/lib')
        mlib = os.path.join(self.autodir, 'deps/mysql/mysql/lib/mysql')
        ld_path = utils.prepend_path(plib,
            utils.environ('LD_LIBRARY_PATH'))
        ld_path = utils.prepend_path(mlib, ld_path)
        os.environ['LD_LIBRARY_PATH'] = ld_path

        # The databases don't want to run as root so run them as nobody
        self.dbuser = 'nobody'
        self.dbuid = pwd.getpwnam(self.dbuser)[2]
        self.sudo = 'sudo -u ' + self.dbuser + ' '

        # Check for nobody user
        try:
            utils.system(self.sudo + '/bin/true')
        except Exception:
            raise error.TestError('Unable to run as nobody')

        if (db_type == 'pgsql'):
            self.execute_pgsql(build, num_threads, max_time, read_only, args)
        elif (db_type == 'mysql'):
            self.execute_mysql(build, num_threads, max_time, read_only, args)
Example #9
    def host():
        logging.info("Setup monitor server on host")
        # Kill previous instances of the host load programs, if any
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Cleanup previous log instances
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Opening firewall ports on host
        utils.run("iptables -F", ignore_status=True)

        # Run heartbeat on host
        utils.run(
            server_setup_cmd %
            (monitor_dir, threshold, monitor_log_file_server, monitor_port))

        if stress_setup_cmd is not None:
            logging.info("Build stress on host")
            # Uncompress and build stress on host
            utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # stress_threads = 2 * n_cpus
        threads_host = 2 * utils.count_cpus()
        # Run stress test on host
        utils.run(stress_cmd % (stress_dir, threads_host))
Example #10
 def initialize(self, tests=''):
     # Initialize failure counter
     self.n_fail = 0
     # Get the parameters for run_once()
     self.tests = tests
     # Ratio is 1 divided by the number of CPUs on the system.
     self.ratio = 1.0 / utils.count_cpus()
     logging.debug('Ratio (1/n_cpus) found for this system: %s' % self.ratio)
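
A minimal standalone sketch of the ratio computed above, with the CPU count hardcoded as an assumption in place of utils.count_cpus():

    n_cpus = 8              # hypothetical 8-CPU host, standing in for utils.count_cpus()
    ratio = 1.0 / n_cpus
    print(ratio)            # 0.125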
Example #11
def run(test, params, env):
    """
    Test vcpupin while numad is running
    """
    vcpu_placement = params.get("vcpu_placement")
    bug_url = params.get("bug_url", "")
    status_error = "yes" == params.get("status_error", "no")
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)

    # Prepare numatune memory parameter dict
    mem_tuple = ('memory_mode', 'memory_placement', 'memory_nodeset')
    numa_memory = {}
    for mem_param in mem_tuple:
        value = params.get(mem_param)
        if value:
            numa_memory[mem_param.split('_')[1]] = value

    libvirtd = utils_libvirtd.Libvirtd()
    libvirtd.start()

    try:
        # Get host numa node list
        host_numa_node = utils_misc.NumaInfo()
        node_list = host_numa_node.online_nodes
        logging.debug("host node list is %s", node_list)
        if numa_memory.get('nodeset'):
            used_node = utils_test.libvirt.cpus_parser(numa_memory['nodeset'])
            logging.debug("set node list is %s", used_node)
            if not status_error:
                for i in used_node:
                    if i > max(node_list):
                        raise error.TestNAError("nodeset %s out of range" %
                                                numa_memory['nodeset'])
        # Start numad
        utils.run("service numad start")

        # Start vm and do vcpupin
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name)
        vmxml.numa_memory = numa_memory
        vmxml.placement = vcpu_placement
        logging.debug("vm xml is %s", vmxml)
        vmxml.sync()
        vm.start()
        vm.wait_for_login()

        host_cpu_count = utils.count_cpus()
        for i in range(host_cpu_count):
            ret = virsh.vcpupin(vm_name, 0, i, debug=True, ignore_status=True)
            if ret.exit_status:
                raise error.TestFail("vcpupin failed while numad running, %s"
                                     % bug_url)
            virsh.vcpuinfo(vm_name, debug=True)
    finally:
        utils.run("service numad stop")
        libvirtd.restart()
        backup_xml.sync()
Example #12
 def make_parallel(self):
     '''
     Runs "make" using the correct number of parallel jobs
     '''
     parallel_make_jobs = utils.count_cpus()
     make_command = "make -j %s" % parallel_make_jobs
     logging.info("Running parallel make on build dir")
     os.chdir(self.build_dir)
     utils.system(make_command)
Example #13
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cpus_os = utils.count_cpus()
        if int(cpus_nodeinfo) != cpus_os:
            raise error.TestFail(
                "Virsh nodeinfo output didn't match number of "
                "CPU(s)")

        # Check CPU frequency
        cpu_frequency_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep 'cpu MHz' | head -n1 | "
               "awk '{print $4}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        print cpu_frequency_os
        if not re.match(cpu_frequency_nodeinfo, cpu_frequency_os):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "frequency")

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        cmd = "grep 'physical id' /proc/cpuinfo | uniq | sort | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_NUMA_nodeinfo = _check_nodeinfo(nodeinfo_output, 'NUMA cell(s)', 3)
        cpu_sockets_os = int(
            cmd_result.stdout.strip()) / int(cpu_NUMA_nodeinfo)
        if cpu_sockets_os != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s)")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(nodeinfo_output,
                                                    'Core(s) per socket', 4)
        cmd = "grep 'cpu cores' /proc/cpuinfo | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket")

        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = utils.memtotal()
        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
Example #14
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail(
                "Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cpus_os = utils.count_cpus()
        if int(cpus_nodeinfo) != cpus_os:
            raise error.TestFail("Virsh nodeinfo output didn't match number of "
                                 "CPU(s)")

        # Check CPU frequency
        cpu_frequency_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep 'cpu MHz' | head -n1 | "
               "awk '{print $4}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        print cpu_frequency_os
        if not re.match(cpu_frequency_nodeinfo, cpu_frequency_os):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "frequency")

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        cmd = "grep 'physical id' /proc/cpuinfo | uniq | sort | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_NUMA_nodeinfo = _check_nodeinfo(nodeinfo_output, 'NUMA cell(s)', 3)
        cpu_sockets_os = int(
            cmd_result.stdout.strip()) / int(cpu_NUMA_nodeinfo)
        if cpu_sockets_os != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s)")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(
            nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "grep 'cpu cores' /proc/cpuinfo | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket")

        # Check Memory size
        memory_size_nodeinfo = int(
            _check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = utils_memory.memtotal()
        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
Example #15
def setup(topdir):
    srcdir = os.path.join(topdir, 'src')

    os.chdir(srcdir)

    utils.configure('--with-elfutils=elfutils --prefix=%s/systemtap' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #16
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cpus_os = utils.count_cpus()
        if int(cpus_nodeinfo) != cpus_os:
            raise error.TestFail("Virsh nodeinfo output didn't match number of " "CPU(s)")

        # Check CPU frequency
        cpu_frequency_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU frequency", 3)
        cmd = "cat /proc/cpuinfo | grep 'cpu MHz' | head -n1 | " "awk '{print $4}' | awk -F. '{print $1}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        logging.debug("cpu_frequency_nodeinfo=%s cpu_frequency_os=%s", cpu_frequency_nodeinfo, cpu_frequency_os)
        #
        # Matching CPU frequency is not an exact science on today's modern
        # processors and OSes. CPUs can have their execution speed varied
        # based on current workload in order to save energy and keep cool.
        # Thus, since we're getting the values at disparate points in time,
        # we cannot necessarily do a pure comparison.
        # So, let's get the absolute value of the difference and ensure
        # that it's within 20 percent of each value to give us enough of
        # a "fudge" factor to declare "close enough". Don't return a failure;
        # just print a debug message and move on.
        diffval = abs(int(cpu_frequency_nodeinfo) - int(cpu_frequency_os))
        if float(diffval) / float(cpu_frequency_nodeinfo) > 0.20 or float(diffval) / float(cpu_frequency_os) > 0.20:
            logging.debug("Virsh nodeinfo output didn't match CPU " "frequency within 20 percent")

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(_check_nodeinfo(nodeinfo_output, "CPU socket(s)", 3))
        cmd = "grep 'physical id' /proc/cpuinfo | uniq | sort | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_NUMA_nodeinfo = _check_nodeinfo(nodeinfo_output, "NUMA cell(s)", 3)
        cpu_sockets_os = int(cmd_result.stdout.strip()) / int(cpu_NUMA_nodeinfo)
        if cpu_sockets_os != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU " "socket(s)")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(nodeinfo_output, "Core(s) per socket", 4)
        cmd = "grep 'cpu cores' /proc/cpuinfo | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) " "per socket")

        # Check Memory size
        memory_size_nodeinfo = int(_check_nodeinfo(nodeinfo_output, "Memory size", 3))
        memory_size_os = utils_memory.memtotal()
        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match " "Memory size")
Example #17
File: ltp.py Project: LaneWolf/autotest
    def setup(self, tarball = 'ltp-full-20120104.bz2'):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        utils.system('cp ../scan.c pan/')   # saves having lex installed
        utils.configure('--prefix=%s' % ltpbin_dir)
        utils.make('-j %d all' % utils.count_cpus())
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #18
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('ftp://ftp-archives.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure('--without-readline --without-zlib --enable-debug --prefix=%s/pgsql' % topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #19
File: ltp.py Project: vi-patel/autotest
    def setup(self, tarball='ltp-full-20120104.bz2'):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        utils.system('cp ../scan.c pan/')  # saves having lex installed
        utils.configure('--prefix=%s' % ltpbin_dir)
        utils.make('-j %d all' % utils.count_cpus())
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #20
    def setup(self, tarball="ltp-full-20140115.tar.bz2"):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, "bin")
        os.mkdir(ltpbin_dir)

        # saves having lex installed
        shutil.copy(os.path.join(self.bindir, "scan.c"), os.path.join(self.srcdir, "pan"))
        utils.configure("--prefix=%s" % ltpbin_dir)
        utils.make("-j %d all" % utils.count_cpus())
        utils.system("yes n | make SKIP_IDCHECK=1 install")
Example #21
    def setup(self, tarball='ltp-full-20160510.tar.bz2'):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        os.chdir(self.srcdir)
        ltpbin_dir = os.path.join(self.srcdir, 'bin')
        os.mkdir(ltpbin_dir)

        # saves having lex installed
        shutil.copy(os.path.join(self.bindir, 'scan.c'),
                    os.path.join(self.srcdir, 'pan'))
        utils.system('make autotools')
        utils.configure('--prefix=%s' % ltpbin_dir)
        utils.make('-j %d all' % utils.count_cpus())
        utils.system('yes n | make SKIP_IDCHECK=1 install')
Example #22
File: pgsql.py Project: yumingfei/autotest
def setup(tarball, topdir):
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'ftp://ftp-archives.postgresql.org/pub/source/v8.3.1/postgresql-8.3.1.tar.bz2',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    utils.configure(
        '--without-readline --without-zlib --enable-debug --prefix=%s/pgsql' %
        topdir)
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #23
    def setup(self, tarball = 'sysbench-0.4.8.tar.bz2'):
        tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(tarball, self.srcdir)
        self.job.setup_dep(['pgsql', 'mysql'])

        os.chdir(self.srcdir)

        pgsql_dir = os.path.join(self.autodir, 'deps/pgsql/pgsql')
        mysql_dir = os.path.join(self.autodir, 'deps/mysql/mysql')

        # configure wants to get at pg_config, so add its path
        utils.system(
            'PATH=%s/bin:$PATH ./configure --with-mysql=%s --with-pgsql'
            % (pgsql_dir, mysql_dir))
        utils.make('-j %d' % utils.count_cpus())
Example #24
    def format_affinity_str(cpu_list):
        """
        Format affinity str

        :param cpu_list: list of cpu number
        :return: cpu affinity string
        """
        cpu_affinity_str = ""
        host_cpu_count = utils.count_cpus()
        for i in range(host_cpu_count):
            if i in cpu_list:
                cpu_affinity_str += "y"
            else:
                cpu_affinity_str += "-"
        return cpu_affinity_str
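
The mapping above is easy to verify in isolation. A minimal sketch, assuming a hypothetical 4-CPU host instead of calling utils.count_cpus():

    def format_affinity_str(cpu_list, host_cpu_count=4):
        # 'y' marks a CPU the vcpu may run on, '-' one it may not.
        return ''.join('y' if i in cpu_list else '-'
                       for i in range(host_cpu_count))

    print(format_affinity_str([0, 2]))  # 'y-y-'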
Example #25
File: pgpool.py Project: autotest/autotest
def setup(tarball, topdir):
    # FIXME - Waiting to be able to specify dependency.
    # self.job.setup_dep(['pgsql'])
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file('http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz', tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    # FIXME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    utils.configure('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql'
                    % (topdir, os.environ['AUTODIR']))
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #26
    def make_guest_kernel(self):
        '''
        Runs "make", using a single job
        '''
        os.chdir(self.source)
        logging.info("Building guest kernel")
        logging.debug("Kernel config is %s" % self.config)
        utils.get_file(self.config, '.config')

        # FIXME currently no support for builddir
        # run old config
        utils.system('yes "" | make oldconfig > /dev/null')
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s %s" % (parallel_make_jobs, self.build_target)
        logging.info("Running parallel make on src dir")
        utils.system(make_command)
Example #27
File: job.py Project: LaneWolf/autotest
    def _check_post_reboot(self, subdir, running_id=None):
        """
        Function to perform post-boot checks, such as whether the system
        configuration has changed across reboots (specifically, CPUs and
        partitions).

        @param subdir: The subdir to use in the job.record call.
        @param running_id: An optional running_id to include in the reboot
            failure log message

        @raise JobError: Raised if the current configuration does not match the
            pre-reboot configuration.
        """
        abort_on_mismatch = GLOBAL_CONFIG.get_config_value('CLIENT',
                                                           'abort_on_mismatch',
                                                           type=bool,
                                                           default=False)
        # check to see if any partitions have changed
        partition_list = partition_lib.get_partition_list(self,
                                                          exclude_swap=False)
        mount_info = partition_lib.get_mount_info(partition_list)
        old_mount_info = self._state.get('client', 'mount_info')
        if mount_info != old_mount_info:
            new_entries = mount_info - old_mount_info
            old_entries = old_mount_info - mount_info
            description = ("mounted partitions are different after reboot "
                           "(old entries: %s, new entries: %s)" %
                           (old_entries, new_entries))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, "reboot.verify_config",
                                            description, running_id=running_id)
                raise error.JobError("Reboot failed: %s" % description)
            else:
                logging.warning(description)

        # check to see if any CPUs have changed
        cpu_count = utils.count_cpus()
        old_count = self._state.get('client', 'cpu_count')
        if cpu_count != old_count:
            description = ('Number of CPUs changed after reboot '
                           '(old count: %d, new count: %d)' %
                           (old_count, cpu_count))
            if abort_on_mismatch:
                self._record_reboot_failure(subdir, 'reboot.verify_config',
                                            description, running_id=running_id)
                raise error.JobError('Reboot failed: %s' % description)
            else:
                logging.warning(description)
Example #28
    def make_guest_kernel(self):
        '''
        Runs "make", using a single job
        '''
        os.chdir(self.source)
        logging.info("Building guest kernel")
        logging.debug("Kernel config is %s" % self.config)
        utils.get_file(self.config, '.config')

        # FIXME currently no support for builddir
        # run old config
        utils.system('yes "" | make oldconfig > /dev/null')
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s %s" % (parallel_make_jobs,
                                          self.build_target)
        logging.info("Running parallel make on src dir")
        utils.system(make_command)
Example #29
def affinity_from_vcpupin(vm):
    """
    Returns a dict of vcpu affinity from virsh vcpupin output

    :param vm: VM object

    :return: dict of affinity of VM
    """
    vcpupin_output = {}
    vcpupin_affinity = {}
    host_cpu_count = utils.count_cpus()
    for vcpu in virsh.vcpupin(vm.name).stdout.strip().split('\n')[2:]:
        vcpupin_output[vcpu.split(":")[0]] = vcpu.split(":")[1]
    for vcpu in vcpupin_output:
        vcpupin_affinity[vcpu] = libvirt.cpus_string_to_affinity_list(
            vcpupin_output[vcpu], host_cpu_count)
    return vcpupin_affinity
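
A standalone sketch of the parsing loop above, fed made-up 'virsh vcpupin' output instead of a live virsh call; the first two lines model the header and separator that the [2:] slice skips (this sketch also strips the whitespace the raw output carries):

    sample_lines = ['VCPU: CPU Affinity',
                    '----------------------------------',
                    '   0: 0-1',
                    '   1: 2-3']

    vcpupin_output = {}
    for line in sample_lines[2:]:          # skip the header and separator
        vcpu, affinity = line.split(':')
        vcpupin_output[vcpu.strip()] = affinity.strip()

    print(vcpupin_output)   # {'0': '0-1', '1': '2-3'}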
Example #30
File: pgpool.py Project: wkf31156/autotest
def setup(tarball, topdir):
    # FIXME - Waiting to be able to specify dependency.
    # self.job.setup_dep(['pgsql'])
    srcdir = os.path.join(topdir, 'src')
    if not os.path.exists(tarball):
        utils.get_file(
            'http://pgfoundry.org/frs/download.php/1083/pgpool-II-1.0.1.tar.gz',
            tarball)
    utils.extract_tarball_to_dir(tarball, 'src')
    os.chdir(srcdir)
    # FIXME - Waiting to be able to use self.autodir instead of
    # os.environ['AUTODIR']
    utils.configure('--prefix=%s/pgpool --with-pgsql=%s/deps/pgsql/pgsql' %
                    (topdir, os.environ['AUTODIR']))
    utils.make('-j %d' % utils.count_cpus())
    utils.make('install')

    os.chdir(topdir)
Example #31
    def initialize(self):
        # Check if the kernel supports cpu hotplug
        if utils.running_config():
            utils.check_for_kernel_feature('HOTPLUG_CPU')

        # Check the cpu count; if it equals 1, quit.
        if utils.count_cpus() == 1:
            e_msg = 'Single CPU online detected, test not supported.'
            raise error.TestNAError(e_msg)

        # Have a simple and quick check first, FIX me please.
        utils.system('dmesg -c > /dev/null')
        for cpu in utils.cpu_online_map():
            if os.path.isfile('/sys/devices/system/cpu/cpu%s/online' % cpu):
                utils.system('echo 0 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)
                utils.system('echo 1 > /sys/devices/system/cpu/cpu%s/online' % cpu, 1)
                utils.system('dmesg -c')
                time.sleep(3)
Example #32
def affinity_from_proc(vm):
    """
    Return a dict of affinity from proc

    :param vm: VM object

    :return: dict of affinity of VM
    """
    pid = vm.get_pid()
    proc_affinity = {}
    vcpu_pids = []
    host_cpu_count = utils.count_cpus()
    vcpu_pids = vm.get_vcpus_pid()
    for vcpu in range(len(vcpu_pids)):
        output = utils_test.libvirt.cpu_allowed_list_by_task(
            pid, vcpu_pids[vcpu])
        output_affinity = utils_test.libvirt.cpus_string_to_affinity_list(
            output, int(host_cpu_count))
        proc_affinity[vcpu] = output_affinity
    return proc_affinity
Example #33
def affinity_from_proc(vm):
    """
    Return a dict of affinity from proc

    :param vm: VM object

    :return: dict of affinity of VM
    """
    pid = vm.get_pid()
    proc_affinity = {}
    vcpu_pids = []
    host_cpu_count = utils.count_cpus()
    vcpu_pids = vm.get_vcpus_pid()
    for vcpu in range(len(vcpu_pids)):
        output = utils_test.libvirt.cpu_allowed_list_by_task(
            pid, vcpu_pids[vcpu])
        output_affinity = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(host_cpu_count))
        proc_affinity[vcpu] = output_affinity
    return proc_affinity
Example #34
    def host():
        logging.info("Setup monitor server on host")
        # Kill previous instances of the host load programs, if any
        _kill_host_programs(kill_stress_cmd, kill_monitor_cmd)
        # Cleanup previous log instances
        if os.path.isfile(monitor_log_file_server):
            os.remove(monitor_log_file_server)
        # Opening firewall ports on host
        utils.run("iptables -F", ignore_status=True)

        # Run heartbeat on host
        utils.run(server_setup_cmd % (monitor_dir, threshold, monitor_log_file_server, monitor_port))

        logging.info("Build stress on host")
        # Uncompress and build stress on host
        utils.run(stress_setup_cmd % stress_dir)

        logging.info("Run stress on host")
        # stress_threads = 2 * n_cpus
        threads_host = 2 * utils.count_cpus()
        # Run stress test on host
        utils.run(stress_cmd % (stress_dir, threads_host))
Example #35
    def run_once(self, args='', stress_length=60):
        if not args:
            # We will use 2 workers of each type for each CPU detected
            threads = 2 * utils.count_cpus()

            # Sometimes the default memory used by each memory worker (256 M)
            # might make our machine go OOM and then funny things might start to
            # happen. Let's avoid that.
            mb = (utils_memory.freememtotal() +
                  utils_memory.read_from_meminfo('SwapFree') / 2)
            memory_per_thread = (mb * 1024) / threads

            # Even though unlikely, it's good to prevent allocating more
            # disk than this machine actually has in its autotest directory
            # (limit the amount of disk used to a max of 90% of free space)
            free_disk = utils.freespace(self.srcdir)
            file_size_per_thread = 1024 ** 2
            if (0.9 * free_disk) < file_size_per_thread * threads:
                file_size_per_thread = (0.9 * free_disk) / threads

            # Number of CPU workers spinning on sqrt()
            args = '--cpu %d ' % threads
            # Number of IO workers spinning on sync()
            args += '--io %d ' % threads
            # Number of Memory workers spinning on malloc()/free()
            args += '--vm %d ' % threads
            # Amount of memory used per each worker
            args += '--vm-bytes %d ' % memory_per_thread
            # Number of HD workers spinning on write()/unlink()
            args += '--hdd %d ' % threads
            # Size of the files created by each worker in bytes
            args += '--hdd-bytes %d ' % file_size_per_thread
            # Time for which the stress test will run
            args += '--timeout %d ' % stress_length
            # Verbose flag
            args += '--verbose'

        utils.system(self.srcdir + '/src/stress ' + args)
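
For reference, the args string assembled above, reconstructed for a hypothetical host with 4 CPUs, 2 GB of usable memory, and ample free disk:

    threads = 2 * 4                       # 2 workers of each type per CPU, 4 CPUs assumed
    free_kb = 2 * 1024 * 1024             # pretend 2 GB usable (meminfo units are KB)
    memory_per_thread = free_kb * 1024 // threads    # bytes per --vm worker
    file_size_per_thread = 1024 ** 2      # 1 MB per --hdd worker

    args = ('--cpu %d --io %d --vm %d --vm-bytes %d '
            '--hdd %d --hdd-bytes %d --timeout %d --verbose'
            % (threads, threads, threads, memory_per_thread,
               threads, file_size_per_thread, 60))
    print(args)
    # --cpu 8 --io 8 --vm 8 --vm-bytes 268435456 --hdd 8 --hdd-bytes 1048576 --timeout 60 --verbose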
Example #36
def affinity_from_xml(vm):
    """
    Returns a dict of the vcpu's affinity from
    guest xml

    :param vm: VM object

    :return: dict of affinity of VM
    """
    host_cpu_count = utils.count_cpus()
    xml_affinity_list = []
    xml_affinity = {}
    try:
        vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name)
        xml_affinity_list = vmxml['cputune'].vcpupins
    except LibvirtXMLNotFoundError:
        logging.debug("No <cputune> element find in domain xml")
        return xml_affinity
    # Store xml_affinity_list to a dict
    for vcpu in xml_affinity_list:
        xml_affinity[vcpu['vcpu']] = "".join(
            libvirt.cpus_string_to_affinity_list(vcpu['cpuset'],
                                                 host_cpu_count))
    return xml_affinity
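
The helper libvirt.cpus_string_to_affinity_list used above is a library call; below is a minimal sketch of what it computes, assuming the usual virsh cpuset syntax where '0-2,^1' means CPUs 0 and 2:

    def cpus_string_to_affinity_list(cpus_string, cpu_count):
        allowed = set()
        for part in cpus_string.split(','):
            if part.startswith('^'):            # exclusion, e.g. '^1'
                allowed.discard(int(part[1:]))
            elif '-' in part:                   # range, e.g. '0-2'
                start, end = part.split('-')
                allowed.update(range(int(start), int(end) + 1))
            else:                               # single CPU, e.g. '3'
                allowed.add(int(part))
        return ['y' if i in allowed else '-' for i in range(cpu_count)]

    print(cpus_string_to_affinity_list('0-2,^1', 4))  # ['y', '-', 'y', '-']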
Example #37
    def execute(self, testdir=None, iterations=10000):
        if not testdir:
            testdir = self.tmpdir
        os.chdir(testdir)
        file = os.path.join(testdir, 'foo')
        # Want to use 3/4 of all memory for each of
        # bash-shared-mapping and usemem
        kilobytes = (3 * utils_memory.memtotal()) / 4

        # Want two usemem -m megabytes in parallel in background.
        pid = [None, None]
        usemem = os.path.join(self.srcdir, 'usemem')
        args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
        # print_to_tty ('2 x ' + ' '.join(args))
        for i in (0, 1):
            pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)

        cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
            (self.srcdir, file, kilobytes,
             utils.count_cpus(), iterations)
        os.system(cmd)

        for i in (0, 1):
            os.kill(pid[i], signal.SIGKILL)
Example #38
def check_affinity(vm, expect_vcpupin):
    """
    Check the affinity of vcpus in various libvirt API output

    :param vm: VM object
    :param expect_vcpupin: Expected affinity details

    :return: True if affinity matches from different virsh API outputs,
             False if not
    """
    host_cpu_count = utils.count_cpus()
    affinity_xml = affinity_from_xml(vm)
    affinity_vcpupin = affinity_from_vcpupin(vm)
    affinity_vcpuinfo = affinity_from_vcpuinfo(vm)
    result = True

    for vcpu in expect_vcpupin.keys():
        expect_affinity = libvirt.cpus_string_to_affinity_list(
            expect_vcpupin[vcpu], host_cpu_count)
        # Check for vcpuinfo affinity
        if affinity_vcpuinfo[int(vcpu)] != expect_affinity:
            logging.error("CPU affinity in virsh vcpuinfo output"
                          " is unexpected")
            result = False
        # Check for vcpupin affinity
        if affinity_vcpupin[vcpu] != expect_affinity:
            logging.error("Virsh vcpupin output is unexpected")
            result = False
        # Check for affinity in Domain xml
        if affinity_xml:
            if affinity_xml[vcpu] != expect_affinity:
                logging.error("Affinity in domain XML is unexpected")
                result = False
    if result:
        logging.debug("Vcpupin info check pass")
    return result
Example #39
def run(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    """

    def affinity_from_vcpuinfo(vm_name, vcpu):
        """
        This function returns a list of the vcpu's affinity from
        virsh vcpuinfo output

        :param vm_name: VM Name to operate on
        :param vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(vm_name).stdout.rstrip()
        affinity = re.findall('CPU Affinity: +[-y]+', output)
        total_affinity = affinity[int(vcpu)].split()[-1].strip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def check_vcpupin(vm_name, vcpu, cpu_list, pid, vcpu_pid):
        """
        This function checks the actual and the expected affinity of a given
        vcpu and raises an error if they do not match.

        :param vm_name: VM name to operate on
        :param vcpu: vcpu number for which the affinity is required
        :param cpu_list: cpu list for the affinity
        :param pid: VM pid
        :param vcpu_pid: vcpu thread pid
        """

        total_cpu = utils.run("ls -d /sys/devices/system/cpu/cpu[0-9]* |wc -l").stdout
        expected_output = utils_test.libvirt.cpus_string_to_affinity_list(
            cpu_list,
            int(total_cpu))
        logging.debug("Expecte affinity: %s", expected_output)
        actual_output = affinity_from_vcpuinfo(vm_name, vcpu)
        logging.debug("Actual affinity in vcpuinfo output: %s", actual_output)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu_list: %s --> vcpu: %s",
                         cpu_list, vcpu)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " virsh vcpuinfo command output")

        if pid is None:
            return
        # Get the actual cpu affinity value in the proc entry
        output = utils_test.libvirt.cpu_allowed_list_by_task(pid, vcpu_pid)
        actual_output_proc = utils_test.libvirt.cpus_string_to_affinity_list(
            output,
            int(total_cpu))
        logging.debug("Actual affinity in guest proc: %s", actual_output_proc)
        if expected_output == actual_output_proc:
            logging.info("successfully pinned vcpu: %s --> cpu: %s"
                         " in respective proc entry", vcpu, cpu_list)
        else:
            raise error.TestFail("Cpu pinning details not updated properly in"
                                 " /proc/%s/task/%s/status" % (pid, vcpu_pid))

    def run_and_check_vcpupin(vm, vm_ref, vcpu, cpu_list, options):
        """
        Run the vcpupin command and then check the result.
        """
        if vm_ref == "name":
            vm_ref = vm.name
        elif vm_ref == "uuid":
            vm_ref = vm.get_uuid()
        # Execute virsh vcpupin command.
        cmdResult = virsh.vcpupin(vm_ref, vcpu, cpu_list, options, debug=True)
        if cmdResult.exit_status:
            if not status_error:
                # Command fail and it is in positive case.
                raise error.TestFail(cmdResult)
            else:
                # Command fail and it is in negative case.
                return
        else:
            if status_error:
                # Command success and it is in negative case.
                raise error.TestFail(cmdResult)
            else:
                # Command success and it is in positive case.
                # "--config" will take effect after VM destroyed.
                pid = None
                vcpu_pid = None
                if options == "--config":
                    virsh.destroy(vm.name)
                else:
                    pid = vm.get_pid()
                    logging.debug("vcpus_pid: %s", vm.get_vcpus_pid())
                    vcpu_pid = vm.get_vcpus_pid()[vcpu]
                # Check the result of vcpupin command.
                check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    def offline_pin_and_check(vm, vcpu, cpu_list):
        """
        Edit domain xml to pin vcpu and check the result.
        """
        cputune = vm_xml.VMCPUTuneXML()
        cputune.vcpupins = [{'vcpu': str(vcpu), 'cpuset': cpu_list}]
        vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name)
        vmxml.cputune = cputune
        vmxml.sync()
        utils_misc.wait_for(lambda: vm.state() == "shut off", 10)
        cmdResult = virsh.start(vm.name, debug=True)
        libvirt.check_exit_status(cmdResult, status_error)
        pid = vm.get_pid()
        vcpu_pid = vm.get_vcpus_pid()[vcpu]
        check_vcpupin(vm.name, vcpu, cpu_list, pid, vcpu_pid)

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError("This version of libvirt doesn't"
                                " support this test")
    vm_name = params.get("main_vm", "virt-tests-vm1")
    vm = env.get_vm(vm_name)
    # Get the variables for vcpupin command.
    vm_ref = params.get("vcpupin_vm_ref", "name")
    options = params.get("vcpupin_options", "--current")
    cpu_list = params.get("vcpupin_cpu_list", "x")
    start_vm = ("yes" == params.get("start_vm", "yes"))
    # Get status of this case.
    status_error = ("yes" == params.get("status_error", "no"))

    # Edit domain xml to pin vcpus
    offline_pin = ("yes" == params.get("offline_pin", "no"))

    # Backup for recovery.
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    # Get the guest vcpu count
    if offline_pin:
        vcpucount_option = "--config --active"
    else:
        vcpucount_option = "--live --active"
    guest_vcpu_count = virsh.vcpucount(vm_name,
                                       vcpucount_option).stdout.strip()

    try:
        # Control multi domain vcpu affinity
        multi_dom = ("yes" == params.get("multi_dom_pin", "no"))
        vm2 = None
        if multi_dom:
            vm_names = params.get("vms").split()
            if len(vm_names) > 1:
                vm2 = env.get_vm(vm_names[1])
            else:
                raise error.TestError("Need more than one domains")
            if not vm2:
                raise error.TestNAError("No %s find" % vm_names[1])
            vm2.destroy()
            vm2xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm2.name)
            vm2xml_backup = vm2xml.copy()
            # Make sure vm2 has the same cpu numbers with vm
            vm2xml.set_vm_vcpus(vm2.name, int(guest_vcpu_count), guest_vcpu_count)
            if start_vm:
                vm2.start()

        # Run cases when guest is shutoff.
        if not offline_pin:
            if vm.is_dead() and not start_vm:
                run_and_check_vcpupin(vm, vm_ref, 0, 0, "")
                return
        # Get the host cpu count
        host_cpu_count = utils.count_cpus()
        cpu_max = int(host_cpu_count) - 1
        if (int(host_cpu_count) < 2) and (not cpu_list == "x"):
            raise error.TestNAError("We need more cpus on host in this case "
                                    "for the cpu_list=%s. But current number "
                                    "of cpu on host is %s."
                                    % (cpu_list, host_cpu_count))

        # Find the alive cpus list
        cpus_list = utils.cpu_online_map()
        logging.info("Active cpus in host are %s", cpus_list)

        # Run test case
        for vcpu in range(int(guest_vcpu_count)):
            if cpu_list == "x":
                for cpu in cpus_list:
                    left_cpus = "0-%s,^%s" % (cpu_max, cpu)
                    if offline_pin:
                        offline_pin_and_check(vm, vcpu, str(cpu))
                        if multi_dom:
                            offline_pin_and_check(vm2, vcpu, left_cpus)
                    else:
                        run_and_check_vcpupin(vm, vm_ref, vcpu, str(cpu),
                                              options)
                        if multi_dom:
                            run_and_check_vcpupin(vm2, "name", vcpu, left_cpus,
                                                  options)
            else:
                if cpu_list == "x-y":
                    cpus = "0-%s" % cpu_max
                elif cpu_list == "x,y":
                    cpus = ','.join(random.sample(cpus_list, 2))
                    logging.info(cpus)
                elif cpu_list == "x-y,^z":
                    cpus = "0-%s,^%s" % (cpu_max, cpu_max)
                elif cpu_list == "r":
                    cpus = "r"
                elif cpu_list == "-1":
                    cpus = "-1"
                elif cpu_list == "out_of_max":
                    cpus = str(cpu_max + 1)
                else:
                    raise error.TestNAError("Cpu_list=%s is not recognized."
                                            % cpu_list)
                if offline_pin:
                    offline_pin_and_check(vm, vcpu, cpus)
                else:
                    run_and_check_vcpupin(vm, vm_ref, vcpu, cpus, options)
    finally:
        # Recover xml of vm.
        vmxml_backup.sync()
        if vm2:
            vm2xml_backup.sync()
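
The cases above assemble libvirt cpuset strings such as "0-3", "0,2" and
"0-3,^3". As a reference, here is a minimal sketch of how such a string
expands into a concrete cpu list (an illustrative helper, not the virttest
cpus_parser used in a later example):

def expand_cpuset(cpuset_str):
    """Expand a libvirt cpuset string (e.g. '0-3,^2') into a sorted cpu list."""
    included, excluded = set(), set()
    for chunk in cpuset_str.split(','):
        chunk = chunk.strip()
        target = included
        if chunk.startswith('^'):  # '^N' excludes cpu N from the set
            target = excluded
            chunk = chunk[1:]
        if '-' in chunk:           # 'X-Y' is an inclusive range
            start, end = chunk.split('-')
            target.update(range(int(start), int(end) + 1))
        else:
            target.add(int(chunk))
    return sorted(included - excluded)

# expand_cpuset("0-3,^3") -> [0, 1, 2]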
Example #47
0
def run_virsh_vcpupin(test, params, env):
    """
    Test the command virsh vcpupin

    (1) Get the host and guest cpu count
    (2) Call virsh vcpupin for each vcpu with pinning of each cpu
    (3) Check whether the virsh vcpupin has pinned the respective vcpu to cpu
    (4) TODO: Right now the test case covers pinning one cpu at a time;
              this can be improved by pinning a random set of cpus
    """
    def build_actual_info(domname, vcpu):
        """
        This function returns the list of the vcpu's affinity parsed from
        virsh vcpuinfo output

        @param: domname: VM Name to operate on
        @param: vcpu: vcpu number for which the affinity is required
        """

        output = virsh.vcpuinfo(domname)
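        # vcpuinfo prints one "CPU Affinity: yy--" style line per vcpu; the
        # pattern grabs each run of 'y'/'-' characters together with the
        # preceding separator character, which lstrip() removes again below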
        cmd = re.findall('[^Affinity:][-y]+', str(output))
        total_affinity = cmd[vcpu].lstrip()
        actual_affinity = list(total_affinity)
        return actual_affinity

    def build_expected_info(vcpu, cpu):
        """
        This function builds and returns the expected affinity list for the
        given vcpu, along with the expected /proc affinity mask

        @param: vcpu: vcpu number for which the affinity is required
        @param: cpu: cpu details for the affinity
        """

        expected_affinity = []

        for i in range(int(host_cpu_count)):
            expected_affinity.append('y')

        for i in range(int(host_cpu_count)):
            if cpu != i:
                expected_affinity[i] = '-'

        expected_affinity_proc = int(math.pow(2, cpu))
        return expected_affinity, expected_affinity_proc

    def virsh_check_vcpupin(domname, vcpu, cpu, pid):
        """
        This function checks the actual against the expected affinity of the
        given vcpu and raises an error if they do not match

        @param: domname: VM Name to operate on
        @param: vcpu: vcpu number for which the affinity is required
        @param: cpu: cpu details for the affinity
        @param: pid: pid of the VM process, used to read the proc entry
        """

        expected_output, expected_output_proc = build_expected_info(vcpu, cpu)
        actual_output = build_actual_info(domname, vcpu)

        # Get the vcpus pid
        vcpus_pid = vm.get_vcpus_pid()
        vcpu_pid = vcpus_pid[vcpu]

        # Get the actual cpu affinity value in the proc entry
        output = utils.cpu_affinity_by_task(pid, vcpu_pid)
        actual_output_proc = int(output, 16)

        if expected_output == actual_output:
            logging.info("successfully pinned cpu: %s --> vcpu: %s", cpu, vcpu)
        else:
            raise error.TestFail(
                "Command 'virsh vcpupin %s %s %s' did not succeed:"
                " cpu pinning details not updated properly in"
                " virsh vcpuinfo command output" % (vm_name, vcpu, cpu))

        if expected_output_proc == actual_output_proc:
            logging.info(
                "successfully pinned cpu: %s --> vcpu: %s"
                " in respective proc entry", cpu, vcpu)
        else:
            raise error.TestFail(
                "Command 'virsh vcpupin %s %s %s' did not succeed:"
                " cpu pinning details not updated properly in"
                " /proc/%s/task/%s/status" %
                (vm_name, vcpu, cpu, pid, vcpu_pid))

    if not virsh.has_help_command('vcpucount'):
        raise error.TestNAError(
            "This version of libvirt doesn't support this test")
    # Get the vm name, pid of vm and check for alive
    vm_name = params.get("main_vm")
    vm = env.get_vm(params["main_vm"])
    vm.verify_alive()
    pid = vm.get_pid()

    # Get the host cpu count
    host_cpu_count = utils.count_cpus()

    # Get the guest vcpu count
    guest_vcpu_count = virsh.vcpucount_live(vm_name)

    # Run test case
    for vcpu in range(int(guest_vcpu_count)):
        for cpu in range(int(host_cpu_count)):
            vm.vcpupin(vcpu, cpu)
            virsh_check_vcpupin(vm_name, vcpu, cpu, pid)
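"
build_expected_info above encodes the expected /proc affinity of a pinned
task as a power of two: pinning to host cpu N should leave exactly bit N set
in the task's Cpus_allowed mask. A small self-contained sketch of that
comparison (the mask value is hypothetical, for illustration only):

def is_pinned_to_single_cpu(cpu, cpus_allowed_hex):
    """Return True if the hex Cpus_allowed mask pins the task to 'cpu' only."""
    expected_mask = 1 << cpu  # same value as int(math.pow(2, cpu)) above
    return int(cpus_allowed_hex, 16) == expected_mask

# A task pinned to cpu 2 has mask 0b100, reported by /proc as "4":
# is_pinned_to_single_cpu(2, "4") -> True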
Example #48
0
def run(test, params, env):
    """
    Domain CPU management testing.

    1. Prepare a domain for testing, install qemu-guest-ga if needed.
    2. Plug vcpu for the domain.
    3. Checking:
      3.1. Virsh vcpucount.
      3.2. Virsh vcpuinfo.
      3.3. Current vcpu number in domain xml.
      3.4. Virsh vcpupin and vcpupin in domain xml.
      3.5. The vcpu number in domain.
      3.6. Virsh cpu-stats.
    4. Repeat step 3 to check again.
    5. Control the domain (save, managedsave, s3, s4, migrate, etc.).
    6. Repeat step 3 to check again.
    7. Recover the domain (restore, wakeup, etc.).
    8. Repeat step 3 to check again.
    9. Unplug vcpu for the domain.
    10. Repeat step 3 to check again.
    11. Repeat step 5 to control the domain (while BZ#1088216 is not fixed,
        skip save/managedsave/migrate related actions).
    12. Repeat step 3 to check again.
    13. Repeat step 7 to recover domain.
    14. Repeat step 3 to check again.
    15. Recover test environment.
    """

    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    vm_operation = params.get("vm_operation", "null")
    vcpu_max_num = params.get("vcpu_max_num")
    vcpu_current_num = params.get("vcpu_current_num")
    vcpu_plug = "yes" == params.get("vcpu_plug", "no")
    vcpu_plug_num = params.get("vcpu_plug_num")
    vcpu_unplug = "yes" == params.get("vcpu_unplug", "no")
    vcpu_unplug_num = params.get("vcpu_unplug_num")
    setvcpu_option = params.get("setvcpu_option", "")
    agent_channel = "yes" == params.get("agent_channel", "yes")
    install_qemuga = "yes" == params.get("install_qemuga", "no")
    start_qemuga = "yes" == params.get("start_qemuga", "no")
    restart_libvirtd = "yes" == params.get("restart_libvirtd", "no")
    setvcpu_readonly = "yes" == params.get("setvcpu_readonly", "no")
    status_error = "yes" == params.get("status_error", "no")
    pin_before_plug = "yes" == params.get("pin_before_plug", "no")
    pin_after_plug = "yes" == params.get("pin_after_plug", "no")
    pin_before_unplug = "yes" == params.get("pin_before_unplug", "no")
    pin_after_unplug = "yes" == params.get("pin_after_unplug", "no")
    pin_vcpu = params.get("pin_vcpu")
    pin_cpu_list = params.get("pin_cpu_list", "x")
    check_after_plug_fail = "yes" == params.get("check_after_plug_fail", "no")

    # Init expect vcpu count values
    expect_vcpu_num = [
        vcpu_max_num, vcpu_max_num, vcpu_current_num, vcpu_current_num,
        vcpu_current_num
    ]
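    # Slot meaning, inferred from the updates below: [config maximum,
    # live maximum, config current, live current, current in guest]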
    if check_after_plug_fail:
        expect_vcpu_num_bk = list(expect_vcpu_num)
    # Init expect vcpu pin values
    expect_vcpupin = {}

    # Init cpu-list for vcpupin
    host_cpu_count = utils.count_cpus()
    if (int(host_cpu_count) < 2) and (not pin_cpu_list == "x"):
        raise error.TestNAError("We need more cpus on host in this case for"
                                " the cpu-list=%s. But current number of cpu"
                                " on host is %s." %
                                (pin_cpu_list, host_cpu_count))

    cpus_list = utils.cpu_online_map()
    logging.info("Active cpus in host are %s", cpus_list)

    cpu_seq_str = ""
    for i in range(len(cpus_list) - 1):
        if int(cpus_list[i]) + 1 == int(cpus_list[i + 1]):
            cpu_seq_str = "%s-%s" % (cpus_list[i], cpus_list[i + 1])
            break

    if pin_cpu_list == "x":
        pin_cpu_list = cpus_list[-1]
    if pin_cpu_list == "x-y":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str
        else:
            pin_cpu_list = "%s-%s" % (cpus_list[0], cpus_list[0])
    elif pin_cpu_list == "x,y":
        pin_cpu_list = "%s,%s" % (cpus_list[0], cpus_list[1])
    elif pin_cpu_list == "x-y,^z":
        if cpu_seq_str:
            pin_cpu_list = cpu_seq_str + ",^%s" % cpu_seq_str.split('-')[1]
        else:
            pin_cpu_list = "%s,%s,^%s" % (cpus_list[0], cpus_list[1],
                                          cpus_list[0])
    else:
        # Just use the value taken from the cfg file
        pass

    need_mkswap = False
    # Back up domain XML
    vmxml = VMXML.new_from_inactive_dumpxml(vm_name)
    backup_xml = vmxml.copy()
    try:
        # Customize domain vcpu number
        if vm.is_alive():
            vm.destroy()
        if agent_channel:
            vmxml.set_agent_channel()
        else:
            vmxml.remove_agent_channels()
        vmxml.sync()

        vmxml.set_vm_vcpus(vm_name, int(vcpu_max_num), int(vcpu_current_num))
        # Do not apply S3/S4 on power
        if 'power' not in cpu_util.get_cpu_arch():
            vmxml.set_pm_suspend(vm_name, "yes", "yes")
        vm.start()

        # Create swap partition/file if necessary
        if vm_operation == "s4":
            need_mkswap = not vm.has_swap()
        if need_mkswap:
            logging.debug("Creating swap partition")
            vm.create_swap_partition()

        # Prepare qemu guest agent
        if install_qemuga:
            vm.prepare_guest_agent(prepare_xml=False, start=start_qemuga)
            vm.setenforce(0)
        else:
            # Remove qemu-guest-agent for negative test
            vm.remove_package('qemu-guest-agent')

        # Run test
        check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin)
        # plug vcpu
        if vcpu_plug:
            # Pin vcpu
            if pin_before_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            result = virsh.setvcpus(vm_name,
                                    vcpu_plug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)
            check_setvcpus_result(result, status_error)

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_plug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_plug_num
            else:
                expect_vcpu_num[3] = vcpu_plug_num
                expect_vcpu_num[4] = vcpu_plug_num
                if not status_error:
                    if not online_new_vcpu(vm, vcpu_plug_num):
                        raise error.TestFail("Fail to enable new added cpu")

            # Pin vcpu
            if pin_after_plug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if status_error and check_after_plug_fail:
                check_vcpu_number(vm, expect_vcpu_num_bk, {}, setvcpu_option)

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug),
                # so wait for up to 10 mins and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_plug_num
                        expect_vcpu_num[4] = vcpu_plug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_plug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

        # Unplug vcpu
        if vcpu_unplug:
            # Pin vcpu
            if pin_before_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                # As the vcpu will be unplugged later, set expect_vcpupin to empty
                expect_vcpupin = {}

            result = virsh.setvcpus(vm_name,
                                    vcpu_unplug_num,
                                    setvcpu_option,
                                    readonly=setvcpu_readonly,
                                    ignore_status=True,
                                    debug=True)

            try:
                check_setvcpus_result(result, status_error)
            except error.TestNAError:
                raise error.TestWarn("Skip unplug vcpu as it is not supported")

            if setvcpu_option == "--config":
                expect_vcpu_num[2] = vcpu_unplug_num
            elif setvcpu_option == "--guest":
                # setvcpus '--guest' only affects the vcpu number in the guest
                expect_vcpu_num[4] = vcpu_unplug_num
            else:
                expect_vcpu_num[3] = vcpu_unplug_num
                expect_vcpu_num[4] = vcpu_unplug_num

            # Pin vcpu
            if pin_after_unplug:
                result = virsh.vcpupin(vm_name,
                                       pin_vcpu,
                                       pin_cpu_list,
                                       ignore_status=True,
                                       debug=True)
                libvirt.check_exit_status(result)
                expect_vcpupin = {pin_vcpu: pin_cpu_list}

            if not status_error:
                if restart_libvirtd:
                    utils_libvirtd.libvirtd_restart()

                # Check vcpu number and related commands
                check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                  setvcpu_option)

                # Control domain
                manipulate_domain(vm_name, vm_operation)

                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)

                # Recover domain
                manipulate_domain(vm_name, vm_operation, recover=True)

                # Resuming the domain from S4 may take a long time (QEMU bug),
                # so wait for up to 10 mins and skip the remaining test steps
                # if the domain does not resume successfully
                try:
                    vm.wait_for_login(timeout=600)
                except Exception, e:
                    raise error.TestWarn("Skip remaining test steps as domain"
                                         " not resume in 10 mins: %s" % e)
                # For vcpu hotplug/unplug without the '--config' flag, after
                # suspending the domain to disk (shut off) and restarting it,
                # the current live vcpu number recovers to its original value
                if vm_operation == 's4':
                    if setvcpu_option.count("--config"):
                        expect_vcpu_num[3] = vcpu_unplug_num
                        expect_vcpu_num[4] = vcpu_unplug_num
                    elif setvcpu_option.count("--guest"):
                        expect_vcpu_num[4] = vcpu_unplug_num
                    else:
                        expect_vcpu_num[3] = vcpu_current_num
                        expect_vcpu_num[4] = vcpu_current_num
                if vm_operation != "null":
                    # Check vcpu number and related commands
                    check_vcpu_number(vm, expect_vcpu_num, expect_vcpupin,
                                      setvcpu_option)
Example #49
0
            raise error.TestFail("Max vcpu number %s in domain XML is not"
                                 " expected" % vmxml['vcpu'])
        if vmxml['current_vcpu'] != expect_vcpu_num[i]:
            raise error.TestFail("Current vcpu number %s in domain XML is"
                                 " not expected" % vmxml['current_vcpu'])
    except (ValueError, IndexError), detail:
        raise error.TestFail(detail)
    logging.debug("Vcpu number in domain xml check pass")

    # Check the cpu affinity from the vcpupin command output and the vcpupin
    # info (cputune element) in the domain xml
    result = virsh.vcpupin(vm.name, ignore_status=True, debug=True)
    libvirt.check_exit_status(result)
    vcpupin_output = result.stdout.strip().splitlines()[2:]
    if expect_vcpupin:
        host_cpu_count = utils.count_cpus()
        xml_affinity_list = []
        xml_affinity = {}
        try:
            xml_affinity_list = vmxml['cputune'].vcpupins
        except LibvirtXMLNotFoundError:
            logging.debug("No <cputune> element find in domain xml")
        # Store xml_affinity_list to a dict
        for vcpu in xml_affinity_list:
            xml_affinity[vcpu['vcpu']] = "".join(
                libvirt.cpus_string_to_affinity_list(vcpu['cpuset'],
                                                     host_cpu_count))
        # Check
        for vcpu in expect_vcpupin.keys():
            expect_affinity = "".join(
                libvirt.cpus_string_to_affinity_list(expect_vcpupin[vcpu],
Example #50
0
def run(test, params, env):
    """
    Test emulatorpin tuning

    1) Positive testing
       1.1) get the current emulatorpin parameters for a running/shutoff guest
       1.2) set the current emulatorpin parameters for a running/shutoff guest
    2) Negative testing
       2.1) get emulatorpin parameters for a running/shutoff guest
       2.2) set emulatorpin parameters for a running/shutoff guest
    """

    # Run test case
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    cgconfig = params.get("cgconfig", "on")
    cpulist = params.get("emulatorpin_cpulist")
    status_error = params.get("status_error", "no")
    change_parameters = params.get("change_parameters", "no")

    # Backup original vm
    vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    vmxml_backup = vmxml.copy()

    emulatorpin_placement = params.get("emulatorpin_placement", "")
    if emulatorpin_placement:
        vm.destroy()
        vmxml.placement = emulatorpin_placement
        vmxml.sync()
        vm.start()

    test_dicts = dict(params)
    test_dicts['vm'] = vm

    host_cpus = utils.count_cpus()
    test_dicts['host_cpus'] = host_cpus
    cpu_max = int(host_cpus) - 1

    cpu_list = None

    # Assemble cpu list for positive test
    if status_error == "no":
        if cpulist is None:
            pass
        elif cpulist == "x":
            cpulist = random.choice(utils.cpu_online_map())
        elif cpulist == "x-y":
            # By default, the emulator is pinned to all cpus, and the element
            # 'cputune/emulatorpin' may not exist in the VM's XML. Libvirt
            # will do nothing if the emulator is pinned to the same cpus,
            # which means the VM's XML would keep that element unchanged.
            # So for testing, we should avoid that value (0-$cpu_max).
            if cpu_max < 2:
                cpulist = "0-0"
            else:
                cpulist = "0-%s" % (cpu_max - 1)
        elif cpulist == "x,y":
            cpulist = ','.join(random.sample(utils.cpu_online_map(), 2))
        elif cpulist == "x-y,^z":
            cpulist = "0-%s,^%s" % (cpu_max, cpu_max)
        elif cpulist == "-1":
            cpulist = "-1"
        elif cpulist == "out_of_max":
            cpulist = str(cpu_max + 1)
        else:
            raise error.TestNAError("CPU-list=%s is not recognized." % cpulist)
    test_dicts['emulatorpin_cpulist'] = cpulist
    if cpulist:
        cpu_list = cpus_parser(cpulist)
        test_dicts['cpu_list'] = cpu_list
        logging.debug("CPU list is %s", cpu_list)

    cg = utils_cgroup.CgconfigService()

    if cgconfig == "off":
        if cg.cgconfig_is_running():
            cg.cgconfig_stop()

    # Positive and negative testing #########
    try:
        if status_error == "no":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)

        if status_error == "yes":
            if change_parameters == "no":
                get_emulatorpin_parameter(test_dicts)
            else:
                set_emulatorpin_parameter(test_dicts)
    finally:
        # Recover cgconfig and libvirtd service
        if not cg.cgconfig_is_running():
            cg.cgconfig_start()
            utils_libvirtd.libvirtd_restart()
        # Recover vm.
        vmxml_backup.sync()
Example #51
0
    def run_once(self):
        """
        Run each benchmark twice, with a different number of threads.

        A sanity check is made on each benchmark executed:
        the ratio between the times
        time_ratio = time_one_thrd / time_full_thrds

        has to be contained inside an envelope:
        upper_bound = full_thrds * (1 + (1/n_cpus))
        lower_bound = full_thrds * (1 - (1/n_cpus))

        Otherwise, we throw an exception (this test might be running inside a
        virtual machine, and a sanity check failure might indicate bugs in
        the SMP implementation).
        """
        os.chdir(self.srcdir)

        # get the tests to run
        test_list = self.tests.split()

        if len(test_list) == 0:
            raise error.TestError('No tests (benchmarks) provided. Exit.')

        for itest in test_list:
            itest_cmd = os.path.join('NPB3.3-OMP/bin/', itest)
            try:
                itest = utils.run(itest_cmd)
            except Exception, e:
                logging.error('NPB benchmark %s has failed. Output: %s',
                              itest_cmd, e)
                self.n_fail += 1
                continue
            logging.debug(itest.stdout)

            # Get the number of threads that the test ran
            # (which is supposed to be equal to the number of system cores)
            m = re.search('Total threads\s*=\s*(.*)\n', itest.stdout)

            # Gather benchmark results
            ts = re.search('Time in seconds\s*=\s*(.*)\n', itest.stdout)
            mt = re.search('Mop/s total\s*=\s*(.*)\n', itest.stdout)
            mp = re.search('Mop/s/thread\s*=\s*(.*)\n', itest.stdout)

            time_seconds = float(ts.groups()[0])
            mops_total = float(mt.groups()[0])
            mops_per_thread = float(mp.groups()[0])

            logging.info('Test: %s', itest_cmd)
            logging.info('Time (s): %s', time_seconds)
            logging.info('Total operations executed (mops/s): %s', mops_total)
            logging.info('Total operations per thread (mops/s/thread): %s',
                         mops_per_thread)

            self.write_test_keyval({'test': itest_cmd})
            self.write_test_keyval({'time_seconds': time_seconds})
            self.write_test_keyval({'mops_total': mops_total})
            self.write_test_keyval({'mops_per_thread': mops_per_thread})

            # A little extra sanity check comes handy
            if int(m.groups()[0]) != utils.count_cpus():
                raise error.TestError("NPB test suite evaluated the number "
                                      "of threads incorrectly: System appears "
                                      "to have %s cores, but %s threads were "
                                      "executed.")

            # We will use this integer with float point vars later.
            full_thrds = float(m.groups()[0])

            # get duration for full_threads running.
            m = re.search('Time in seconds\s*=\s*(.*)\n', itest.stdout)
            time_full_thrds = float(m.groups()[0])

            # repeat the execution with single thread.
            itest_single_cmd = ''.join(['OMP_NUM_THREADS=1 ', itest_cmd])
            try:
                itest_single = utils.run(itest_single_cmd)
            except Exception, e:
                logging.error('NPB benchmark single thread %s has failed. '
                              'Output: %s', itest_single_cmd, e)
                self.n_fail += 1
                continue