Example #1
    def run_once(self, disks=None, gigabytes=None, chunk_mb=None):
        """
        Runs one iteration of disktest.

        :param disks: List of directories (usually mountpoints) to be passed
                to the test.
        :param gigabytes: Disk space that will be used for the test to run.
        :param chunk_mb: Size of the portion of the disk used to run the test.
                Cannot be larger than the total amount of free RAM.
        """
        os.chdir(self.disk_srcdir)
        if chunk_mb is None:
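            # memtotal() reports KiB: /1024 converts to MiB and /8 defaults
            # the chunk to one eighth of total RAM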
            chunk_mb = utils_memory.memtotal() / 1024 / 8
        if disks is None:
            disks = [self.tmpdir]
        if gigabytes is None:
            free = 100  # cap it at 100GB by default
            for disk in disks:
                free = min(utils.freespace(disk) / 1024**3, free)
            gigabytes = free
            logging.info("Resizing to %s GB", gigabytes)
            sys.stdout.flush()

        self.chunk_mb = chunk_mb
        self.memory_mb = utils_memory.memtotal() / 1024 / 8
        if self.memory_mb > chunk_mb:
            raise error.TestError("Too much RAM (%dMB) for this test to work" %
                                  self.memory_mb)

        chunks = (1024 * gigabytes) / chunk_mb

        logging.info("Total of disk chunks that will be used: %s", chunks)
        for i in range(chunks):
            pids = []
            for disk in disks:
                pid = self.test_one_disk_chunk(disk, i)
                pids.append(pid)
            errors = []
            for pid in pids:
                (junk, retval) = os.waitpid(pid, 0)
                if retval != 0:
                    errors.append(retval)
            if errors:
                raise error.TestError("Errors from children: %s" % errors)
Example #2
    def output_check(nodeinfo_output):
        # Check CPU model
        cpu_model_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU model", 3)
        cpu_model_os = utils.get_current_kernel_arch()
        if not re.match(cpu_model_nodeinfo, cpu_model_os):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU model")

        # Check number of CPUs
        cpus_nodeinfo = _check_nodeinfo(nodeinfo_output, "CPU(s)", 2)
        cpus_os = utils.count_cpus()
        if int(cpus_nodeinfo) != cpus_os:
            raise error.TestFail("Virsh nodeinfo output didn't match number of "
                                 "CPU(s)")

        # Check CPU frequency
        cpu_frequency_nodeinfo = _check_nodeinfo(nodeinfo_output, 'CPU frequency', 3)
        cmd = ("cat /proc/cpuinfo | grep 'cpu MHz' | head -n1 | "
               "awk '{print $4}' | awk -F. '{print $1}'")
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_frequency_os = cmd_result.stdout.strip()
        logging.debug("CPU frequency from OS: %s", cpu_frequency_os)
        if not re.match(cpu_frequency_nodeinfo, cpu_frequency_os):
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "frequency")

        # Check CPU socket(s)
        cpu_sockets_nodeinfo = int(_check_nodeinfo(nodeinfo_output, 'CPU socket(s)', 3))
        cmd = "grep 'physical id' /proc/cpuinfo | uniq | sort | uniq |wc -l"
        cmd_result = utils.run(cmd, ignore_status=True)
        cpu_NUMA_nodeinfo = _check_nodeinfo(nodeinfo_output, 'NUMA cell(s)', 3)
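        # virsh nodeinfo counts sockets per NUMA cell, so divide the
        # host-wide physical id count by the number of NUMA cells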
        cpu_sockets_os = int(cmd_result.stdout.strip())/int(cpu_NUMA_nodeinfo)
        if cpu_sockets_os != cpu_sockets_nodeinfo:
            raise error.TestFail("Virsh nodeinfo output didn't match CPU "
                                 "socket(s)")

        # Check Core(s) per socket
        cores_per_socket_nodeinfo = _check_nodeinfo(nodeinfo_output, 'Core(s) per socket', 4)
        cmd = "grep 'cpu cores' /proc/cpuinfo | head -n1 | awk '{print $4}'"
        cmd_result = utils.run(cmd, ignore_status=True)
        cores_per_socket_os = cmd_result.stdout.strip()
        if not re.match(cores_per_socket_nodeinfo, cores_per_socket_os):
            raise error.TestFail("Virsh nodeinfo output didn't match Core(s) "
                                 "per socket")

        # Check Memory size
        memory_size_nodeinfo = int(_check_nodeinfo(nodeinfo_output, 'Memory size', 3))
        memory_size_os = utils_memory.memtotal()
        if memory_size_nodeinfo != memory_size_os:
            raise error.TestFail("Virsh nodeinfo output didn't match "
                                 "Memory size")
Example #3
    def run_the_test(self, iterations):
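        # Lower the dirty-page thresholds so writeback starts early and often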
        utils.write_one_line('/proc/sys/vm/dirty_ratio', '4')
        utils.write_one_line('/proc/sys/vm/dirty_background_ratio', '2')

        cmd = os.path.join(self.srcdir, 'linus_stress')
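        # memtotal() is KiB; hand linus_stress one 32nd of total RAM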
        args = "%d" % (utils_memory.memtotal() / 32)

        profilers = self.job.profilers
        if profilers.present():
            profilers.start(self)

        for i in range(iterations):
            utils.system(cmd + ' ' + args)

        if profilers.present():
            profilers.stop(self)
            profilers.report(self)
Example #4
def discover_container_style():
    global super_root_path, cpuset_prefix
    global mem_isolation_on, fake_numa_containers
    global node_mbytes, root_container_bytes
    if super_root_path != '':
        return  # already looked up
    if os.path.exists('/dev/cgroup/tasks'):
        # running on 2.6.26 or later kernel with containers on:
        super_root_path = '/dev/cgroup'
        cpuset_prefix = 'cpuset.'
        if get_boot_numa():
            mem_isolation_on = fake_numa_containers = True
        else:  # memcg containers IFF compiled-in & mounted & non-fakenuma boot
            fake_numa_containers = False
            mem_isolation_on = os.path.exists(
                '/dev/cgroup/memory.limit_in_bytes')
            # TODO: handle the case where memcg is mounted as its own
            #       cgroup hierarchy, separate from cpuset
    elif os.path.exists('/dev/cpuset/tasks'):
        # running on 2.6.18 kernel with containers on:
        super_root_path = '/dev/cpuset'
        cpuset_prefix = ''
        mem_isolation_on = fake_numa_containers = get_boot_numa() != ''
    else:
        # neither cpuset nor cgroup filesystem active:
        super_root_path = None
        cpuset_prefix = 'no_cpusets_or_cgroups_exist'
        mem_isolation_on = fake_numa_containers = False

    logging.debug('mem_isolation: %s', mem_isolation_on)
    logging.debug('fake_numa_containers: %s', fake_numa_containers)
    if fake_numa_containers:
        node_mbytes = int(mbytes_per_mem_node())
    elif mem_isolation_on:  # memcg-style containers
        # For now, limit total of all containers to using just 98% of system's
        #   visible total ram, to avoid oom events at system level, and avoid
        #   page reclaim overhead from going above kswapd highwater mark.
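        # memtotal() is KiB, so >> 2 (divide by 4) yields 4 KiB pages;
        # << 12 below converts pages back to bytes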
        system_visible_pages = utils_memory.memtotal() >> 2
        usable_pages = int(system_visible_pages * 0.98)
        root_container_bytes = usable_pages << 12
        logging.debug('root_container_bytes: %s',
                      utils.human_format(root_container_bytes))
Example #5
    def execute(self, testdir=None, iterations=10000):
        if not testdir:
            testdir = self.tmpdir
        os.chdir(testdir)
        filename = os.path.join(testdir, 'foo')
        # Want to use 3/4 of all memory for each of
        # bash-shared-mapping and usemem
        kilobytes = (3 * utils_memory.memtotal()) / 4

        # Want two usemem -m megabytes in parallel in background.
        pid = [None, None]
        usemem = os.path.join(self.srcdir, 'usemem')
        args = ('usemem', '-N', '-m', '%d' % (kilobytes / 1024))
        # print_to_tty ('2 x ' + ' '.join(args))
        for i in (0, 1):
            pid[i] = os.spawnv(os.P_NOWAIT, usemem, args)

        cmd = "%s/bash-shared-mapping %s %d -t %d -n %d" % \
            (self.srcdir, file, kilobytes,
             utils.count_cpus(), iterations)
        os.system(cmd)

        for i in (0, 1):
            os.kill(pid[i], signal.SIGKILL)
Example #6
    def run_once(self, size=0, loop=10):
        """
        Executes the test and logs the output.

        @param size: size to test in KB. 0 means all usable memory
        @param loop: number of iterations of the memory test
        """
        if size == 0:
            size = utils_memory.freememtotal()
            # leave some memory for kernel
            size = int(size * 0.8)
        elif size > utils_memory.memtotal():
            raise error.TestFail('Specified size is more than total memory.')

        if size <= 0:
            raise error.TestFail('Size must be more than zero.')

        logging.info('Memory test size: %dK', size)

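        # memtester's "K" suffix means kibibytes; the final argument is the
        # number of passes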
        cmd = 'memtester %dK %d' % (size, loop)
        logging.info('cmd: %s', cmd)

        with open(os.path.join(self.resultsdir, 'memtester_stdout'), 'w') as f:
            utils.run(cmd, stdout_tee=f)
Example #7
def run(test, params, env):
    """
    Test migration under stress.
    """
    vm_names = params.get("migration_vms").split()
    if len(vm_names) < 2:
        raise error.TestNAError("Provide enough vms for migration first.")

    src_uri = params.get("migrate_src_uri", "qemu+ssh://EXAMPLE/system")
    if src_uri.count('///') or src_uri.count('EXAMPLE'):
        raise error.TestNAError("The src_uri '%s' is invalid", src_uri)

    dest_uri = params.get("migrate_dest_uri", "qemu+ssh://EXAMPLE/system")
    if dest_uri.count('///') or dest_uri.count('EXAMPLE'):
        raise error.TestNAError("The dest_uri '%s' is invalid", dest_uri)

    # Instances of the VMs to be migrated
    vms = []
    for vm_name in vm_names:
        vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))

    load_vm_names = params.get("load_vms").split()
    # VMs that generate load during the test
    load_vms = []
    for vm_name in load_vm_names:
        load_vms.append(
            libvirt_vm.VM(vm_name, params, test.bindir,
                          env.get("address_cache")))
    params['load_vms'] = load_vms

    cpu = int(params.get("smp", 1))
    memory = int(params.get("mem")) * 1024
    stress_type = params.get("migration_stress_type")
    vm_bytes = params.get("stress_vm_bytes")
    stress_args = params.get("stress_args")
    migration_type = params.get("migration_type")
    start_migration_vms = "yes" == params.get("start_migration_vms", "yes")
    thread_timeout = int(params.get("thread_timeout", 120))
    remote_host = params.get("migrate_dest_host")
    username = params.get("migrate_dest_user", "root")
    password = params.get("migrate_dest_pwd")
    prompt = params.get("shell_prompt", r"[\#\$]")

    # Set vm_bytes for start_cmd
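    # memtotal() and vm_bytes are KiB; "shortage" deliberately overshoots
    # the memory left after VM reservations by 512 MiB (524288 KiB)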
    mem_total = utils_memory.memtotal()
    vm_reserved = len(vms) * memory
    if vm_bytes == "half":
        vm_bytes = (mem_total - vm_reserved) / 2
    elif vm_bytes == "shortage":
        vm_bytes = mem_total - vm_reserved + 524288
    if vm_bytes is not None:
        params["stress_args"] = stress_args % vm_bytes

    for vm in vms:
        # Keep the VM shut down while its config is edited
        if vm.is_alive():
            vm.destroy()
        set_cpu_memory(vm.name, cpu, memory)

    try:
        vm_ipaddr = {}
        if start_migration_vms:
            for vm in vms:
                vm.start()
                vm.wait_for_login()
                vm_ipaddr[vm.name] = vm.get_address()
                # TODO: recover vm if start failed?
        # Configure ssh auto-login to the remote host
        ssh_key.setup_ssh_key(remote_host, username, password, port=22)

        do_stress_migration(vms, src_uri, dest_uri, stress_type,
                            migration_type, params, thread_timeout)
        # Check network of vms on destination
        if start_migration_vms and migration_type != "cross":
            for vm in vms:
                check_dest_vm_network(vm, vm_ipaddr[vm.name], remote_host,
                                      username, password, prompt)
    finally:
        logging.debug("Cleanup vms...")
        for vm_name in vm_names:
            vm = libvirt_vm.VM(vm_name, params, test.bindir,
                               env.get("address_cache"))
            utlv.MigrationTest().cleanup_dest_vm(vm, None, dest_uri)
            if vm.is_alive():
                vm.destroy(gracefully=False)
        env.clean_objects()
Example #8
def run_stress_kernel_compile(test, params, env):
    """
    Boot VMs and run kernel compilation inside them in parallel.

    1) Boot up VMs:
       the total vmem of the VMs is $overcommit times the host's memory,
       split evenly among the guests.
    2) Launch a kernel compile inside every guest.

    @param test: QEMU test object.
    @param params: Dictionary with the test parameters.
    @param env: Dictionary with test environment.
    """
    def kernelcompile(session, vm_name):
        vm = env.get_vm(vm_name)
        ip = vm.get_address()
        path = params.get("download_url")
        logging.info("kernel path = %s" % path)
        get_kernel_cmd = "wget %s" % path
        try:
            status, output = session.cmd_status_output(get_kernel_cmd,
                                                       timeout=240)
            if status != 0:
                logging.error(output)
                raise error.TestFail("Failed to download the kernel"
                                     " in %s" % vm_name)
            else:
                logging.info("Finished downloading the kernel src"
                             " in %s" % vm_name)
            test_cmd = params.get("test_cmd")
            status, output = session.cmd_status_output(test_cmd, timeout=1200)
            if status != 0:
                logging.error(output)
        finally:
            status, _ = utils_test.ping(ip, count=10, timeout=30)
            if status != 0:
                raise error.TestFail("VM not responding, please check the"
                                     " serial log")

    over_c = float(params.get("overcommit", 1.5))
    guest_number = int(params.get("guest_number", "1"))

    if guest_number < 1:
        logging.warn("Need at least one guest for this test;"
                     " setting guest number to 1")
        guest_number = 1

    for tag in range(1, guest_number):
        params["vms"] += " stress_guest_%s" % tag

    mem_host = utils_memory.memtotal() / 1024
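    # mem_host is MiB; split the overcommitted total evenly among the guests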
    vmem = int(mem_host * over_c / guest_number)

    if vmem < 256:
        raise error.TestNAError("The memory size set for each guest is too"
                                " small. Please try fewer than %s guests"
                                " on this host." % guest_number)
    params["mem"] = vmem
    params["start_vm"] = "yes"
    login_timeout = int(params.get("login_timeout", 360))

    env_process.preprocess(test, params, env)

    sessions_info = []
    for vm_name in params["vms"].split():
        vm = env.get_vm(vm_name)
        vm.verify_alive()
        session = vm.wait_for_login(timeout=login_timeout)
        if not session:
            raise error.TestFail("Could not log into guest %s" % vm_name)

        sessions_info.append([session, vm_name])

    # run kernel compile in vms
    try:
        logging.info("run kernel compile in vms")
        bg_threads = []
        for session_info in sessions_info:
            session = session_info[0]
            vm_name = session_info[1]
            bg_thread = utils_test.BackgroundTest(kernelcompile,
                                                  (session, vm_name))
            bg_thread.start()
            bg_threads.append(bg_thread)

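        # Busy-wait until every background compile thread has finished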
        completed = False
        while not completed:
            completed = True
            for bg_thread in bg_threads:
                if bg_thread.is_alive():
                    completed = False
    finally:
        try:
            for bg_thread in bg_threads:
                if bg_thread:
                    bg_thread.join()
        finally:
            for session_info in sessions_info:
                session_info[0].close()
Example #9
    def count_vm_bytes(self):
        mem_total = utils_memory.memtotal()
        if self.vm_bytes == "half":
            self.vm_bytes = (mem_total - self.vm_reserved) / 2
        elif self.vm_bytes == "shortage":
            self.vm_bytes = mem_total - self.vm_reserved + 524288
Example #10
        if os.path.exists(e_rh):
            utils.run("echo 'never' > %s" % e_rh)
        new_ksm = True
    else:
        try:
            utils.run("modprobe ksm")
            utils.run("ksmctl start 5000 100")
        except error.CmdError as details:
            raise error.TestFail("Failed to load KSM: %s" % details)

    # host_reserve: mem reserve kept for the host system to run
    host_reserve = int(params.get("ksm_host_reserve", -1))
    if host_reserve == -1:
        # default host_reserve = MemAvailable + one_minimal_guest(128MB)
        # later we add 64MB per additional guest
        host_reserve = ((utils_memory.memtotal()
                         - utils_memory.read_from_meminfo("MemFree"))
                        / 1024 + 128)
        # using default reserve
        _host_reserve = True
    else:
        _host_reserve = False

    # guest_reserve: memory reserved so the guest OS does not kill processes
    guest_reserve = int(params.get("ksm_guest_reserve", -1))
    if guest_reserve == -1:
        # default guest_reserve = minimal_system_mem(256MB)
        # later we add tmpfs overhead
        guest_reserve = 256
        # using default reserve
        _guest_reserve = True
Example #11
def run_virsh_nodememstats(test, params, env):
    """
    Test the command virsh nodememstats

    (1) Call the virsh nodememstats command
    (2) Get the output
    (3) Check the output against /proc/meminfo
    (4) Call the virsh nodememstats command with an unexpected option
    (5) Call the virsh nodememstats command with the libvirtd service stopped
    """

    # Initialize the variables
    expected = {}
    actual = {}
    deltas = []
    name_stats = ['total', 'free', 'buffers', 'cached']
    itr = int(params.get("itr"))

    def virsh_check_nodememstats(actual_stats, expected_stats, delta):
        """
        Check the nodememstats output value with /proc/meminfo value
        """

        delta_stats = {}
        for name in name_stats:
            delta_stats[name] = abs(actual_stats[name] - expected_stats[name])
            if 'total' in name:
                if delta_stats[name] != 0:
                    raise error.TestFail("Command 'virsh nodememstats' did"
                                         " not succeed: the value for %s"
                                         " deviates by %d; the total memory"
                                         " value must match exactly" %
                                         (name, delta_stats[name]))
            else:
                if delta_stats[name] > delta:
                    raise error.TestFail("Command 'virsh nodememstats' did"
                                         " not succeed: the value for %s"
                                         " deviates by %d" %
                                         (name, delta_stats[name]))
        return delta_stats

    # Prepare the libvirtd service; default to "on" so the later
    # checks cannot hit an undefined variable
    libvirtd = params.get("libvirtd", "on")
    if libvirtd == "off":
        utils_libvirtd.libvirtd_stop()

    # Get the option for the test case
    option = params.get("virsh_nodememstats_options")

    # Run test case for 10 iterations
    # (default can be changed in subtests.cfg file)
    # and print the final statistics
    for i in range(itr):
        output = virsh.nodememstats(option)

        # Get the status of the virsh command executed
        status = output.exit_status

        # Get status_error option for the test case
        status_error = params.get("status_error")
        if status_error == "yes":
            if status == 0:
                if libvirtd == "off":
                    utils_libvirtd.libvirtd_start()
                    raise error.TestFail("Command 'virsh nodememstats' "
                                         "succeeded with libvirtd service"
                                         " stopped, incorrect")
                else:
                    raise error.TestFail("Command 'virsh nodememstats %s' "
                                         "succeeded (incorrect command)" %
                                         option)

        elif status_error == "no":
            if status == 0:
                # From the beginning of a line, group 1 is one or
                # more word-characters, followed by zero or more
                # whitespace characters and a ':', then one or
                # more whitespace characters, followed by group 2,
                # which is one or more digit characters,
                # then one or more whitespace characters followed by
                # a literal 'kB' or 'KiB' sequence, e.g. as below
                # total  :              3809340 kB
                # total  :              3809340 KiB
                # Normalise the value to MBs
                regex_obj = re.compile(r"^(\w+)\s*:\s+(\d+)\s\w+")
                expected = {}

                for line in output.stdout.split('\n'):
                    match_obj = regex_obj.search(line)
                    # Skip lines that do not match (e.g. blank lines)
                    if match_obj is not None:
                        name = match_obj.group(1)
                        value = match_obj.group(2)
                        expected[name] = int(value) / 1024

                # Get the actual value from /proc/meminfo and normalise to MBs
                actual['total'] = int(utils_memory.memtotal()) / 1024
                actual['free'] = int(utils_memory.freememtotal()) / 1024
                actual['buffers'] = int(
                    utils_memory.read_from_meminfo('Buffers')) / 1024
                actual['cached'] = int(
                    utils_memory.read_from_meminfo('Cached')) / 1024

                # Currently the delta value is kept at 200 MB; this can be
                # tuned based on the accuracy needed
                # (check subtests.cfg for more details)
                delta = int(params.get("delta"))
                output = virsh_check_nodememstats(actual, expected, delta)
                deltas.append(output)

            else:
                raise error.TestFail("Command 'virsh nodememstats %s' did"
                                     " not succeed:\n%s" % (option, status))

    # Restart the libvirtd service if it was stopped
    if libvirtd == "off":
        utils_libvirtd.libvirtd_start()

    # Print the deviated values for all iterations
    if status_error == "no":
        logging.debug("The following is the deviations from "
                      "the actual(/proc/meminfo) and expected"
                      " value(output of virsh nodememstats)")

        for i in range(itr):
            logging.debug("iteration %d:", i)
            for index, name in enumerate(name_stats):
                logging.debug("%19s : %d", name, deltas[i][name])