def setUp(self):
        '''
        Set all the required parameters and mount the tmpfs used by
        the test.

        Detects whether the running kernel exposes the newer
        "thp_split_page" vmstat counter or the older "thp_split" one,
        sizes the dd block to twice the hugepage size and mounts a
        tmpfs of the free-memory size at self.mem_path.
        '''

        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        # freememtotal() reports KiB; convert to MiB for the mount size
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900
        self.thp_split = None
        try:
            # Newer kernels name the counter thp_split_page; older ones
            # use thp_split.  read_from_vmstat raises IndexError when
            # the key is absent.
            memory.read_from_vmstat("thp_split_page")
            self.thp_split = "thp_split_page"
        except IndexError:
            self.thp_split = "thp_split"

        # Set block size as hugepage size * 2.  Use integer division:
        # on Python 3 "/" returns a float, which would make self.count
        # a float and later break range(self.count) in the test.
        self.block_size = (memory.get_huge_page_size() // 1024) * 2
        self.count = free_mem // self.block_size

        # Mount device as per free memory size
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
    def setUp(self):
        '''
        Set all the required parameters and mount the tmpfs used by
        the test.

        Picks the vmstat THP-split counter name supported by the
        running kernel, sizes the dd block to twice the hugepage size
        and mounts a tmpfs of the free-memory size at self.mem_path.
        '''

        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        # freememtotal() reports KiB; convert to MiB for the mount size
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900
        self.thp_split = None
        try:
            # read_from_vmstat raises IndexError when the key is absent
            memory.read_from_vmstat("thp_split_page")
            self.thp_split = "thp_split_page"
        except IndexError:
            self.thp_split = "thp_split"

        # Set block size as hugepage size * 2.  Integer division ("//")
        # keeps self.block_size and self.count ints on Python 3; true
        # division would produce floats and range(self.count) in the
        # test would raise TypeError.
        self.block_size = (memory.get_huge_page_size() // 1024) * 2
        self.count = free_mem // self.block_size

        # Mount device as per free memory size
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path,
                          fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
# Example #3
    def setUp(self):
        '''
        Prepare the NUMA move-pages test: size the page count from
        free memory, optionally reserve hugepages on every
        memory-backed node, install build prerequisites and build the
        test sources.
        '''
        smm = SoftwareManager()
        dist = distro.detect()
        # Use 20% of free memory (in bytes) for the test allocation
        memsize = int(memory.meminfo.MemFree.b * 0.2)
        # Integer division: "/" would yield a float on Python 3, and
        # str(float) would later be written into the nr_hugepages
        # sysfs files, which only accept integers.
        self.nr_pages = self.params.get(
            'nr_pages', default=memsize // memory.get_page_size())
        self.map_type = self.params.get('map_type', default='private')
        self.hpage = self.params.get('h_page', default=False)

        nodes = memory.numa_nodes_with_memory()
        if len(nodes) < 2:
            self.cancel('Test requires two numa nodes to run.'
                        'Node list with memory: %s' % nodes)

        pkgs = ['gcc', 'make']
        hp_check = 0
        if self.hpage:
            hp_size = memory.get_huge_page_size()
            # Request nr_pages hugepages on every node, then read back
            # how many the kernel actually granted.
            for node in nodes:
                genio.write_file(
                    '/sys/devices/system/node/node%s/hugepages/hu'
                    'gepages-%skB/nr_hugepages' % (node, str(hp_size)),
                    str(self.nr_pages))
            for node in nodes:
                hp_check += int(
                    genio.read_file(
                        '/sys/devices/system/node/node%s/hugepages/hugepages-%skB'
                        '/nr_hugepages' % (node, str(hp_size))).strip())
            if hp_check < self.nr_pages:
                self.cancel('Not enough pages to be configured on nodes')
        # Distribution-specific development packages for numa/hugetlbfs
        if dist.name == "Ubuntu":
            pkgs.extend(
                ['libpthread-stubs0-dev', 'libnuma-dev', 'libhugetlbfs-dev'])
        elif dist.name in ["centos", "rhel", "fedora"]:
            pkgs.extend(['numactl-devel', 'libhugetlbfs-devel'])
        elif dist.name == "SuSE":
            pkgs.extend(['libnuma-devel'])
            if dist.version >= 15:
                pkgs.extend(['libhugetlbfs-devel'])
            else:
                pkgs.extend(['libhugetlbfs-libhugetlb-devel'])

        for package in pkgs:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)

        for file_name in [
                'util.c', 'numa_test.c', 'softoffline.c', 'bench_movepages.c',
                'Makefile'
        ]:
            self.copyutil(file_name)

        build.make(self.teststmpdir)
    def setUp(self):
        '''
        Read the hugepage parameters, install the build
        prerequisites, copy the test sources and build them.
        '''
        smm = SoftwareManager()
        # Hugepage size from sysfs is in kB; the default here is MB.
        default_hp = memory.get_huge_page_size() / 1024
        self.hpagesize = int(self.params.get('hpagesize', default=default_hp))
        self.num_huge = int(self.params.get('num_pages', default='1'))

        # Cancel on the first toolchain package that is neither
        # installed nor installable.
        for pkg in ('gcc', 'make'):
            if not (smm.check_installed(pkg) or smm.install(pkg)):
                self.cancel('%s is needed for the test to be run' % pkg)
        for src in ('hugepage_sanity.c', 'Makefile'):
            self.copyutil(src)

        build.make(self.teststmpdir)
    def test(self):
        '''
        Enables THP, turns off the defrag and fragments the memory.
        Once the memory gets fragmented turns on the defrag and checks
        whether defrag happened by comparing how many hugepages could
        be allocated before and after khugepaged defrag was enabled.
        '''

        # Enables THP
        memory.set_thp_value("enabled", "always")

        # Turns off Defrag
        memory.set_thp_value("khugepaged/defrag", "0")

        # Fragments the memory.  int() guards against self.count being
        # a float (setUp computes it with true division, which returns
        # float on Python 3), which would make range() raise TypeError.
        self.log.info("Fragmenting the memory Using dd command \n")
        for iterator in range(int(self.count)):
            defrag_cmd = 'dd if=/dev/urandom of=%s/%d bs=%dK count=1'\
                         % (self.mem_path, iterator, self.block_size)
            if (process.system(defrag_cmd,
                               timeout=900,
                               verbose=False,
                               ignore_status=True,
                               shell=True)):
                self.fail('Defrag command Failed %s' % defrag_cmd)

        total = memory.memtotal()
        hugepagesize = memory.get_huge_page_size()
        # Aim for 80% of the theoretical hugepage capacity
        nr_full = int(0.8 * (total / hugepagesize))

        # Sets max possible hugepages before defrag on
        nr_hp_before = self.set_max_hugepages(nr_full)

        # Turns Defrag ON
        memory.set_thp_value("khugepaged/defrag", "1")

        self.log.info("Sleeping %d seconds to settle out things", 10)
        time.sleep(10)

        # Sets max hugepages after defrag on
        nr_hp_after = self.set_max_hugepages(nr_full)

        # Defrag worked only if more hugepages are allocatable now
        if nr_hp_before >= nr_hp_after:
            e_msg = "No Memory Defragmentation\n"
            e_msg += "%d hugepages before turning khugepaged on,\n"\
                     "%d After it" % (nr_hp_before, nr_hp_after)
            self.fail(e_msg)

        self.log.info("Defrag test passed")
# Example #6
    def setUp(self):
        '''
        Prepare the NUMA test: size the page count from free memory,
        optionally reserve hugepages on every memory-backed node,
        install build prerequisites and build the test sources.
        '''
        smm = SoftwareManager()
        dist = distro.detect()
        # 20% of free memory in bytes (freememtotal() reports KiB)
        memsize = int(memory.freememtotal() * 1024 * 0.2)
        # Integer division: "/" returns a float on Python 3, and
        # str(float) would later be written into the nr_hugepages
        # sysfs files, which only accept integers.
        self.nr_pages = self.params.get(
            'nr_pages', default=memsize // memory.get_page_size())
        self.map_type = self.params.get('map_type', default='private')
        self.hpage = self.params.get('h_page', default=False)

        nodes = memory.numa_nodes_with_memory()
        if len(nodes) < 2:
            self.cancel('Test requires two numa nodes to run.'
                        'Node list with memory: %s' % nodes)

        pkgs = ['gcc', 'make']
        hp_check = 0
        if self.hpage:
            hp_size = memory.get_huge_page_size()
            # Request nr_pages hugepages per node, then read back the
            # totals the kernel actually granted.
            for node in nodes:
                genio.write_file('/sys/devices/system/node/node%s/hugepages/hu'
                                 'gepages-%skB/nr_hugepages' %
                                 (node, str(hp_size)), str(self.nr_pages))
            for node in nodes:
                hp_check += int(genio.read_file(
                    '/sys/devices/system/node/node%s/hugepages/hugepages-%skB'
                    '/nr_hugepages' % (node, str(hp_size))).strip())
            if hp_check < self.nr_pages:
                self.cancel('Not enough pages to be configured on nodes')
        # Distribution-specific development packages for numa/hugetlbfs
        if dist.name == "Ubuntu":
            pkgs.extend(['libpthread-stubs0-dev',
                         'libnuma-dev', 'libhugetlbfs-dev'])
        elif dist.name in ["centos", "rhel", "fedora"]:
            pkgs.extend(['numactl-devel', 'libhugetlbfs-devel'])
        elif dist.name == "SuSE":
            pkgs.extend(['libnuma-devel'])
            if dist.version >= 15:
                pkgs.extend(['libhugetlbfs-devel'])
            else:
                pkgs.extend(['libhugetlbfs-libhugetlb-devel'])

        for package in pkgs:
            if not smm.check_installed(package) and not smm.install(package):
                self.cancel('%s is needed for the test to be run' % package)

        for file_name in ['util.c', 'numa_test.c', 'Makefile']:
            self.copyutil(file_name)

        build.make(self.teststmpdir)
    def test(self):
        '''
        Enables THP, turns off the defrag and fragments the memory.
        Once the memory gets fragmented turns on the defrag and checks
        whether defrag happened by comparing the hugepage counts
        achievable before and after enabling khugepaged defrag.
        '''

        # Enables THP
        memory.set_thp_value("enabled", "always")

        # Turns off Defrag
        memory.set_thp_value("khugepaged/defrag", "0")

        # Fragments the memory.  int() guards against self.count being
        # a float (setUp computes it with true division, which returns
        # float on Python 3); range() requires an int.
        self.log.info("Fragmenting the memory Using dd command \n")
        for iterator in range(int(self.count)):
            defrag_cmd = 'dd if=/dev/urandom of=%s/%d bs=%dK count=1'\
                         % (self.mem_path, iterator, self.block_size)
            if(process.system(defrag_cmd, timeout=900,
                              verbose=False, ignore_status=True, shell=True)):
                self.fail('Defrag command Failed %s' % defrag_cmd)

        total = memory.memtotal()
        hugepagesize = memory.get_huge_page_size()
        # Aim for 80% of the theoretical hugepage capacity
        nr_full = int(0.8 * (total / hugepagesize))

        # Sets max possible hugepages before defrag on
        nr_hp_before = self.set_max_hugepages(nr_full)

        # Turns Defrag ON
        memory.set_thp_value("khugepaged/defrag", "1")

        self.log.info("Sleeping %d seconds to settle out things", 10)
        time.sleep(10)

        # Sets max hugepages after defrag on
        nr_hp_after = self.set_max_hugepages(nr_full)

        # Defrag worked only if more hugepages are allocatable now
        if nr_hp_before >= nr_hp_after:
            e_msg = "No Memory Defragmentation\n"
            e_msg += "%d hugepages before turning khugepaged on,\n"\
                     "%d After it" % (nr_hp_before, nr_hp_after)
            self.fail(e_msg)

        self.log.info("Defrag test passed")
# Example #8
    def setUp(self):
        '''
        Validate the requested hugepage size against the MMU type
        reported in /proc/cpuinfo, install build prerequisites, copy
        the test sources and build them.
        '''
        smm = SoftwareManager()
        # Hugepage size from sysfs is in kB; the default here is MB.
        default_hp = memory.get_huge_page_size() / 1024
        self.hpagesize = int(self.params.get('hpagesize', default=default_hp))
        cpu_info = genio.read_file("/proc/cpuinfo")
        # Cancel on unsupported size/MMU combinations: 16 with Radix,
        # anything other than 16 with Hash.
        if 'Radix' in cpu_info and self.hpagesize == 16:
            self.cancel("This hugepage size is not supported.")
        if 'Hash' in cpu_info and self.hpagesize != 16:
            self.cancel("This hugepage size is not supported.")
        self.num_huge = int(self.params.get('num_pages', default='1'))

        # Cancel on the first toolchain package that is neither
        # installed nor installable.
        for pkg in ('gcc', 'make'):
            if not (smm.check_installed(pkg) or smm.install(pkg)):
                self.cancel('%s is needed for the test to be run' % pkg)
        for src in ('hugepage_sanity.c', 'Makefile'):
            self.copyutil(src)

        build.make(self.teststmpdir)
    def setUp(self):
        '''
        Set all the required parameters and mount the tmpfs used by
        the test.
        '''

        # Set params as per available memory in system
        self.mem_path = os.path.join(data_dir.get_tmp_dir(), 'thp_space')
        # freememtotal() reports KiB; convert to MiB for the mount size
        free_mem = int(memory.freememtotal() / 1024)
        self.dd_timeout = 900

        # Set block size as hugepage size * 2.  Integer division ("//")
        # keeps self.block_size and self.count ints on Python 3 so the
        # test can pass self.count to range().
        self.block_size = (memory.get_huge_page_size() // 1024) * 2
        self.count = free_mem // self.block_size

        # Mount device as per free memory size.  Plain os.mkdir raised
        # FileExistsError when a previous run left the directory
        # behind; create it only when missing, like the other variants
        # of this setUp in the file.
        if not os.path.exists(self.mem_path):
            os.makedirs(self.mem_path)
        self.device = Partition(device="none", mountpoint=self.mem_path)
        self.device.mount(mountpoint=self.mem_path, fstype="tmpfs",
                          args='-o size=%dM' % free_mem)
# Example #10
def run(test, params, env):
    """
    Qemu allocate hugepage from specify node.
    Steps:
    1) Setup total of 4G mem hugepages for specify node.
    2) Setup total of 1G mem hugepages for idle node.
    3) Mount this hugepage to /mnt/kvm_hugepage.
    4) Boot guest only allocate hugepage from specify node.
    5) Check the hugepage used from every node.
    :params test: QEMU test object.
    :params params: Dictionary with the test parameters.
    :params env: Dictionary with test environment.
    """
    memory.drop_caches()
    hugepage_size = memory.get_huge_page_size()
    # normalize_data_size converts the MB-valued params to KB
    mem_size = int(normalize_data_size("%sM" % params["mem"], "K"))
    idle_node_mem = int(normalize_data_size("%sM" % params["idle_node_mem"], "K"))

    error_context.context("Get host numa topological structure.", logging.info)
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.get_online_nodes_withmem()
    idle_node_list = node_list.copy()
    node_meminfo = host_numa_node.get_all_node_meminfo()

    # Filter out nodes without enough free memory.  Iterate over a
    # snapshot of node_list: removing from the list being iterated
    # would make the loop skip the element following each removal.
    for node_id in list(node_list):
        error_context.base_context("Check preprocess HugePages Free on host "
                                   "numa node %s." % node_id, logging.info)
        node_memfree = int(node_meminfo[node_id]["MemFree"])
        if node_memfree < idle_node_mem:
            idle_node_list.remove(node_id)
        if node_memfree < mem_size:
            node_list.remove(node_id)

    if len(idle_node_list) < 2 or not node_list:
        test.cancel("Host node does not have enough nodes to run the test, "
                    "skipping test...")

    for node_id in node_list:
        error_context.base_context("Specify qemu process only allocate "
                                   "HugePages from node%s." % node_id, logging.info)
        params["target_nodes"] = "%s" % node_id
        params["target_num_node%s" % node_id] = math.ceil(mem_size / hugepage_size)
        error_context.context("Setup huge pages for specify node%s." %
                              node_id, logging.info)
        # Every other idle node gets the smaller idle_node_mem worth of
        # hugepages; these nodes are checked after boot.
        check_list = [_ for _ in idle_node_list if _ != node_id]
        for idle_node in check_list:
            params["target_nodes"] += " %s" % idle_node
            params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem / hugepage_size)
            error_context.context("Setup huge pages for idle node%s." %
                                  idle_node, logging.info)
        params["setup_hugepages"] = "yes"
        hp_config = test_setup.HugePageConfig(params)
        hp_config.setup()
        # Bind the whole qemu process to the node under test
        params["qemu_command_prefix"] = "numactl --membind=%s" % node_id
        params["start_vm"] = "yes"
        params["hugepage_path"] = hp_config.hugepage_path
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        vm = None  # keep the finally clause safe if env.get_vm raises
        try:
            vm = env.get_vm(params["main_vm"])
            vm.verify_alive()
            vm.wait_for_login()

            meminfo = host_numa_node.get_all_node_meminfo()
            for index in check_list:
                error_context.base_context("Check process HugePages Free on host "
                                           "numa node %s." % index, logging.info)
                hugepages_free = int(meminfo[index]["HugePages_Free"])
                # Free hugepages on an idle node must not have dropped;
                # a drop means qemu allocated from the wrong node.
                if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free:
                    test.fail("Qemu still use HugePages from other node."
                              "Expect: node%s, used: node%s." % (node_id, index))
        finally:
            if vm:
                vm.destroy()
            hp_config.cleanup()