Example #1
 def blockdev_backup(self):
     assert len(self.target_disks) >= len(self.source_disks), \
         "Not enough target disks defined in cfg!"
     source_lst = ["drive_%s" % x for x in self.source_disks]
     target_lst = ["drive_%s" % x for x in self.target_disks]
     bitmap_lst = ["bitmap_%s" % i for i in range(len(self.source_disks))]
     try:
         if len(source_lst) > 1:
             error_context.context(
                 "backup %s to %s, options: %s" %
                 (source_lst, target_lst, self.backup_options))
             backup_utils.blockdev_batch_backup(
                 self.main_vm, source_lst, target_lst, bitmap_lst, **self.backup_options)
         else:
             error_context.context(
                 "backup %s to %s, options: %s" %
                 (source_lst[0], target_lst[0], self.backup_options))
             backup_utils.blockdev_backup(
                 self.main_vm,
                 source_lst[0],
                 target_lst[0],
                 **self.backup_options)
     finally:
         memory.drop_caches()
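Every example on this page calls memory.drop_caches() inside a finally block, so the host page cache is flushed whether or not the block job succeeds. A minimal sketch of what that helper amounts to, assuming the usual avocado.utils.memory behaviour of syncing and then writing to the drop_caches sysctl; shown only to explain the pattern, not as the library's exact implementation:

import subprocess

def drop_caches_sketch():
    """Rough stand-in for memory.drop_caches(): flush dirty pages, then ask
    the kernel to drop page cache, dentries and inodes (requires root)."""
    subprocess.check_call(["sync"])  # write back dirty pages first
    with open("/proc/sys/vm/drop_caches", "w") as proc_file:
        proc_file.write("3\n")  # 3 = drop pagecache + slab objects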
Example #2
 def wait_mirror_jobs_completed(self):
     """Wait till all mirror jobs completed in parallel"""
     targets = [partial(job_utils.wait_until_block_job_completed,
                        vm=self.main_vm, job_id=j) for j in self._jobs]
     try:
         utils_misc.parallel(targets)
     finally:
         memory.drop_caches()
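The pattern above builds a list of functools.partial callables and hands them to utils_misc.parallel, which runs each blocking wait concurrently. A rough stand-in using only the standard library, assuming utils_misc.parallel simply executes every target in its own thread and propagates failures (the real helper lives in avocado-vt):

from concurrent.futures import ThreadPoolExecutor

def parallel_sketch(targets):
    """Run each callable in its own thread, roughly what the examples expect
    from utils_misc.parallel(); .result() re-raises any exception."""
    with ThreadPoolExecutor(max_workers=max(len(targets), 1)) as pool:
        futures = [pool.submit(target) for target in targets]
        return [future.result() for future in futures]

# Usage mirroring wait_mirror_jobs_completed(), with a hypothetical waiter:
# targets = [functools.partial(wait_job, job_id=j) for j in ("job0", "job1")]
# parallel_sketch(targets)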
Example #3
 def blockdev_mirror(self):
     """Run block-mirror and wait job done"""
     try:
         for idx, source_node in enumerate(self._source_nodes):
             backup_utils.blockdev_mirror(self.main_vm, source_node,
                                          self._target_nodes[idx],
                                          **self._backup_options[idx])
     finally:
         memory.drop_caches()
Example #4
 def do_test(self):
     self.do_full_backup()
     self.generate_inc_files()
     self.do_incremental_backup()
     if self.params.get("negative_test") == "yes":
         return
     self.main_vm.destroy()
     self.rebase_target_disk()
     memory.drop_caches()
     self.verify_target_disk()
Example #5
 def blockdev_mirror(self):
     source = "drive_%s" % self.source_disks[0]
     target = "drive_%s" % self.target_disks[0]
     try:
         error_context.context(
             "backup %s to %s, options: %s" %
             (source, target, self.backup_options), logging.info)
         backup_utils.blockdev_mirror(self.main_vm, source, target,
                                      **self.backup_options)
     finally:
         memory.drop_caches()
Example #6
    def blockdev_stream(self):
        """
        Run block-stream and other operations in parallel

        parallel_tests includes function names separated by space
        e.g. parallel_tests = 'stress_test', we should define stress_test
        function with no argument
        """
        parallel_tests = self.params.objects("parallel_tests")
        targets = [getattr(self, t) for t in parallel_tests
                   if hasattr(self, t)]
        targets.append(
            partial(backup_utils.blockdev_stream,
                    vm=self.main_vm,
                    device=self._top_device,
                    **self._stream_options))

        try:
            utils_misc.parallel(targets)
        finally:
            memory.drop_caches()
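The docstring above says every name listed in parallel_tests must resolve to a zero-argument method on the test class. A hypothetical pairing of such a cfg entry and method (the stress_test name and the guest command are illustrative, not taken from the original source):

# cfg:  parallel_tests = stress_test

def stress_test(self):
    """Hypothetical zero-argument helper: keep the guest busy with I/O while
    blockdev_stream runs in a parallel thread."""
    session = self.main_vm.wait_for_login()
    try:
        session.cmd("dd if=/dev/zero of=/tmp/fill bs=1M count=256", timeout=360)
    finally:
        session.close()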
Example #7
    def blockdev_mirror(self):
        """Run block-mirror and other operations in parallel"""
        # parallel_tests holds method names separated by spaces,
        # e.g. parallel_tests = 'stress_test'; each named method must be
        # defined on this class and take no arguments.
        parallel_tests = self.params.objects("parallel_tests")
        targets = [getattr(self, t) for t in parallel_tests
                   if hasattr(self, t)]

        # block-mirror on all source nodes is in parallel too
        for idx, source_node in enumerate(self._source_nodes):
            targets.append(
                partial(backup_utils.blockdev_mirror,
                        vm=self.main_vm,
                        source=source_node,
                        target=self._target_nodes[idx],
                        **self._backup_options[idx]))

        try:
            utils_misc.parallel(targets)
        finally:
            memory.drop_caches()
Example #8
 def post_test(self):
     try:
         self.destroy_vms()
         self.cleanup_data_disks()
     finally:
         memory.drop_caches()
Example #9
def run(test, params, env):
    """
    Qemu allocate hugepage from specify node.
    Steps:
    1) Setup total of 4G mem hugepages for specify node.
    2) Setup total of 1G mem hugepages for idle node.
    3) Mount this hugepage to /mnt/kvm_hugepage.
    4) Boot guest only allocate hugepage from specify node.
    5) Check the hugepage used from every node.
    :params test: QEMU test object.
    :params params: Dictionary with the test parameters.
    :params env: Dictionary with test environment.
    """
    memory.drop_caches()
    hugepage_size = memory.get_huge_page_size()
    mem_size = int(normalize_data_size("%sM" % params["mem"], "K"))
    idle_node_mem = int(normalize_data_size("%sM" % params["idle_node_mem"], "K"))

    error_context.context("Get host numa topological structure.", logging.info)
    host_numa_node = utils_misc.NumaInfo()
    node_list = host_numa_node.get_online_nodes_withmem()
    idle_node_list = node_list.copy()
    node_meminfo = host_numa_node.get_all_node_meminfo()

    # iterate over a copy since nodes may be removed from node_list below
    for node_id in list(node_list):
        error_context.base_context("Check MemFree on host numa node %s "
                                   "before setup." % node_id, logging.info)
        node_memfree = int(node_meminfo[node_id]["MemFree"])
        if node_memfree < idle_node_mem:
            idle_node_list.remove(node_id)
        if node_memfree < mem_size:
            node_list.remove(node_id)

    if len(idle_node_list) < 2 or not node_list:
        test.cancel("Host does not have enough NUMA nodes with free memory "
                    "to run the test, skipping test...")

    for node_id in node_list:
        error_context.base_context("Specify qemu process only allocate "
                                   "HugePages from node%s." % node_id, logging.info)
        params["target_nodes"] = "%s" % node_id
        params["target_num_node%s" % node_id] = math.ceil(mem_size / hugepage_size)
        error_context.context("Setup huge pages for specify node%s." %
                              node_id, logging.info)
        check_list = [_ for _ in idle_node_list if _ != node_id]
        for idle_node in check_list:
            params["target_nodes"] += " %s" % idle_node
            params["target_num_node%s" % idle_node] = math.ceil(idle_node_mem / hugepage_size)
            error_context.context("Setup huge pages for idle node%s." %
                                  idle_node, logging.info)
        params["setup_hugepages"] = "yes"
        hp_config = test_setup.HugePageConfig(params)
        hp_config.setup()
        params["qemu_command_prefix"] = "numactl --membind=%s" % node_id
        params["start_vm"] = "yes"
        params["hugepage_path"] = hp_config.hugepage_path
        env_process.preprocess_vm(test, params, env, params["main_vm"])
        try:
            vm = env.get_vm(params["main_vm"])
            vm.verify_alive()
            vm.wait_for_login()

            meminfo = host_numa_node.get_all_node_meminfo()
            for index in check_list:
                error_context.base_context("Check process HugePages Free on host "
                                           "numa node %s." % index, logging.info)
                hugepages_free = int(meminfo[index]["HugePages_Free"])
                if int(node_meminfo[index]["HugePages_Free"]) > hugepages_free:
                    test.fail("Qemu still use HugePages from other node."
                              "Expect: node%s, used: node%s." % (node_id, index))
        finally:
            vm.destroy()
            hp_config.cleanup()
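The per-node hugepage counts above come from math.ceil(mem_size / hugepage_size), with both values in KiB. A quick worked example of that arithmetic, assuming 2048 KiB (2 MiB) hugepages and the 4G/1G sizes mentioned in the docstring:

import math

hugepage_size = 2048         # KiB; assumed 2 MiB hugepages for this example
mem_size = 4096 * 1024       # "4096M" normalized to KiB -> 4194304
idle_node_mem = 1024 * 1024  # "1024M" normalized to KiB -> 1048576

print(math.ceil(mem_size / hugepage_size))       # 2048 pages for the target node
print(math.ceil(idle_node_mem / hugepage_size))  # 512 pages for each idle node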
Example #10
 def post_test(self):
     try:
         self.destroy_vms()
         self.clean_images()
     finally:
         memory.drop_caches()
Example #11
 def wait_stream_job_completed(self):
     """Wait till the stream job completed"""
     try:
         job_utils.wait_until_block_job_completed(self.main_vm, self._job)
     finally:
         memory.drop_caches()