def check_commit_process(self):
    """
    Verify a commit job hangs (makes no progress) while the NBD server
    backing the image is down, then resumes and completes once the
    export is restored.
    """
    offset = None
    tmo = self.params.get_numeric('server_down_elapsed_time')
    # stop nbd server
    self.nbd_export.stop_export()
    # check commit job should hang: the job must stay alive and its
    # offset must not advance for the whole window
    for i in range(tmo):
        time.sleep(1)
        job = job_utils.get_block_job_by_id(self.main_vm, self.commit_job)
        if not job:
            # job disappeared -> it was cancelled/failed instead of hanging
            self.test.fail("job cancelled in %d seconds" % tmo)
        if offset is None:
            # remember the first observed offset as the baseline
            offset = job['offset']
        elif offset != job['offset']:
            self.test.fail("offset changed: %s vs. %s"
                           % (offset, job['offset']))
    # resume nbd access
    self.nbd_export.export_image()
    # set max speed (0 == unlimited) so the job can finish quickly
    self.main_vm.monitor.set_block_job_speed(self.commit_job, 0)
    # commit job should complete
    job_utils.wait_until_block_job_completed(self.main_vm, self.commit_job)
def commit_snapshots(self):
    """
    Commit the top snapshot using an explicit filter-node-name and check
    that the filter node is visible in block info while the job runs and
    gone after the job completes.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["speed", "filter-node-name"]
    arguments = self.params.copy_from_keys(options)
    # throttle the job so it is still running when block info is checked
    arguments["speed"] = self.params["commit_speed"]
    filter_node_name = self.params["filter_node_name"]
    arguments["filter-node-name"] = filter_node_name
    device = self.get_node_name(snapshot_tags[-1])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(device, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    job_id = args.get("job-id", device)
    block_info = self.main_vm.monitor.info_block()
    # the filter node must be present while the commit job is active
    if filter_node_name not in block_info:
        self.test.fail("Block info not correct, node-name should be '%s'"
                       % filter_node_name)
    # lift the speed limit (0 == unlimited) so the job can finish
    self.main_vm.monitor.cmd("block-job-set-speed",
                             {'device': job_id, 'speed': 0})
    job_utils.wait_until_block_job_completed(self.main_vm, job_id)
    block_info = self.main_vm.monitor.info_block()
    # the filter node must be removed once the job has completed
    # (bug fix: the two message fragments previously concatenated to
    # "should notbe" -- missing space)
    if filter_node_name in block_info:
        self.test.fail("Block info not correct, node-name should not "
                       "be '%s'" % filter_node_name)
def commit_snapshots(self):
    """
    Start a throttled block-commit job, hot-unplug the frontend device
    while the job is running, check the job reaches the expected status,
    then unthrottle and wait for completion.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["speed"]
    arguments = self.params.copy_from_keys(options)
    # throttle so the job is still active when the device is unplugged
    arguments["speed"] = self.params["commit_speed"]
    device = self.get_node_name(snapshot_tags[-1])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(device, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    # unplug the frontend while the commit job holds the backing chain
    self.main_vm.monitor.cmd('device_del',
                             {'id': self.params["device_tag"]})
    # NOTE(review): 'device' was rebound above to the top snapshot node
    # name -- confirm is_device_deleted() expects a node name here, not
    # the original device tag
    unplug_s = utils_misc.wait_for(lambda: self.is_device_deleted(device),
                                   timeout=60, step=1.0)
    if not unplug_s:
        self.test.fail("Hotunplug device failed")
    # substring match against the stringified job list
    job_status = str(job_utils.query_block_jobs(self.main_vm))
    if self.params["expect_status"] not in job_status:
        self.test.fail("Job status not correct,job status is %s"
                       % job_status)
    job_id = args.get("job-id", device)
    # lift the speed limit (0 == unlimited) so the job can finish
    self.main_vm.monitor.cmd("block-job-set-speed",
                             {'device': job_id, 'speed': 0})
    job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def commit_snapshots(self):
    """Commit the snapshot chain of the first configured device and wait."""
    first_tag = self.params["device_tag"].split()[0]
    node = self.get_node_name(first_tag)
    cmd, args = backup_utils.block_commit_qmp_cmd(node)
    self.main_vm.monitor.cmd(cmd, args)
    job_utils.wait_until_block_job_completed(self.main_vm,
                                             args.get("job-id", node))
def wait_until_job_complete_with_error(self):
    """
    Wait for the stream job, which is EXPECTED to fail with the
    configured error message; a clean completion, or a failure with a
    different message, fails the test.
    """
    try:
        job_utils.wait_until_block_job_completed(self.main_vm, self._job)
    except AssertionError as e:
        # the job failed: only the expected error message is acceptable
        if self.params["error_msg"] not in str(e):
            self.test.fail(str(e))
    else:
        # the job completed cleanly, but an error was required
        self.test.fail("stream completed without error 'No space left'")
def commit_snapshots(self):
    """Commit the active (top) snapshot of the data device and wait."""
    tag = self.params.get("device_tag")
    snap_tags = self.params.object_params(tag)["snapshot_tags"].split()
    top_node = self.get_node_name(snap_tags[-1])
    cmd, args = backup_utils.block_commit_qmp_cmd(top_node)
    self.main_vm.monitor.cmd(cmd, args)
    job_utils.wait_until_block_job_completed(
        self.main_vm, args.get("job-id", top_node))
def run_test(self):
    """
    Main flow: start a commit job, break networking (job should pause),
    restore networking (job should resume), wait for completion, then
    verify the data file.
    """
    self.pre_test()
    try:
        self.commit_snapshots()
        # NOTE(review): the is_block_job_running/paused return values are
        # discarded; presumably these helpers fail the test internally on
        # a state mismatch -- confirm against job_utils
        job_utils.is_block_job_running(self.main_vm, self.job_id)
        self.break_net_with_iptables()
        job_utils.is_block_job_paused(self.main_vm, self.job_id)
        self.resume_net_with_iptables()
        job_utils.is_block_job_running(self.main_vm, self.job_id)
        job_utils.wait_until_block_job_completed(self.main_vm, self.job_id)
        self.verify_data_file()
    finally:
        # always clean up, even when an assertion above fails
        self.post_test()
def blockdev_backup(vm, source, target, **extra_options):
    """
    Run a blockdev-backup job and wait until it completes.

    :param vm: VM object whose monitor issues the QMP commands
    :param source: source node name
    :param target: target node name
    :param extra_options: backup options; 'timeout' (default 600s) and
        'auto_disable_bitmap' (default True) are consumed here, the rest
        is forwarded to blockdev_backup_qmp_cmd
    """
    # NOTE(review): the command is built BEFORE 'timeout' is popped, so
    # blockdev_backup_qmp_cmd presumably filters unknown keys -- confirm
    cmd, arguments = blockdev_backup_qmp_cmd(source, target, **extra_options)
    timeout = int(extra_options.pop("timeout", 600))
    if "bitmap" in arguments:
        # incremental backup: the named bitmap must already exist
        info = block_bitmap.get_bitmap_by_name(vm, source, arguments["bitmap"])
        assert info, "Bitmap '%s' not exists in device '%s'" % (
            arguments["bitmap"], source)
        # by default disable the bitmap so it stops recording new writes
        # while the backup runs
        auto_disable_bitmap = extra_options.pop("auto_disable_bitmap", True)
        if auto_disable_bitmap and info.get("status") != "disabled":
            block_bitmap.block_dirty_bitmap_disable(vm, source,
                                                    arguments["bitmap"])
    vm.monitor.cmd(cmd, arguments)
    job_id = arguments.get("job-id", source)
    job_utils.wait_until_block_job_completed(vm, job_id, timeout)
def commit_snapshots(self):
    """
    Commit an intermediate range of the snapshot chain (explicit
    base-node/top-node) and wait for the job to finish.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["base-node", "top-node", "speed"]
    arguments = self.params.copy_from_keys(options)
    # commit from the original image up to the next-to-last snapshot;
    # requires at least two snapshots in the chain
    arguments["base-node"] = self.get_node_name(device)
    arguments["top-node"] = self.get_node_name(snapshot_tags[-2])
    device = self.get_node_name(snapshot_tags[-1])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(device, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    job_id = args.get("job-id", device)
    # NOTE(review): result is discarded -- presumably the point is to
    # exercise query-named-block-nodes while the commit job is active;
    # confirm intent
    self.main_vm.monitor.cmd("query-named-block-nodes")
    job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def blockdev_batch_backup(vm, source_lst, target_lst, bitmap_lst,
                          **extra_options):
    """
    Start one blockdev-backup per source plus one dirty-bitmap-add per
    source, all in a single QMP transaction, then wait for every backup
    job to complete.
    """
    timeout = int(extra_options.pop("timeout", 600))
    transaction_actions = []
    pending_jobs = []
    for index, source in enumerate(source_lst):
        backup_cmd, backup_args = blockdev_backup_qmp_cmd(
            source, target_lst[index], **extra_options)
        pending_jobs.append(backup_args.get("job-id", source))
        transaction_actions.append({"type": backup_cmd, "data": backup_args})
        # bitmap added in the same transaction as the backup start
        bitmap_args = {"node": source, "name": bitmap_lst[index]}
        if extra_options.get("granularity") is not None:
            bitmap_args["granularity"] = int(extra_options["granularity"])
        if extra_options.get("persistent") is not None:
            bitmap_args["persistent"] = extra_options["persistent"]
        transaction_actions.append(
            {"type": "block-dirty-bitmap-add", "data": bitmap_args})
    vm.monitor.cmd("transaction", {"actions": transaction_actions})
    for job in pending_jobs:
        job_utils.wait_until_block_job_completed(vm, job, timeout)
def blockdev_batch_backup(vm, source_lst, target_lst, bitmap_lst,
                          **extra_options):
    """
    Run several blockdev-backup jobs in one QMP transaction.

    :param vm: VM object whose monitor issues the commands
    :param source_lst: list of source node names
    :param target_lst: list of target node names, parallel to source_lst
    :param bitmap_lst: list of bitmap names, parallel to source_lst
    :param extra_options: recognized keys include timeout,
        completion_mode, sync, disabled_bitmaps, wait_job_complete,
        granularity, persistent, disabled; the rest is forwarded to
        blockdev_backup_qmp_cmd
    """
    actions = []
    jobs_id = []
    bitmap_add_cmd = "block-dirty-bitmap-add"
    timeout = int(extra_options.pop("timeout", 600))
    completion_mode = extra_options.pop("completion_mode", None)
    sync_mode = extra_options.get("sync")
    # we can disable dirty-map in a transaction
    bitmap_disable_cmd = "block-dirty-bitmap-disable"
    disabled_bitmap_lst = extra_options.pop("disabled_bitmaps", None)
    # sometimes the job will never complete, e.g. backup in pull mode,
    # export fleecing image by internal nbd server
    wait_job_complete = extra_options.pop("wait_job_complete", True)
    for idx, src in enumerate(source_lst):
        if sync_mode in ["incremental", "bitmap"]:
            # incremental/bitmap sync needs exactly one bitmap per source;
            # extra_options is mutated so each backup gets its own bitmap
            assert len(bitmap_lst) == len(
                source_lst
            ), "must provide a valid bitmap name for 'incremental' sync mode"
            extra_options["bitmap"] = bitmap_lst[idx]
        backup_cmd, arguments = blockdev_backup_qmp_cmd(
            src, target_lst[idx], **extra_options)
        job_id = arguments.get("job-id", src)
        jobs_id.append(job_id)
        actions.append({"type": backup_cmd, "data": arguments})
        if bitmap_lst and (sync_mode == 'full' or sync_mode == 'none'):
            # full/none sync: create the bitmap atomically with the backup
            # start so it tracks writes from that point on
            bitmap_data = {"node": source_lst[idx], "name": bitmap_lst[idx]}
            granularity = extra_options.get("granularity")
            persistent = extra_options.get("persistent")
            disabled = extra_options.get("disabled")
            if granularity is not None:
                bitmap_data["granularity"] = int(granularity)
            if persistent is not None:
                bitmap_data["persistent"] = persistent
            if disabled is not None:
                bitmap_data["disabled"] = disabled
            actions.append({"type": bitmap_add_cmd, "data": bitmap_data})
        if disabled_bitmap_lst:
            bitmap_data = {
                "node": source_lst[idx],
                "name": disabled_bitmap_lst[idx]
            }
            actions.append({"type": bitmap_disable_cmd, "data": bitmap_data})
    arguments = {"actions": actions}
    if completion_mode == 'grouped':
        # grouped completion: all jobs in the transaction fail or
        # complete together
        arguments['properties'] = {"completion-mode": "grouped"}
    vm.monitor.cmd("transaction", arguments)
    if wait_job_complete:
        list(
            map(
                lambda x: job_utils.wait_until_block_job_completed(
                    vm, x, timeout), jobs_id))
def full_backup(vm, source_node, target_node, bitmap_count):
    """
    Start a full backup job and, in the same QMP transaction, add
    ``bitmap_count`` persistent dirty bitmaps (bitmap_0 .. bitmap_N-1)
    on the source node, then wait for the backup job to complete.

    :param vm: VM object whose monitor issues the QMP commands
    :param source_node: backup source node name
    :param target_node: backup target node name
    :param bitmap_count: number of bitmaps to add
    """
    # lazy %-style args: logging formats only if the record is emitted
    logging.info("Begin full backup %s to %s", source_node, target_node)
    actions, extra_options = [], {"sync": "full"}
    cmd, args = backup_utils.blockdev_backup_qmp_cmd(
        source_node, target_node, **extra_options)
    actions.append({"type": cmd, "data": args})
    bitmap_data = {"node": source_node, "persistent": True}
    for idx in range(bitmap_count):
        data = bitmap_data.copy()
        data["name"] = "bitmap_%d" % idx
        actions.append({"type": "block-dirty-bitmap-add", "data": data})
    vm.monitor.cmd("transaction", {"actions": actions})
    # blockdev_backup_qmp_cmd is expected to always set a job-id
    job_utils.wait_until_block_job_completed(vm, args["job-id"])
def commit_snapshots(self):
    """
    Start a throttled commit job, run the configured forbidden actions
    while the job is active, then unthrottle and wait for completion.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["speed"]
    arguments = self.params.copy_from_keys(options)
    # throttle so the job is still running during do_forbidden_actions
    arguments["speed"] = self.params["commit_speed"]
    self.active_node = self.get_node_name(snapshot_tags[-1])
    # node that other operations must not touch while the job runs
    self.forbidden_node = self.get_node_name(self.params["fnode"])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(self.active_node, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    job_id = args.get("job-id", self.active_node)
    # NOTE(review): presumably operations on forbidden_node are expected
    # to fail while the commit job holds it -- see do_forbidden_actions
    self.do_forbidden_actions()
    # lift the speed limit (0 == unlimited) so the job can finish
    self.main_vm.monitor.cmd("block-job-set-speed", {
        'device': job_id,
        'speed': 0
    })
    job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def blockdev_batch_snapshot(vm, source_lst, target_lst, **extra_options):
    """
    Snapshot several devices in a single QMP transaction.

    :param vm: VM object whose monitor issues the commands
    :param source_lst: list of source node names
    :param target_lst: list of snapshot target node names (parallel)
    :param extra_options: snapshot options; 'timeout' is popped here and
        the rest is forwarded to blockdev_snapshot_qmp_cmd
    """
    actions = []
    timeout = int(extra_options.pop("timeout", 600))
    # NOTE(review): jobs_id is never populated, so the wait below is a
    # no-op; blockdev-snapshot creates no block job, so this may be
    # intentional dead code -- confirm before "fixing"
    jobs_id = []
    for idx, src in enumerate(source_lst):
        snapshot_cmd, arguments = blockdev_snapshot_qmp_cmd(
            src, target_lst[idx], **extra_options)
        actions.append({"type": snapshot_cmd, "data": arguments})
    arguments = {"actions": actions}
    vm.monitor.cmd("transaction", arguments)
    list(map(lambda x: job_utils.wait_until_block_job_completed(vm, x,
                                                                timeout),
             jobs_id))
def commit_snapshots(self):
    """
    Commit an intermediate snapshot range for one or more devices.
    With a single device the blocking helper is used; with several,
    all jobs are started first and awaited afterwards.
    """
    job_id_list = []
    for device in self.params["device_tag"].split():
        device_params = self.params.object_params(device)
        snapshot_tags = device_params["snapshot_tags"].split()
        self.device_node = self.get_node_name(device)
        options = ["base-node", "top-node", "speed"]
        arguments = self.params.copy_from_keys(options)
        # commit the range base (original image) .. next-to-last snapshot;
        # requires at least two snapshots per device
        arguments["base-node"] = self.get_node_name(device)
        arguments["top-node"] = self.get_node_name(snapshot_tags[-2])
        # rebind to the active (top) node that the commit is issued on
        device = self.get_node_name(snapshot_tags[-1])
        if len(self.params["device_tag"].split()) == 1:
            # single device: block until the commit finishes
            backup_utils.block_commit(self.main_vm, device, **arguments)
        else:
            # multiple devices: start the jobs in parallel, wait below
            commit_cmd = backup_utils.block_commit_qmp_cmd
            cmd, args = commit_cmd(device, **arguments)
            job_id = args.get("job-id", device)
            job_id_list.append(job_id)
            self.main_vm.monitor.cmd(cmd, args)
    for job_id in job_id_list:
        job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def do_incremental_backup(self):
    """
    Start an incremental backup that is expected to hit a target I/O
    error (undersized target LV), extend the LV, then wait for the job
    to complete.
    """
    extra_options = {"sync": self.params["inc_sync_mode"],
                     "bitmap": self._bitmaps[0],
                     "on-target-error": self.params["on_target_error"],
                     "auto_disable_bitmap": False}
    inc_backup = backup_utils.blockdev_backup_qmp_cmd
    cmd, arguments = inc_backup(self._source_nodes[0],
                                self.params["inc_node"],
                                **extra_options)
    self.main_vm.monitor.cmd(cmd, arguments)
    # bug fix: params values are strings when set in the config, but the
    # value is used as a numeric timeout below -- convert explicitly
    timeout = int(self.params.get("job_timeout", 600))
    job_id = arguments.get("job-id", self._source_nodes[0])
    get_event = job_utils.get_event_by_condition
    # the job must report BLOCK_JOB_ERROR before we extend the LV
    event = get_event(self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT,
                      timeout, device=job_id, action='ignore')
    if not event:
        self.test.fail("Backup job can't reach error after %s seconds"
                       % timeout)
    # enlarge the target so the paused/erroring job can make progress
    process.system(self.params['lv_extend_cmd'], ignore_status=False,
                   shell=True)
    job_utils.wait_until_block_job_completed(self.main_vm, job_id, timeout)
def commit_snapshots(self):
    """
    Exercise job life-cycle operations (stop/cont, job-pause/resume,
    job-cancel and, when auto-dismiss is off, job-dismiss) against a
    throttled commit job, then restart the commit and let it complete.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["speed", "auto-finalize", "auto-dismiss"]
    arguments = self.params.copy_from_keys(options)
    arguments["speed"] = self.params["commit_speed"]
    arguments["auto-finalize"] = self.params.get_boolean("finalize")
    arguments["auto-dismiss"] = self.params.get_boolean("dismiss")
    device = self.get_node_name(snapshot_tags[-1])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(device, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    job_id = args.get("job-id", device)
    # keep the job slow so the operations below hit a running job
    self.main_vm.monitor.cmd("block-job-set-speed", {
        'device': job_id,
        'speed': 10240
    })
    self.commit_op("stop")
    self.commit_op("cont")
    self.commit_op("job-pause", {'id': job_id})
    self.commit_op("job-resume", {'id': job_id})
    self.commit_op("job-cancel", {'id': job_id})
    event = job_utils.get_event_by_condition(self.main_vm,
                                             'BLOCK_JOB_CANCELLED',
                                             self.params.get_numeric(
                                                 'job_cancelled_timeout', 60),
                                             device=job_id)
    if event is None:
        self.test.fail('Job failed to cancel')
    if not self.params.get_boolean("dismiss"):
        # with auto-dismiss off, the cancelled job must be dismissed
        # manually before a new job with the same id can be started
        self.commit_op("job-dismiss", {'id': job_id})
    # restart the same commit and let it run to completion at full speed
    self.main_vm.monitor.cmd(cmd, args)
    self.main_vm.monitor.cmd("block-job-set-speed", {
        'device': job_id,
        'speed': 0
    })
    job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def commit_snapshots(self):
    """
    Start a throttled commit job, then set the job speed to a (possibly
    invalid) value: on QMP error check the message, on success check the
    reported speed, unthrottle and wait for completion.
    """
    device = self.params.get("device_tag")
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    options = ["speed"]
    arguments = self.params.copy_from_keys(options)
    arguments["speed"] = self.params["speed"]
    device = self.get_node_name(snapshot_tags[-1])
    commit_cmd = backup_utils.block_commit_qmp_cmd
    cmd, args = commit_cmd(device, **arguments)
    self.main_vm.monitor.cmd(cmd, args)
    job_id = args.get("job-id", device)
    job_utils.check_block_jobs_started(self.main_vm, [job_id])
    small_speed = self.params.get_numeric("small_speed")
    large_speed = self.params.get_numeric("large_speed")
    # NOTE(review): randint is evaluated even when 'commit_speed' is set
    # (dict.get default args are eager) -- harmless but wasteful
    commit_speed = self.params.get(
        "commit_speed", random.randint(small_speed, large_speed))
    if self.params.get_boolean("speed_is_int", True):
        commit_speed = int(commit_speed)
    try:
        self.main_vm.monitor.cmd("block-job-set-speed", {
            'device': job_id,
            'speed': commit_speed
        })
    except QMPCmdError as e:
        # invalid speed: only the configured error message is acceptable
        logging.info("Error message is %s", e.data)
        if self.params.get("error_msg") not in str(e.data):
            self.test.fail("Error message not as expected")
    else:
        # speed accepted: the job info must reflect the new value
        # NOTE(review): assumes ours is the only (first) block job
        output = job_utils.query_block_jobs(self.main_vm)
        if output[0]["speed"] != commit_speed:
            self.test.fail("Commit speed set failed")
        self.main_vm.monitor.cmd("block-job-set-speed", {
            'device': job_id,
            'speed': 0
        })
        job_utils.wait_until_block_job_completed(self.main_vm, job_id)
def commit_snapshots(self):
    """
    Start a commit job with on-error=ignore against an undersized
    backend, wait for the job to report an error, extend/resize the
    backend, then wait for the job to complete.
    """
    device = self.params["device_tag"].split()[0]
    device_params = self.params.object_params(device)
    snapshot_tags = device_params["snapshot_tags"].split()
    self.device_node = self.get_node_name(device)
    device = self.get_node_name(snapshot_tags[-1])
    arguments = {}
    # keep the job alive on I/O error so it can resume after the extend
    arguments.update({"on-error": "ignore"})
    cmd, arguments = backup_utils.block_commit_qmp_cmd(device, **arguments)
    # bug fix: params values are strings when set in the config, but the
    # value is used as a numeric timeout -- convert explicitly
    timeout = int(self.params.get("job_timeout", 600))
    self.main_vm.monitor.cmd(cmd, arguments)
    job_id = arguments.get("job-id", device)
    get_event = job_utils.get_event_by_condition
    event = get_event(self.main_vm, job_utils.BLOCK_JOB_ERROR_EVENT,
                      timeout, device=job_id, action='ignore')
    if not event:
        # bug fix: test.fail() takes a single message string, not
        # logging-style (fmt, args) -- format before passing
        self.test.fail("Commit job can't reach error after %s seconds"
                       % timeout)
    # enlarge the backend so the erroring job can make progress
    process.system(self.params["extend_backend_space"])
    process.system(self.params["resize_backend_size"])
    job_utils.wait_until_block_job_completed(self.main_vm, job_id, timeout)
def blockdev_batch_backup(vm, source_lst, target_lst, bitmap_lst,
                          **extra_options):
    """
    Run one blockdev-backup per source in a single QMP transaction and
    wait for all jobs to complete.

    :param vm: VM object whose monitor issues the commands
    :param source_lst: list of source node names
    :param target_lst: list of target node names, parallel to source_lst
    :param bitmap_lst: list of bitmap names, parallel to source_lst
    :param extra_options: recognized keys include timeout,
        completion_mode, sync, granularity, persistent; the rest is
        forwarded to blockdev_backup_qmp_cmd
    """
    actions = []
    jobs_id = []
    bitmap_add_cmd = "block-dirty-bitmap-add"
    timeout = int(extra_options.pop("timeout", 600))
    completion_mode = extra_options.pop("completion_mode", None)
    sync_mode = extra_options.get("sync")
    for idx, src in enumerate(source_lst):
        if sync_mode in ["incremental", "bitmap"]:
            # incremental/bitmap sync needs one bitmap per source;
            # extra_options is mutated per iteration on purpose
            assert len(bitmap_lst) == len(
                source_lst
            ), "must provide a valid bitmap name for 'incremental' sync mode"
            extra_options["bitmap"] = bitmap_lst[idx]
        backup_cmd, arguments = blockdev_backup_qmp_cmd(
            src, target_lst[idx], **extra_options)
        job_id = arguments.get("job-id", src)
        jobs_id.append(job_id)
        actions.append({"type": backup_cmd, "data": arguments})
        if bitmap_lst and sync_mode == 'full':
            # full sync: create the bitmap atomically with the backup
            # start so it tracks writes from that point on
            bitmap_data = {"node": source_lst[idx], "name": bitmap_lst[idx]}
            granularity = extra_options.get("granularity")
            persistent = extra_options.get("persistent")
            if granularity is not None:
                bitmap_data["granularity"] = int(granularity)
            if persistent is not None:
                bitmap_data["persistent"] = persistent
            actions.append({"type": bitmap_add_cmd, "data": bitmap_data})
    arguments = {"actions": actions}
    if completion_mode == 'grouped':
        # grouped completion: all jobs fail or complete together
        arguments['properties'] = {"completion-mode": "grouped"}
    vm.monitor.cmd("transaction", arguments)
    list(
        map(lambda x: job_utils.wait_until_block_job_completed(vm, x,
                                                               timeout),
            jobs_id))
def stream_with_clone_vm(self):
    """Run block-stream on the cloned VM's top device and wait for it."""
    stream_job = backup_utils.blockdev_stream_nowait(
        self.clone_vm, self._top_device, **self._stream_options)
    job_utils.wait_until_block_job_completed(self.clone_vm, stream_job)
def blockdev_stream(vm, device, **extra_options):
    """Issue block-stream on *device* and block until the job completes."""
    wait_timeout = int(extra_options.pop("timeout", 600))
    stream_cmd, stream_args = blockdev_stream_qmp_cmd(device, **extra_options)
    vm.monitor.cmd(stream_cmd, stream_args)
    job_utils.wait_until_block_job_completed(
        vm, stream_args.get("job-id", device), wait_timeout)
def blockdev_mirror(vm, source, target, **extra_options):
    """Mirror *source* to *target* and wait for the mirror job to finish."""
    wait_timeout = int(extra_options.pop("timeout", 600))
    mirror_job = blockdev_mirror_nowait(vm, source, target, **extra_options)
    job_utils.wait_until_block_job_completed(vm, mirror_job, wait_timeout)
def blockdev_stream(vm, device, **extra_options):
    """Start a block-stream job on *device* and wait until it completes."""
    wait_timeout = int(extra_options.pop("timeout", 600))
    stream_job = blockdev_stream_nowait(vm, device, **extra_options)
    job_utils.wait_until_block_job_completed(vm, stream_job, wait_timeout)
def wait_stream_job_completed(self):
    """
    Wait till the stream job completed.

    Caches are dropped afterwards even if the wait raises, so later
    read-back checks hit the disk rather than the page cache.
    """
    try:
        job_utils.wait_until_block_job_completed(self.main_vm, self._job)
    finally:
        memory.drop_caches()
def blockdev_mirror(vm, source, target, **extra_options):
    """Issue blockdev-mirror and block until the mirror job completes."""
    # command args are built before 'timeout' is popped, matching the
    # companion helpers in this module
    mirror_cmd, mirror_args = blockdev_mirror_qmp_cmd(source, target,
                                                      **extra_options)
    wait_timeout = int(extra_options.pop("timeout", 600))
    vm.monitor.cmd(mirror_cmd, mirror_args)
    job_utils.wait_until_block_job_completed(
        vm, mirror_args.get("job-id", source), wait_timeout)