def download_backup(connection, backup_uuid, args, incremental=False):
    """
    Download every disk included in a ready backup.

    Each disk image is written under args.backup_dir as
    "<disk-id>.<timestamp>.<full|incremental>.qcow2". A disk that was
    not part of the previous checkpoint is downloaded as a full backup
    even when incremental=True.

    Raises RuntimeError if the backup does not exist or is not in the
    READY phase.
    """
    backup_service = get_backup_service(connection, args.vm_uuid, backup_uuid)

    try:
        backup = backup_service.get()
    except sdk.NotFoundError:
        raise RuntimeError("Backup {} not found".format(backup_uuid))

    if backup.phase != types.BackupPhase.READY:
        raise RuntimeError("Backup {} is not ready".format(backup_uuid))

    # One timestamp for all disks so the files of a single backup sort
    # together.
    stamp = time.strftime("%Y%m%d%H%M")

    for disk in backup_service.disks_service().list():
        use_incremental = incremental
        mode = get_disk_backup_mode(connection, disk)
        if use_incremental and mode != types.DiskBackupMode.INCREMENTAL:
            # if the disk wasn't a part of the previous checkpoint a
            # full backup is taken
            progress("The backup that was taken for disk %s is %s"
                     % (disk.id, mode))
            use_incremental = False

        if use_incremental:
            backup_type = "incremental"
        else:
            backup_type = "full"

        out_name = "{}.{}.{}.qcow2".format(disk.id, stamp, backup_type)
        download_disk(
            connection,
            backup_uuid,
            disk,
            os.path.join(args.backup_dir, out_name),
            args,
            incremental=use_incremental)
def cmd_stop(args):
    """
    Stop a backup using the backup UUID printed by the start command.
    """
    progress("Finalizing backup %s" % args.backup_uuid)

    with closing(common.create_connection(args)) as connection:
        stop_backup(connection, args.backup_uuid, args)

    progress("Backup %s has finalized" % args.backup_uuid)
def stop_backup(connection, backup_uuid, args):
    """
    Finalize a backup and poll until the engine reports the backup in
    the FINALIZING phase.
    """
    backup_service = get_backup_service(connection, args.vm_uuid, backup_uuid)
    backup_service.finalize()

    progress("Waiting until backup is finalized")
    while True:
        backup = backup_service.get()
        if backup.phase == types.BackupPhase.FINALIZING:
            break
        time.sleep(1)
def get_image_info(filename):
    """
    Return qemu-img's JSON description of the image at filename.

    Raises RuntimeError when the image format is neither qcow2 nor raw.
    """
    progress("Checking image...")

    cmd = ["qemu-img", "info", "--output", "json", filename]
    image_info = json.loads(subprocess.check_output(cmd))

    if image_info["format"] not in ("qcow2", "raw"):
        raise RuntimeError("Unsupported image format %(format)s" % image_info)

    return image_info
def cmd_download(args):
    """
    Download a backup using the backup UUID printed by the start command.
    """
    progress("Downloading VM %s disks" % args.vm_uuid)

    with closing(common.create_connection(args)) as connection:
        download_backup(
            connection,
            args.backup_uuid,
            args,
            incremental=args.incremental)

    progress("Finished downloading disks")
def cmd_incremental(args):
    """
    Run the complete incremental backup flow: start, download, stop.

    The backup is always finalized, even when the download fails.
    """
    progress("Starting incremental backup for VM %s" % args.vm_uuid)

    with closing(common.create_connection(args)) as connection:
        backup = start_backup(connection, args)
        try:
            download_backup(connection, backup.id, args, incremental=True)
        finally:
            stop_backup(connection, backup.id, args)

    progress("Incremental backup completed successfully")
def cmd_full(args):
    """
    Run the complete full backup flow: start, download, stop.

    The backup is always finalized, even when the download fails.
    """
    progress("Starting full backup for VM %s" % args.vm_uuid)

    with closing(common.create_connection(args)) as connection:
        # A full backup must not reference a previous checkpoint.
        args.from_checkpoint_uuid = None
        backup = start_backup(connection, args)
        try:
            download_backup(connection, backup.id, args)
        finally:
            stop_backup(connection, backup.id, args)

    progress("Full backup completed successfully")
def start_backup(connection, args):
    """
    Start a backup of VM args.vm_uuid and wait until it is ready.

    If args.disk_uuid is set, only those disks are backed up, otherwise
    all VM disks are included. args.from_checkpoint_uuid, when set,
    requests an incremental backup since that checkpoint.

    Returns the Backup object once it reaches the READY phase.

    Raises RuntimeError if the VM does not exist, or if the backup
    disappears (failed) before becoming ready.
    """
    system_service = connection.system_service()
    vm_service = system_service.vms_service().vm_service(id=args.vm_uuid)

    # Verify the VM exists before adding a backup to it, so we can
    # report a clear error instead of a generic SDK failure.
    try:
        vm_service.get()
    except sdk.NotFoundError:
        raise RuntimeError(
            "VM {} does not exist".format(args.vm_uuid)) from None

    backups_service = vm_service.backups_service()

    if args.disk_uuid:
        disks = [types.Disk(id=disk_id) for disk_id in args.disk_uuid]
    else:
        disks = get_vm_disks(connection, args.vm_uuid)

    backup = backups_service.add(
        types.Backup(
            disks=disks,
            from_checkpoint_id=args.from_checkpoint_uuid
        )
    )

    progress("Waiting until backup %s is ready" % backup.id)

    backup_service = backups_service.backup_service(backup.id)

    while backup.phase != types.BackupPhase.READY:
        time.sleep(1)
        try:
            backup = backup_service.get()
        except sdk.NotFoundError:
            # A backup that disappears while polling has failed; report
            # the latest backup event instead of the raw "not found".
            events = get_backup_events(connection, backup.id)
            # Guard against an empty event list so an IndexError does
            # not mask the real failure.
            failure_event = events[0] if events else "(no backup events found)"
            raise RuntimeError(
                "Backup {} failed: {}".format(backup.id, failure_event))

    if backup.to_checkpoint_id is not None:
        progress(
            "Created checkpoint %r (to use in --from-checkpoint-uuid "
            "for the next incremental backup)" % backup.to_checkpoint_id)

    return backup
def cmd_start(args):
    """
    Start a backup, printing the backup UUID.

    Run the download command to download the backup, and the stop
    command to finalize it.
    """
    if args.from_checkpoint_uuid:
        progress("Starting incremental backup since checkpoint %r for VM %r"
                 % (args.from_checkpoint_uuid, args.vm_uuid))
    else:
        progress("Starting full backup for VM %r" % args.vm_uuid)

    with closing(common.create_connection(args)) as connection:
        backup = start_backup(connection, args)

    progress("Backup %s is ready" % backup.id)
def download_disk(connection, backup_uuid, disk, disk_path, args,
                  incremental=False):
    """
    Download a single disk from a backup into disk_path.

    The image transfer is always finalized, even when the download
    fails.
    """
    progress("Creating image transfer for disk %s" % disk.id)
    transfer = imagetransfer.create_transfer(
        connection,
        disk,
        types.ImageTransferDirection.DOWNLOAD,
        backup=types.Backup(id=backup_uuid))
    try:
        progress("Image transfer %s is ready" % transfer.id)

        # Pass optional arguments only when this client version
        # supports them.
        supported = inspect.signature(client.download).parameters
        extra_args = {}

        # Use multiple workers to speed up the download.
        if "max_workers" in supported:
            extra_args["max_workers"] = args.max_workers

        # Use proxy_url if available. Download will use proxy_url if
        # transfer_url is not available.
        if "proxy_url" in supported:
            extra_args["proxy_url"] = transfer.proxy_url

        with client.ProgressBar() as pb:
            client.download(
                transfer.transfer_url,
                disk_path,
                args.cafile,
                incremental=incremental,
                secure=args.secure,
                buffer_size=args.buffer_size,
                progress=pb,
                **extra_args)
    finally:
        progress("Finalizing image transfer")
        imagetransfer.finalize_transfer(connection, transfer, disk)
if primary_volume_descriptor == b"\x01CD001\x01\x00": content_type = types.DiskContentType.ISO disk_info["content_type"] = content_type return disk_info args = parse_args() common.configure_logging(args) # Get image and disk info using qemu-img image_info = get_image_info(args.filename) disk_info = get_disk_info(args, image_info) progress("Image format: %s" % image_info["format"]) progress("Disk format: %s" % disk_info["format"]) progress("Disk content type: %s" % disk_info["content_type"]) progress("Disk provisioned size: %s" % disk_info["provisioned_size"]) progress("Disk initial size: %s" % disk_info["initial_size"]) progress("Disk name: %s" % disk_info["name"]) progress("Disk backup: %s" % args.enable_backup) # This example will connect to the server and create a new `floating` # disk, one that isn't attached to any virtual machine. # Then using transfer service it will transfer disk data from local # image to the newly created disk in server. progress("Connecting...") connection = common.create_connection(args)
parser.add_argument( "--buffer-size", type=units.humansize, default=client.BUFFER_SIZE, help="Buffer size per worker. The default ({}) gives good " "performance with the default number of workers. If you use " "smaller number of workers you may want use larger value.".format( client.BUFFER_SIZE)) return parser.parse_args() args = parse_args() common.configure_logging(args) progress("Connecting...") connection = common.create_connection(args) # Get the reference to the disks service: disks_service = connection.system_service().disks_service() # Find the disk we want to download by the id: disk_service = disks_service.disk_service(args.disk_uuid) disk = disk_service.get() # Find a host for this transfer. This is an optional step allowing optimizing # the transfer using unix socket when running this code on a oVirt hypervisor # in the same data center. sd_id = disk.storage_domains[0].id sds_service = connection.system_service().storage_domains_service() storage_domain = sds_service.storage_domain_service(sd_id).get()
backup. """ import time from contextlib import closing import ovirtsdk4 as sdk from helpers import common from helpers.common import progress parser = common.ArgumentParser(description="Remove VM checkpoint") parser.add_argument("vm_uuid", help="VM UUID for removing checkpoint.") args = parser.parse_args() common.configure_logging(args) progress("Removing root checkpoint for VM %r" % args.vm_uuid) # Create a connection to the server connection = common.create_connection(args) with closing(connection): progress("Looking up checkpoints %s" % args.vm_uuid) system_service = connection.system_service() vm_service = system_service.vms_service().vm_service(id=args.vm_uuid) checkpoints_service = vm_service.checkpoints_service() # Validate that the VM has checkpoints checkpoints = checkpoints_service.list() if not checkpoints: raise RuntimeError("VM {} has no checkpoints".format(args.vm_uuid)) # Get the first checkpoint in the chain
# Wait till the disk is OK, as the transfer can't start if the disk is # locked. disk_service = disks_service.disk_service(disk.id) while True: time.sleep(1) disk = disk_service.get() if disk.status == types.DiskStatus.OK: break return disk args = parse_args() common.configure_logging(args) progress("Checking image...") image_info = client.info(args.ova_file, member=args.ova_disk_name) disk_info = get_disk_info(args, image_info) progress("Image format: {}".format(image_info["format"])) progress("Disk name: {}".format(disk_info["name"])) progress("Disk format: {}".format(disk_info["format"])) progress("Disk provisioned size: {}".format(disk_info["provisioned_size"])) progress("Disk initial size: {}".format(disk_info["initial_size"])) progress("Disk backup: {}".format(disk_info["backup"])) connection = common.create_connection(args) with closing(connection): progress("Creating disk...") disk = create_disk(connection, args, disk_info)
from ovirtsdk4 import types from helpers import common from helpers.common import progress parser = common.ArgumentParser(description="Reduce disk") parser.add_argument( "disk_id", help="disk UUID to reduce") args = parser.parse_args() common.configure_logging(args) progress("Connecting...") connection = common.create_connection(args) with closing(connection): # Locate the disk service. disks_service = connection.system_service().disks_service() disk_service = disks_service.disk_service(args.disk_id) try: disk = disk_service.get() except sdk.NotFoundError: raise RuntimeError("No such disk: {}".format(args.disk_id)) from None # TODO: It would be nice to show here the original allocation before and # after the reduce, but engine reports cached value which is not helpful. progress("Reducing disk...")
"--inactivity-timeout", type=int, help="Keep the transfer alive for specified number of seconds if " "the client is not active. (default 60)") parser.add_argument( "--read-delay", type=int, default=50, help="Keep the connection alive by reading from the server every " "read-delay seconds (default 50).") args = parser.parse_args() common.configure_logging(args) progress("Connecting to engine...") connection = common.create_connection(args) with closing(connection): progress("Looking up disk %s" % args.disk_uuid) system_service = connection.system_service() disks_service = connection.system_service().disks_service() disk_service = disks_service.disk_service(args.disk_uuid) disk = disk_service.get() progress("Creating image transfer for %s" % args.direction) if args.direction == "upload": direction = types.ImageTransferDirection.UPLOAD else: direction = types.ImageTransferDirection.DOWNLOAD
parser.add_argument( "--sparse", action="store_true", help="Create sparse disk. Cannot be used with raw format on " "iSCSI or FC storage domain.") parser.add_argument("--count", type=int, help="Number of disks to create.") args = parser.parse_args() if args.format == "raw": disk_format = types.DiskFormat.RAW else: disk_format = types.DiskFormat.COW progress("Connecting...") connection = common.create_connection(args) with closing(connection): disks_service = connection.system_service().disks_service() waiting = set() for i in range(args.count): disk = disks_service.add(disk=types.Disk( name="disk-%s" % i, content_type=types.DiskContentType.DATA, description='Created by add_disks.py', format=disk_format, provisioned_size=args.size, sparse=args.sparse, storage_domains=[types.StorageDomain(name=args.sd_name)]))