def test_incremental_backup_with_backup_mode(tmp_dirs):
    """
    Verify that when a disk's backup_mode is switched to 'incremental',
    the backup XML requests incremental mode for that disk only.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    disks = create_fake_disks(fake_vm, backup_mode=backup.MODE_FULL)

    # Run a full backup first so the incremental backup has a
    # checkpoint to start from.
    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    })
    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)

    # Prepare the incremental backup.
    socket = backup.socket_path(BACKUP_2_ID)
    scratch1 = scratch_disk_path(fake_vm, BACKUP_2_ID, "sda")
    scratch2 = scratch_disk_path(fake_vm, BACKUP_2_ID, "vda")
    adapter.output_checkpoints = [CHECKPOINT_1]

    # Only the vda disk is backed up incrementally.
    for disk in disks:
        if disk["imageID"] == IMAGE_2_UUID:
            disk["backup_mode"] = backup.MODE_INCREMENTAL

    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_2_ID,
        'disks': disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    })

    backup_xml = f"""
        <domainbackup mode='pull'>
            <incremental>{CHECKPOINT_1_ID}</incremental>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full' exportname='sda' index='9'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='incremental' incremental='{CHECKPOINT_1_ID}' exportname='vda' index='10'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(adapter.backupGetXMLDesc()) == normalized(backup_xml)
def test_cd_xml_on_block_storage(tmpdir, vm_with_cd):
    """
    Changing a CD using a drive spec on block storage must produce a
    type="block" disk device with a /run/storage source path.
    """
    cd_file = tmpdir.join("fake_cd")
    cd_file.write("test")

    domain_id = str(uuid.uuid4())
    image_id = str(uuid.uuid4())
    volume_id = str(uuid.uuid4())
    drive_spec = {
        "device": "cdrom",
        "domainID": domain_id,
        "poolID": str(uuid.uuid4()),
        "imageID": image_id,
        "volumeID": volume_id,
    }
    spec = {
        "iface": "sata",
        "index": "2",
        "drive_spec": drive_spec,
    }

    # Pretend we are on the block storage.
    vm_with_cd.cif.irs.sd_types[domain_id] = storage.DISK_TYPE.BLOCK

    vm_with_cd.changeCD(spec)

    expected_dev_xml = """\
<?xml version='1.0' encoding='utf-8'?>
<disk type="block" device="cdrom">
    <source dev="/run/storage/{}/{}/{}" />
    <target dev="sdc" bus="sata" />
</disk>""".format(domain_id, image_id, volume_id)

    assert normalized(expected_dev_xml) == normalized(vm_with_cd._dom.devXml)
def test_start_stop_backup_with_checkpoint(
        tmp_backupdir, tmp_basedir, disks_in_checkpoint,
        expected_checkpoint_xml):
    """
    Start and stop a backup that creates a checkpoint, verifying the
    checkpoint XML sent to libvirt and the scratch disks lifecycle.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    disks = create_fake_disks(disks_in_checkpoint)

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    })
    assert adapter.backing_up
    assert normalized(expected_checkpoint_xml) == normalized(
        adapter.input_checkpoint_xml)
    verify_scratch_disks_exists(fake_vm)

    # The guest was frozen and thawed while starting the backup.
    assert fake_vm.froze
    assert fake_vm.thawed

    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)
def test_full_backup_with_backup_mode(tmp_backupdir, tmp_basedir):
    """
    When every disk's backup_mode is 'full', the backup XML passed to
    libvirt must request backupmode="full" for both disks.
    """
    fake_vm = FakeVm()
    server_socket = backup.socket_path(BACKUP_1_ID)
    scratch_paths = _get_scratch_disks_path(BACKUP_1_ID)

    expected_backup_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk backupmode="full" name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk backupmode="full" name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(server_socket, scratch_paths[0], scratch_paths[1])

    adapter = FakeDomainAdapter()
    disks = create_fake_disks(backup_mode=backup.MODE_FULL)

    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })

    assert normalized(expected_backup_xml) == normalized(
        adapter.input_backup_xml)
def test_incremental_backup_with_backup_mode(tmp_backupdir, tmp_basedir):
    """
    After a full backup, switching one disk's backup_mode to
    'incremental' must produce a mixed full/incremental backup XML.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    disks = create_fake_disks(backup_mode=backup.MODE_FULL)

    # Run a full backup first so the incremental backup has a
    # checkpoint to start from.
    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    })
    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)

    # Prepare the incremental backup.
    server_socket = backup.socket_path(BACKUP_2_ID)
    scratch_paths = _get_scratch_disks_path(BACKUP_2_ID)
    expected_backup_xml = """
        <domainbackup mode='pull'>
            <incremental>{}</incremental>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk backupmode="full" name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk backupmode="incremental" incremental='{}' name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(CHECKPOINT_1_ID, server_socket, scratch_paths[0],
                   CHECKPOINT_1_ID, scratch_paths[1])

    adapter.output_checkpoints = [CHECKPOINT_1]

    # Only the vda disk is backed up incrementally.
    for disk in disks:
        if disk["imageID"] == IMAGE_2_UUID:
            disk["backup_mode"] = backup.MODE_INCREMENTAL

    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_2_ID,
        'disks': disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    })

    assert normalized(expected_backup_xml) == normalized(
        adapter.input_backup_xml)
def _test_hook(self, xml, modified_xml, domain='foo', event='migrate',
               phase='begin'):
    """
    Run the libvirt VM hook on *xml* and assert that it emits
    *modified_xml* (compared after normalization).
    """
    hook_input = io.StringIO(xml)
    hook_output = io.StringIO()
    vm_libvirt_hook.main(
        domain, event, phase, stdin=hook_input, stdout=hook_output)
    assert normalized(hook_output.getvalue()) == normalized(modified_xml)
def test_start_stop_backup_transient_scratch_disk(tmp_dirs):
    """
    Full backup using vdsm-created transient (file based) scratch
    disks: verify the backup XML, freeze/thaw, backup URLs, and that
    scratch disks are created and then removed on stop.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = scratch_disk_path(fake_vm, BACKUP_1_ID, "sda")
    scratch2 = scratch_disk_path(fake_vm, BACKUP_1_ID, "vda")
    disks = create_fake_disks(fake_vm)

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })
    assert adapter.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full' exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full' exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(adapter.backupGetXMLDesc()) == normalized(backup_xml)

    # We don't monitor file based scratch disks.
    for drive in fake_vm.drives.values():
        assert drive.scratch_disk is None

    verify_scratch_disks_exists(fake_vm)

    # The guest was frozen and thawed while starting the backup.
    assert fake_vm.froze
    assert fake_vm.thawed

    # No checkpoint was requested, so none is reported.
    assert 'checkpoint' not in res['result']

    verify_backup_urls(fake_vm, BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)
def test_start_stop_backup_engine_scratch_disks(tmp_scratch_disks):
    """
    Backup using engine-provided scratch disk paths: the paths must be
    passed through to the backup XML unchanged.
    """
    fake_vm = FakeVm()
    server_socket = backup.socket_path(BACKUP_1_ID)

    expected_backup_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(server_socket, tmp_scratch_disks[0], tmp_scratch_disks[1])

    adapter = FakeDomainAdapter()
    disks = create_fake_disks()

    # Set the scratch disks path to the disks
    # TODO: add tests for scratch disks on block storage domain.
    disks[0]['scratch_disk'] = {
        'path': tmp_scratch_disks[0],
        'type': DISK_TYPE.FILE
    }
    disks[1]['scratch_disk'] = {
        'path': tmp_scratch_disks[1],
        'type': DISK_TYPE.FILE
    }

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })

    assert normalized(expected_backup_xml) == normalized(
        adapter.input_backup_xml)
    assert adapter.backing_up

    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
def test_start_stop_backup(tmp_backupdir, tmp_basedir):
    """
    Basic full backup flow: verify the backup XML sent to libvirt,
    freeze/thaw, backup URLs, and the scratch disks lifecycle.
    """
    fake_vm = FakeVm()
    server_socket = backup.socket_path(BACKUP_1_ID)
    scratch_paths = _get_scratch_disks_path(BACKUP_1_ID)

    expected_backup_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(server_socket, scratch_paths[0], scratch_paths[1])

    adapter = FakeDomainAdapter()
    disks = create_fake_disks()

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })

    assert normalized(expected_backup_xml) == normalized(
        adapter.input_backup_xml)
    assert adapter.backing_up
    verify_scratch_disks_exists(fake_vm)

    # The guest was frozen and thawed while starting the backup.
    assert fake_vm.froze
    assert fake_vm.thawed

    # No checkpoint was requested, so none is reported.
    assert 'checkpoint' not in res['result']

    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)
def test_change_loaded_cd(tmpdir, vm_with_cd):
    """
    Changing a loaded CD with a plain path must attach a type="file"
    cdrom device pointing at that path.
    """
    cd_path = str(tmpdir.join("fake_cd"))
    with open(cd_path, "w") as cd_file:
        cd_file.write("test")

    change_spec = {
        "path": cd_path,
        "iface": "sata",
        "index": "2",
    }
    vm_with_cd.changeCD(change_spec)

    expected_dev_xml = """\
<?xml version='1.0' encoding='utf-8'?>
<disk type="file" device="cdrom">
    <source file="{}" />
    <target dev="sdc" bus="sata" />
</disk>""".format(cd_path)

    assert normalized(expected_dev_xml) == normalized(vm_with_cd._dom.devXml)
def checkpointCreateXML(self, checkpoint_xml, flags=None):
    """
    Fake of libvirt's checkpointCreateXML: require redefine+validate
    flags, and accept only checkpoint XML matching one of the
    configured output_checkpoints.
    """
    expected_flags = (
        libvirt.VIR_DOMAIN_CHECKPOINT_CREATE_REDEFINE |
        libvirt.VIR_DOMAIN_CHECKPOINT_CREATE_REDEFINE_VALIDATE)
    assert flags == expected_flags

    # Validate the given checkpoint XML against the configured
    # output_checkpoints; when none are configured, validation is
    # skipped.
    if not self.output_checkpoints:
        return

    actual_xml = normalized(checkpoint_xml)
    if any(actual_xml == normalized(cp.getXMLDesc())
           for cp in self.output_checkpoints):
        return

    raise fake.libvirt_error(
        [libvirt.VIR_ERR_INVALID_DOMAIN_CHECKPOINT, '',
         "Invalid checkpoint error"],
        "Fake checkpoint error")
def test_full_backup_with_backup_mode(tmp_dirs):
    """
    When every disk's backup_mode is 'full', the backup XML reported
    by the domain must request backupmode='full' for both disks.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = scratch_disk_path(fake_vm, BACKUP_1_ID, "sda")
    scratch2 = scratch_disk_path(fake_vm, BACKUP_1_ID, "vda")
    disks = create_fake_disks(fake_vm, backup_mode=backup.MODE_FULL)

    backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })
    assert adapter.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full' exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full' exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(adapter.backupGetXMLDesc()) == normalized(backup_xml)
def test_incremental_backup(tmp_backupdir, tmp_basedir):
    """
    Full backup followed by an incremental backup from the created
    checkpoint: verify backup and checkpoint XML, freeze/thaw, backup
    URLs and scratch disks lifecycle.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    disks = create_fake_disks()

    # Run a full backup to create the first checkpoint.
    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    })
    assert adapter.backing_up
    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)

    # Start the incremental backup based on the first checkpoint.
    server_socket = backup.socket_path(BACKUP_2_ID)
    scratch_paths = _get_scratch_disks_path(BACKUP_2_ID)
    expected_backup_xml = """
        <domainbackup mode='pull'>
            <incremental>{}</incremental>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(CHECKPOINT_1_ID, server_socket, scratch_paths[0],
                   scratch_paths[1])

    adapter.output_checkpoints = [CHECKPOINT_1]

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_2_ID,
        'disks': disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    })
    assert adapter.backing_up
    assert normalized(expected_backup_xml) == normalized(
        adapter.input_backup_xml)
    assert normalized(CHECKPOINT_2_XML) == normalized(
        adapter.input_checkpoint_xml)
    verify_scratch_disks_exists(fake_vm, BACKUP_2_ID)

    # The guest was frozen and thawed while starting the backup.
    assert fake_vm.froze
    assert fake_vm.thawed

    verify_backup_urls(BACKUP_2_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_2_ID)
    verify_scratch_disks_removed(fake_vm)
def test_incremental_backup(tmp_dirs):
    """
    Full backup followed by an incremental backup from the created
    checkpoint: verify the reported backup XML, the checkpoint XML,
    freeze/thaw, backup URLs and scratch disks lifecycle.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    disks = create_fake_disks(fake_vm)

    # Run a full backup to create the first checkpoint.
    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    })
    assert adapter.backing_up
    verify_backup_urls(fake_vm, BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)

    # Start the incremental backup based on the first checkpoint.
    socket = backup.socket_path(BACKUP_2_ID)
    scratch1 = scratch_disk_path(fake_vm, BACKUP_2_ID, "sda")
    scratch2 = scratch_disk_path(fake_vm, BACKUP_2_ID, "vda")
    adapter.output_checkpoints = [CHECKPOINT_1]

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_2_ID,
        'disks': disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    })
    assert adapter.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <incremental>{CHECKPOINT_1_ID}</incremental>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='incremental' incremental='{CHECKPOINT_1_ID}' exportname='sda' index='9'>
                    <driver type="qcow2"/>
                    <scratch file='{scratch1}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='incremental' incremental='{CHECKPOINT_1_ID}' exportname='vda' index='10'>
                    <driver type="qcow2"/>
                    <scratch file='{scratch2}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(adapter.backupGetXMLDesc()) == normalized(backup_xml)
    assert normalized(CHECKPOINT_2_XML) == normalized(
        adapter.input_checkpoint_xml)
    verify_scratch_disks_exists(fake_vm, BACKUP_2_ID)

    # The guest was frozen and thawed while starting the backup.
    assert fake_vm.froze
    assert fake_vm.thawed

    verify_backup_urls(fake_vm, BACKUP_2_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_2_ID)
    verify_scratch_disks_removed(fake_vm)
def test_start_stop_backup_engine_scratch_disks_with_scratch_ids(tmpdir):
    """
    Backup with engine-provided scratch disks that carry storage IDs:
    block based scratch disks are monitored on the drive, file based
    ones are not, and stopping the backup clears the monitoring.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = create_scratch_disk(tmpdir, "scratch1")
    scratch2 = create_scratch_disk(tmpdir, "scratch2")
    disks = create_fake_disks(fake_vm)

    # Block based scratch disk for block based disk; engine sends
    # scratch disk IDs.
    disks[0]['scratch_disk'] = {
        'path': scratch1,
        'type': DISK_TYPE.BLOCK,
        'domainID': make_uuid(),
        'imageID': make_uuid(),
        'volumeID': make_uuid(),
    }
    # File based scratch disk for file based disk; engine sends
    # scratch disk IDs.
    disks[1]['scratch_disk'] = {
        'path': scratch2,
        'type': DISK_TYPE.FILE,
        'domainID': make_uuid(),
        'imageID': make_uuid(),
        'volumeID': make_uuid(),
    }

    res = backup.start_backup(fake_vm, adapter, {
        'backup_id': BACKUP_1_ID,
        'disks': disks
    })
    assert adapter.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='block' backupmode='full' exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch dev='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full' exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model="dac" relabel="no"/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(adapter.backupGetXMLDesc()) == normalized(backup_xml)

    # We monitor only block based scratch disks.
    assert fake_vm.drives[IMAGE_1_UUID].scratch_disk == {
        "index": 7,
        'sd_id': disks[0]['scratch_disk']['domainID'],
        'img_id': disks[0]['scratch_disk']['imageID'],
        'vol_id': disks[0]['scratch_disk']['volumeID'],
    }
    assert fake_vm.drives[IMAGE_2_UUID].scratch_disk is None

    verify_backup_urls(fake_vm, BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up

    # Stopping backup remove the scratch disks from the drives.
    for drive in fake_vm.drives.values():
        assert drive.scratch_disk is None