def test_start_stop_backup_with_checkpoint(
        tmp_backupdir, tmp_basedir, disks_in_checkpoint,
        expected_checkpoint_xml):
    """
    Start a backup that also creates a checkpoint, verify the checkpoint
    XML handed to libvirt, then stop the backup and check cleanup.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': create_fake_disks(disks_in_checkpoint),
        'to_checkpoint_id': CHECKPOINT_1_ID
    }

    res = backup.start_backup(fake_vm, adapter, config)

    assert adapter.backing_up
    # The checkpoint XML passed to the domain must match the expected one,
    # ignoring whitespace differences.
    assert indented(expected_checkpoint_xml) == indented(
        adapter.input_checkpoint_xml)
    verify_scratch_disks_exists(fake_vm)

    # verify that the vm froze and thawed during the backup
    assert fake_vm.froze
    assert fake_vm.thawed

    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)
def test_incremental_backup_xml(tmp_backupdir):
    """
    An incremental backup XML contains an <incremental> element with the
    checkpoint id the backup starts from.
    """
    # drives must be sorted for the disks to appear
    # each time in the same order in the backup XML
    drives = collections.OrderedDict([
        ("img-id-1", FakeDrive("sda", "img-id-1")),
        ("img-id-2", FakeDrive("vda", "img-id-2")),
    ])

    socket_path = backup.socket_path(BACKUP_ID)
    addr = nbdutils.UnixAddress(socket_path)

    backup_xml = backup.create_backup_xml(
        addr, drives, FAKE_SCRATCH_DISKS,
        from_checkpoint_id=FROM_CHECKPOINT_ID)

    expected_xml = """
        <domainbackup mode='pull'>
          <incremental>{}</incremental>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk name='sda' type='file'>
              <scratch file='/path/to/scratch_sda'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk name='vda' type='file'>
              <scratch file='/path/to/scratch_vda'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(FROM_CHECKPOINT_ID, socket_path)
    assert xmlutils.indented(expected_xml) == xmlutils.indented(backup_xml)
def test_full_backup_with_backup_mode(tmp_backupdir, tmp_basedir):
    """
    Disks configured with backup mode 'full' must get a backupmode="full"
    attribute in the generated backup XML.
    """
    fake_vm = FakeVm()
    socket_path = backup.socket_path(BACKUP_1_ID)
    scratch_disk_paths = _get_scratch_disks_path(BACKUP_1_ID)

    expected_xml = """
        <domainbackup mode='pull'>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk backupmode="full" name='sda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk backupmode="full" name='vda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(
        socket_path, scratch_disk_paths[0], scratch_disk_paths[1])

    adapter = FakeDomainAdapter()
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': create_fake_disks(backup_mode=backup.MODE_FULL),
    }
    backup.start_backup(fake_vm, adapter, config)

    assert indented(expected_xml) == indented(adapter.input_backup_xml)
def test_backup_xml(tmp_backupdir):
    """
    create_backup_xml produces the expected pull-mode backup XML for a
    full backup over a unix socket.
    """
    backup_id = 'backup_id'

    # drives must be sorted for the disks to appear
    # each time in the same order in the backup XML
    drives = collections.OrderedDict([
        ("img-id-1", FakeDrive("sda", "img-id-1")),
        ("img-id-2", FakeDrive("vda", "img-id-2")),
    ])

    socket_path = os.path.join(backup.P_BACKUP, backup_id)
    addr = nbdutils.UnixAddress(socket_path)

    backup_xml = backup.create_backup_xml(addr, drives, FAKE_SCRATCH_DISKS)

    expected_xml = """
        <domainbackup mode='pull'>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk name='sda' type='file'>
              <scratch file='/path/to/scratch_sda'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk name='vda' type='file'>
              <scratch file='/path/to/scratch_vda'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(socket_path)
    assert indented(expected_xml) == indented(backup_xml)
def test_incremental_backup_with_backup_mode(tmp_backupdir, tmp_basedir):
    """
    Run a full backup, then an incremental one with per-disk backup modes:
    the backup XML must carry backupmode="full"/"incremental" per disk.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    fake_disks = create_fake_disks(backup_mode=backup.MODE_FULL)

    # start full backup
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }
    backup.start_backup(fake_vm, adapter, config)
    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)

    # start incremental backup
    socket_path = backup.socket_path(BACKUP_2_ID)
    scratch_disk_paths = _get_scratch_disks_path(BACKUP_2_ID)

    expected_xml = """
        <domainbackup mode='pull'>
          <incremental>{}</incremental>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk backupmode="full" name='sda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk backupmode="incremental" incremental='{}' name='vda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(
        CHECKPOINT_1_ID, socket_path, scratch_disk_paths[0],
        CHECKPOINT_1_ID, scratch_disk_paths[1])

    adapter.output_checkpoints = [CHECKPOINT_1]

    # Set vda disk backup_mode to 'incremental'
    for fake_disk in fake_disks:
        if fake_disk["imageID"] == IMAGE_2_UUID:
            fake_disk["backup_mode"] = backup.MODE_INCREMENTAL

    config = {
        'backup_id': BACKUP_2_ID,
        'disks': fake_disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
        'parent_checkpoint_id': CHECKPOINT_1_ID
    }
    backup.start_backup(fake_vm, adapter, config)

    assert indented(expected_xml) == indented(adapter.input_backup_xml)
def test_checkpoint_xml(disks_in_checkpoint, expected_xml):
    """
    create_checkpoint_xml produces the expected checkpoint XML for the
    disks included in the checkpoint.
    """
    config = {
        'backup_id': BACKUP_ID,
        'disks': create_fake_disks(disks_in_checkpoint),
        'to_checkpoint_id': TO_CHECKPOINT_ID,
        'from_checkpoint_id': FROM_CHECKPOINT_ID
    }
    checkpoint_xml = backup.create_checkpoint_xml(
        backup.BackupConfig(config), FAKE_DRIVES)
    assert indented(expected_xml) == indented(checkpoint_xml)
def test_start_stop_backup(tmp_backupdir, tmp_basedir):
    """
    Full start/stop backup flow: verify the backup XML given to libvirt,
    the scratch disks lifecycle, freeze/thaw, and the reported NBD URLs.
    """
    fake_vm = FakeVm()
    socket_path = backup.socket_path(BACKUP_ID)

    # One transient scratch disk per drive, named "<backup_id>.<drive name>".
    scratch_disk_paths = [
        os.path.join(
            transientdisk.P_TRANSIENT_DISKS,
            "vm_id",
            BACKUP_ID + "." + drive.name)
        for drive in FAKE_DRIVES.values()
    ]

    expected_xml = """
        <domainbackup mode='pull'>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk name='sda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk name='vda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(socket_path, scratch_disk_paths[0], scratch_disk_paths[1])

    adapter = FakeDomainAdapter()
    config = {'backup_id': BACKUP_ID, 'disks': create_fake_disks()}

    res = backup.start_backup(fake_vm, adapter, config)

    assert indented(expected_xml) == indented(adapter.input_backup_xml)
    assert adapter.backing_up
    verify_scratch_disks_exists(fake_vm)

    # verify that the vm froze and thawed during the backup
    assert fake_vm.froze
    assert fake_vm.thawed

    # No checkpoint was requested, so none is reported.
    assert 'checkpoint' not in res['result']
    verify_backup_urls(BACKUP_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)
def assertXMLEqual(self, xml, expectedXML):
    """
    Assert that xml is equivalent to expected xml, ignoring
    whitespace differences.

    In case of a mismatch, display normalized xmls to make
    it easier to find the differences.
    """
    normalized = xmlutils.indented(xml)
    normalized_expected = xmlutils.indented(expectedXML)
    message = "XMLs are different:\nActual:\n%s\nExpected:\n%s\n" % (
        normalized, normalized_expected)
    self.assertEqual(normalized, normalized_expected, message)
def checkpointCreateXML(self, checkpoint_xml, flags=None):
    """
    Fake libvirt checkpointCreateXML: accept the checkpoint XML only if
    it matches one of the configured output_checkpoints (whitespace
    normalized), otherwise raise a fake libvirt error.
    """
    assert flags == libvirt.VIR_DOMAIN_CHECKPOINT_CREATE_REDEFINE

    # validate the given checkpoint XML according to the
    # initialized output_checkpoints, in case output_checkpoints
    # isn't initialized the validation will be skipped
    if not self.output_checkpoints:
        return

    given_xml = indented(checkpoint_xml)
    for checkpoint in self.output_checkpoints:
        if given_xml == indented(checkpoint.getXMLDesc()):
            return

    raise fake.libvirt_error([
        libvirt.VIR_ERR_INVALID_DOMAIN_CHECKPOINT, '',
        "Invalid checkpoint error"],
        "Fake checkpoint error")
def test_incremental_backup(tmp_backupdir, tmp_basedir):
    """
    Run a full backup followed by an incremental one from its checkpoint;
    verify the incremental backup and checkpoint XMLs and the cleanup.
    """
    fake_vm = FakeVm()
    adapter = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    # start full backup
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }
    res = backup.start_backup(fake_vm, adapter, config)
    assert adapter.backing_up
    verify_backup_urls(BACKUP_1_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_1_ID)
    assert not adapter.backing_up
    verify_scratch_disks_removed(fake_vm)

    # start incremental backup
    socket_path = backup.socket_path(BACKUP_2_ID)
    scratch_disk_paths = _get_scratch_disks_path(BACKUP_2_ID)

    expected_xml = """
        <domainbackup mode='pull'>
          <incremental>{}</incremental>
          <server transport='unix' socket='{}'/>
          <disks>
            <disk name='sda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
            <disk name='vda' type='file'>
              <scratch file='{}'>
                <seclabel model="dac" relabel="no"/>
              </scratch>
            </disk>
          </disks>
        </domainbackup>
        """.format(
        CHECKPOINT_1_ID, socket_path,
        scratch_disk_paths[0], scratch_disk_paths[1])

    adapter.output_checkpoints = [CHECKPOINT_1]
    config = {
        'backup_id': BACKUP_2_ID,
        'disks': fake_disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
        'parent_checkpoint_id': CHECKPOINT_1_ID
    }
    res = backup.start_backup(fake_vm, adapter, config)

    assert adapter.backing_up
    assert indented(expected_xml) == indented(adapter.input_backup_xml)
    assert indented(CHECKPOINT_2_XML) == indented(adapter.input_checkpoint_xml)
    verify_scratch_disks_exists(fake_vm, BACKUP_2_ID)

    # verify that the vm froze and thawed during the backup
    assert fake_vm.froze
    assert fake_vm.thawed

    verify_backup_urls(BACKUP_2_ID, res['result']['disks'])

    backup.stop_backup(fake_vm, adapter, BACKUP_2_ID)
    verify_scratch_disks_removed(fake_vm)