def test_incremental_backup_with_backup_mode(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(vm, backup_mode=backup.MODE_FULL)

    # Start full backup.
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }
    backup.start_backup(vm, dom, config)
    backup.stop_backup(vm, dom, BACKUP_1_ID)

    # Start incremental backup.
    socket = backup.socket_path(BACKUP_2_ID)
    scratch1 = scratch_disk_path(vm, BACKUP_2_ID, "sda")
    scratch2 = scratch_disk_path(vm, BACKUP_2_ID, "vda")

    dom.output_checkpoints = [CHECKPOINT_1]

    # Set vda disk backup_mode to 'incremental'.
    for disk in fake_disks:
        if disk["imageID"] == IMAGE_2_UUID:
            disk["backup_mode"] = backup.MODE_INCREMENTAL

    config = {
        'backup_id': BACKUP_2_ID,
        'disks': fake_disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    }
    backup.start_backup(vm, dom, config)

    backup_xml = f"""
        <domainbackup mode='pull'>
            <incremental>{CHECKPOINT_1_ID}</incremental>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full'
                        exportname='sda' index='9'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file'
                        backupmode='incremental'
                        incremental='{CHECKPOINT_1_ID}'
                        exportname='vda' index='10'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(dom.backupGetXMLDesc()) == normalized(backup_xml)

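# The XML assertions in these tests compare via a normalized() helper that
# is defined elsewhere in this module. A minimal sketch of the kind of
# normalization they assume; the name _normalized_sketch and the
# whitespace-collapsing strategy are illustrative only, the real helper
# may differ (e.g. by parsing and re-serializing the XML):
def _normalized_sketch(xml_text):
    # Collapse every run of whitespace to a single space so indentation
    # and line-break differences do not affect the comparison.
    return " ".join(xml_text.split())
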
def test_backup_begin_failed_no_disks(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    dom = FakeDomainAdapter()

    config = {'backup_id': BACKUP_ID, 'disks': ()}

    with pytest.raises(exception.BackupError):
        backup.start_backup(vm, dom, config)

def test_backup_begin_failed_full_with_incremental_disks(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()

    # Set disks backup_mode to 'incremental'.
    fake_disks = create_fake_disks(vm, backup_mode=backup.MODE_INCREMENTAL)

    # A full backup (no 'from_checkpoint_id') cannot contain disks in
    # 'incremental' backup mode, so starting the backup must fail.
    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    with pytest.raises(exception.BackupError):
        backup.start_backup(vm, dom, config)

def test_backup_begin_checkpoint_inconsistent(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    dom.errors["backupBegin"] = fake.libvirt_error(
        [libvirt.VIR_ERR_CHECKPOINT_INCONSISTENT], "Fake libvirt error")

    fake_disks = create_fake_disks()

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    with pytest.raises(exception.InconsistentCheckpointError):
        backup.start_backup(vm, dom, config)

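# Several tests inject failures through the dom.errors / vm.errors mappings
# of the test doubles. A minimal sketch of the dispatch those doubles
# presumably perform; the real FakeDomainAdapter and FakeVm are defined
# elsewhere and may differ, _raise_if_injected_sketch is illustrative:
def _raise_if_injected_sketch(errors, method_name):
    # Raise the error registered for this method, if any, instead of
    # running the method's normal fake behavior.
    if method_name in errors:
        raise errors[method_name]
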
def test_backup_begin_failed_no_parent(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    config = {
        'backup_id': BACKUP_ID,
        'disks': fake_disks,
        'from_checkpoint_id': FROM_CHECKPOINT_ID
    }

    with pytest.raises(exception.BackupError):
        backup.start_backup(vm, dom, config)

def test_fail_parse_backup_xml(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(vm)

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}
    backup.start_backup(vm, dom, config)

    # Simulate invalid backup XML reported by the domain: it lists no
    # disks, so backup_info() cannot build a result and must fail.
    dom.backup_xml = """
        <domainbackup mode='pull'>
            <disks/>
        </domainbackup>
        """

    with pytest.raises(exception.BackupError):
        backup.backup_info(vm, dom, BACKUP_1_ID)

def test_start_backup_failed_get_checkpoint(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    dom.errors["checkpointLookupByName"] = fake.libvirt_error(
        [libvirt.VIR_ERR_INTERNAL_ERROR], "Fake libvirt error")

    fake_disks = create_fake_disks()

    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up
    verify_scratch_disks_exists(vm)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

    # Looking up the created checkpoint failed, so the result must not
    # report a checkpoint, but the backup itself still succeeds.
    assert 'checkpoint' not in res['result']
    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

def test_full_backup_without_checkpoint_with_previous_chain(
        tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    # This test checks an edge case: a chain of incremental backups was
    # taken for a VM with RAW disks after a snapshot was created for them,
    # so their format became QCOW2 and they were valid for incremental
    # backup. Once the snapshot is removed, the disk format is RAW again,
    # and only a full backup without a checkpoint can be taken, even
    # though checkpoints are still defined for the VM.
    dom = FakeDomainAdapter(output_checkpoints=[CHECKPOINT_1, CHECKPOINT_2])
    fake_disks = create_fake_disks()

    # Start a full backup without a checkpoint. This skips the validation
    # of the last defined checkpoint against the given parent checkpoint,
    # since there is no parent when a checkpoint isn't created.
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
    }

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up

    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

def test_start_stop_backup_with_checkpoint(
        tmp_backupdir, tmp_basedir, disks_in_checkpoint,
        expected_checkpoint_xml):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(disks_in_checkpoint)

    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up
    assert indented(expected_checkpoint_xml) == indented(
        dom.input_checkpoint_xml)

    verify_scratch_disks_exists(vm)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

def test_stop_backup_failed(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    dom.errors["abortJob"] = fake.libvirt_error(
        [libvirt.VIR_ERR_INTERNAL_ERROR], "Fake libvirt error")

    fake_disks = create_fake_disks()

    config = {'backup_id': BACKUP_ID, 'disks': fake_disks}

    res = backup.start_backup(vm, dom, config)

    verify_scratch_disks_exists(vm)

    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_ID, result_disks)

    with pytest.raises(exception.BackupError):
        backup.stop_backup(vm, dom, BACKUP_ID)

    # Failed to stop, backup still alive.
    assert dom.backing_up

    # verify scratch disks weren't removed
    verify_scratch_disks_exists(vm)

def test_backup_begin_freeze_failed(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    vm.errors["freeze"] = fake.libvirt_error(
        [libvirt.VIR_ERR_INTERNAL_ERROR], "Fake libvirt error")
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    config = {'backup_id': BACKUP_ID, 'disks': fake_disks}

    with pytest.raises(libvirt.libvirtError):
        backup.start_backup(vm, dom, config)

    verify_scratch_disks_removed(vm)

    # verify that the vm wasn't frozen but was thawed during the backup
    assert not vm.froze
    assert vm.thawed

def test_backup_begin_failed(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    dom.errors["backupBegin"] = fake.libvirt_error(
        [libvirt.VIR_ERR_INTERNAL_ERROR], "Fake libvirt error")

    fake_disks = create_fake_disks(vm)

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    with pytest.raises(exception.BackupError):
        backup.start_backup(vm, dom, config)

    verify_scratch_disks_removed(vm)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

def test_backup_info(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(vm)

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    res = backup.start_backup(vm, dom, config)
    backup_info = backup.backup_info(vm, dom, BACKUP_1_ID)

    assert res['result']['disks'] == backup_info['result']['disks']
    assert 'checkpoint' not in backup_info['result']

def test_start_stop_backup_transient_scratch_disk(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = scratch_disk_path(vm, BACKUP_1_ID, "sda")
    scratch2 = scratch_disk_path(vm, BACKUP_1_ID, "vda")

    fake_disks = create_fake_disks(vm)
    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}
    res = backup.start_backup(vm, dom, config)

    assert dom.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full'
                        exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full'
                        exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(dom.backupGetXMLDesc()) == normalized(backup_xml)

    # We don't monitor file based scratch disks.
    for drive in vm.drives.values():
        assert drive.scratch_disk is None

    verify_scratch_disks_exists(vm)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

    assert 'checkpoint' not in res['result']
    result_disks = res['result']['disks']
    verify_backup_urls(vm, BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

def test_start_backup_disk_not_found():
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    # Add a disk that is not attached to the VM.
    fake_disks.append({
        'domainID': make_uuid(),
        'imageID': make_uuid(),
        'volumeID': make_uuid(),
        'checkpoint': False
    })

    config = {'backup_id': BACKUP_ID, 'disks': fake_disks}

    with pytest.raises(exception.BackupError):
        backup.start_backup(vm, dom, config)

    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

    # verify that the vm was neither frozen nor thawed during the backup
    assert not vm.froze
    assert not vm.thawed

def test_full_backup_with_backup_mode(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = scratch_disk_path(vm, BACKUP_1_ID, "sda")
    scratch2 = scratch_disk_path(vm, BACKUP_1_ID, "vda")

    fake_disks = create_fake_disks(vm, backup_mode=backup.MODE_FULL)
    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}
    backup.start_backup(vm, dom, config)

    assert dom.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file' backupmode='full'
                        exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full'
                        exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(dom.backupGetXMLDesc()) == normalized(backup_xml)

def test_start_stop_backup_engine_scratch_disks(tmp_scratch_disks):
    vm = FakeVm()
    socket_path = backup.socket_path(BACKUP_1_ID)

    expected_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(socket_path, tmp_scratch_disks[0], tmp_scratch_disks[1])

    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    # Set the scratch disk paths on the disks.
    # TODO: add tests for scratch disks on block storage domain.
    fake_disks[0]['scratch_disk'] = {
        'path': tmp_scratch_disks[0],
        'type': DISK_TYPE.FILE
    }
    fake_disks[1]['scratch_disk'] = {
        'path': tmp_scratch_disks[1],
        'type': DISK_TYPE.FILE
    }

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    res = backup.start_backup(vm, dom, config)
    assert normalized(expected_xml) == normalized(dom.input_backup_xml)
    assert dom.backing_up

    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up

def test_start_stop_backup(tmp_backupdir, tmp_basedir):
    vm = FakeVm()
    socket_path = backup.socket_path(BACKUP_1_ID)
    scratch_disk_paths = _get_scratch_disks_path(BACKUP_1_ID)

    expected_xml = """
        <domainbackup mode='pull'>
            <server transport='unix' socket='{}'/>
            <disks>
                <disk name='sda' type='file'>
                    <scratch file='{}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' type='file'>
                    <scratch file='{}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """.format(socket_path, scratch_disk_paths[0], scratch_disk_paths[1])

    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks()

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    res = backup.start_backup(vm, dom, config)
    assert normalized(expected_xml) == normalized(dom.input_backup_xml)
    assert dom.backing_up

    verify_scratch_disks_exists(vm)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

    assert 'checkpoint' not in res['result']
    result_disks = res['result']['disks']
    verify_backup_urls(BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

def test_backup_begin_consistency_not_required(tmp_dirs, require_consistency):
    vm = FakeVm()
    vm.errors["freeze"] = fake.libvirt_error(
        [libvirt.VIR_ERR_INTERNAL_ERROR], "Fake libvirt error")
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(vm)

    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'require_consistency': require_consistency
    }

    # Freezing the VM fails, but since consistency is not required the
    # backup still starts successfully.
    res = backup.start_backup(vm, dom, config)

    verify_scratch_disks_exists(vm)

    result_disks = res['result']['disks']
    verify_backup_urls(vm, BACKUP_1_ID, result_disks)

def test_incremental_backup(tmp_dirs):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    fake_disks = create_fake_disks(vm)

    # Start full backup.
    config = {
        'backup_id': BACKUP_1_ID,
        'disks': fake_disks,
        'to_checkpoint_id': CHECKPOINT_1_ID
    }

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up

    result_disks = res['result']['disks']
    verify_backup_urls(vm, BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up
    verify_scratch_disks_removed(vm)

    # Start incremental backup.
    socket = backup.socket_path(BACKUP_2_ID)
    scratch1 = scratch_disk_path(vm, BACKUP_2_ID, "sda")
    scratch2 = scratch_disk_path(vm, BACKUP_2_ID, "vda")

    dom.output_checkpoints = [CHECKPOINT_1]

    config = {
        'backup_id': BACKUP_2_ID,
        'disks': fake_disks,
        'from_checkpoint_id': CHECKPOINT_1_ID,
        'to_checkpoint_id': CHECKPOINT_2_ID,
    }

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <incremental>{CHECKPOINT_1_ID}</incremental>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='file'
                        backupmode='incremental'
                        incremental='{CHECKPOINT_1_ID}'
                        exportname='sda' index='9'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file'
                        backupmode='incremental'
                        incremental='{CHECKPOINT_1_ID}'
                        exportname='vda' index='10'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(dom.backupGetXMLDesc()) == normalized(backup_xml)
    assert normalized(CHECKPOINT_2_XML) == normalized(
        dom.input_checkpoint_xml)

    verify_scratch_disks_exists(vm, BACKUP_2_ID)

    # verify that the vm froze and thawed during the backup
    assert vm.froze
    assert vm.thawed

    result_disks = res['result']['disks']
    verify_backup_urls(vm, BACKUP_2_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_2_ID)
    verify_scratch_disks_removed(vm)

def test_start_stop_backup_engine_scratch_disks_with_scratch_ids(tmpdir):
    vm = FakeVm()
    dom = FakeDomainAdapter()
    socket = backup.socket_path(BACKUP_1_ID)
    scratch1 = create_scratch_disk(tmpdir, "scratch1")
    scratch2 = create_scratch_disk(tmpdir, "scratch2")

    fake_disks = create_fake_disks(vm)

    # Block based scratch disk for block based disk.
    fake_disks[0]['scratch_disk'] = {
        'path': scratch1,
        'type': DISK_TYPE.BLOCK,
        # Engine sends scratch disk IDs.
        'domainID': make_uuid(),
        'imageID': make_uuid(),
        'volumeID': make_uuid(),
    }

    # File based scratch disk for file based disk.
    fake_disks[1]['scratch_disk'] = {
        'path': scratch2,
        'type': DISK_TYPE.FILE,
        # Engine sends scratch disk IDs.
        'domainID': make_uuid(),
        'imageID': make_uuid(),
        'volumeID': make_uuid(),
    }

    config = {'backup_id': BACKUP_1_ID, 'disks': fake_disks}

    res = backup.start_backup(vm, dom, config)
    assert dom.backing_up

    backup_xml = f"""
        <domainbackup mode='pull'>
            <server transport='unix' socket='{socket}'/>
            <disks>
                <disk name='sda' backup='yes' type='block' backupmode='full'
                        exportname='sda' index='7'>
                    <driver type='qcow2'/>
                    <scratch dev='{scratch1}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
                <disk name='vda' backup='yes' type='file' backupmode='full'
                        exportname='vda' index='8'>
                    <driver type='qcow2'/>
                    <scratch file='{scratch2}'>
                        <seclabel model='dac' relabel='no'/>
                    </scratch>
                </disk>
            </disks>
        </domainbackup>
        """
    assert normalized(dom.backupGetXMLDesc()) == normalized(backup_xml)

    # We monitor only block based scratch disks.
    assert vm.drives[IMAGE_1_UUID].scratch_disk == {
        'index': 7,
        'sd_id': fake_disks[0]['scratch_disk']['domainID'],
        'img_id': fake_disks[0]['scratch_disk']['imageID'],
        'vol_id': fake_disks[0]['scratch_disk']['volumeID'],
    }
    assert vm.drives[IMAGE_2_UUID].scratch_disk is None

    result_disks = res['result']['disks']
    verify_backup_urls(vm, BACKUP_1_ID, result_disks)

    backup.stop_backup(vm, dom, BACKUP_1_ID)
    assert not dom.backing_up

    # Stopping the backup removes the scratch disks from the drives.
    for drive in vm.drives.values():
        assert drive.scratch_disk is None