def picklecopy(obj):
    """
    Return a deep copy of *obj*, like copy.deepcopy(), but faster.

    The speedup comes from round-tripping the object through the pickle
    module instead of walking the object graph in pure Python.

    Safely handled types include:

    * None, True, and False
    * integers, long integers, floating point numbers, complex numbers
    * normal and Unicode strings
    * tuples, lists, sets, and dictionaries containing only picklable
      objects
    * functions and built-in functions defined at the top level of a module
    * classes defined at the top level of a module
    * instances of such classes whose __dict__ or __getstate__() result
      is picklable

    Raises:
        pickle.PicklingError: if *obj* (or anything it contains) cannot
            be pickled.

    For full documentation, see:
    https://docs.python.org/2/library/pickle.html
    """
    serialized = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL)
    return pickle.loads(serialized)
def schema_from(yaml_str):
    """
    Build a vdsmapi Schema from an in-memory YAML string.

    The YAML is parsed, pickled, and served to the schema loader through a
    mocked io.open, so no real schema file is needed on disk.

    Args:
        yaml_str (str): YAML document describing the API schema.

    Returns:
        vdsmapi.Schema: schema built with strict_mode=False.
    """
    # safe_load instead of yaml.load: yaml.load without an explicit Loader
    # can construct arbitrary Python objects, is deprecated since
    # PyYAML 5.1, and is a TypeError in PyYAML >= 6. Test schemas are
    # plain YAML, so safe_load is sufficient.
    pickled_yaml = pickle.dumps(yaml.safe_load(yaml_str))
    mocked_open = mock.mock_open(read_data=pickled_yaml)
    with mock.patch('{}.io.open'.format(vdsmapi.__name__),
                    mocked_open,
                    create=True):
        return vdsmapi.Schema.vdsm_api(strict_mode=False)
def _schema_from(yaml_str):
    """
    Build a vdsmapi Schema from an in-memory YAML string.

    The YAML is parsed, pickled, and served to the schema loader through a
    mocked io.open, so no real schema file is needed on disk.

    Args:
        yaml_str (str): YAML document describing the API schema.

    Returns:
        vdsmapi.Schema: schema built with strict_mode=False.
    """
    # safe_load instead of yaml.load: yaml.load without an explicit Loader
    # can construct arbitrary Python objects, is deprecated since
    # PyYAML 5.1, and is a TypeError in PyYAML >= 6. Test schemas are
    # plain YAML, so safe_load is sufficient.
    pickled_yaml = pickle.dumps(yaml.safe_load(yaml_str))
    mocked_open = mock.mock_open(read_data=pickled_yaml)
    with mock.patch('{}.io.open'.format(vdsmapi.__name__),
                    mocked_open,
                    create=True):
        return vdsmapi.Schema.vdsm_api(strict_mode=False)
def test_hibernation_params(self):
    """
    Creating a VM with a hiberVolHandle pointing at a pickled dict of
    extra parameters must succeed and merge those parameters into the
    VM configuration.
    """
    params = dict(self.vmParams)
    extra = {
        'a': 42,
        'foo': ['bar'],
    }
    with temporaryPath(data=pickle.dumps(extra)) as path:
        params['hiberVolHandle'] = path
        result = self.vm.create(params)
        self.assertFalse(response.is_error(result))
        # every extra parameter must have been merged into the VM params
        for key, expected in extra.items():
            self.assertEqual(expected, params[key])
def test_hibernation_params_wrong_format(self):
    """
    Creating a VM with a hiberVolHandle whose pickled payload is not a
    dict (here: a list) must still succeed, but no extra parameters may
    be merged into the VM configuration.
    """
    vmParams = {}
    vmParams.update(self.vmParams)
    refParams = copy.deepcopy(vmParams)
    refParams['restoreState'] = True  # to be added BY TESTS
    # a list is not the dict format the hibernation data is expected in
    extraParams = ['a', 42]
    with temporaryPath(data=pickle.dumps(extraParams)) as path:
        vmParams['hiberVolHandle'] = path
        # NOTE: the original called self.vm.create(vmParams) twice in a
        # row (copy-paste duplication); a single create is what this
        # test intends to exercise.
        res = self.vm.create(vmParams)
        self.assertFalse(response.is_error(res))
        self.assertEqual(refParams, vmParams)
def snapshot(self):
    """
    Take a live snapshot of the VM's drives (and optionally its memory).

    Builds a libvirt <domainsnapshot> XML from self.snap_drives, prepares
    the new volumes, optionally writes the VM configuration into the
    memory-conf volume, and finally asks libvirt to create the snapshot.

    Returns an error response dict on failure paths that are detected
    before the snapshot is attempted; raises RuntimeError if the snapshot
    itself fails.
    """
    def norm_snap_drive_params(drive):
        """
        Normalize snapshot parameters into (base_drv, target_drv) dicts.

        Supports three addressing schemes: domainID/imageID/volumeID,
        GUID, or UUID. Returns (None, None) if none of the known keys
        is present.
        """
        if "baseVolumeID" in drive:
            base_drv = {
                "device": "disk",
                "domainID": drive["domainID"],
                "imageID": drive["imageID"],
                "volumeID": drive["baseVolumeID"]
            }
            target_drv = base_drv.copy()
            target_drv["volumeID"] = drive["volumeID"]
        elif "baseGUID" in drive:
            base_drv = {"GUID": drive["baseGUID"]}
            target_drv = {"GUID": drive["GUID"]}
        elif "baseUUID" in drive:
            base_drv = {"UUID": drive["baseUUID"]}
            target_drv = {"UUID": drive["UUID"]}
        else:
            base_drv, target_drv = (None, None)

        return base_drv, target_drv

    def rollback_drives(new_drives):
        """Rollback the prepared volumes for the snapshot"""
        for vm_dev_name, drive in new_drives.items():
            try:
                self.vm.cif.teardownVolumePath(drive)
            except Exception:
                # best effort: log and keep tearing down the rest
                self.vm.log.exception("Unable to teardown drive: %s",
                                      vm_dev_name)

    def memory_snapshot(memory_volume_path):
        """Libvirt snapshot XML element for the external memory volume"""
        return vmxml.Element('memory',
                             snapshot='external',
                             file=memory_volume_path)

    def vm_conf_for_memory_snapshot():
        """Returns the needed vm configuration with the memory snapshot"""
        return {
            'restoreFromSnapshot': True,
            '_srcDomXML': self.vm.migratable_domain_xml(),
            'elapsedTimeOffset': time.time() - self.vm.start_time
        }

    snap = vmxml.Element('domainsnapshot')
    disks = vmxml.Element('disks')
    new_drives = {}
    vm_drives = {}

    for drive in self.snap_drives:
        base_drv, tget_drv = norm_snap_drive_params(drive)

        try:
            self.vm.findDriveByUUIDs(tget_drv)
        except LookupError:
            # The vm is not already using the requested volume for the
            # snapshot, continuing.
            pass
        else:
            # The snapshot volume is the current one, skipping
            self.vm.log.debug("The volume is already in use: %s",
                              tget_drv)
            continue  # Next drive

        try:
            vm_drive = self.vm.findDriveByUUIDs(base_drv)
        except LookupError:
            # The volume we want to snapshot doesn't exist
            self.vm.log.error("The base volume doesn't exist: %s",
                              base_drv)
            return response.error('snapshotErr')

        if vm_drive.hasVolumeLeases:
            self.vm.log.error('disk %s has volume leases', vm_drive.name)
            return response.error('noimpl')

        if vm_drive.transientDisk:
            self.vm.log.error('disk %s is a transient disk', vm_drive.name)
            return response.error('transientErr')

        vm_dev_name = vm_drive.name
        new_drives[vm_dev_name] = tget_drv.copy()
        new_drives[vm_dev_name]["type"] = "disk"
        new_drives[vm_dev_name]["diskType"] = vm_drive.diskType
        new_drives[vm_dev_name]["poolID"] = vm_drive.poolID
        new_drives[vm_dev_name]["name"] = vm_dev_name
        new_drives[vm_dev_name]["format"] = "cow"

        # We need to keep track of the drive object because
        # it keeps original data and used to generate snapshot element.
        # We keep the old volume ID so we can clear the block threshold.
        vm_drives[vm_dev_name] = (vm_drive, base_drv["volumeID"])

    prepared_drives = {}
    for vm_dev_name, vm_device in new_drives.items():
        # Adding the device before requesting to prepare it as we want
        # to be sure to teardown it down even when prepareVolumePath
        # failed for some unknown issue that left the volume active.
        prepared_drives[vm_dev_name] = vm_device
        try:
            new_drives[vm_dev_name]["path"] = \
                self.vm.cif.prepareVolumePath(new_drives[vm_dev_name])
        except Exception:
            self.vm.log.exception(
                'unable to prepare the volume path for '
                'disk %s', vm_dev_name)
            rollback_drives(prepared_drives)
            return response.error('snapshotErr')

        drive, _ = vm_drives[vm_dev_name]
        snapelem = drive.get_snapshot_xml(vm_device)
        disks.appendChild(snapelem)

    snap.appendChild(disks)

    snap_flags = (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REUSE_EXT |
                  libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_NO_METADATA)

    if self.memory_params:
        # Save the needed vm configuration
        # TODO: this, as other places that use pickle.dump
        # directly to files, should be done with outOfProcess
        vm_conf_vol = self.memory_params['dstparams']
        vm_conf_vol_path = self.vm.cif.prepareVolumePath(vm_conf_vol)
        try:
            with open(vm_conf_vol_path, "rb+") as f:
                vm_conf = vm_conf_for_memory_snapshot()
                # protocol=2 is needed for clusters < 4.4
                # (for Python 2 host compatibility)
                data = pickle.dumps(vm_conf, protocol=2)

                # Ensure that the volume is aligned; qemu-img may segfault
                # when converting unligned images.
                # https://bugzilla.redhat.com/1649788
                aligned_length = utils.round(len(data), 4096)
                data = data.ljust(aligned_length, b"\0")

                f.write(data)
                f.flush()
                os.fsync(f.fileno())
        finally:
            # tear the conf volume down even when the write fails
            self.vm.cif.teardownVolumePath(vm_conf_vol)

        # Adding the memory volume to the snapshot xml
        memory_vol = self.memory_params['dst']
        memory_vol_path = self.vm.cif.prepareVolumePath(memory_vol)
        snap.appendChild(memory_snapshot(memory_vol_path))
    else:
        memory_vol = memory_vol_path = None
        snap_flags |= libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY

    snapxml = xmlutils.tostring(snap)
    # TODO: this is debug information. For 3.6.x we still need to
    # see the XML even with 'info' as default level.
    self.vm.log.info("%s", snapxml)

    # Persist the job state so the snapshot can be finalized/recovered
    # even if vdsm restarts while the snapshot is in progress.
    self._snapshot_job['memoryVolPath'] = memory_vol_path
    self._snapshot_job['memoryVol'] = memory_vol
    self._snapshot_job['newDrives'] = new_drives
    vm_drives_serialized = {}
    for k, v in vm_drives.items():
        # serialize the drive as (XML string, old volume ID) since the
        # drive object itself is not metadata-serializable
        vm_drives_serialized[k] = [xmlutils.tostring(v[0].getXML()), v[1]]
    self._snapshot_job['vmDrives'] = vm_drives_serialized
    self.vm.update_snapshot_metadata(self._snapshot_job)

    # We need to stop the drive monitoring for two reasons, one is to
    # prevent spurious libvirt errors about missing drive paths (since
    # we're changing them), and also to prevent to trigger a drive
    # extension for the new volume with the apparent size of the old one
    # (the apparentsize is updated as last step in updateDriveParameters)
    self.vm.drive_monitor.disable()

    try:
        if self.should_freeze:
            self.vm.freeze()
        try:
            self.vm.log.info(
                "Taking a live snapshot (drives=%s,"
                "memory=%s)",
                ', '.join(drive["name"] for drive in new_drives.values()),
                self.memory_params is not None)
            self.vm.run_dom_snapshot(snapxml, snap_flags)
            self.vm.log.info("Completed live snapshot")
        except libvirt.libvirtError:
            self.vm.log.exception("Unable to take snapshot")
            if self.should_freeze:
                self.vm.thaw()
            return response.error('snapshotErr')
    except:
        # In case the VM was shutdown in the middle of the snapshot
        # operation we keep doing the finalizing and reporting the failure.
        self._finalize_vm(memory_vol)
        res = False
    else:
        res = self.teardown(memory_vol_path, memory_vol,
                            new_drives, vm_drives)
    if not res:
        raise RuntimeError("Failed to execute snapshot, "
                           "considering the operation as failure")
def test_pickle_copy(self):
    """A ProtectedPassword must survive a pickle round-trip unchanged."""
    original = ProtectedPassword("12345678")
    restored = pickle.loads(pickle.dumps(original))
    self.assertEqual(original, restored)