def _call_plugin(self, op, args):
    util.SMlog("Calling cephutils.VDI._call_plugin: op=%s" % op)
    vdi_uuid = args['vdi_uuid']
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    util.SMlog("Calling ceph_plugin")
    host_keys = filter(lambda x: x.startswith('host_'), sm_config.keys())
    if host_keys:
        for key in host_keys:
            host_ref = key[len('host_'):]
            util.SMlog("Calling '%s' on host %s" % (op, host_ref))
            if not self.session.xenapi.host.call_plugin(
                    host_ref, "ceph_plugin", op, args):
                # Plugin call failed on the remote host
                raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
    else:
        host_uuid = inventory.get_localhost_uuid()
        host_ref = self.session.xenapi.host.get_by_uuid(host_uuid)
        util.SMlog("Calling '%s' on localhost %s" % (op, host_ref))
        if not self.session.xenapi.host.call_plugin(
                host_ref, "ceph_plugin", op, args):
            # Plugin call failed on the local host
            raise util.SMException("failed to %s VDI %s" % (op, vdi_uuid))
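# A minimal usage sketch (hypothetical values): sm_config flags each host that
# has the VDI attached with a "host_<host-ref>" key, which _call_plugin uses
# to fan the plugin call out to every such host:
#   sm_config = {'host_OpaqueRef:abc123': 'RW'}      # hypothetical entry
#   self._call_plugin('unmap', {'vdi_uuid': vdi_uuid})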
def sg_readcap(device):
    device = os.path.join('/dev', getdev(device))
    readcapcommand = ['/usr/bin/sg_readcap', '-b', device]
    (rc, stdout, stderr) = util.doexec(readcapcommand)
    if rc == 6:
        # retry one time for "Capacity data has changed"
        (rc, stdout, stderr) = util.doexec(readcapcommand)
    if rc != 0:
        raise util.SMException("scsiutil.sg_readcap(%s) failed" % (device))
    match = re.search(r'(^|.*\n)(0x[0-9a-fA-F]+) (0x[0-9a-fA-F]+)\n$', stdout)
    if not match:
        raise util.SMException("scsiutil.sg_readcap(%s) failed to parse: %s"
                               % (device, stdout))
    (blockcount, blocksize) = match.group(2, 3)
    return (int(blockcount, 0) * int(blocksize, 0))
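# Usage sketch (hypothetical device name, assuming getdev() accepts it):
#   size_bytes = sg_readcap('sdb')   # blockcount * blocksize from sg_readcap -b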
def _do_clone(self, vdi_uuid, snap_uuid, clone_uuid, vdi_label):
    util.SMlog("Calling cephutils.VDI._do_clone: vdi_uuid=%s, snap_uuid=%s,"
               " clone_uuid=%s, vdi_label=%s"
               % (vdi_uuid, snap_uuid, clone_uuid, vdi_label))
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    snapshot_name = "%s/%s@%s%s" % (self.sr.CEPH_POOL_NAME, vdi_name,
                                    SNAPSHOT_PREFIX, snap_uuid)
    clone_name = "%s/%s%s" % (self.sr.CEPH_POOL_NAME, CLONE_PREFIX,
                              clone_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        self.__unmap_VHD(vdi_uuid)
    #---
    util.pread2(["rbd", "clone", snapshot_name, clone_name,
                 "--name", self.sr.CEPH_USER])
    if self.sr.use_rbd_meta:
        util.pread2(["rbd", "image-meta", "set", clone_name,
                     "VDI_LABEL", vdi_label,
                     "--pool", self.sr.CEPH_POOL_NAME,
                     "--name", self.sr.CEPH_USER])
        util.pread2(["rbd", "image-meta", "set", clone_name,
                     "CLONE_OF", snap_uuid,
                     "--pool", self.sr.CEPH_POOL_NAME,
                     "--name", self.sr.CEPH_USER])
    #---
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        self.__map_VHD(vdi_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type,
                 secondary=None, cbtlog=None):
    # If cbt enabled, save file consistency state
    if cbtlog is not None:
        consistency_state = not blktap2.VDI.tap_status(self.session, vdi_uuid)
        util.SMlog("Saving log consistency state of %s for vdi: %s"
                   % (consistency_state, vdi_uuid))
    else:
        consistency_state = None

    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    try:
        return self._snapshot(snap_type, cbtlog, consistency_state)
    finally:
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
def _merge_diffs(self, mirror_uuid, snap_uuid, base_uuid, size):
    if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, mirror_uuid):
        raise util.SMException("failed to pause VDI %s" % mirror_uuid)
    #---
    util.pread2(["unlink", self.RBD_VDI_MIRROR_DEV])
    util.pread2(["dmsetup", "remove", self.DM_MIRROR_NAME])
    util.pread2(["dmsetup", "remove", self.DM_ZERO_NAME])
    self._map_VHD(base_uuid)
    self._setup_base(base_uuid, size)
    util.pread2(["dmsetup", "suspend", self.DM_BASE_NAME])
    util.pread2(["dmsetup", "reload", self.DM_BASE_NAME, "--table",
                 "0 %s snapshot-merge %s %s P 1"
                 % (str(int(size) / 512), self.RBD_VDI_BASE_DEV,
                    self.RBD_SXM_MIRROR_DEV)])
    util.pread2(["dmsetup", "resume", self.DM_BASE_NAME])
    # we should wait until the merge is completed
    util.pread2(["waitdmmerging.sh", self.DM_BASE_NAME])
    # -------------------------------------------
    util.pread2(["dmsetup", "remove", self.DM_BASE_NAME])
    self._unmap_VHD(base_uuid)
    self._unmap_SXM(mirror_uuid)
    self._change_image_prefix_to_VHD(mirror_uuid)
    # -----
    tmp_uuid = "temporary"  # util.gen_uuid()
    self._rename_image(mirror_uuid, tmp_uuid)
    self._rename_image(base_uuid, mirror_uuid)
    self._rename_image(tmp_uuid, base_uuid)
    # -----
    self._map_VHD(mirror_uuid)
    #---
    blktap2.VDI.tap_unpause(self.session, self.sr.uuid, mirror_uuid, None)
def refresh_mapper_if_needed(primarydevice, SCSIid, currentcapacity):
    if "/dev/mapper/" in primarydevice \
            and get_outdated_size_devices(currentcapacity, [primarydevice]):
        mpath_cli.resize_map(SCSIid)
        if get_outdated_size_devices(currentcapacity, [primarydevice]):
            raise util.SMException("Failed to get the mapper dev to agree "
                                   "on the current capacity.")
def detachThin(session, lvmCache, srUuid, vdiUuid):
    """Shrink the VDI to the minimal size if no one is using it"""
    lvName = LV_PREFIX[vhdutil.VDI_TYPE_VHD] + vdiUuid
    path = os.path.join(VG_LOCATION, VG_PREFIX + srUuid, lvName)
    lock = Lock(vhdutil.LOCK_TYPE_SR, srUuid)
    _tryAcquire(lock)
    try:
        vdiRef = session.xenapi.VDI.get_by_uuid(vdiUuid)
        vbds = session.xenapi.VBD.get_all_records_where(
                "field \"VDI\" = \"%s\"" % vdiRef)
        numPlugged = 0
        for vbdRec in vbds.values():
            if vbdRec["currently_attached"]:
                numPlugged += 1
        if numPlugged > 1:
            raise util.SMException("%s still in use by %d others"
                                   % (vdiUuid, numPlugged - 1))
        lvmCache.activate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName, False)
        try:
            newSize = calcSizeLV(vhdutil.getSizePhys(path))
            deflate(lvmCache, lvName, newSize)
        finally:
            lvmCache.deactivate(NS_PREFIX_LVM + srUuid, vdiUuid, lvName,
                                False)
    finally:
        # release the SR lock even if the VDI is in use or deflation fails
        lock.release()
def _do_clone(self, vdi_uuid, snap_uuid, clone_uuid, vdi_label):
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    snapshot_name = "%s/%s@%s%s" % (self.sr.CEPH_POOL_NAME, vdi_name,
                                    SNAPSHOT_PREFIX, snap_uuid)
    clone_name = "%s/%s%s" % (self.sr.CEPH_POOL_NAME, CLONE_PREFIX,
                              clone_uuid)
    if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    self._unmap_VHD(vdi_uuid)
    #---
    util.pread2(["rbd", "clone", snapshot_name, clone_name,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "image-meta", "set", clone_name,
                 "VDI_LABEL", vdi_label,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "image-meta", "set", clone_name,
                 "CLONE_OF", snap_uuid,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    self._map_VHD(vdi_uuid)
    blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def _do_snapshot(self, vdi_uuid, snap_uuid):
    util.SMlog("Calling cephutils.VDI._do_snapshot: vdi_uuid=%s, snap_uuid=%s"
               % (vdi_uuid, snap_uuid))
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    snapshot_name = "%s@%s%s" % (vdi_name, SNAPSHOT_PREFIX, snap_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        self.__unmap_VHD(vdi_uuid)
    #---
    util.pread2(["rbd", "snap", "create", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "snap", "protect", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        self.__map_VHD(vdi_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def getParent(path, extractUuidFunction):
    cmd = [VHD_UTIL, "query", OPT_LOG_ERR, "-p", "-n", path]
    ret = ioretry(cmd)
    if ret.find("query failed") != -1 or ret.find("Failed opening") != -1:
        raise util.SMException("VHD query returned %s" % ret)
    if ret.find("no parent") != -1:
        return None
    return extractUuidFunction(ret)
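# Usage sketch (path is hypothetical; a trivial extractor just returns the
# raw parent path printed by vhd-util):
#   parent = getParent('/var/run/sr-mount/<sr-uuid>/<vdi-uuid>.vhd',
#                      lambda out: out.strip())
#   # None means the VHD is a chain root ("no parent"); a failed query
#   # raises util.SMException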
def _do_snapshot(self, sr_uuid, vdi_uuid, snap_type, secondary=None):
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    try:
        return self._snapshot(snap_type)
    finally:
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid, secondary)
def reset_leaf(self, sr_uuid, vdi_uuid):
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    # safety check
    if not vhdutil.hasParent(self.path):
        raise util.SMException("ERROR: VDI %s has no parent, "
                               "will not reset contents" % self.uuid)

    vhdutil.killData(self.path)
def from_session(cls, session):
    import util
    import SR as sm

    host_ref = util.get_localhost_uuid(session)
    _host = session.xenapi.host

    sr_ref = _host.get_local_cache_sr(host_ref)
    if not sr_ref:
        raise util.SMException("Local cache SR not specified")
    if sr_ref == 'OpaqueRef:NULL':
        raise util.SMException("Local caching not enabled.")

    _SR = session.xenapi.SR
    sr_uuid = _SR.get_uuid(sr_ref)

    target = sm.SR.from_uuid(session, sr_uuid)
    return cls(target.path)
def monkeyPatchedNormalizeType(type):
    if type in cleanup.LVHDSR.SUBTYPES:
        type = cleanup.SR.TYPE_LVHD
    if type in ["lvm", "lvmoiscsi", "lvmohba", "lvmofcoe"]:
        type = cleanup.SR.TYPE_LVHD
    # added "glusterfs" to this list
    if type in ["ext", "nfs", "ocfsoiscsi", "ocfsohba", "smb", "glusterfs"]:
        type = cleanup.SR.TYPE_FILE
    if type not in cleanup.SR.TYPES:
        raise util.SMException("Unsupported SR type: %s" % type)
    return type
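# Quick self-check of the patched mapping (assumes cleanup's constants are
# importable; the assertions below are illustrative only):
#   assert monkeyPatchedNormalizeType("glusterfs") == cleanup.SR.TYPE_FILE
#   assert monkeyPatchedNormalizeType("lvmoiscsi") == cleanup.SR.TYPE_LVHD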
def _flatten_clone(self, clone_uuid):
    if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, clone_uuid):
        raise util.SMException("failed to pause VDI %s" % clone_uuid)
    self._unmap_VHD(clone_uuid)
    #--- TODO: check for running VMs: what if flattening takes a long time
    # and the VDI stays paused for the whole process?
    clone_name = "%s/%s%s" % (self.sr.CEPH_POOL_NAME, CLONE_PREFIX,
                              clone_uuid)
    util.pread2(["rbd", "flatten", clone_name, "--name", self.sr.CEPH_USER])
    #--- TODO (see above)
    self._map_VHD(clone_uuid)
    blktap2.VDI.tap_unpause(self.session, self.sr.uuid, clone_uuid, None)
def resize(self, sr_uuid, vdi_uuid, size):
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if sm_config.has_key('attached'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        self._unmap_VHD(vdi_uuid)
    #---
    image_size = size / 1024 / 1024
    util.pread2(["rbd", "resize", "--size", str(image_size),
                 "--allow-shrink", self.CEPH_VDI_NAME,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    if sm_config.has_key('attached'):
        self._map_VHD(vdi_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def compose(self, sr_uuid, vdi1, vdi2):
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')
    parent_fn = vdi1 + vhdutil.FILE_EXTN[vhdutil.VDI_TYPE_VHD]
    parent_path = os.path.join(self.sr.path, parent_fn)
    assert util.pathexists(parent_path)

    vhdutil.setParent(self.path, parent_path, False)
    vhdutil.setHidden(parent_path)
    util.pread2([vhdutil.VHD_UTIL, "modify", "-p", parent_path,
                 "-n", self.path])

    # Tell tapdisk the chain has changed
    if not blktap2.VDI.tap_refresh(self.session, sr_uuid, vdi2):
        raise util.SMException("failed to refresh VDI %s" % self.uuid)
    util.SMlog("VDI.compose: relinked %s->%s" % (vdi2, vdi1))
def resize(self, sr_uuid, vdi_uuid, size):
    if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    self._unmap_VHD(vdi_uuid)
    #---
    image_size = size / 1024 / 1024
    util.pread2(["rbd", "resize", "--size", str(image_size),
                 "--allow-shrink", self.CEPH_VDI_NAME,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    self._map_VHD(vdi_uuid)
    blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def _flatten_clone(self, clone_uuid):
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(clone_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if sm_config.has_key('attached'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, clone_uuid):
            raise util.SMException("failed to pause VDI %s" % clone_uuid)
        self._unmap_VHD(clone_uuid)
    #--- TODO: check for running VMs: what if flattening takes a long time
    # and the VDI stays paused for the whole process?
    clone_name = "%s/%s%s" % (self.sr.CEPH_POOL_NAME, CLONE_PREFIX,
                              clone_uuid)
    util.pread2(["rbd", "flatten", clone_name, "--name", self.sr.CEPH_USER])
    #--- TODO (see above)
    if sm_config.has_key('attached'):
        self._map_VHD(clone_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, clone_uuid, None)
def refresh_lun_size_by_SCSIid_on_slaves(session, SCSIid):
    for slave in util.get_all_slaves(session):
        util.SMlog("Calling on-slave.refresh_lun_size_by_SCSIid(%s) on %s."
                   % (SCSIid, slave))
        resulttext = session.xenapi.host.call_plugin(
            slave, "on-slave", "refresh_lun_size_by_SCSIid",
            {'SCSIid': SCSIid})
        if "True" == resulttext:
            util.SMlog("Calling on-slave.refresh_lun_size_by_SCSIid(%s) on"
                       " %s succeeded." % (SCSIid, slave))
        else:
            raise util.SMException("Slave %s failed in on-slave.refresh_lun_"
                                   "size_by_SCSIid(%s) " % (slave, SCSIid))
def snapshot(self, sr_uuid, vdi_uuid):
    if self.vdi_type != vhdutil.VDI_TYPE_VHD:
        raise xs_errors.XenError('Unimplemented')

    snap_type = self.SNAPSHOT_DOUBLE
    if self.sr.srcmd.params['driver_params'].get("type") == "single":
        snap_type = self.SNAPSHOT_SINGLE

    if not blktap2.VDI.tap_pause(self.session, sr_uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    try:
        return self._snapshot(snap_type)
    finally:
        blktap2.VDI.tap_unpause(self.session, sr_uuid, vdi_uuid)
def refresh_devices_if_needed(primarydevice, SCSIid, currentcapacity):
    devices = get_devices_by_SCSIid(SCSIid)
    if "/dev/mapper/" in primarydevice:
        devices = set(devices + mpath_cli.list_paths(SCSIid))
    devicesthatneedrefresh = get_outdated_size_devices(currentcapacity,
                                                       devices)
    if devicesthatneedrefresh:
        # timing out avoids waiting for min(dev_loss_tmo, fast_io_fail_tmo)
        # if one or multiple devices don't answer
        util.timeout_call(10, refreshdev, devicesthatneedrefresh)
        if get_outdated_size_devices(currentcapacity,
                                     devicesthatneedrefresh):
            # in this state we shouldn't force resizing the mapper dev
            raise util.SMException("Failed to get %s to agree on the "
                                   "current capacity."
                                   % devicesthatneedrefresh)
def multi(session, args):
    """Perform several actions in one call (to save on round trips)"""
    util.SMlog("on-slave.multi: %s" % args)
    vgName = args["vgName"]
    lvmCache = LVMCache(vgName)
    i = 1
    while True:
        action = args.get("action%d" % i)
        if not action:
            break
        util.SMlog("on-slave.action %d: %s" % (i, action))
        if action == "activate":
            try:
                lvmCache.activate(args["ns%d" % i], args["uuid%d" % i],
                                  args["lvName%d" % i], False)
            except util.CommandException:
                util.SMlog("on-slave.activate failed")
                raise
        elif action == "deactivate":
            try:
                lvmCache.deactivate(args["ns%d" % i], args["uuid%d" % i],
                                    args["lvName%d" % i], False)
            except util.SMException:
                util.SMlog("on-slave.deactivate failed")
                raise
        elif action == "deactivateNoRefcount":
            try:
                lvmCache.deactivateNoRefcount(args["lvName%d" % i])
            except util.SMException:
                util.SMlog("on-slave.deactivateNoRefcount failed")
                raise
        elif action == "refresh":
            try:
                lvmCache.activateNoRefcount(args["lvName%d" % i], True)
            except util.CommandException:
                util.SMlog("on-slave.refresh failed")
                raise
        elif action == "cleanupLockAndRefcount":
            from refcounter import RefCounter
            lock.Lock.cleanup(args["uuid%d" % i], args["ns%d" % i])
            RefCounter.reset(args["uuid%d" % i], args["ns%d" % i])
        else:
            raise util.SMException("unrecognized action: %s" % action)
        i += 1
    return str(True)
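# The args dict encodes a 1-indexed batch of actions; a hedged example with
# two actions (all names below are hypothetical placeholders):
#   args = {
#       "vgName": "VG_XenStorage-<sr-uuid>",
#       "action1": "activate",
#       "ns1": "lvm-<sr-uuid>",
#       "uuid1": "<vdi-uuid>",
#       "lvName1": "VHD-<vdi-uuid>",
#       "action2": "refresh",
#       "lvName2": "VHD-<other-vdi-uuid>",
#   }
#   multi(session, args)   # returns "True" on success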
def _delete_snapshot(self, vdi_uuid, snap_uuid):
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    snapshot_name = "%s@%s%s" % (vdi_name, SNAPSHOT_PREFIX, snap_uuid)
    short_snap_name = "%s%s" % (SNAPSHOT_PREFIX, snap_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if sm_config.has_key('attached'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        self._unmap_VHD(vdi_uuid)
    #---
    util.pread2(["rbd", "snap", "unprotect", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "snap", "rm", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "image-meta", "remove", vdi_name, short_snap_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    if sm_config.has_key('attached'):
        self._map_VHD(vdi_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def _do_snapshot(self, vdi_uuid, snap_uuid):
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    snapshot_name = "%s@%s%s" % (vdi_name, SNAPSHOT_PREFIX, snap_uuid)
    if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
        raise util.SMException("failed to pause VDI %s" % vdi_uuid)
    self._unmap_VHD(vdi_uuid)
    #---
    util.pread2(["rbd", "snap", "create", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    util.pread2(["rbd", "snap", "protect", snapshot_name,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    self._map_VHD(vdi_uuid)
    blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def deactivate(self, ns, ref, lvName, binary):
    lock = Lock(ref, ns)
    lock.acquire()
    try:
        count = RefCounter.put(ref, binary, ns)
        if count > 0:
            return
        refreshed = False
        while True:
            lvInfo = self.getLVInfo(lvName)
            if len(lvInfo) != 1:
                raise util.SMException("LV info not found for %s" % ref)
            info = lvInfo[lvName]
            if info.open:
                if refreshed:
                    # should never happen in normal conditions but in some
                    # failure cases the recovery code may not be able to
                    # determine what the correct refcount should be, so it
                    # is not unthinkable that the value might be out of
                    # sync
                    util.SMlog("WARNING: deactivate: LV %s open" % lvName)
                    return
                # check again in case the cached value is stale
                self.refresh()
                refreshed = True
            else:
                break

        try:
            self.deactivateNoRefcount(lvName)
        except util.CommandException:
            self.refresh()
            if self.getLVInfo(lvName):
                util.SMlog("LV %s could not be deactivated" % lvName)
                if lvInfo[lvName].active:
                    util.SMlog("Reverting the refcount change")
                    RefCounter.get(ref, binary, ns)
                raise
            else:
                util.SMlog("LV %s not found" % lvName)
    finally:
        lock.release()
def xapi_vfs_stats(self):
    f = self.statvfs()
    if not f.f_frsize:
        raise util.SMException("Cache FS does not report utilization.")

    fs_size = f.f_frsize * f.f_blocks
    fs_free = f.f_frsize * f.f_bfree

    fs_cache_total = 0
    for path in self._fast_find_nodes():
        st = os.stat(path)
        fs_cache_total += st.st_size

    return {
        'FREE_CACHE_SPACE_AVAILABLE': fs_free,
        'TOTAL_CACHE_UTILISATION': fs_cache_total,
        'TOTAL_UTILISATION_BY_NON_CACHE_DATA':
            fs_size - fs_free - fs_cache_total,
    }
def update(self, sr_uuid, vdi_uuid):
    util.SMlog("Calling cephutils.VDI.update: sr_uuid=%s, vdi_uuid=%s"
               % (sr_uuid, vdi_uuid))
    vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    if self.sr.use_rbd_meta:
        if sm_config.has_key('attached') and not sm_config.has_key('paused'):
            if not blktap2.VDI.tap_pause(self.session, self.sr.uuid,
                                         vdi_uuid):
                raise util.SMException("failed to pause VDI %s" % vdi_uuid)
            self.__unmap_VHD(vdi_uuid)

        if self.label:
            util.pread2(["rbd", "image-meta", "set", vdi_name,
                         "VDI_LABEL", self.label,
                         "--pool", self.sr.CEPH_POOL_NAME,
                         "--name", self.sr.CEPH_USER])
        if self.description:
            util.pread2(["rbd", "image-meta", "set", vdi_name,
                         "VDI_DESCRIPTION", self.description,
                         "--pool", self.sr.CEPH_POOL_NAME,
                         "--name", self.sr.CEPH_USER])
        for snapshot_uuid in self.snaps.keys():
            snapshot_name = "%s%s" % (SNAPSHOT_PREFIX, snapshot_uuid)
            util.pread2(["rbd", "image-meta", "set", vdi_name,
                         snapshot_name, str(self.snaps[snapshot_uuid]),
                         "--pool", self.sr.CEPH_POOL_NAME,
                         "--name", self.sr.CEPH_USER])

        if sm_config.has_key('attached') and not sm_config.has_key('paused'):
            self.__map_VHD(vdi_uuid)
            blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid,
                                    None)
def resize(self, sr_uuid, vdi_uuid, image_size_M):
    util.SMlog("Calling cephutils.VDI.resize: sr_uuid=%s, vdi_uuid=%s,"
               " size=%sMB" % (sr_uuid, vdi_uuid, image_size_M))
    vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
    sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
    ##self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        if not blktap2.VDI.tap_pause(self.session, self.sr.uuid, vdi_uuid):
            raise util.SMException("failed to pause VDI %s" % vdi_uuid)
        self.__unmap_VHD(vdi_uuid)
    #---
    ##image_size = size / 1024 / 1024
    util.pread2(["rbd", "resize", "--size", str(image_size_M),
                 "--allow-shrink", self.CEPH_VDI_NAME,
                 "--pool", self.sr.CEPH_POOL_NAME,
                 "--name", self.sr.CEPH_USER])
    #---
    if sm_config.has_key('attached') and not sm_config.has_key('paused'):
        self.__map_VHD(vdi_uuid)
        blktap2.VDI.tap_unpause(self.session, self.sr.uuid, vdi_uuid, None)
def delete(self, sr_uuid, vdi_uuid, data_only=False):
    """Delete this VDI.

    This operation IS idempotent and should succeed if the VDI
    exists and can be deleted or if the VDI does not exist. It is
    the responsibility of the higher-level management tool to
    ensure that the detach() operation has been explicitly called
    prior to deletion, otherwise the delete() will fail if the
    disk is still attached.
    """
    import blktap2
    from lock import Lock

    if data_only == False and self._get_blocktracking_status():
        logpath = self._get_cbt_logpath(vdi_uuid)
        parent_uuid = self._cbt_op(vdi_uuid, cbtutil.get_cbt_parent,
                                   logpath)
        parent_path = self._get_cbt_logpath(parent_uuid)
        child_uuid = self._cbt_op(vdi_uuid, cbtutil.get_cbt_child, logpath)
        child_path = self._get_cbt_logpath(child_uuid)
        lock = Lock("cbtlog", str(vdi_uuid))

        if self._cbt_log_exists(parent_path):
            self._cbt_op(parent_uuid, cbtutil.set_cbt_child, parent_path,
                         child_uuid)

        if self._cbt_log_exists(child_path):
            self._cbt_op(child_uuid, cbtutil.set_cbt_parent, child_path,
                         parent_uuid)
            lock.acquire()
            try:
                # Coalesce contents of bitmap with child's bitmap
                # Check if child bitmap is currently attached
                paused_for_coalesce = False
                consistent = self._cbt_op(child_uuid,
                                          cbtutil.get_cbt_consistency,
                                          child_path)
                if not consistent:
                    if not blktap2.VDI.tap_pause(self.session, sr_uuid,
                                                 child_uuid):
                        raise util.SMException("failed to pause VDI %s"
                                               % child_uuid)
                    paused_for_coalesce = True
                self._activate_cbt_log(self._get_cbt_logname(vdi_uuid))
                self._cbt_op(child_uuid, cbtutil.coalesce_bitmap, logpath,
                             child_path)
                lock.release()
            except util.CommandException:
                # If there is an exception in coalescing,
                # CBT log file is not deleted and pointers are reset
                # to what they were
                util.SMlog("Exception in coalescing bitmaps on VDI delete,"
                           " restoring to previous state")
                try:
                    if self._cbt_log_exists(parent_path):
                        self._cbt_op(parent_uuid, cbtutil.set_cbt_child,
                                     parent_path, vdi_uuid)
                    if self._cbt_log_exists(child_path):
                        self._cbt_op(child_uuid, cbtutil.set_cbt_parent,
                                     child_path, vdi_uuid)
                finally:
                    lock.release()
                    lock.cleanup("cbtlog", str(vdi_uuid))
                return
            finally:
                # Unpause tapdisk if it wasn't originally paused
                if paused_for_coalesce:
                    blktap2.VDI.tap_unpause(self.session, sr_uuid,
                                            child_uuid)

        lock.acquire()
        try:
            self._delete_cbt_log()
        finally:
            lock.release()
        lock.cleanup("cbtlog", str(vdi_uuid))