def setInnerNodeRefcounts(lvmCache, srUuid):
    """[Re]calculate and set the refcounts for inner VHD nodes based on
    refcounts of the leaf nodes.

    We can infer inner node refcounts on slaves directly because they are
    in use only when VDIs are attached - as opposed to the Master case
    where the coalesce process can also operate on inner nodes.

    Return all LVs (paths) that are active but not in use (i.e. that
    should be deactivated).
    """
    # NOTE: .items() instead of Python-2-only .iteritems() - identical
    # iteration behavior on both Python 2 and 3.
    vdiInfo = getVDIInfo(lvmCache)
    # Start from a clean slate: zero every node's refcount before summing.
    for uuid, vdi in vdiInfo.items():
        vdi.refcount = 0

    ns = NS_PREFIX_LVM + srUuid
    # Only leaf (non-hidden) nodes carry authoritative refcounts; propagate
    # each attached leaf's count up its parent chain.
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            continue  # only read leaf refcounts
        refcount = RefCounter.check(uuid, ns)
        # On a slave a leaf is either not attached (0, 0) or attached (0, 1)
        assert (refcount == (0, 0) or refcount == (0, 1))
        if refcount[1]:
            vdi.refcount = 1
            while vdi.parentUuid:
                vdi = vdiInfo[vdi.parentUuid]
                vdi.refcount += 1

    # Persist the computed counts for inner (hidden) nodes and collect any
    # active-but-unreferenced LVs so the caller can deactivate them.
    pathsNotInUse = []
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            util.SMlog("Setting refcount for %s to %d" % (uuid, vdi.refcount))
            RefCounter.set(uuid, vdi.refcount, 0, ns)
        if vdi.refcount == 0 and vdi.lvActive:
            path = os.path.join("/dev", lvmCache.vgName, vdi.lvName)
            pathsNotInUse.append(path)

    return pathsNotInUse
def setInnerNodeRefcounts(lvmCache, srUuid):
    """[Re]calculate and set the refcounts for inner VHD nodes based on
    refcounts of the leaf nodes.

    We can infer inner node refcounts on slaves directly because they are
    in use only when VDIs are attached - as opposed to the Master case
    where the coalesce process can also operate on inner nodes.

    Return all LVs (paths) that are active but not in use (i.e. that
    should be deactivated).
    """
    vdiInfo = getVDIInfo(lvmCache)
    # Reset all counts first (fix: .items() replaces Python-2-only
    # .iteritems(), which does not exist on Python 3).
    for uuid, vdi in vdiInfo.items():
        vdi.refcount = 0

    ns = NS_PREFIX_LVM + srUuid
    # Leaves (non-hidden nodes) hold the real refcounts; each attached leaf
    # contributes one reference to every ancestor in its parent chain.
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            continue  # only read leaf refcounts
        refcount = RefCounter.check(uuid, ns)
        # On a slave a leaf is either detached (0, 0) or attached once (0, 1)
        assert(refcount == (0, 0) or refcount == (0, 1))
        if refcount[1]:
            vdi.refcount = 1
            while vdi.parentUuid:
                vdi = vdiInfo[vdi.parentUuid]
                vdi.refcount += 1

    # Write back inner-node counts and gather LVs that are active although
    # nothing references them - the caller should deactivate those.
    pathsNotInUse = []
    for uuid, vdi in vdiInfo.items():
        if vdi.hidden:
            util.SMlog("Setting refcount for %s to %d" % (uuid, vdi.refcount))
            RefCounter.set(uuid, vdi.refcount, 0, ns)
        if vdi.refcount == 0 and vdi.lvActive:
            path = os.path.join("/dev", lvmCache.vgName, vdi.lvName)
            pathsNotInUse.append(path)

    return pathsNotInUse
def activate(self, ns, ref, lvName, binary):
    """Take one reference on <ref> and activate the LV on first use.

    A per-ref lock serializes concurrent activate/deactivate callers; if
    the actual LV activation fails, the refcount increment is rolled back
    before the error propagates.
    """
    lock = Lock(ref, ns)
    lock.acquire()
    try:
        # A count of exactly 1 means we are the first user and must
        # really activate the LV now.
        if RefCounter.get(ref, binary, ns) == 1:
            try:
                self.activateNoRefcount(lvName)
            except util.CommandException:
                # Undo the increment so the counter stays in sync with
                # the LV's true activation state, then re-raise.
                RefCounter.put(ref, binary, ns)
                raise
    finally:
        lock.release()
def deactivateVdi(sr_uuid, vdi_uuid, vhd_path):
    """Drop one reference on the VDI and deactivate its LV once the
    refcount reaches zero.

    Deactivation is best-effort: a failure is logged and the refcount is
    restored, but no exception escapes from the failed deactivation.
    """
    ns = lvhdutil.NS_PREFIX_LVM + sr_uuid
    vdi_lock = Lock(vdi_uuid, ns)
    vdi_lock.acquire()
    try:
        remaining = RefCounter.put(vdi_uuid, False, ns)
        if remaining > 0:
            # Somebody else still holds a reference - leave the LV active.
            return
        try:
            lvutil.deactivateNoRefcount(vhd_path)
        except Exception as e:
            util.SMlog(" lv de-activate failed for %s with error %s" %
                    (vhd_path, str(e)))
            # Deactivation failed: take the reference back so the counter
            # still reflects an active LV.
            RefCounter.get(vdi_uuid, False, ns)
    finally:
        vdi_lock.release()
def deactivateVdi(sr_uuid, vdi_uuid, vhd_path):
    """Drop one reference on the VDI and deactivate its LV when unused.

    Best-effort: if the LV deactivation fails, the error is logged and the
    refcount decrement is reverted; no exception is propagated from the
    failed deactivation itself.
    """
    name_space = lvhdutil.NS_PREFIX_LVM + sr_uuid
    lock = Lock(vdi_uuid, name_space)
    lock.acquire()
    try:
        count = RefCounter.put(vdi_uuid, False, name_space)
        if count > 0:
            # Still referenced elsewhere - keep the LV active.
            return
        try:
            lvutil.deactivateNoRefcount(vhd_path)
        # Fix: "except Exception, e" is legacy Python-2-only syntax (a
        # SyntaxError on Python 3); "as e" is valid on Python 2.6+ and 3.
        except Exception as e:
            util.SMlog(" lv de-activate failed for %s with error %s" %
                    (vhd_path, str(e)))
            # Revert the decrement so the counter matches the still-active LV.
            RefCounter.get(vdi_uuid, False, name_space)
    finally:
        lock.release()
def multi(session, args):
    """Perform several actions in one call (to save on round trips).

    Actions arrive as numbered keys: "action1", "action2", ... with the
    matching "ns<i>"/"uuid<i>"/"lvName<i>" arguments; processing stops at
    the first missing "action<i>". Returns str(True) on success; any
    failing action logs and re-raises.
    """
    # Function-scoped to avoid import cycles, but hoisted out of the loop
    # so it is not re-executed on every iteration.
    from refcounter import RefCounter

    util.SMlog("on-slave.multi: %s" % args)
    vgName = args["vgName"]
    lvmCache = LVMCache(vgName)
    i = 1
    while True:
        action = args.get("action%d" % i)
        if not action:
            break
        util.SMlog("on-slave.action %d: %s" % (i, action))
        if action == "activate":
            try:
                lvmCache.activate(args["ns%d" % i], args["uuid%d" % i],
                        args["lvName%d" % i], False)
            except util.CommandException:
                util.SMlog("on-slave.activate failed")
                raise
        elif action == "deactivate":
            try:
                lvmCache.deactivate(args["ns%d" % i], args["uuid%d" % i],
                        args["lvName%d" % i], False)
            except util.SMException:
                util.SMlog("on-slave.deactivate failed")
                raise
        elif action == "deactivateNoRefcount":
            try:
                lvmCache.deactivateNoRefcount(args["lvName%d" % i])
            except util.SMException:
                util.SMlog("on-slave.deactivateNoRefcount failed")
                raise
        elif action == "refresh":
            try:
                # True => force a refresh of the (possibly stale) LV metadata
                lvmCache.activateNoRefcount(args["lvName%d" % i], True)
            except util.CommandException:
                util.SMlog("on-slave.refresh failed")
                raise
        elif action == "cleanupLockAndRefcount":
            # Remove both the lock file and the refcount state for the object
            lock.Lock.cleanup(args["uuid%d" % i], args["ns%d" % i])
            RefCounter.reset(args["uuid%d" % i], args["ns%d" % i])
        else:
            raise util.SMException("unrecognized action: %s" % action)
        i += 1
    return str(True)
def deactivate(self, ns, ref, lvName, binary):
    """Drop one reference on <ref>; deactivate the LV when the count hits 0.

    Holds the per-ref lock for the whole operation. Before deactivating,
    verifies (with one metadata refresh retry) that the LV is not open; if
    deactivation then fails but the LV is still active, the refcount
    decrement is reverted and the error re-raised.
    """
    lock = Lock(ref, ns)
    lock.acquire()
    try:
        count = RefCounter.put(ref, binary, ns)
        if count > 0:
            # Other users remain - the LV must stay active.
            return
        refreshed = False
        while True:
            lvInfo = self.getLVInfo(lvName)
            if len(lvInfo) != 1:
                raise util.SMException("LV info not found for %s" % ref)
            info = lvInfo[lvName]
            if info.open:
                if refreshed:
                    # should never happen in normal conditions but in some
                    # failure cases the recovery code may not be able to
                    # determine what the correct refcount should be, so it
                    # is not unthinkable that the value might be out of
                    # sync
                    util.SMlog("WARNING: deactivate: LV %s open" % lvName)
                    return
                # check again in case the cached value is stale
                self.refresh()
                refreshed = True
            else:
                break
        try:
            self.deactivateNoRefcount(lvName)
        except util.CommandException:
            # Deactivation failed: refresh and inspect the current state to
            # decide whether the refcount must be restored.
            self.refresh()
            if self.getLVInfo(lvName):
                util.SMlog("LV %s could not be deactivated" % lvName)
                # NOTE: checks the pre-failure snapshot (lvInfo) for the
                # 'active' flag, not the freshly fetched info.
                if lvInfo[lvName].active:
                    util.SMlog("Reverting the refcount change")
                    RefCounter.get(ref, binary, ns)
                raise
            else:
                # LV vanished meanwhile - treat as already deactivated.
                util.SMlog("LV %s not found" % lvName)
    finally:
        lock.release()