def createDataDisk(poolUuid, size, isRoot):
    try:
        vol = OvmVolume()
        vol.size = long(size)
        vol.poolUuid = poolUuid
        pool = OvmStoragePool()
        sr = pool._getSrByNameLable(vol.poolUuid)
        # root disks get their own directory under running_pool; data disks share 'shareDisk'
        if isRoot:
            path = join(sr.mountpoint, 'running_pool', get_uuid())
        else:
            path = join(sr.mountpoint, 'shareDisk')
        if not exists(path):
            os.makedirs(path)
        freeSpace = pool._getSpaceinfoOfDir(path)
        if freeSpace < vol.size:
            raise Exception("%s has not enough space (available:%s, required:%s)" % (path, freeSpace, vol.size))
        vol.uuid = get_uuid()
        vol.name = vol.uuid + '.raw'
        filePath = join(path, vol.name)
        exceptionIfNoSuccess(xen_create_disk(filePath, BytesToM(vol.size)), "Create datadisk %s failed" % filePath)
        vol.path = filePath
        rs = fromOvmVolume(vol)
        logger.debug(OvmVolume.createDataDisk, rs)
        return rs
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmVolume.createDataDisk, errmsg)
        raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createDataDisk), errmsg)
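# Hedged usage sketch (not part of the original module): these entry points raise
# XmlRpcFault, so they are exposed to the management server over XML-RPC. The agent
# endpoint, port and the registered method name used below are assumptions for
# illustration only; the real values come from the agent configuration.
def _exampleCreateDataDiskCall():
    import xmlrpclib
    agent = xmlrpclib.ServerProxy('http://ovm-host:8899')  # assumed endpoint
    # size is passed as a string of bytes; createDataDisk() converts it with long()
    return agent.OvmVolume.createDataDisk('pool-uuid', str(8 * 1024 * 1024 * 1024), False)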
def createFromTemplate(poolUuid, templateUrl):
    try:
        if not exists(templateUrl):
            raise Exception("Cannot find template:%s" % templateUrl)
        sr = OvmStoragePool()._getSrByNameLable(poolUuid)
        volDirUuid = get_uuid()
        volUuid = get_uuid()
        priStorageMountPoint = sr.mountpoint
        volDir = join(priStorageMountPoint, 'running_pool', volDirUuid)
        if exists(volDir):
            raise Exception("Volume dir %s already exists, cannot override" % volDir)
        os.makedirs(volDir)
        OvmStoragePool()._checkDirSizeForImage(volDir, templateUrl)
        volName = volUuid + '.raw'
        tgt = join(volDir, volName)
        cpVolCmd = ['cp', templateUrl, tgt]
        doCmd(cpVolCmd)
        volSize = os.path.getsize(tgt)
        vol = OvmVolume()
        vol.name = volName
        vol.path = tgt
        vol.size = volSize
        vol.uuid = volUuid
        vol.poolUuid = poolUuid
        rs = fromOvmVolume(vol)
        logger.debug(OvmVolume.createFromTemplate, rs)
        return rs
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmVolume.createFromTemplate, errmsg)
        raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.createFromTemplate), errmsg)
def cdBoot(vm, vmPath):
    isoMountPath = None
    try:
        cdrom = None
        for disk in vm.disks:
            if disk.isIso == True:
                cdrom = disk
                break
        if not cdrom:
            raise Exception("Cannot find ISO in disks")
        isoOnSecStorage = dirname(cdrom.path)
        isoName = basename(cdrom.path)
        isoMountPath = OvmVm()._getIsoMountPath(vmPath)
        OvmStoragePool()._mount(isoOnSecStorage, isoMountPath)
        isoPath = join(isoMountPath, isoName)
        if not exists(isoPath):
            raise Exception("Cannot find ISO %s at %s which mounts to %s" % (isoName, isoOnSecStorage, isoMountPath))
        stdout = run_cmd(args=['file', isoPath])
        if not stdout.strip().endswith("(bootable)"):
            raise Exception("ISO %s is not bootable" % cdrom.path)
        # now alter cdrom to the correct path
        cdrom.path = isoPath
        if len(vm.vifs) != 0:
            vif = vm.vifs[0]
            # ISO boot must be HVM
            vifCfg = ','.join([vif.mac, vif.bridge, 'ioemu'])
        else:
            vifCfg = ''
        rootDiskSize = os.path.getsize(vm.rootDisk.path)
        rootDiskCfg = ':'.join([join(vmPath, basename(vm.rootDisk.path)), str(BytesToG(rootDiskSize)), 'True'])
        disks = [rootDiskCfg]
        for d in vm.disks:
            if d.isIso:
                continue
            size = os.path.getsize(d.path)
            cfg = ':'.join([d.path, str(BytesToG(size)), 'True'])
            disks.append(cfg)
        disksCfg = ','.join(disks)
        server = successToMap(get_master_ip())['ip']
        raiseExceptionIfFail(install_vm_hvm(vmPath, BytesToM(vm.memory), vm.cpuNum, vifCfg, disksCfg, cdrom.path, vncpassword='', dedicated_server=server))
        rs = SUCC()
        return rs
    except Exception, e:
        if isoMountPath and OvmStoragePool()._isMounted(isoMountPath):
            doCmd(['umount', '-f', isoMountPath])
        errmsg = fmt_err_msg(e)
        raise Exception(errmsg)
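# Hedged illustration (not in the original source): cdBoot() encodes the vif for
# install_vm_hvm as 'mac,bridge,ioemu' and each disk as 'path:size-in-GB:True', joined
# with commas. The helper below merely mirrors that string construction; the conversion
# factor stands in for BytesToG() and the values are made up.
def _exampleDiskCfg(path, sizeInBytes):
    # mirrors the ':'.join([...]) construction used in cdBoot()
    return ':'.join([path, str(sizeInBytes / (1024 * 1024 * 1024)), 'True'])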
def destroy(poolUuid, path):
    try:
        OvmStoragePool()._getSrByNameLable(poolUuid)
        if not exists(path):
            raise Exception("Cannot find %s" % path)
        dir = dirname(path)
        if exists(join(dir, 'vm.cfg')):
            # a sibling vm.cfg means this is a root disk: remove the vm name link and the whole directory
            vmNamePath = join(dir, 'vmName')
            if exists(vmNamePath):
                vmNameFd = open(vmNamePath, 'r')
                vmName = vmNameFd.readline()
                vmName = vmName.rstrip('\n')
                link = join(dirname(dir), vmName)
                doCmd(['rm', '-rf', link])
                vmNameFd.close()
            else:
                logger.warning(OvmVolume.destroy, "Cannot find vmName file in %s" % dir)
            doCmd(['rm', '-rf', dir])
        else:
            doCmd(['rm', path])
        return SUCC()
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmVolume.destroy, errmsg)
        raise XmlRpcFault(toErrCode(OvmVolume, OvmVolume.destroy), errmsg)
def detachOrAttachIso(vmName, iso, isAttach):
    try:
        if vmName in OvmHost.getAllVms():
            scope = 'both'
            vmPath = OvmHost()._vmNameToPath(vmName)
        else:
            scope = 'cfg'
            vmPath = OvmHost()._getVmPathFromPrimaryStorage(vmName)
        vmType = OvmVm()._getVmTypeFromConfigFile(vmPath)
        if vmType != 'HVM':
            raise Exception("Only HVM supports attaching/detaching ISO")
        if not isAttach:
            iso = ''
        else:
            isoName = basename(iso)
            isoMountPoint = OvmVm()._getIsoMountPath(vmPath)
            isoOnSecStorage = dirname(iso)
            OvmStoragePool()._mount(isoOnSecStorage, isoMountPoint)
            iso = join(isoMountPoint, isoName)
        exceptionIfNoSuccess(xen_change_vm_cdrom(vmPath, iso, scope))
        return SUCC()
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmVm.detachOrAttachIso, errmsg)
        raise XmlRpcFault(toErrCode(OvmVm, OvmVm.detachOrAttachIso), errmsg)
def scanStoppedVmOnPrimaryStorage(vms):
    def isMyVmDirLink(path):
        return (islink(path) and exists(join(path, 'vm.cfg')) and ('-' in basename(path)) and (exists(join(path, makeOwnerFileName()))))

    mps = OvmStoragePool()._getAllMountPoints()
    for mountPoint in mps:
        runningPool = join(mountPoint, 'running_pool')
        if not exists(runningPool):
            logger.debug(OvmHost.getAllVms, "Primary storage %s does not exist, skip it. This is probably the first getAllVms() called while the Ovm resource is being configured" % runningPool)
            continue
        for dir in os.listdir(runningPool):
            vmDir = join(runningPool, dir)
            if not isMyVmDirLink(vmDir):
                logger.debug(OvmHost.getAllVms, "%s is not our vm directory, skip it" % vmDir)
                continue
            if vms.has_key(dir):
                logger.debug(OvmHost.getAllVms, "%s is already in the running list, skip it" % dir)
                continue
            logger.debug(OvmHost.getAllVms, "Found a stopped vm %s on primary storage %s, report it to the management server" % (dir, mountPoint))
            vms[dir] = "DOWN"
def setupHeartBeat(poolUuid, ip):
    try:
        sr = OvmStoragePool()._getSrByNameLable(poolUuid)
        OvmHaHeartBeat.start(sr.mountpoint, ip)
        return SUCC()
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmHost.setupHeartBeat, errmsg)
        raise XmlRpcFault(toErrCode(OvmHost, OvmHost.setupHeartBeat), errmsg)
def fence(ip):
    # re-read the file a few times to avoid a race where the heartbeat file is read while it is being written
    def getTimeStamp(hbFile):
        for i in range(1, 3):
            f = open(hbFile, 'r')
            line = f.readline()
            items = re.findall(HEARTBEAT_TIMESTAMP_PATTERN, line)
            if len(items) == 0:
                logger.debug(OvmHost.fence, "Got incorrect heartbeat data %s, will retry %s times" % (line, 3 - i))
                f.close()
                time.sleep(5)
            else:
                f.close()
                timestamp = items[0]
                return timestamp.lstrip('<timestamp>').rstrip('</timestamp>')

    # poll for several minutes in total; the heartbeat update frequency is 2 minutes
    def check(hbFile):
        for i in range(1, 6):
            ts = getTimeStamp(hbFile)
            time.sleep(60)
            nts = getTimeStamp(hbFile)
            if ts != nts:
                return True
            else:
                logger.debug(OvmHost.fence, '%s is not updated, old value=%s, will retry %s times' % (hbFile, ts, 6 - i))
        return False

    try:
        mountpoints = OvmStoragePool()._getAllMountPoints()
        hbFile = None
        for m in mountpoints:
            p = join(m, HEARTBEAT_DIR, ipToHeartBeatFileName(ip))
            if exists(p):
                hbFile = p
                break
        if not hbFile:
            raise Exception('Cannot find heartbeat file for %s in pools %s' % (ip, mountpoints))
        rs = toGson({"isLive": check(hbFile)})
        logger.debug(OvmHost.fence, rs)
        return rs
    except Exception, e:
        errmsg = fmt_err_msg(e)
        logger.error(OvmHost.fence, errmsg)
        raise XmlRpcFault(toErrCode(OvmHost, OvmHost.fence), errmsg)
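# Hedged sketch (not in the original source): fence() assumes each heartbeat file holds a
# line containing a <timestamp>...</timestamp> tag that the peer host refreshes
# periodically. The line format and the regex below are illustrative assumptions; the
# real values are defined by OvmHaHeartBeat and HEARTBEAT_TIMESTAMP_PATTERN. The
# extraction mirrors the lstrip/rstrip approach used in getTimeStamp() above.
def _exampleParseHeartBeatLine():
    import re
    line = '<heartbeat><timestamp>1309423050</timestamp></heartbeat>'  # assumed format
    items = re.findall('<timestamp>.*</timestamp>', line)
    if items:
        return items[0].lstrip('<timestamp>').rstrip('</timestamp>')  # -> '1309423050'
    return None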
def _getVmPathFromPrimaryStorage(self, vmName):
    '''
    We don't have a database to store vm states, so there is no way to retrieve
    information about a vm once it has stopped. The trick is to find the vm path on
    primary storage, then read the information from its configuration file.
    '''
    mps = OvmStoragePool()._getAllMountPoints()
    vmPath = None
    for p in mps:
        # only keep the candidate path if it actually exists on this primary storage
        candidate = join(p, 'running_pool', vmName)
        if exists(candidate):
            vmPath = candidate
            break
    if not vmPath:
        logger.error(self._getVmPathFromPrimaryStorage, "Cannot find link for %s in any primary storage, the vm is really gone!" % vmName)
        raise Exception("Cannot find link for %s in any primary storage, the vm is really gone!" % vmName)
    return vmPath
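# Hedged illustration (assumption, not original code): _getVmPathFromPrimaryStorage relies
# on every primary storage pool being mounted and laid out as
#   <mountpoint>/running_pool/<vmName>/vm.cfg plus the vm's disk images,
# so a stopped vm can still be located by name. A minimal existence check under that
# assumed layout might look like this:
def _exampleHasVmOnPrimaryStorage(mountPoint, vmName):
    from os.path import join, exists
    return exists(join(mountPoint, 'running_pool', vmName, 'vm.cfg'))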