class VMConfigurator():
    """Facade that routes VM-configuration calls to the concrete
    configurator class selected by name or by OS distribution."""

    logger = Logger.getLogger()

    @staticmethod
    def __getConfiguratorByNameAndOsType(configurator, os):
        # Resolve the configurator class:
        #  - an explicit, non-empty configurator name wins (only the
        #    Mediacat configurator is recognised by name here);
        #  - otherwise fall back to the OS distribution (Debian/Ubuntu).
        # NOTE(review): nesting reconstructed from collapsed source; the
        # "else" is assumed to pair with the OUTER "if" — confirm upstream.
        if configurator and configurator != "":
            if configurator == MediacatVMConfigurator.getIdentifier():
                return MediacatVMConfigurator
        else:
            if os.lower() == "debian" or os.lower() == "ubuntu":
                return OfeliaDebianVMConfigurator
        raise Exception("Unknown configurator")

    ##Public methods
    @staticmethod
    def configureVmDisk(vm, pathToMountPoint):
        # Delegate disk configuration to the resolved configurator class.
        VMConfigurator.__getConfiguratorByNameAndOsType(
            vm.xen_configuration.configurator,
            vm.operating_system_distribution).configureVmDisk(
                vm, pathToMountPoint)

    @staticmethod
    def createVmConfigurationFile(vm):
        # Delegate Xen config-file creation to the resolved configurator.
        VMConfigurator.__getConfiguratorByNameAndOsType(
            vm.xen_configuration.configurator,
            vm.operating_system_distribution).createVmConfigurationFile(vm)
class XmlRpcServer():
    """Stand-alone secure XMLRPC server exposing inline RPC wrappers."""

    logger = Logger.getLogger()

    @staticmethod
    def createInstanceAndEngage(callBackFunction,
                                HandlerClass=SecureXMLRpcRequestHandler,
                                ServerClass=SecureXMLRPCServer):
        """Test xml rpc over https server"""

        class xmlrpc_wrappers:
            """RPC methods exposed to remote callers."""

            def __init__(self):
                import string
                self.python_string = string

            def send(self, callBackUrl, amId, password, xml):
                #FIXME: XXX: use certificates instead of password based authentication
                if password != XMLRPC_SERVER_PASSWORD:
                    raise Exception("Password mismatch")
                callBackFunction(callBackUrl, xml)
                return ""

            def ping(self, challenge):
                # Echo the challenge back to the caller.
                return challenge

        # Bind the secure server, register the wrappers and block forever.
        listen_addr = (XMLRPC_SERVER_LISTEN_HOST, XMLRPC_SERVER_LISTEN_PORT)
        rpc_server = ServerClass(listen_addr, HandlerClass)
        rpc_server.register_instance(xmlrpc_wrappers())
        bound = rpc_server.socket.getsockname()
        XmlRpcServer.logger.debug("Serving HTTPS XMLRPC requests on " +
                                  str(bound[0]) + ":" + str(bound[1]))
        rpc_server.serve_forever()
class MonitoringDispatcher:
    """Routes monitoring actions to the hypervisor-specific dispatcher."""

    logger = Logger.getLogger()

    @staticmethod
    def __getMonitoringDispatcher(vtype):
        #Import of Dispatchers must go here to avoid import circular dependecy
        from xen.monitoring.XenMonitoringDispatcher import XenMonitoringDispatcher
        if vtype == "xen":
            return XenMonitoringDispatcher
        raise Exception("Virtualization type not supported by the agent")

    @staticmethod
    def __dispatchAction(dispatcher, action, server):
        # Only inventory gathering is supported at the moment.
        if action.type_ == "listActiveVMs":
            return dispatcher.listActiveVMs(action.id, server)
        raise Exception("Unknown action type")

    @staticmethod
    def processMonitoring(monitoring):
        """Resolve a dispatcher per action, notify ONGOING and delegate."""
        for action in monitoring.action:
            server = action.server
            try:
                dispatcher = MonitoringDispatcher.__getMonitoringDispatcher(
                    server.virtualization_type)
            except Exception as e:
                # Unknown virtualization type: report and abort the batch.
                XmlRpcClient.sendAsyncMonitoringActionStatus(
                    action.id, "FAILED", str(e))
                MonitoringDispatcher.logger.error(str(e))
                return
            try:
                #Send async notification
                XmlRpcClient.sendAsyncMonitoringActionStatus(
                    action.id, "ONGOING", "")
                MonitoringDispatcher.logger.debug("After sending ongoing")
                MonitoringDispatcher.__dispatchAction(dispatcher, action,
                                                      server)
            except Exception as e:
                MonitoringDispatcher.logger.error(str(e))
                raise e

    ##Abstract methods definition for MonitoringDispatchers
    #Inventory
    @staticmethod
    def listActiveVMs(id, server):
        raise Exception("Abstract method cannot be called")
class XmlParser(object):
    """Thin wrapper around the generated vtRspecInterface XML bindings."""

    logger = Logger.getLogger()

    @staticmethod
    def parseXML(rawXML):
        """Parse *rawXML* into a vtRspecInterface object tree.

        Raises XMLParsingException when the input cannot be parsed.
        """
        try:
            # Renamed local: the original shadowed the builtin "object".
            parsed = vtRspecInterface.parseString(rawXML)
            return parsed
        except Exception as e:
            XmlParser.logger.error(str(e))
            # Fixed duplicated word in the original message ("parse parse").
            raise XMLParsingException("Could not parse XML;" + str(e))
class XenMonitoringDispatcher(MonitoringDispatcher):
    """Monitoring dispatcher implementation for Xen servers."""

    logger = Logger.getLogger()

    ##Monitoring routines
    @staticmethod
    def listActiveVMs(id, server):
        """Collect active domains and report them back asynchronously."""
        try:
            active = XendManager.retrieveActiveDomainsByUUID()
            XmlRpcClient.sendAsyncMonitoringActiveVMsInfo(
                id, "SUCCESS", active, server)
        except Exception as e:
            # Report failure asynchronously instead of propagating.
            XmlRpcClient.sendAsyncMonitoringActionStatus(id, "FAILED", str(e))
            XenMonitoringDispatcher.logger.error(str(e))
            return
class XmlRpcServer():
    """Engages the secure XMLRPC server using the XmlRpcAPI wrappers."""

    logger = Logger.getLogger()

    @staticmethod
    def createInstanceAndEngage(callBackFunction,
                                HandlerClass=SecureXMLRpcRequestHandler,
                                ServerClass=SecureXMLRPCServer):
        """Bind, register the RPC wrappers and serve forever (blocking)."""
        endpoint = (XMLRPC_SERVER_LISTEN_HOST,
                    XMLRPC_SERVER_LISTEN_PORT)  # (address, port)
        rpc_server = ServerClass(endpoint, HandlerClass)
        rpc_server.register_instance(
            XmlRpcAPI.xmlrpc_wrappers(callBackFunction))
        bound = rpc_server.socket.getsockname()
        XmlRpcServer.logger.debug("Serving HTTPS XMLRPC requests on " +
                                  str(bound[0]) + ":" + str(bound[1]))
        rpc_server.serve_forever()
class ServiceThread(Thread):
    """Runs a single service method with one parameter in its own thread."""

    logger = Logger.getLogger()

    def __init__(self, servMethod, param, url):
        Thread.__init__(self)
        self.__method = servMethod
        self.__param = param
        # Callback URL kept public; consumers may read it while running.
        self.callBackURL = url

    @staticmethod
    def startMethodInNewThread(serviceMethod, param, url):
        """Spawn and start a ServiceThread for *serviceMethod*.

        Fix: the original assigned a dead local ("thread = None") and
        discarded the started thread. The thread is now returned so
        callers may join() it; callers ignoring the return value are
        unaffected (it previously returned None implicitly).
        """
        thread = ServiceThread(serviceMethod, param, url)
        thread.start()
        return thread

    def run(self):
        self.logger.debug("Starting service thread")
        self.__method(self.__param)
        self.logger.debug("Terminating service thread")
class XmlCrafter(object):
    """Serialises generated XML binding objects into XML strings."""

    logger = Logger.getLogger()

    @staticmethod
    def craftXML(XMLclass):
        """Render *XMLclass* as an XML document string.

        Raises XMLParsingException when the object cannot be exported.
        """
        try:
            buf = StringIO()
            buf.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            # Export with the OFELIA vt_am rspec namespace declaration.
            XMLclass.export(
                buf,
                level=0,
                namespacedef_='xmlns=\"http://www.fp7-ofelia.eu/CF/vt_am/rspec\"')
            rendered = buf.getvalue()
            buf.close()
            return rendered
        except Exception as e:
            XmlCrafter.logger.error(str(e))
            raise XMLParsingException("Could not craft Model;" + str(e))
class DomainMonitor:
    """Read-only helpers for inspecting libvirt domains."""

    logger = Logger.getLogger()

    @staticmethod
    def retriveActiveDomainsByUUID(con):
        """Return the UUID strings of all active domains except domain0."""
        active_uuids = list()
        for domain_id in con.listDomainsID():
            # Skip the privileged control domain (id 0).
            if domain_id == 0:
                continue
            active_uuids.append(con.lookupByID(domain_id).UUIDString())
        return active_uuids
class VmMutexStore():
    """Per-VM lock registry used to serialise actions on the same VM."""

    logger = Logger.getLogger()

    @staticmethod
    def __getKey(vm):
        # Project + slice + name uniquely identifies a VM.
        return vm.project_id + vm.slice_id + vm.name

    @staticmethod
    def lock(vm):
        """Acquire (creating on first use) the lock for *vm*. May block."""
        key = VmMutexStore.__getKey(vm)
        # localmutex guards the dictionary itself; entries are currently
        # never removed in unlock().
        localmutex.acquire()
        # Fix: dict.has_key() is deprecated (and removed in Python 3);
        # the "in" operator is equivalent and portable.
        if key not in _vmLocks:
            #create Mutex for the VM
            _vmLocks[key] = Lock()
        #release local mutex
        localmutex.release()
        #Acquire specific VM lock
        _vmLocks.get(key).acquire()
        return

    @staticmethod
    def unlock(vm):
        """Release the lock for *vm*."""
        localmutex.acquire()
        #TODO: release resources in dict?
        _vmLocks.get(VmMutexStore.__getKey(vm)).release()
        localmutex.release()
        return
class IratiDebianVMConfigurator:
    """Configures a Debian-based IRATI VM image: network interfaces, udev
    rules, LDAP access file, hostname, SSH host keys and the Xen config."""

    logger = Logger.getLogger()

    '''
    Private methods
    '''

    @staticmethod
    def __configureInterfacesFile(vm, iFile):
        # Writes a Debian /etc/network/interfaces file for the VM.
        #Loopback
        iFile.write("auto lo\niface lo inet loopback\n\n")
        #Interfaces
        for inter in vm.xen_configuration.interfaces.interface:
            if inter.ismgmt:
                #is a mgmt interface: static addressing (+ optional gw/dns)
                interfaceString = "auto "+inter.name+"\n"+\
                    "iface "+inter.name+" inet static\n"+\
                    "\taddress "+inter.ip +"\n"+\
                    "\tnetmask "+inter.mask+"\n"
                if inter.gw != None and inter.gw != "":
                    interfaceString += "\tgateway " + inter.gw + "\n"
                # NOTE(review): dns1/dns2 nesting reconstructed from
                # collapsed source as a flat sequence — confirm upstream.
                if inter.dns1 != None and inter.dns1 != "":
                    interfaceString += "\tdns-nameservers " + inter.dns1
                if inter.dns2 != None and inter.dns2 != "":
                    interfaceString += " " + inter.dns2
                interfaceString += "\n\n"
                iFile.write(interfaceString)
            else:
                #is a data interface: declared but left unconfigured
                iFile.write("auto " + inter.name + "\n\n")

    @staticmethod
    def __configureUdevFile(vm, uFile):
        # Pins each interface name to its MAC via udev persistent-net rules.
        for inter in vm.xen_configuration.interfaces.interface:
            uFile.write(
                'SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="'
                + inter.mac +
                '", ATTR{dev_id}=="0x0", ATTR{type}=="1", KERNEL=="eth*", NAME="'
                + inter.name + '"\n')

    @staticmethod
    def __configureHostname(vm, hFile):
        # /etc/hostname holds just the VM name.
        hFile.write(vm.name)

    @staticmethod
    def __createFullvirtualizationFileHdConfigFile(vm, env):
        # Renders the Xen HVM configuration file from a jinja2 template.
        template_name = "fullVirtualizedFileHd.pt"
        template = env.get_template(template_name)
        #Set vars&render
        output = template.render(
            kernelImg=OXA_XEN_SERVER_KERNEL,
            initrdImg=OXA_XEN_SERVER_INITRD,
            hdFilePath=HdManager.getHdPath(vm),
            #swapFilePath=HdManager.getSwapPath(vm),
            vm=vm)
        #write file
        cfile = open(HdManager.getConfigFilePath(vm), 'w')
        cfile.write(output)
        cfile.close()

    '''
    Public methods
    '''

    @staticmethod
    def getIdentifier():
        # Identifier used to select this configurator class by name.
        return IratiDebianVMConfigurator.__name__

    @staticmethod
    def _configureNetworking(vm, path):
        #Configure interfaces and udev settings
        try:
            try:
                #Backup current files (best effort; failures ignored)
                shutil.copy(
                    path + OXA_DEBIAN_INTERFACES_FILE_LOCATION,
                    path + OXA_DEBIAN_INTERFACES_FILE_LOCATION + ".bak")
                shutil.copy(
                    path + OXA_DEBIAN_UDEV_FILE_LOCATION,
                    path + OXA_DEBIAN_UDEV_FILE_LOCATION + ".bak")
            except Exception as e:
                pass
            with open(path + OXA_DEBIAN_INTERFACES_FILE_LOCATION,
                      'w') as openif:
                IratiDebianVMConfigurator.__configureInterfacesFile(
                    vm, openif)
            with open(path + OXA_DEBIAN_UDEV_FILE_LOCATION, 'w') as openudev:
                IratiDebianVMConfigurator.__configureUdevFile(vm, openudev)
        except Exception as e:
            IratiDebianVMConfigurator.logger.error(str(e))
            raise Exception("Could not configure interfaces or Udev file")

    @staticmethod
    def _configureLDAPSettings(vm, path):
        # Rewrites the security access file replacing the project-id
        # placeholder; best effort — failures are only logged.
        try:
            file = open(path + OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "r")
            text = file.read()
            file.close()
            file = open(path + OXA_DEBIAN_SECURITY_ACCESS_FILE_LOCATION, "w")
            #Scape spaces and tabs
            projectName = string.replace(vm.project_name, ' ', '_')
            projectName = string.replace(projectName, '\t', '__')
            file.write(
                text.replace("__projectId",
                             "@proj_" + vm.project_id + "_" + projectName))
            file.close()
        except Exception as e:
            IratiDebianVMConfigurator.logger.error(
                "Could not configure LDAP file!! - " + str(e))

    @staticmethod
    def _configureHostName(vm, path):
        # Best-effort hostname configuration; failures are only logged.
        try:
            with open(path + OXA_DEBIAN_HOSTNAME_FILE_LOCATION,
                      'w') as openhost:
                IratiDebianVMConfigurator.__configureHostname(vm, openhost)
        except Exception as e:
            IratiDebianVMConfigurator.logger.error(
                "Could not configure hostname;skipping.. - " + str(e))

    @staticmethod
    def _configureSSHServer(vm, path):
        # Regenerates host SSH keys so cloned images do not share keys;
        # a failure here aborts, as the VM could end up unreachable.
        try:
            IratiDebianVMConfigurator.logger.debug(
                "Regenerating SSH keys...\n Deleting old keys...")
            subprocess.check_call("rm -f " + path + "/etc/ssh/ssh_host_*",
                                  shell=True,
                                  stdout=None)
            #subprocess.check_call("chroot "+path+" dpkg-reconfigure openssh-server ", shell=True, stdout=None)
            IratiDebianVMConfigurator.logger.debug(
                "Creating SSH1 key; this may take some time...")
            subprocess.check_call("ssh-keygen -q -f " + path +
                                  "/etc/ssh/ssh_host_key -N '' -t rsa1",
                                  shell=True,
                                  stdout=None)
            IratiDebianVMConfigurator.logger.debug(
                "Creating SSH2 RSA key; this may take some time...")
            subprocess.check_call("ssh-keygen -q -f " + path +
                                  "/etc/ssh/ssh_host_rsa_key -N '' -t rsa",
                                  shell=True,
                                  stdout=None)
            IratiDebianVMConfigurator.logger.debug(
                "Creating SSH2 DSA key; this may take some time...")
            subprocess.check_call("ssh-keygen -q -f " + path +
                                  "/etc/ssh/ssh_host_dsa_key -N '' -t dsa",
                                  shell=True,
                                  stdout=None)
        except Exception as e:
            IratiDebianVMConfigurator.logger.error(
                "Fatal error; could not regenerate SSH keys. Aborting to prevent VM to be unreachable..."
                + str(e))
            raise e

    #Public methods
    @staticmethod
    def createVmConfigurationFile(vm):
        # Creates the Xen config file; only HVM + full-file-image setups
        # are supported.
        #get env
        template_dirs = []
        template_dirs.append(
            os.path.join(os.path.dirname(__file__), 'templates/'))
        env = jinja2.Environment(
            loader=jinja2.FileSystemLoader(template_dirs))
        if vm.xen_configuration.hd_setup_type == "full-file-image" and vm.xen_configuration.virtualization_setup_type == "hvm":
            IratiDebianVMConfigurator.__createFullvirtualizationFileHdConfigFile(
                vm, env)
        else:
            raise Exception(
                "type of file or type of virtualization not supported for the creation of xen vm configuration file"
            )

    @staticmethod
    def configureVmDisk(vm, path):
        # Sanity-check the mount path, then apply all guest-side config.
        if not path or not re.match(
                r'[\s]*\/\w+\/\w+\/.*', path,
                re.IGNORECASE):  #For security, should never happen anyway
            raise Exception("Incorrect vm path")
        #Configure networking
        IratiDebianVMConfigurator._configureNetworking(vm, path)
        IratiDebianVMConfigurator.logger.info(
            "Network configured successfully...")
        #Configure LDAP settings
        IratiDebianVMConfigurator._configureLDAPSettings(vm, path)
        IratiDebianVMConfigurator.logger.info(
            "Authentication configured successfully...")
        #Configure Hostname
        IratiDebianVMConfigurator._configureHostName(vm, path)
        IratiDebianVMConfigurator.logger.info(
            "Hostname configured successfully...")
        #Regenerate SSH keys
        IratiDebianVMConfigurator._configureSSHServer(vm, path)
        IratiDebianVMConfigurator.logger.info(
            "SSH have been keys regenerated...")
import logging
'''
@author: msune, omoya
@organization: i2CAT, OFELIA FP7

Simple Logger wrapper for Libvirt Event Monitoring
'''
from utils.Logger import Logger

# Shared logger instance for all libvirt monitoring code.
Libvirt = Logger.getLogger()

#Libvirt Monitoring Log File
LIBVIRT_LOG = '/opt/ofelia/oxa/log/libvirtmonitor.log'


class LibvirtLogger():
    """Accessor for the libvirt-monitoring logger with a file handler."""

    @staticmethod
    def getLogger():
        # Bug fix: Logger.addHandler() returns None, so the original
        # "return Libvirt.addHandler(...)" handed callers None instead
        # of the logger. Also guard against stacking a duplicate
        # FileHandler on every call.
        # NOTE(review): assumes Libvirt is a logging.Logger (it accepts
        # addHandler); confirm utils.Logger.getLogger's return type.
        if not any(isinstance(h, logging.FileHandler)
                   for h in Libvirt.handlers):
            Libvirt.addHandler(logging.FileHandler(LIBVIRT_LOG))
        return Libvirt
class FileHdManager(object):
    '''
    File-type Hard Disk management routines
    '''
    logger = Logger.getLogger()

    #Enables/disables the usage of Cache directory
    __useCache = OXA_FILEHD_USE_CACHE

    ##Utils
    @staticmethod
    def subprocessCall(command,
                       priority=OXA_FILEHD_NICE_PRIORITY,
                       ioPriority=OXA_FILEHD_IONICE_PRIORITY,
                       ioClass=OXA_FILEHD_IONICE_CLASS,
                       stdout=None):
        # Runs *command* wrapped in nice/ionice so disk-heavy work does
        # not starve the host; check_call raises on non-zero exit status.
        try:
            wrappedCmd = "/usr/bin/nice -n " + str(
                priority) + " /usr/bin/ionice -c " + str(
                    ioClass) + " -n " + str(ioPriority) + " " + command
            FileHdManager.logger.debug("Executing: " + wrappedCmd)
            subprocess.check_call(wrappedCmd, shell=True, stdout=stdout)
        except Exception as e:
            FileHdManager.logger.error("Unable to execute command: " +
                                       command)
            raise e

    #Debug string
    @staticmethod
    def debugVM(vm):
        # Short human-readable VM identifier for error messages.
        return " project:" + vm.project_id + ", slice:" + vm.slice_id + ", name:" + vm.name

    #Paths
    ''' Returns the container directory for the VM in remote FS'''

    @staticmethod
    def getRemoteHdDirectory(vm):
        return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/"

    ''' Returns the container directory for the VM in remote Cache, if used'''

    @staticmethod
    def getHdDirectory(vm):
        if FileHdManager.__useCache:
            return OXA_FILEHD_CACHE_VMS + vm.project_id + "/" + vm.slice_id + "/"
        else:
            return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/"

    ''' Returns the path of the hd file in Cache, if used'''

    @staticmethod
    def getHdPath(vm):
        if FileHdManager.__useCache:
            return OXA_FILEHD_CACHE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".img"
        else:
            return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".img"

    ''' Returns the path of the hd file in Remote'''

    @staticmethod
    def getRemoteHdPath(vm):
        return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".img"

    ''' Returns the path of the swap hd file in Cache, if used'''

    @staticmethod
    def getSwapPath(vm):
        if FileHdManager.__useCache:
            return OXA_FILEHD_CACHE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + "_swap" + ".img"
        else:
            return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + "_swap" + ".img"

    ''' Returns the path of the swap hd file in Remote'''

    @staticmethod
    def getRemoteSwapPath(vm):
        return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + "_swap" + ".img"

    ''' Returns the path of the config file in Cache, if used'''

    @staticmethod
    def getConfigFilePath(vm):
        if FileHdManager.__useCache:
            return OXA_FILEHD_CACHE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".conf"
        else:
            return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".conf"

    ''' Returns the path of the config file in Remote'''

    @staticmethod
    def getRemoteConfigFilePath(vm):
        return OXA_FILEHD_REMOTE_VMS + vm.project_id + "/" + vm.slice_id + "/" + vm.name + ".conf"

    ''' Returns the path of the temporally mounted Hd in the dom0 filesystem'''

    @staticmethod
    def getTmpMountedHdPath(vm):
        return OXA_FILEHD_HD_TMP_MP + vm.name + "_" + vm.uuid + "/"

    ''' Returns the path of the templates origin'''

    @staticmethod
    def getTemplatesPath(vm):
        if FileHdManager.__useCache:
            return OXA_FILEHD_CACHE_TEMPLATES
        else:
            return OXA_FILEHD_REMOTE_TEMPLATES

    ##Hooks
    '''Pre-start Hook'''

    @staticmethod
    def startHook(vm):
        # Ensure the VM files live in the cache FS before starting.
        if not FileHdManager.isVMinCacheFS(vm):
            FileHdManager.moveVMToCacheFS(vm)

    '''Pre-reboot Hook'''

    @staticmethod
    def rebootHook(vm):
        # Nothing to do on reboot.
        return

    '''Post-stop Hook'''

    @staticmethod
    def stopHook(vm):
        # Move the VM files back to the remote FS once stopped.
        if FileHdManager.isVMinCacheFS(vm):
            FileHdManager.moveVMToRemoteFS(vm)

    ##Hd management routines
    @staticmethod
    def __fileTemplateExistsOrImportFromRemote(filepath):
        # Ensures the template referenced by *filepath* is available,
        # importing it from the remote FS into the cache when needed.
        # Returns True when the template can be used, False otherwise.
        #if Cache is not used skip
        if not FileHdManager.__useCache:
            return True
        #Check cache
        if os.path.exists(OXA_FILEHD_CACHE_TEMPLATES + filepath):
            return True
        path = os.path.dirname(filepath)
        #Check remote
        if os.path.exists(OXA_FILEHD_REMOTE_TEMPLATES + path):
            #import from remote to cache
            FileHdManager.logger.info("Importing image to cache directory:" +
                                      OXA_FILEHD_REMOTE_TEMPLATES + path +
                                      "->" + OXA_FILEHD_CACHE_TEMPLATES +
                                      path)
            try:
                #Copy all
                FileHdManager.subprocessCall(
                    "/bin/cp " + str(OXA_FILEHD_REMOTE_TEMPLATES + path) +
                    " " + str(OXA_FILEHD_CACHE_TEMPLATES + path))
            except Exception as e:
                return False
            return True
        return False

    @staticmethod
    def clone(vm):
        # Creates the VM's disks from its template: allocates the main HD
        # (optionally sparse), creates and formats swap, formats the main
        # disk as ext3 and untars the template contents into it.
        # Raises VMalreadyExists when the target image already exists.
        ##Check file existance in CACHE
        if os.path.exists(FileHdManager.getHdPath(vm)):
            raise VMalreadyExists(
                "Another VM with the same name exists in the same project and slice:"
                + FileHdManager.debugVM(vm))
        ##Check file existance in REMOTE
        if os.path.exists(FileHdManager.getRemoteHdPath(vm)):
            raise VMalreadyExists(
                "Another VM with the same name exists in the same project and slice:"
                + FileHdManager.debugVM(vm))
        if FileHdManager.__fileTemplateExistsOrImportFromRemote(
                vm.xen_configuration.hd_origin_path):
            path = ""
            try:
                #TODO: user authentication
                template_path = FileHdManager.getTemplatesPath(
                    vm) + vm.xen_configuration.hd_origin_path
                template_swap_path = FileHdManager.getTemplatesPath(
                    vm) + vm.xen_configuration.hd_origin_path + "_swap"
                vm_path = FileHdManager.getHdPath(vm)
                swap_path = FileHdManager.getSwapPath(vm)
                FileHdManager.logger.debug("Trying to clone from:" +
                                           template_path + "->>" + vm_path)
                if not os.path.exists(os.path.dirname(vm_path)):
                    os.makedirs(os.path.dirname(vm_path))
                # dd block count for the requested disk size.
                count = (vm.xen_configuration.hd_size_mb *
                         1024) / OXA_FILEHD_DD_BS_KB
                # NOTE(review): this condition is true for any non-zero
                # disk size; it looks like it was meant to test the
                # division remainder ("% OXA_FILEHD_DD_BS_KB > 0") —
                # confirm before changing.
                if (vm.xen_configuration.hd_size_mb *
                        1024) / OXA_FILEHD_DD_BS_KB > 0:
                    FileHdManager.logger.warning("HD size will be normalized")
                count = int(count)
                #Create HD
                FileHdManager.logger.info("Creating disks...")
                if OXA_FILEHD_CREATE_SPARSE_DISK:
                    FileHdManager.logger.info(
                        "Main disk will be created as Sparse disk...")
                    # Sparse: write a single block at the end (seek).
                    FileHdManager.subprocessCall(
                        "/bin/dd if=/dev/zero of=" + str(vm_path) + " bs=" +
                        str(OXA_FILEHD_DD_BS_KB) + "k count=1 seek=" +
                        str(count))
                else:
                    FileHdManager.subprocessCall(
                        "/bin/dd if=/dev/zero of=" + str(vm_path) + " bs=" +
                        str(OXA_FILEHD_DD_BS_KB) + "k count=" + str(count))
                #Create Swap and mkswap
                FileHdManager.logger.info("Creating swap disk...")
                swapCount = int(
                    (OXA_DEFAULT_SWAP_SIZE_MB * 1024) / OXA_FILEHD_DD_BS_KB)
                FileHdManager.subprocessCall(
                    "/bin/dd if=/dev/zero of=" + str(swap_path) + " bs=" +
                    str(OXA_FILEHD_DD_BS_KB) + "k count=" + str(swapCount))
                FileHdManager.logger.info("Creating swap filesystem...")
                FileHdManager.subprocessCall("/sbin/mkswap " +
                                             str(swap_path))
                #Format
                FileHdManager.logger.info("Creating EXT3 fs...")
                FileHdManager.subprocessCall("/sbin/mkfs.ext3 -F -q " +
                                             str(vm_path))
                #Untar disk contents
                FileHdManager.logger.info("Uncompressing disk contents...")
                path = FileHdManager.mount(vm)  #mount
                with open(os.devnull, 'w') as opendev:
                    FileHdManager.subprocessCall("/bin/tar -xvf " +
                                                 str(template_path) +
                                                 " -C " + str(path),
                                                 stdout=opendev)
            except Exception as e:
                FileHdManager.logger.error(
                    "Could not clone image to working directory: " + str(e))
                raise Exception("Could not clone image to working directory" +
                                FileHdManager.debugVM(vm))
            finally:
                # Always try to unmount; ignore errors (e.g. never mounted).
                try:
                    FileHdManager.umount(path)
                except:
                    pass
        else:
            raise Exception("Could not find origin hard-disk to clone" +
                            FileHdManager.debugVM(vm))

    @staticmethod
    def delete(vm):
        # Moves the VM to the remote FS (if needed) and deletes its files.
        if not FileHdManager.isVMinRemoteFS(vm):
            FileHdManager.moveVMToRemoteFS(vm)
        os.remove(FileHdManager.getRemoteHdPath(vm))
        os.remove(FileHdManager.getRemoteSwapPath(vm))
        os.remove(FileHdManager.getRemoteConfigFilePath(vm))

    #Mount/umount routines
    @staticmethod
    def mount(vm):
        # Loop-mounts the VM's main disk image; returns the mount point.
        path = FileHdManager.getTmpMountedHdPath(vm)
        if not os.path.isdir(path):
            os.makedirs(path)
        vm_path = FileHdManager.getHdPath(vm)
        FileHdManager.subprocessCall('/bin/mount -o loop ' + str(vm_path) +
                                     " " + str(path))
        return path

    @staticmethod
    def umount(path):
        # Unmounts (detaching the loop device) and removes the mount dir.
        FileHdManager.subprocessCall('/bin/umount -d ' + str(path))
        #remove dir
        os.removedirs(path)

    #Cache-Remote warehouse methods
    @staticmethod
    def isVMinRemoteFS(vm):
        return os.path.exists(FileHdManager.getRemoteHdPath(vm))

    @staticmethod
    def isVMinCacheFS(vm):
        return os.path.exists(FileHdManager.getHdPath(vm))

    @staticmethod
    def moveVMToRemoteFS(vm):
        # Moves hd, swap and config files from cache FS to remote FS.
        #if Cache is not used skip
        if not FileHdManager.__useCache:
            return
        if FileHdManager.isVMinCacheFS(vm):
            #create dirs if do not exist already
            try:
                os.makedirs(FileHdManager.getRemoteHdDirectory(vm))
            except Exception as e:
                pass
            #Move all files
            shutil.move(FileHdManager.getHdPath(vm),
                        FileHdManager.getRemoteHdPath(vm))
            shutil.move(FileHdManager.getSwapPath(vm),
                        FileHdManager.getRemoteSwapPath(vm))
            shutil.move(FileHdManager.getConfigFilePath(vm),
                        FileHdManager.getRemoteConfigFilePath(vm))
        else:
            raise Exception("Cannot find VM in CACHE FS" +
                            FileHdManager.debugVM(vm))

    @staticmethod
    def moveVMToCacheFS(vm):
        # Moves hd, swap and config files from remote FS to cache FS.
        #if Cache is not used skip
        if not FileHdManager.__useCache:
            return
        if FileHdManager.isVMinRemoteFS(vm):
            if FileHdManager.isVMinCacheFS(vm):
                raise Exception("Machine is already in Cache FS" +
                                FileHdManager.debugVM(vm))
            #create dirs if do not exist already
            try:
                os.makedirs(FileHdManager.getHdDirectory(vm))
            except Exception as e:
                pass
            #Move all files
            shutil.move(FileHdManager.getRemoteHdPath(vm),
                        FileHdManager.getHdPath(vm))
            shutil.move(FileHdManager.getRemoteSwapPath(vm),
                        FileHdManager.getSwapPath(vm))
            shutil.move(FileHdManager.getRemoteConfigFilePath(vm),
                        FileHdManager.getConfigFilePath(vm))
        else:
            raise Exception("Cannot find VM in REMOTE FS" +
                            FileHdManager.debugVM(vm))
class XendManager(object):
    """Wrapper around libvirt (and the xm CLI fallback) for managing Xen
    domains: monitoring, start/stop/reboot."""

    _mutex = Lock()
    _xendConnection = None  #NEVER CLOSE IT
    _xendConnectionRO = None  #Not really used
    logger = Logger.getLogger()

    #Shell util
    @staticmethod
    def sanitize_arg(arg):
        # Escapes backslashes, single quotes and spaces for shell usage.
        return arg.replace('\\', '\\\\').replace('\'',
                                                 '\\\'').replace(' ', '\ ')

    @staticmethod
    def __getROConnection():
        #return libvirt.openReadOnly(None)
        return XendManager.__getConnection()  #By-passed

    @staticmethod
    def __getConnection():
        # Lazily opens a single shared libvirt connection (never closed).
        with XendManager._mutex:
            if XendManager._xendConnection is None:
                XendManager._xendConnection = libvirt.open(None)
            return XendManager._xendConnection

    @staticmethod
    def __getDomainByVmName(conn, name):
        return conn.lookupByName(name)

    @staticmethod
    def __getDomainByVmUUID(conn, uuid):
        return conn.lookupByUUIDString(uuid)

    #Monitoring
    @staticmethod
    def isVmRunning(name):
        """True when a domain called *name* exists and is active."""
        conn = XendManager.__getROConnection()
        try:
            dom = conn.lookupByName(name)
            return dom.isActive()
        except Exception as e:
            # lookupByName raises when the domain does not exist.
            return False

    @staticmethod
    def isVmRunningByUUID(uuid):
        """True when a domain with *uuid* exists and is active."""
        conn = XendManager.__getROConnection()
        try:
            dom = conn.lookupByUUIDString(uuid)
            return dom.isActive()
        except Exception as e:
            return False

    @staticmethod
    def retrieveActiveDomainsByUUID():
        """Return (uuid, name) pairs for all active domains except domain0."""
        conn = XendManager.__getROConnection()
        domainIds = conn.listDomainsID()
        doms = list()
        for dId in domainIds:
            #Skip domain0
            if dId == 0:
                continue
            domain = conn.lookupByID(dId)
            doms.append((domain.UUIDString(), domain.name()))
        return doms

    @staticmethod
    def __findAliasForDuplicatedVmName(vm):
        #Duplicated VM name; find new temporal alias
        newVmName = vm.name
        for i in range(OXA_XEN_MAX_DUPLICATED_NAME_VMS):
            if not XendManager.isVmRunning(vm.name + "_" + str(i)):
                return str(vm.name + "_" + str(i))
        # Bug fix: the exception was instantiated but never raised, so
        # this method silently returned None when no alias was free.
        raise Exception(
            "Could not generate an alias for a duplicated vm name.")

    #Provisioning routines
    @staticmethod
    def startDomain(vm):
        """Start *vm* via xm, handling duplicated domain names.

        Raises Exception when the domain is not running after the
        configured wait time.
        """
        #Getting connection
        conn = XendManager.__getConnection()
        with open(HdManager.getConfigFilePath(vm), 'r') as openConfig:
            xmlConf = conn.domainXMLFromNative('xen-xm', openConfig.read(),
                                               0)
        if XendManager.isVmRunning(
                vm.name) and not XendManager.isVmRunningByUUID(vm.uuid):
            #Duplicated name; trying to find an Alias
            newVmName = XendManager.__findAliasForDuplicatedVmName(vm)
            command_list = [
                '/usr/sbin/xm', 'create', 'name=' + newVmName,
                XendManager.sanitize_arg(HdManager.getConfigFilePath(vm))
            ]
            process = subprocess.Popen(command_list,
                                       shell=False,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE)
            out, err = process.communicate()
        else:
            try:
                #Try first using libvirt call
                #conn.createXML(xmlConf,0)
                raise Exception("Skip")  #otherwise stop is ridicously slow
            except Exception as e:
                #Fallback solution; workarounds BUG that created wrong
                #.conf files (extra spaces that libvirt cannot parse)
                command_list = [
                    '/usr/sbin/xm', 'create',
                    XendManager.sanitize_arg(HdManager.getConfigFilePath(vm))
                ]
                process = subprocess.Popen(command_list,
                                           shell=False,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.PIPE)
                out, err = process.communicate()
        # Give the domain time to come up, then verify it is running.
        time.sleep(OXA_XEN_CREATE_WAIT_TIME)
        if not XendManager.isVmRunningByUUID(vm.uuid):
            # Complete with other types of exceptions
            detailed_error = ""
            if "Not enough free memory" in err:
                detailed_error = " because there is not enough free memory in that server. Try another."
            raise Exception("Could not start VM%s" % detailed_error)

    @staticmethod
    def stopDomain(vm):
        """Politely shut down *vm*; destroy it if it ignores the request."""
        #If is not running skip
        if not XendManager.isVmRunningByUUID(vm.uuid):
            return
        dom = XendManager.__getDomainByVmUUID(XendManager.__getConnection(),
                                              vm.uuid)
        #Attemp to be smart and let S.O. halt himself
        dom.shutdown()
        waitTime = 0
        while (waitTime < OXA_XEN_STOP_MAX_SECONDS):
            if not XendManager.isVmRunningByUUID(vm.uuid):
                return
            waitTime += OXA_XEN_STOP_STEP_SECONDS
            time.sleep(OXA_XEN_STOP_STEP_SECONDS)
        #Let's behave impatiently
        dom.destroy()
        time.sleep(OXA_XEN_REBOOT_WAIT_TIME)
        if XendManager.isVmRunningByUUID(vm.uuid):
            raise Exception("Could not stop domain")

    @staticmethod
    def rebootDomain(vm):
        """Reboot *vm* and verify it is running again afterwards."""
        dom = XendManager.__getDomainByVmUUID(XendManager.__getConnection(),
                                              vm.uuid)
        dom.reboot(0)
        time.sleep(OXA_XEN_REBOOT_WAIT_TIME)
        if not XendManager.isVmRunningByUUID(vm.uuid):
            raise Exception(
                "Could not reboot domain (maybe rebooted before MINIMUM_RESTART_TIME?). Domain will remain in stop state"
            )

    '''
    XXX: To be implemented
    '''

    @staticmethod
    def pauseDomain(vm):
        #XXX
        raise Exception("Not implemented")

    @staticmethod
    def resumeDomain(vm):
        #XXX
        raise Exception("Not implemented")
class SpirentCentOSVMConfigurator: logger = Logger.getLogger() ''' Private methods ''' @staticmethod def __configureInterfacesFile(vm, path): #Interfaces for inter in vm.xen_configuration.interfaces.interface: print "Processing interface:" + inter.name iFile = open( path + OXA_REDHAT_INTERFACES_FILE_LOCATION + "ifcfg-" + inter.name, "w") if inter.ismgmt: interfaceString = "DEVICE="+inter.name+"\n"+\ "HWADDR="+inter.mac+"\n"+\ "TYPE=Ethernet\n"+\ "BOOTPROTO=static\n"+\ "ONBOOT=yes\n"+\ "NM_CONTROLLED=yes\n" #is a mgmt interface interfaceString += "IPADDR="+inter.ip +"\n"+\ "NETMASK="+inter.mask +"\n" if inter.dns1 != None and inter.dns1 != "": interfaceString += "DNS1=" + inter.dns1 + "\n" if inter.dns2 != None and inter.dns2 != "": interfaceString += "DNS2=" + inter.dns2 + "\n" if inter.gw != None and inter.gw != "": interfaceString += "GATEWAY=" + inter.gw + "\n" interfaceString += "\n" else: interfaceString = "DEVICE="+inter.name+"\n"+\ "HWADDR="+inter.mac+"\n"+\ "TYPE=Ethernet\n"+\ "ONBOOT=no\n"+\ "NM_CONTROLLED=yes\n"+\ "BOOTPROTO=dhcp\n" iFile.write(interfaceString) os.close(iFile) print "Processing interface:" + inter.name + "FINISHED" @staticmethod def __configureHostname(vm, hFile): hFile.write(vm.name) @staticmethod def __createConfigFile(vm, env): template_name = "spirentSTCVMTemplate.pt" template = env.get_template(template_name) #Set vars&render output = template.render( kernelImg=OXA_XEN_SERVER_KERNEL, initrdImg=OXA_XEN_SERVER_INITRD, hdFilePath=HdManager.getHdPath(vm), #swapFilePath=HdManager.getSwapPath(vm), vm=vm) #write file cfile = open(HdManager.getConfigFilePath(vm), 'w') cfile.write(output) cfile.close() @staticmethod def __configureTestPorts(vm, configFile): configString = "[CFG]\n" #Interfaces for inter in vm.xen_configuration.interfaces.interface: if inter.ismgmt: #is a mgmt interface configString += "ADMIN_PORT=" + inter.name + "\n" else: configString += "TEST_PORT=" + inter.name + "\n" configFile.write(configString + "[END]") @staticmethod 
def __configureAdmin(vm, configFile):
    """Write the Spirent admin configuration file for the VM.

    Locates the management interface of the VM and emits one KEY=value
    line per setting (ports, NTP server, hostname, addressing, gateway)
    into configFile.

    Raises:
        Exception: if no management interface is flagged on the VM, or if
            any required OXA_SPIRENT_* setting is empty.
    """
    # Find the management interface. Previously a missing management
    # interface left `inter` unbound and caused a NameError below;
    # fail with an explicit error instead.
    inter = None
    for iface in vm.xen_configuration.interfaces.interface:
        if iface.ismgmt:
            inter = iface
            break
    if inter is None:
        raise Exception("No management interface found for VM " + vm.name)

    # Validate required settings up front, preserving the original
    # order in which missing settings are reported.
    if OXA_SPIRENT_NPORTS_PER_GROUP == "":
        raise Exception("OXA_SPIRENT_NPORTS_PER_GROUP variable not set")
    if OXA_SPIRENT_NTPSERVER == "":
        raise Exception("OXA_SPIRENT_NTPSERVER variable not set")
    if OXA_SPIRENT_NPORTGROUPS == "":
        raise Exception("OXA_SPIRENT_NPORTGROUPS variable not set")
    if OXA_SPIRENT_LSERVER == "":
        raise Exception("OXA_SPIRENT_LSERVER variable not set")

    # Emit entries in the exact order the appliance expects.
    entries = [
        ("NPORTS_PER_GROUP", OXA_SPIRENT_NPORTS_PER_GROUP),
        ("NTPSERVER", OXA_SPIRENT_NTPSERVER),
        ("HOSTNAME", vm.name),
        ("IPADDR", inter.ip),
        ("PROMISC", "on"),
        ("DRIVERMODE", "sockets"),
        ("NETMASK", inter.mask),
        ("ADDR_MODE", "static"),
        ("PORT_SPEED", "1000M"),
        ("DEVICE", inter.name),
        ("NPORTGROUPS", OXA_SPIRENT_NPORTGROUPS),
        ("LSERVERP", OXA_SPIRENT_LSERVER),
        # Gateway is optional on the interface; emit an empty value when absent.
        ("GATEWAY", inter.gw if inter.gw != None else ""),
    ]
    configFile.write("".join(key + "=" + value + "\n" for key, value in entries))

# Public methods

@staticmethod
def getIdentifier():
    """Return the identifier used to select this configurator."""
    return SpirentCentOSVMConfigurator.__name__

@staticmethod
def _configureNetworking(vm, path):
    """Regenerate the RedHat-style interface files on the mounted VM disk.

    Raises:
        Exception: if the interface configuration cannot be performed.
    """
    try:
        try:
            # Remove all files under /etc/sysconfig/network-scripts/ifcfg-*
            os.system("rm -f " + path + "/" +
                      OXA_REDHAT_INTERFACES_FILE_LOCATION + "ifcfg-eth*")
            SpirentCentOSVMConfigurator.__configureInterfacesFile(vm, path)
        except Exception as e:
            # NOTE(review): failures here were swallowed completely before,
            # which made the outer handler unreachable. Keep the best-effort
            # semantics but at least record what went wrong.
            SpirentCentOSVMConfigurator.logger.error(
                "Could not write interfaces file: " + str(e))
    except Exception as e:
        SpirentCentOSVMConfigurator.logger.error(str(e))
        raise Exception("Could not configure interfaces")

@staticmethod
def _configureHostName(vm, path):
    """Write the VM hostname file; failures are logged and skipped."""
    try:
        with open(path + OXA_REDHAT_HOSTNAME_FILE_LOCATION, 'w') as openhost:
            SpirentCentOSVMConfigurator.__configureHostname(vm, openhost)
    except Exception as e:
        SpirentCentOSVMConfigurator.logger.error(
            "Could not configure hostname;skipping.. - " + str(e))

@staticmethod
def _configureTestPorts(vm, path):
    """Write the Spirent test-ports (stca.ini) file on the VM disk.

    Raises:
        Exception: re-raised after logging if the file cannot be written.
    """
    configPath = OXA_SPIRENT_STCA_INI_PATH
    try:
        with open(path + configPath, 'w') as configFile:
            SpirentCentOSVMConfigurator.__configureTestPorts(vm, configFile)
    except Exception as e:
        SpirentCentOSVMConfigurator.logger.error(
            "Could not configure test ports... - " + str(e))
        raise e

@staticmethod
def _configureAdmin(vm, path):
    """Write the Spirent admin configuration file on the VM disk.

    Raises:
        Exception: re-raised after logging if the file cannot be written.
    """
    configPath = OXA_SPIRENT_ADMIN_CONF_PATH
    try:
        with open(path + configPath, 'w') as configFile:
            SpirentCentOSVMConfigurator.__configureAdmin(vm, configFile)
    except Exception as e:
        SpirentCentOSVMConfigurator.logger.error(
            "Could not configure admin file... - " + str(e))
        raise e

# Public methods

@staticmethod
def createVmConfigurationFile(vm):
    """Render the XEN configuration file for the VM from a Jinja2 template.

    Raises:
        Exception: if the HD setup / virtualization type combination is
            not supported.
    """
    # Build a Jinja2 environment rooted at this module's templates/ dir.
    template_dirs = []
    template_dirs.append(
        os.path.join(os.path.dirname(__file__), 'templates/'))
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(template_dirs))
    if vm.xen_configuration.hd_setup_type == "full-file-image" and vm.xen_configuration.virtualization_setup_type == "hvm":
        SpirentCentOSVMConfigurator.__createConfigFile(vm, env)
    else:
        raise Exception(
            "type of file or type of virtualization not supported for the creation of xen vm configuration file"
        )

@staticmethod
def configureVmDisk(vm, path):
    """Configure the mounted VM disk (networking and hostname).

    Raises:
        Exception: if path does not look like an absolute mount path.
    """
    if not path or not re.match(
            r'[\s]*\/\w+\/\w+\/.*', path,
            re.IGNORECASE):  #For security, should never happen anyway
        raise Exception("Incorrect vm path")

    #Configure networking
    SpirentCentOSVMConfigurator._configureNetworking(vm, path)
    SpirentCentOSVMConfigurator.logger.info(
        "Network configured successfully...")

    #Configure Hostname
    SpirentCentOSVMConfigurator._configureHostName(vm, path)
    SpirentCentOSVMConfigurator.logger.info(
        "Hostname configured successfully...")

    # NOTE(review): the test-ports and admin steps below are disabled,
    # yet success is still logged for both — confirm this is intended.
    #Configure Test-Ports
    #SpirentCentOSVMConfigurator._configureTestPorts(vm,path)
    SpirentCentOSVMConfigurator.logger.info(
        "Test-ports configured successfully...")

    #Configure Admin file
    #SpirentCentOSVMConfigurator._configureAdmin(vm,path)
    SpirentCentOSVMConfigurator.logger.info(
        "Admin file configured successfully...")
class MonitoringDispatcher:
    """Routes monitoring actions to the virtualization-specific dispatcher
    and reports their status back asynchronously via XmlRpcClient."""

    logger = Logger.getLogger()

    @staticmethod
    def __getMonitoringDispatcher(vtype):
        """Return the dispatcher class for the given virtualization type.

        Raises:
            Exception: if vtype is not supported by this agent.
        """
        #Import of Dispatchers must go here to avoid import circular dependecy
        from xen.monitoring.XenMonitoringDispatcher import XenMonitoringDispatcher
        if vtype == "xen":
            return XenMonitoringDispatcher
        else:
            raise Exception("Virtualization type not supported by the agent")

    @staticmethod
    def __dispatchAction(dispatcher, action, server):
        """Invoke the dispatcher method matching the action type.

        Raises:
            Exception: if the action type is unknown.
        """
        #Gathering information
        if action.type_ == "listActiveVMs":
            return dispatcher.listActiveVMs(action.id, server)
        raise Exception("Unknown action type")

    @staticmethod
    def processMonitoring(monitoring):
        """Process every action in the monitoring request.

        Sends an ONGOING notification before dispatching; on dispatcher
        lookup failure sends FAILED and aborts processing.
        """
        for action in monitoring.action:
            server = action.server
            try:
                dispatcher = MonitoringDispatcher.__getMonitoringDispatcher(
                    server.virtualization_type)
            except Exception as e:
                XmlRpcClient.sendAsyncMonitoringActionStatus(
                    action.id, "FAILED", str(e))
                MonitoringDispatcher.logger.error(str(e))
                return

            try:
                #Send async notification
                XmlRpcClient.sendAsyncMonitoringActionStatus(
                    action.id, "ONGOING", "")
                MonitoringDispatcher.logger.debug("After sending ongoing")
                MonitoringDispatcher.__dispatchAction(dispatcher, action, server)
            except Exception as e:
                MonitoringDispatcher.logger.error(str(e))
                try:
                    # Best-effort diagnostic hint when the VT AM callback
                    # endpoint is unreachable.
                    if "No route to host" in str(e):
                        from settings.settingsLoader import VTAM_IP, VTAM_PORT, XMLRPC_USER, XMLRPC_PASS
                        MonitoringDispatcher.logger.error(
                            "Agent cannot reach the VT AM server. Please check that the following settings are correct: https://%s:%s@%s:%s"
                            % (XMLRPC_USER, XMLRPC_PASS, VTAM_IP, VTAM_PORT))
                except Exception:
                    # Diagnostics must never mask the original failure
                    # (was a bare except:, which also caught SystemExit).
                    pass
                # Bare raise preserves the original traceback (was: raise e).
                raise

    ##Abstract methods definition for MonitoringDispatchers
    #Inventory
    @staticmethod
    def listActiveVMs(id, server):
        raise Exception("Abstract method cannot be called")
class ProvisioningDispatcher:
    """Routes provisioning actions (create/modify/delete and scheduling)
    to the virtualization-specific dispatcher under a per-VM lock."""

    logger = Logger.getLogger()

    @staticmethod
    def __getProvisioningDispatcher(vtype):
        """Return the dispatcher class for the given virtualization type.

        Raises:
            Exception: if vtype is not supported by this agent.
        """
        #Import of Dispatchers must go here to avoid import circular dependecy
        from xen.provisioning.XenProvisioningDispatcher import XenProvisioningDispatcher
        if vtype == "xen":
            return XenProvisioningDispatcher
        else:
            raise Exception("Virtualization type not supported by the agent")

    @staticmethod
    def __dispatchAction(dispatcher, action, vm):
        """Invoke the dispatcher method matching the action type.

        Raises:
            Exception: if the action type is unknown.
        """
        #Inventory
        if action.type_ == "create":
            return dispatcher.createVMfromImage(action.id, vm)
        if action.type_ == "modify":
            return dispatcher.modifyVM(action.id, vm)
        if action.type_ == "delete":
            return dispatcher.deleteVM(action.id, vm)
        #Scheduling
        if action.type_ == "start":
            return dispatcher.startVM(action.id, vm)
        if action.type_ == "reboot":
            return dispatcher.rebootVM(action.id, vm)
        if action.type_ == "stop":
            return dispatcher.stopVM(action.id, vm)
        if action.type_ == "hardStop":
            return dispatcher.hardStopVM(action.id, vm)
        raise Exception("Unknown action type")

    @staticmethod
    def processProvisioning(provisioning):
        """Process every action in the provisioning request.

        Acquires the VM mutex for the duration of each dispatch; on
        dispatcher lookup failure sends FAILED and aborts processing.
        """
        for action in provisioning.action:
            vm = action.server.virtual_machines[0]
            try:
                dispatcher = ProvisioningDispatcher.__getProvisioningDispatcher(
                    vm.virtualization_type)
            except Exception as e:
                XmlRpcClient.sendAsyncProvisioningActionStatus(
                    action.id, "FAILED", str(e))
                ProvisioningDispatcher.logger.error(str(e))
                return

            try:
                #Acquire VM lock
                VmMutexStore.lock(vm)
                #Send async notification
                XmlRpcClient.sendAsyncProvisioningActionStatus(
                    action.id, "ONGOING", "")
                ProvisioningDispatcher.__dispatchAction(dispatcher, action, vm)
            except Exception as e:
                ProvisioningDispatcher.logger.error(str(e))
                # Bare raise preserves the original traceback (was: raise e).
                raise
            finally:
                #Release VM lock
                VmMutexStore.unlock(vm)

    ##Abstract methods definition for ProvisioningDispatchers
    #Inventory
    @staticmethod
    def createVMfromImage(id, vm):
        raise Exception("Abstract method cannot be called")

    @staticmethod
    def modifyVM(id, vm):
        raise Exception("Abstract method cannot be called")

    @staticmethod
    def deleteVM(id, vm):
        raise Exception("Abstract method cannot be called")

    #Scheduling
    # NOTE: these four stubs were missing @staticmethod, unlike the
    # inventory stubs above; added for consistency with how they are
    # declared in subclasses and invoked via the class.
    @staticmethod
    def startVM(id, vm):
        raise Exception("Abstract method cannot be called")

    @staticmethod
    def rebootVM(id, vm):
        raise Exception("Abstract method cannot be called")

    @staticmethod
    def stopVM(id, vm):
        raise Exception("Abstract method cannot be called")

    @staticmethod
    def hardStopVM(id, vm):
        raise Exception("Abstract method cannot be called")
class XmlRpcClient:
    """Crafts RSpec response XML and pushes asynchronous status
    notifications to the callback URL stored on the current thread."""

    logger = Logger.getLogger()

    ##Provisioning
    @staticmethod
    def __craftProvisioningResponseXml(actionId, status, description):
        """Build the provisioning-response RSpec XML string."""
        rspec = XmlUtils.getEmptyProvisioningResponseObject()
        rspec.response.provisioning.action[0].id = actionId
        rspec.response.provisioning.action[0].status = status
        rspec.response.provisioning.action[0].description = description
        return XmlCrafter.craftXML(rspec)

    @staticmethod
    def sendAsyncProvisioningActionStatus(actionId, status, description):
        """Send an asynchronous provisioning status message to the
        current thread's callback URL."""
        callBackURL = threading.current_thread().callBackURL
        XmlRpcClient.logger.debug("Sending asynchronous " + status +
                                  " provisioning message to: " + callBackURL)
        server = xmlrpclib.Server(callBackURL)
        # Craft the payload once; the identical XML was previously built
        # twice (once for the log, once for the send).
        xml = XmlRpcClient.__craftProvisioningResponseXml(
            actionId, status, description)
        XmlRpcClient.logger.debug(xml)
        server.sendAsync(xml)
        XmlRpcClient.logger.debug("Sent (" + callBackURL + ")")

    ##Monitoring
    @staticmethod
    def __craftMonitoringResponseXml(actionId, status, description):
        """Build the monitoring-response RSpec XML string."""
        rspec = XmlUtils.getEmptyMonitoringResponseObject()
        rspec.response.monitoring.action[0].id = actionId
        rspec.response.monitoring.action[0].type_ = "listActiveVMs"
        rspec.response.monitoring.action[0].status = status
        rspec.response.monitoring.action[0].description = description
        return XmlCrafter.craftXML(rspec)

    @staticmethod
    def __craftMonitoringActiveVMsInfoResponseXml(actionId, status, vms,
                                                  serverInfo):
        """Build the listActiveVMs response XML, embedding server info
        and one virtual_machine entry per (uuid, name) pair in vms."""
        rspec = XmlUtils.getEmptyMonitoringResponseObject()
        rspec.response.monitoring.action[0].id = actionId
        rspec.response.monitoring.action[0].status = status
        rspec.response.monitoring.action[0].type_ = "listActiveVMs"
        server = server_type()
        rspec.response.monitoring.action[0].server = server
        server.name = serverInfo.name
        server.id = serverInfo.id
        server.uuid = serverInfo.uuid
        for dom in vms:
            vm = virtual_machine_type()
            vm.uuid = dom[0]
            vm.name = dom[1]
            server.virtual_machines.append(vm)
        return XmlCrafter.craftXML(rspec)

    @staticmethod
    def sendAsyncMonitoringActionStatus(actionId, status, description):
        """Send an asynchronous monitoring status message to the
        current thread's callback URL."""
        callBackURL = threading.current_thread().callBackURL
        XmlRpcClient.logger.debug("Sending asynchronous " + status +
                                  " monitoring message to: " + callBackURL)
        server = xmlrpclib.Server(callBackURL)
        # Craft once instead of twice (log + send used to rebuild it).
        xml = XmlRpcClient.__craftMonitoringResponseXml(
            actionId, status, description)
        XmlRpcClient.logger.debug(xml)
        server.sendAsync(xml)
        XmlRpcClient.logger.debug("Sent (" + callBackURL + ")")

    @staticmethod
    def sendAsyncMonitoringActiveVMsInfo(actionId, status, vms, serverInfo):
        """Send the active-VMs inventory asynchronously to the
        current thread's callback URL."""
        callBackURL = threading.current_thread().callBackURL
        XmlRpcClient.logger.debug("Sending asynchronous " + status +
                                  " monitoring message to: " + callBackURL)
        server = xmlrpclib.Server(callBackURL)
        # Craft once instead of twice (log + send used to rebuild it).
        xml = XmlRpcClient.__craftMonitoringActiveVMsInfoResponseXml(
            actionId, status, vms, serverInfo)
        XmlRpcClient.logger.debug(xml)
        server.sendAsync(xml)
        XmlRpcClient.logger.debug("Sent (" + callBackURL + ")")
"""
OXA: Ofelia XEN Agent.

@author: msune
"""

# Standard library
import sys  # was missing: checkArgs() reads sys.argv

# Project imports
from monitoring.LibvirtMonitoring import LibvirtMonitor
from communications.XmlRpcServer import XmlRpcServer
from utils.ServiceThread import ServiceThread
from utils.XmlUtils import *
from utils.xml.vtRspecInterface import rspec
from settings.settingsLoader import OXA_LOG
from utils.Logger import Logger

logger = Logger.getLogger()


def usage():
    """Return the command-line usage message.

    TODO: restore the full text:
    "\\nOXA: Ofelia XEN Agent utility. This utility expects as an argument
    an XML query, and performs\\nXEN VM provisioning and monitoring
    operations. \\n\\n Usage:\\n \\t OXA.py xmlQuery"
    """
    return "TODO"  # TODO


def checkArgs():
    """Raise an Exception if more than one CLI argument was supplied."""
    if len(sys.argv) > 2:
        raise Exception("Illegal number of arguments\n\n" + usage())
class XenProvisioningDispatcher(ProvisioningDispatcher):
    """XEN implementation of the provisioning dispatcher.

    Every routine reports its outcome asynchronously via XmlRpcClient
    (SUCCESS or FAILED) and never propagates exceptions to the caller.
    """

    logger = Logger.getLogger()

    ##Inventory routines
    @staticmethod
    def createVMfromImage(id, vm):
        """Create a VM: clone its HD, mount it, configure the OS image,
        write the XEN config file, and unmount.

        On any failure, best-effort cleanup (umount + delete) is attempted
        and a FAILED notification is sent.
        """
        pathToMountPoint = ""
        XenProvisioningDispatcher.logger.info(
            "Initiating creation process for VM: " + vm.name +
            " under project: " + vm.project_id + " and slice: " + vm.slice_id)
        try:
            #Clone HD
            HdManager.clone(vm)
            XenProvisioningDispatcher.logger.debug("HD cloned successfully...")

            #Mount copy
            pathToMountPoint = HdManager.mount(vm)
            XenProvisioningDispatcher.logger.debug("Mounting at:" +
                                                   pathToMountPoint)
            XenProvisioningDispatcher.logger.debug(
                "HD mounted successfully...")

            #Configure VM OS
            VMConfigurator.configureVmDisk(vm, pathToMountPoint)

            #Synthesize config file
            VMConfigurator.createVmConfigurationFile(vm)
            XenProvisioningDispatcher.logger.debug(
                "XEN configuration file created successfully...")

            #Umount copy
            HdManager.umount(vm, pathToMountPoint)
            XenProvisioningDispatcher.logger.debug(
                "HD unmounted successfully...")

            XenProvisioningDispatcher.logger.info("Creation of VM " + vm.name +
                                                  " has been successful!!")
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            # Best-effort cleanup; each step may legitimately fail if the
            # corresponding creation step never ran. Narrowed from bare
            # except: so SystemExit/KeyboardInterrupt are not swallowed.
            try:
                HdManager.umount(vm, pathToMountPoint)
            except Exception:
                pass
            try:
                #Delete VM disc and conf file
                XenProvisioningDispatcher.deleteVM(id, vm)
            except Exception:
                pass
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return

    @staticmethod
    def modifyVM(id, vm):
        """Not implemented; VM modification is unsupported."""
        #Check existance of VM
        raise Exception("Not implemented")

    @staticmethod
    def deleteVM(id, vm):
        """Stop the domain if needed and delete its disk and config."""
        try:
            try:
                #if it wasn't stopped, do it
                XendManager.stopDomain(vm)
            except Exception as e:
                # Domain may already be stopped; deletion proceeds anyway.
                pass

            #Trigger Hd Deletion in Remote
            HdManager.delete(vm)

            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return

    ##Scheduling routines
    @staticmethod
    def startVM(id, vm):
        """Run the HD start hook and start the XEN domain."""
        try:
            #Trigger
            HdManager.startHook(vm)
            XendManager.startDomain(vm)
            XenProvisioningDispatcher.logger.info("VM named " + vm.name +
                                                  " has been started.")
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return

    @staticmethod
    def rebootVM(id, vm):
        """Run the HD reboot hook and reboot the XEN domain."""
        try:
            #Just try to reboot
            HdManager.rebootHook(vm)
            XendManager.rebootDomain(vm)
            XenProvisioningDispatcher.logger.info("VM named " + vm.name +
                                                  " has been rebooted.")
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return

    @staticmethod
    def stopVM(id, vm):
        """Gracefully stop the XEN domain."""
        try:
            #Just try to stop
            XendManager.stopDomain(vm)
            XenProvisioningDispatcher.logger.info("VM named " + vm.name +
                                                  " has been stopped.")
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return

    @staticmethod
    def hardStopVM(id, vm):
        """Stop the XEN domain, then run the HD stop hook."""
        try:
            #First stop domain
            XendManager.stopDomain(vm)
            HdManager.stopHook(vm)
            XenProvisioningDispatcher.logger.info("VM named " + vm.name +
                                                  " has been stopped.")
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(id, "SUCCESS", "")
        except Exception as e:
            XenProvisioningDispatcher.logger.error(str(e))
            #Send async notification
            XmlRpcClient.sendAsyncProvisioningActionStatus(
                id, "FAILED", str(e))
            return