def enqueue(detail): ''' add new task to queue detail = a dictionary of {'command', 'each parameter'} return taskID if success return None if error ''' try: form = static.commandToDetail[detail['command']] except: print 'Error: no command found' return None opcode = form['opcode'] for parameter in form['param']: if parameter not in detail.keys(): print 'Error: parameter not found -> ' + parameter return None if form['isMission']: isMission = 1 else: isMission = 0 try: infoHost = cacheFile.getDatabaseIP() db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME) cursor = db.cursor() cursor.execute( ''' INSERT INTO `tasks` (`opcode`, `isMission`, `detail`, `createTimestamp`) VALUES ( %(opcode)s, %(isMission)s, '%(detail)s', NOW()); ''' % { 'opcode': str(opcode), 'isMission': str(isMission), 'detail': json.dumps(detail) }) taskID = cursor.lastrowid db.close() connection.socketCall('localhost', setting.WORKER_PORT, 'start_work', ['{socket_connection}']) return taskID except: print 'have someting wrong with database' return None
def broadcastNewMasterInformationServer(masterHostIP, masterHostMAC=None): ''' can be call at any host ''' #myIP=network.getMyIPAddr() print "masterHostIP:", masterHostIP db = MySQLdb.connect(masterHostIP, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME) cursor = db.cursor() if masterHostMAC == None: cursor.execute( "SELECT `MACAddress` FROM `hosts` WHERE `IPAddress`='%s'" % (str(masterHostIP))) masterHostMAC = cursor.fetchone()[0] cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `status`=1 ;") activeHost = cursor.fetchall() db.close() dataString = json.dumps({ 'masterDB': str(masterHostIP), 'masterDB_MAC': str(masterHostMAC), }) for host in activeHost: result = connection.socketCall(host[0], setting.LOCAL_PORT, 'update_cloud_info', ['{socket_connection}', dataString]) return True
def broadcastNewSlaveInformationServer( slaveHostIP, masterHostIP=None): #if leave slaveHostIP='-' means slave was delete ''' can be call at any host tell every active host to keep slave ip in file ''' if masterHostIP == None: masterHostIP = cacheFile.getDatabaseIP() print "slaveHostIP:", slaveHostIP db = MySQLdb.connect(masterHostIP, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME) cursor = db.cursor() cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `status`=1 ;") activeHost = cursor.fetchall() db.close() dataString = json.dumps({'slaveDB': str(slaveHostIP)}) for host in activeHost: result = connection.socketCall(host[0], setting.LOCAL_PORT, 'update_cloud_info', ['{socket_connection}', dataString]) return True
def migrate(targetHostIP=None): ''' only move 2 file from oldCA to newCA and update database ''' infoHost = cacheFile.getDatabaseIP() db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME) cursor = db.cursor() cursor.execute('''SELECT `IPAddress` FROM `hosts` WHERE `isCA`=1''') oldCAData = cursor.fetchone() db.close() if oldCAData == None: print 'Old CA not found' return False oldCAIP = oldCAData[0] result = connection.socketCall(oldCAIP, setting.LOCAL_PORT, "clone_ca", [targetHostIP, 'migrate']) if result != "OK": return False return True
def clone_ca(argv):
    '''
    Copy this host's CA key and certificate to the next CA host.
    Only the real CA may run this (source side of a CA migration).

    argv[0] -- target host IP
    argv[1] -- mode: 'migrate' (hand over the CA role) or 'makeSlave'
    Returns the socketCall result from the target host.
    '''
    targetHostIP = argv[0]
    mode = argv[1]
    # read both CA files, closing the handles deterministically
    # (the original left them to the garbage collector)
    with open(setting.CA_PATH + 'cakey.pem', 'r') as keyFile:
        cakeyString = keyFile.read()
    with open(setting.CA_PATH + 'cacert.pem', 'r') as certFile:
        cacertString = certFile.read()
    result = connection.socketCall(targetHostIP, setting.LOCAL_PORT,
                                   "you_are_next_ca",
                                   [cakeyString, cacertString, mode])
    if mode == 'migrate':
        infoHost = cacheFile.getDatabaseIP()
        db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                             setting.DB_PASSWORD, setting.DB_NAME)
        try:
            cursor = db.cursor()
            # bound parameter: drop the CA flag from this (old) host
            cursor.execute(
                "UPDATE `hosts` SET `isCA`=0 WHERE `IPAddress`=%s;",
                (str(network.getMyIPAddr()),))
        finally:
            db.close()
    elif mode == 'makeSlave':
        # nothing extra to do on the source side for a slave clone
        pass
    return result
def index(self, guestID):
    # Web handler: send a reboot signal to a running, idle guest.
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # find hostIPAddress, UUID; guestID comes from the URL, so bind it
    # as a parameter instead of string-formatting it into the SQL
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    db.close()
    if targetData is None:
        return shortcut.response(
            'error', '',
            'Invalid guestID or Machine was closed or doing activity')
    hostIP = targetData[0]
    UUID = targetData[1]
    result = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                   'guest_send_reboot_signal', [UUID])
    if result != "OK":
        return shortcut.response('error', '', result)
    #no content
    return shortcut.response('success', '',
                             'Reboot signal was sending to the guest.')
def template_remove(taskID, detail):
    '''
    Worker task: remove a template file from the storage holder.
    Refuses if any guest still references the template.
    Returns "OK" or an error message string.
    '''
    #get parameter
    templateID = detail['templateID']
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # bound parameter instead of string-formatted SQL
    cursor.execute("SELECT `guestID` FROM `guests` WHERE `templateID`=%s",
                   (str(templateID),))
    if cursor.fetchone() is not None:
        # original leaked the connection on this early return
        db.close()
        return 'There are guests that use this template'
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `isStorageHolder`=1")
    storageHolder = cursor.fetchone()[0]
    db.close()
    result = connection.socketCall(str(storageHolder), setting.LOCAL_PORT,
                                   'template_remove', [str(templateID)])
    if result != 'OK':
        return result
    content = ''
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
def guest_shutoff(taskID, detail):
    '''
    Worker task: force-off a running, idle guest.
    Returns "OK" or an error message string.
    '''
    #get parameter
    guestID = detail['guestID']
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # find hostIPAddress, UUID -- guestID bound as parameter instead of
    # being string-formatted into the SQL
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    db.close()
    if targetData is None:
        return 'Invalid guestID or cannot force-off in this status'
    hostIP = targetData[0]
    UUID = targetData[1]
    result = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                   'guest_force_off', [str(guestID), UUID])
    if result != 'OK':
        return result
    content = ""
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
def do_work(self, i):  #real work from self.workList[i]
    # Execute one scheduled work item. Work tuple layout as used below:
    # work[1] = command name ('start' / 'stop' / anything else),
    # work[2] = target host ID (may be None); work[0] and work[3] are
    # forwarded opaquely to 'set_work' -- TODO confirm against the producer.
    print self.ip, "%.2f" % (getCurrentTime() - self.zeroPoint), self.workList[i]
    work = self.workList[i]
    if work[1] == 'start':
        # start the guest through the global controller's web API
        globalIP = cacheFile.getGlobalControllerIP()
        # NOTE(review): `fixHost` is not defined in this method -- presumably
        # a module-level flag; confirm it exists where this class lives.
        if fixHost and work[2] != None:
            result = requestTo(
                globalIP, setting.API_PORT,
                '/guest/startWithIP?guestIP=%s&targetHostID=%s' %
                (str(self.ip), work[2]))
        else:
            result = requestTo(
                globalIP, setting.API_PORT,
                '/guest/startWithIP?guestIP=%s' % (str(self.ip)))
    elif work[1] == 'stop':
        # force-off through the global controller's web API
        globalIP = cacheFile.getGlobalControllerIP()
        result = requestTo(
            globalIP, setting.API_PORT,
            '/guest/forceOffWithIP?guestIP=%s' % (str(self.ip)))
    else:
        #result=connection.socketCall(self.ip,setting.LOCAL_PORT,work[1])
        # any other command goes straight to the host's local service
        result = connection.socketCall(
            self.ip, setting.LOCAL_PORT, 'set_work', [
                json.dumps([work[1], work[3], work[0], work[2]]),
                json.dumps([]), '{socket_connection}'
            ])
    # bookkeeping: count every attempt, and separately count failures
    self.totalWork += 1
    if result == 'OK':
        pass
    else:
        #error
        self.errorWork += 1
def check_nfs_migrate_destination_area(argv): ''' for nfs migration destination only argv[0] is json of list of [filename(path),size] ''' #check required additional area on this host allFileData = json.loads(argv[0]) requireMore = 0 for fileData in allFileData: try: oldFileSize = os.path.getsize(fileData[0]) except os.error: oldFileSize = 0 requireMore += (fileData[1] - oldFileSize) #check the free area from monitoring service myIP = str(network.getMyIPAddr()) data = connection.socketCall("localhost", setting.MONITOR_PORT, "get_current_storage_info", [myIP]) print "##data from z##", data data = json.loads(data) if len(data) == 0: return "data is not ready" else: data = data[0] free = data['storage_info']['free'] * 1024 if free < requireMore: return "not enough free space on target host" else: return "OK"
def ask_your_spec(argv):
    '''
    Answer this host's hardware spec by asking the local monitoring service.
    (Used during host init, so it must stay simple.)
    '''
    # the unused network.getMyIPAddr() lookup was removed -- its result was
    # never referenced
    specData = connection.socketCall("localhost", setting.MONITOR_PORT,
                                     "get_my_spec", [])
    return specData
def index(self, hostID=None):
    # Web handler: report current memory info for one host (hostID given)
    # or for all hosts (hostID None).
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    if hostID is None:
        cursor.execute("SELECT `hostID`, `IPAddress` FROM `hosts`;")
    else:
        # hostID comes from the URL -> bind it as a parameter; this also
        # fixes the missing space before WHERE in the concatenated original
        cursor.execute(
            "SELECT `hostID`, `IPAddress` FROM `hosts` WHERE `hostID`=%s;",
            (str(hostID),))
    table = cursor.fetchall()
    if len(table) == 0:
        # original leaked the connection on this early return
        db.close()
        return shortcut.response('error', '', 'hostID not found')
    if hostID is None:
        argv = []
    else:
        argv = [str(table[0][1])]  #IPAddr
    data = connection.socketCall("localhost", setting.MONITOR_PORT,
                                 "get_current_memory_info", argv)
    data = json.loads(data)
    # close the file handle deterministically
    with open(setting.MAIN_PATH +
              'webapi/template/host_getCurrentMemoryInfo.xml') as tmplFile:
        templateString = tmplFile.read()
    result = ''
    for row in table:
        hostDict = {'hostID': str(row[0]), 'IP': str(row[1])}
        #set default value in case of no data
        hostDict['memTotal'] = "-"
        hostDict['memTotalUnit'] = "-"
        hostDict['memFree'] = "-"
        hostDict['memFreeUnit'] = "-"
        for element in data:
            if element['IP'] == hostDict['IP']:
                hostDict['memTotal'] = element['memory_info']['MemTotal'][0]
                hostDict['memTotalUnit'] = element['memory_info']['MemTotal'][1]
                hostDict['memFree'] = element['memory_info']['MemFree'][0]
                hostDict['memFreeUnit'] = element['memory_info']['MemFree'][1]
                break
        result += templateString % hostDict
    db.close()
    return shortcut.response('success', result)
def makeSlave(targetHostIP, masterHostIP=None):
    '''
    Ask the master DB host to create a replication slave on targetHostIP.
    Returns True on success, False otherwise.
    '''
    if masterHostIP is None:
        masterHostIP = cacheFile.getDatabaseIP()
    reply = connection.socketCall(masterHostIP, setting.LOCAL_PORT,
                                  'create_your_slave_db', [str(targetHostIP)])
    return reply == 'OK'
def index(self, fileName, OS, description, minimumMemory, maximumMemory):
    # Web handler: register a template record for an image file that
    # already exists on the storage holder.
    #number format checking
    try:
        if int(minimumMemory) < 0 or int(maximumMemory) < int(minimumMemory):
            return shortcut.response('error', '', 'maximumMemory must larger or equal to minimumMemory')
    except (ValueError, TypeError):
        return shortcut.response('error', '', 'memory must be integer')
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    #search for fileName (must ask to the nfs server)
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `isStorageHolder`=1")
    storageHolderIP = cursor.fetchone()[0]
    result = connection.socketCall(str(storageHolderIP), setting.LOCAL_PORT,
                                   'get_file_size',
                                   [setting.TEMPLATE_PATH + fileName])
    try:
        fileSize = int(json.loads(result)[0])
        if fileSize <= 0:
            db.close()  # original leaked the connection on early returns
            return shortcut.response('error', '', 'file not found')
    except (ValueError, TypeError, IndexError):
        db.close()
        return shortcut.response('error', '', result)
    #check replication of fileName -- bound parameter: fileName is user input
    cursor.execute("SELECT `fileName` FROM `templates` WHERE `fileName`=%s",
                   (fileName,))
    if cursor.fetchone() is not None:
        db.close()
        return shortcut.response('error', '', 'fileName was used by other template')
    if not fileName.endswith('.img'):
        db.close()
        return shortcut.response('error', '', 'please set fileName in pattern of something.img format')
    # fully parameterized insert (the original escaped only some fields)
    cursor.execute(
        '''INSERT INTO `templates`
           (`fileName`, `OS`, `description`, `minimumMemory`,
            `maximumMemory`, `size`, `activity`)
           VALUES (%s, %s, %s, %s, %s, %s, %s);''',
        (fileName, OS, description, minimumMemory, maximumMemory,
         fileSize, 0))
    templateID = cursor.lastrowid
    db.close()
    content = '<template templateID="%s" />' % (str(templateID))
    return shortcut.response('success', content)
def create_your_slave_db(argv): ''' dest = master only and should be call after finish NFS system ''' targetHostIP = argv[0] db = MySQLdb.connect("localhost", "root", setting.DB_ROOT_PASSWORD) cursor = db.cursor() cursor.execute("FLUSH TABLES WITH READ LOCK;") cursor.execute("SHOW MASTER STATUS;") data = cursor.fetchone() fileData = data[0] positionData = data[1] myIP = network.getMyIPAddr() result = subprocess.Popen( shlex.split("mysqldump %s -u root --password='******'" % (setting.DB_NAME, setting.DB_ROOT_PASSWORD)), stdout=subprocess.PIPE) #result.wait() output = result.communicate()[0] #print "before write file" #dumpFile=open(setting.DB_DUMP_FILE,'w') #dumpFile.write(output) #dumpFile.close() result = connection.socketCall(targetHostIP, setting.LOCAL_PORT, 'you_are_slave_db', [ '{socket_connection}', str(myIP), fileData, str(positionData), output ]) print "~~~", result cursor.execute("UNLOCK TABLES;") db.close() if result == 'OK': #this is the first transaction that will be replicated to slave automaticaly db = MySQLdb.connect("localhost", "root", setting.DB_ROOT_PASSWORD, setting.DB_NAME) cursor = db.cursor() cursor.execute( "UPDATE `hosts` SET `isInformationServer`=2 WHERE `IPAddress`='%s'" % (str(targetHostIP))) db.close() #tell slave ip to every host (it will be used when master down) dbController.broadcastNewSlaveInformationServer( slaveHostIP=targetHostIP, masterHostIP=str(myIP)) return result
def migrate(targetHostIP):
    '''
    Ask the information server to migrate its database to targetHostIP.
    This method can be called at any host; returns True on success.
    '''
    infoHost = cacheFile.getDatabaseIP()
    reply = connection.socketCall(infoHost, setting.LOCAL_PORT,
                                  'migrate_database_to', [str(targetHostIP)])
    return reply == 'OK'
def guest_scale_memory(taskID, detail):
    '''
    Worker task: change a guest's memory allocation.
    Persists the new size in the database first, then asks the hosting
    node to apply it live; if the guest is not running the change takes
    effect on next boot.
    '''
    #get parameter
    guestID = detail['guestID']
    memory = detail['memory']
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    #update memory to database (bound parameters; the original comment
    #said "vCPU" -- a copy-paste leftover from the vCPU variant)
    try:
        cursor.execute("UPDATE `guests` SET `memory`=%s WHERE `guestID`=%s",
                       (int(memory), guestID))
    except (ValueError, TypeError, MySQLdb.Error):
        db.close()
        return 'guestID not found or memory parameter is invalid'
    #find hostIPAddress, UUID
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    db.close()
    if targetData is None:
        # guest is shut off (0) or saved (2): the new size applies on boot.
        # BUG FIX: the message said "vCPU" in this memory-scaling task.
        shortcut.storeFinishMessage(
            taskID, "This memory will not scale until you reboot your guest.")
        return 'OK'
    hostIP = targetData[0]
    UUID = targetData[1]
    result = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                   'guest_scale_memory', [UUID, memory])
    if result != 'OK':
        shortcut.storeFinishMessage(
            taskID, "This memory will not scale until you reboot your guest.")
        return 'OK'  #i intend to do this (not a bug)
    content = ""
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
def index(self, guestID):
    # Web handler: report current disk I/O rates and totals for a
    # running, idle guest.
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # find hostIPAddress, UUID; guestID comes from the URL -> bound param
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    db.close()
    if targetData is None:
        return shortcut.response(
            'error', '', 'Invalid guestID or cannot suspend in this status')
    hostIP = targetData[0]
    UUID = targetData[1]
    result = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                   'guest_get_current_info', [UUID, 'io'])
    try:
        result = json.loads(result)
    except (ValueError, TypeError):
        # not JSON: pass a textual error straight through, otherwise
        # assume a transport problem
        if isinstance(result, str):
            return shortcut.response('error', '', result)
        return shortcut.response('error', '', 'May be network error')
    rx = result['ioInfo']['rxRate']
    wx = result['ioInfo']['wxRate']
    sumRx = result['ioInfo']['rxUsed']
    sumWx = result['ioInfo']['wxUsed']
    content = '''
    <guest guestID="%s">
    <rx>%s</rx>
    <wx>%s</wx>
    <sumRx>%s</sumRx>
    <sumWx>%s</sumWx>
    </guest>
    ''' % (str(guestID), str(rx), str(wx), str(sumRx), str(sumWx))
    return shortcut.response('success', content)
def guest_migrate(taskID, detail):
    '''
    Worker task: migrate a running, idle guest to another active host.
    Returns "OK" or an error message string.
    '''
    #get parameter
    guestID = detail['guestID']
    targetHostID = detail['targetHostID']
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # find current hostIPAddress and guest UUID (bound parameter)
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    if targetData is None:
        db.close()  # original leaked the connection on this early return
        return 'Invalid guestID or cannot migrate in this status'
    currentHostIP = targetData[0]
    currentUUID = targetData[1]
    #find targetHostIP *** do not push isHost to condition ***
    #(let it be in guest migration only)
    cursor.execute('''SELECT `IPAddress` FROM `hosts`
                      WHERE `status`=1 AND `activity`=0 AND `hostID`=%s''',
                   (targetHostID,))
    targetData = cursor.fetchone()
    if targetData is None:
        db.close()  # original leaked the connection here as well
        return 'Invalid targetHostID or targetHost is not running'
    targetHostIP = targetData[0]
    db.close()
    result = connection.socketCall(
        currentHostIP, setting.LOCAL_PORT, 'guest_migrate',
        [str(guestID), currentUUID, currentHostIP, targetHostIP,
         targetHostID])
    if result != 'OK':
        return result
    content = '<guest guestID="%s" />\n' % (str(guestID))
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
def promote():
    '''
    Turn the known slave DB into the master.
    Must be able to run while the real master is unreachable.
    Returns True on success, False otherwise.
    '''
    slaveHostIP = cacheFile.getSlaveDatabaseIP()
    if slaveHostIP is None:
        return False
    reply = connection.socketCall(slaveHostIP, setting.LOCAL_PORT,
                                  'turn_slave_to_master_db',
                                  ['{socket_connection}'])
    return reply == 'OK'
def you_are_next_global_controller(argv):
    '''
    Promote this host to be the global controller and DHCP server.

    argv[0] -- (conn, s): the caller's open socket pair, forwarded to helpers
               that must keep the connection alive across daemon restarts
    argv[1] -- mode: 'migrate' means an old global controller must be stopped
    argv[2] -- content for the web-API whitelist file
    Returns "OK".
    '''
    conn = argv[0][0]
    s = argv[0][1]
    mode = argv[1]
    whitelistString = argv[2]
    #config and start dhcp server
    dhcpInfo=dhcpController.getDHCPInfoFromDatabase()
    dhcpController.configAll(dhcpInfo['networkID'],dhcpInfo['subnetMask'],dhcpInfo['defaultRoute'],dhcpInfo['dns'],dhcpInfo['hostBindings'],conn,s)
    #network.configToStaticFromCacheFile() #(conn,s) #new system no need to do this
    #generate whitelist file
    aFile=open(setting.API_WHITELIST_FILE,'w')
    aFile.write(whitelistString)
    aFile.close()
    #start global controller (mkapi and mkworker and [scheduler])
    general.runDaemonCommand(command="service mkapi start",conn=conn,sock=s,pipe=True) #can be True in log system
    #general.runDaemonCommand("service mkworker start debug",conn,s)
    infoHost=cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME )
    cursor = db.cursor()
    if mode=='migrate':
        #must tell old GlobalController to stop service
        cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `isGlobalController`=1;")
        hostData=cursor.fetchone()
        if hostData!=None:
            result=connection.socketCall(hostData[0],setting.LOCAL_PORT,"close_global_controller_and_dhcp_server",['{socket_connection}'])
            if result!='OK':
                print 'close_global_controller_and_dhcp_server was not complete.(can leave it, no problem)'
    # NOTE(review): the IP is string-formatted into SQL here; it comes from
    # network.getMyIPAddr() so injection is unlikely, but a bound parameter
    # would be safer and consistent with good practice.
    cursor.execute("UPDATE `hosts` SET `isGlobalController`=1 WHERE `IPAddress`='%s';"%(str(network.getMyIPAddr())))
    db.close()
    return "OK"
def index(self):
    # Web handler: report storage usage of the storage-holder host.
    database = MySQLdb.connect(cacheFile.getDatabaseIP(), setting.DB_USERNAME,
                               setting.DB_PASSWORD, setting.DB_NAME)
    cur = database.cursor()
    cur.execute('''SELECT `hostID`, `IPAddress` FROM `hosts`
                   WHERE `isStorageHolder`=1''')
    hostData = cur.fetchone()
    hostID = hostData[0]
    hostIP = hostData[1]
    raw = connection.socketCall("localhost", setting.MONITOR_PORT,
                                "get_current_storage_info", [hostIP])
    info = json.loads(raw)
    if len(info) == 0:
        # monitoring has not produced data yet -- report placeholders
        errorMessage = "Data is not ready"
        capacity = errorMessage
        free = errorMessage
        image_usage = errorMessage
        maekin_usage = errorMessage
    else:
        storage = info[0]['storage_info']
        capacity = storage['capacity']
        free = storage['free']
        image_usage = storage['image_usage']
        maekin_usage = storage['maekin_usage']
    content = '''
    <capacity>%s</capacity>
    <maekinUsage>%s</maekinUsage>
    <imageUsage>%s</imageUsage>
    <free>%s</free>
    ''' % (str(capacity), str(maekin_usage), str(image_usage), str(free))
    database.close()
    return shortcut.response('success', content)
def guest_resume(taskID, detail):
    '''
    Worker task: resume a suspended guest on its current host.
    Returns "OK" or an error message string.
    '''
    #get parameter
    guestID = detail['guestID']
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME,
                         setting.DB_PASSWORD, setting.DB_NAME)
    cursor = db.cursor()
    # find hostIPAddress, UUID (bound parameter instead of string format)
    cursor.execute('''SELECT `hosts`.`IPAddress` , `lastUUID` FROM `hosts`
                      INNER JOIN `guests`
                      ON `hosts`.`hostID`=`guests`.`lastHostID`
                      AND `guests`.`status`=1 AND `guests`.`activity`=0
                      AND `guests`.`guestID`=%s;''', (guestID,))
    targetData = cursor.fetchone()
    db.close()
    if targetData is None:
        return 'Invalid guestID or cannot resume in this status'
    hostIP = targetData[0]
    UUID = targetData[1]
    runningState = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                         'guest_resume', [UUID])
    # the host answers with a numeric run state 0..6 (presumably the
    # hypervisor's state enum -- TODO confirm); anything else is an error
    try:
        if not 0 <= int(runningState) <= 6:
            return 'Something go wrong!!!'
    except (ValueError, TypeError):
        return str(runningState)
    content = "<runningState>%s</runningState>\n" % (str(runningState))
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
def moveGlobalService(mode,targetHostIP=None):
    '''
    Move the global-controller role to another host.
    If targetHostIP is None a host is picked (weighted random) from the
    active, non-controller hosts; otherwise the given host is used.
    Returns True on success, False on any failure.
    '''
    if targetHostIP!=None:
        condition="AND `IPAddress`='%s'"%(str(targetHostIP))
    else:
        condition=''
    infoHost=cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME )
    cursor = db.cursor()
    # NOTE(review): early returns below leave `db` open; consider closing it
    # before each `return False`.
    cursor.execute("SELECT `hostID`, `IPAddress` FROM `hosts` WHERE `isGlobalController`=0 AND `status`=1 %s"%(condition))
    candidates=cursor.fetchall()
    if len(candidates)==0:
        print 'host to be promoted to globalController not found'
        return False
    # pick one candidate hostID with a weighted random choice
    tmpHostList=[]
    for element in candidates:
        tmpHostList.append(element[0])
    targetHostID=planTool.weightRandom(tmpHostList)
    # map the chosen hostID back to its IP address
    for element in candidates:
        if element[0]==targetHostID:
            targetHostIP=element[1]
            break
    # the new controller needs the current API whitelist contents
    aFile=open(setting.API_WHITELIST_FILE,'r')
    whitelistString=aFile.read()
    aFile.close()
    result=connection.socketCall(targetHostIP,setting.LOCAL_PORT,"you_are_next_global_controller",['{socket_connection}',mode,whitelistString])
    if result!="OK":
        return False
    #tell monitoring service
    connection.socketCall("localhost",setting.MONITOR_PORT,'hello_monitoring_service', [str(targetHostIP),str(setting.API_PORT), str(setting.LOCAL_PORT)])
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    #!!!!!!tell management tools!!!!!!! (must talk with k2w2)
    #!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    #broadcast to every active host
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `status`=1")
    activeHosts=cursor.fetchall()
    db.close()
    dataString=json.dumps({ 'globalController':str(targetHostIP) })
    for host in activeHosts:
        result=connection.socketCall(host[0], setting.LOCAL_PORT, 'update_cloud_info', ['{socket_connection}',dataString])
        if result!='OK':
            # best-effort broadcast: log and keep going
            print "connection problem, cannot update_cloud_info to",host[0]
    return True
def run ( self ):
    '''
    Disaster-recovery thread: runs when the socket to the old global
    controller closes. Promotes this host to global controller, repairs
    the database/CA roles, and restarts cloud services.
    '''
    # block until the watchdog signals that the controller link is gone
    closeSocketEvent.wait()
    network.configToStaticFromCacheFile() #(conn,s) #runServer may be error
    currentDict=cacheFile.getCurrentDict()
    if currentDict['globalController']==currentDict['masterDB']:
        #masterDB is down
        if currentDict['slaveDB']==None:
            print "cloud is stopping service, cannot restore(no slave db)"
            return
        else:
            #try to connect to slave db
            while True:
                result=connection.socketCall(currentDict['slaveDB'], setting.LOCAL_PORT, 'hello')
                if result=='OK':
                    break
                time.sleep(2)
            #must promote the slave db up
            print "wait 10 sec"
            time.sleep(10) #wait for every host to repair own ip
            dbService.promote()
            currentDict=cacheFile.getCurrentDict() #get new masterDB
    else:
        #try to connect to master db
        while True:
            result=connection.socketCall(currentDict['masterDB'], setting.LOCAL_PORT, 'hello')
            if result=='OK':
                break
            time.sleep(2)
    infoHost=currentDict['masterDB']
    #lock dequeuing by global_lock
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME )
    cursor = db.cursor()
    cursor.execute("UPDATE `cloud_variables` SET `value`='1' WHERE `key`='global_lock'") #don't forget to open when finish everything
    db.close()
    #config and start dhcp server
    dhcpInfo=dhcpController.getDHCPInfoFromDatabase()
    # NOTE(review): `conn` and `s` are not defined anywhere in this method --
    # this call will raise NameError when reached; confirm where they were
    # meant to come from (same for the runDaemonCommand call below).
    dhcpController.configAll(dhcpInfo['networkID'],dhcpInfo['subnetMask'],dhcpInfo['defaultRoute'],dhcpInfo['dns'],dhcpInfo['hostBindings'],conn,s)
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME )
    cursor = db.cursor()
    #must repair CA here
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `isCA`=1")
    caIP=cursor.fetchone()[0]
    if caIP==currentDict['masterDB']:
        #CA is down
        if caService.promote()==False:
            print "cannot promote CA, ending"
            return
    #update database
    # NOTE(review): the %s is interpolated unquoted into these statements, so
    # the resulting SQL is `IPAddress`=1.2.3.4 (no quotes) -- likely a latent
    # bug; bound parameters would fix it. Also note the `inInformationServer`
    # column name here vs `isInformationServer` elsewhere -- confirm schema.
    cursor.execute("UPDATE `hosts` SET `status`=0, `isGlobalController`=0, `inInformationServer`=0, `isCA`=0 WHERE `IPAddress`=%s"%(currentDict['globalController']))
    cursor.execute("UPDATE `hosts` SET `isGlobalController`=1 WHERE `IPAddress`=%s"%(currentDict['myLastIP']))
    #broadcast new global controller
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `status`=1")
    activeHosts=cursor.fetchall()
    dataString=json.dumps({ 'globalController':str(currentDict['myLastIP']) })
    for host in activeHosts:
        #every host should be static as it can, in new system
        #if host[0]==currentDict['myLastIP']:
        #    option=[]
        #else:
        #    option=['dynamic']
        # NOTE(review): `option` is only assigned in the commented-out code
        # above, so this line raises NameError -- restore the assignment or
        # drop `+option`.
        result=connection.socketCall(host[0], setting.LOCAL_PORT, 'update_cloud_info', ['{socket_connection}',dataString,'planner']+option)
        if result!='OK':
            print "connection problem, cannot update_cloud_info to",host[0]
    general.runDaemonCommand("service mkapi start",conn,s,True) #can be true in log system
    #fix queue
    cursor.execute("SELECT `taskID`, `processID` FROM `tasks` WHERE `status`=1")
    tmpData=cursor.fetchall()
    for element in tmpData:
        queue.propagateError(element[0])
    #next is check and repair host and guests
    ha.recover()
    cursor.execute("UPDATE `cloud_variables` SET `value`='0' WHERE `key`='global_lock'") #unlock global
    db.close()
    #tell queue to do next work
    connection.socketCall("127.0.0.1",setting.WORKER_PORT,'start_work',['{socket_connection}'])
    return
def nfs_migrate(taskID, detail):
    '''
    migrate nfs server
    *** this method should be fixed if you want to use ***
    *** I'm sure that now it's full of error ***

    Worker task: move the NFS storage-holder role to detail['targetHostID'].
    Requires a fully quiet cloud (no running guests, no host activity, no
    other working tasks). Returns "OK" or an error message string.
    '''
    #no need of global_lock because this method use -1 as very big lock
    #get parameter
    targetHostID = detail['targetHostID']
    #start work
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD,
                         setting.DB_NAME)
    cursor = db.cursor()
    #cursor.execute("UPDATE `cloud_variables` SET `value`='1' WHERE `key`='global_lock'")
    #check that no guest is running or having activity
    cursor.execute(
        "SELECT `guestID` FROM `guests` WHERE `activity`<>0 OR `status`=1")
    if cursor.fetchone() != None:
        db.close()
        return 'All guests must not be running or doing any activity'
    #check that no host have activity
    cursor.execute("SELECT `hostID` FROM `hosts` WHERE `activity`<>0")
    if cursor.fetchone() != None:
        db.close()
        return 'All hosts must not be doing any activity'
    #check that no working task in queue (except me)
    cursor.execute("SELECT `taskID` FROM `tasks` WHERE `status`=1")
    while True:
        tmp = cursor.fetchone()
        if tmp == None:
            break
        elif tmp[0] == taskID:
            # skip this task's own row
            continue
        else:
            db.close()
            return 'All tasks must finish before migrate nfs server'
    #check that targetHostID is ON
    # NOTE(review): targetHostID is string-formatted into the SQL; a bound
    # parameter would be safer.
    cursor.execute(
        "SELECT `status`, `IPAddress` FROM `hosts` WHERE `hostID`=%s" %
        (targetHostID))
    tmp = cursor.fetchone()
    if tmp == None:
        db.close()
        return 'target host not found'
    elif tmp[0] != 1:
        db.close()
        return 'target host is shutedoff'
    targetHostIP = tmp[1]
    #check ip of old nfs server
    cursor.execute(
        "SELECT `status`, `IPAddress` FROM `hosts` WHERE `isStorageHolder`=1")
    tmp = cursor.fetchone()
    if tmp == None:
        db.close()
        return 'nfs host not found???'
    elif tmp[0] != 1:
        db.close()
        return 'nfs host is shutedoff???'
    nfsHostIP = tmp[1]
    #list all file to copy ( template, .img , .sav )
    filenameList = []
    #template
    cursor.execute("SELECT `fileName` FROM `templates`")
    templatePathList = []
    while True:
        tmp = cursor.fetchone()
        if tmp != None:
            templatePathList.append(setting.TEMPLATE_PATH + tmp[0])
        else:
            break
    # ask the old NFS server for the size of every template file
    result = connection.socketCall(str(nfsHostIP), setting.LOCAL_PORT,
                                   'get_file_size', templatePathList)
    try:
        sizeList = json.loads(result)
    except:
        # NOTE(review): bare except and the early returns from here on leave
        # `db` open -- consider try/finally around the whole body.
        return result
    for i in range(len(templatePathList)):
        if sizeList[i] <= 0:
            return "file " + templatePathList[i] + " have problem"
        filenameList.append([templatePathList[i], sizeList[i]])
    #guest image
    cursor.execute("SELECT `volumeFileName` FROM `guests`")
    while True:
        tmp = cursor.fetchone()
        if tmp != None:
            filenameList.append([
                setting.IMAGE_PATH + tmp[0],
                os.path.getsize(setting.TARGET_IMAGE_PATH + tmp[0])
            ])
        else:
            break
    #saved image
    cursor.execute("SELECT `volumeFileName` FROM `guests` WHERE `status`=2")
    while True:
        tmp = cursor.fetchone()
        if tmp != None:
            savFilename = general.imgToSav(tmp[0])
            filenameList.append([
                setting.IMAGE_PATH + savFilename,
                os.path.getsize(setting.TARGET_IMAGE_PATH + savFilename)
            ])
        else:
            break
    #check that all of the file has coppied to destination
    result = connection.socketCall(
        targetHostIP, setting.LOCAL_PORT,
        'check_nfs_migrate_destination_has_coppied',
        [json.dumps(filenameList)])
    if result != 'OK':
        return result
    """
    #check rest area on disk weather it's enough or not
    result=connection.socketCall(targetHostIP,setting.LOCAL_PORT,'check_nfs_migrate_destination_area',[json.dumps(filenameList)])
    if result!='OK':
        return result
    #copying file via nfs (when each file finish i should put result in finish message)
    result=connection.socketCall(targetHostIP,setting.LOCAL_PORT,'transfer_all_file',[json.dumps(filenameList),str(taskID)])
    if result!='OK':
        return result
    """
    #setup config for new nfs host
    cursor.execute("SELECT `IPAddress` FROM `hosts`")
    hostIPList = []
    while True:
        tmp = cursor.fetchone()
        if tmp != None:
            hostIPList.append(tmp[0])
        else:
            break
    result = connection.socketCall(
        targetHostIP, setting.LOCAL_PORT, 'you_are_nfs_server',
        [json.dumps(hostIPList), "{socket_connection}"])
    if result != 'OK':
        return result
    #close old nfs server
    result = connection.socketCall(nfsHostIP, setting.LOCAL_PORT,
                                   'stop_nfs_server')
    if result != 'OK':
        return result
    #update storageHolder in database
    cursor.execute("UPDATE `hosts` SET `isStorageHolder`=0")
    cursor.execute("UPDATE `hosts` SET `isStorageHolder`=1 WHERE `hostID`=%s"
                   % (targetHostID))
    #umount and mount on every hosts (via update cloud info)
    cursor.execute("SELECT `IPAddress` FROM `hosts` WHERE `status`=1")
    activeHostIP = []
    while True:
        tmp = cursor.fetchone()
        if tmp != None:
            activeHostIP.append(tmp[0])
        else:
            break
    for hostIP in activeHostIP:
        result = connection.socketCall(hostIP, setting.LOCAL_PORT,
                                       'update_cloud_info',
                                       ['{socket_connection}', {}, 'nfs'])
        if result != 'OK':
            return result
    #unlock global lock
    #cursor.execute("UPDATE `cloud_variables` SET `value`='0' WHERE `key`='global_lock'")
    db.close()
    return "OK"
import subprocess, shlex from util import connection, network import setting startIP = "158.108.34.85" stopIP = "158.108.34.99" for product in range( network.IPAddr(startIP).getProduct(), network.IPAddr(stopIP).getProduct() + 1): currentIP = str(network.IPAddr(product)) errorFlag = False try: content = connection.socketCall(currentIP, setting.LOCAL_PORT, 'clean_shutdown') except: pass print currentIP, "shutdown"
# Restore-cloud bootstrap (Python 2 script fragment): wake the host that the
# local cache file records as the master database, then verify with that host
# that it really is the current master before talking to its database.
network.forceConfigToStaticFromCacheFile()
wakeList=[]
# Wake up the database host recorded in the local cache file.
dbIP=cacheFile.getDatabaseIP()
dbMAC=cacheFile.getDatabaseMAC()
myMAC=network.getMyMACAddr()
wakeList.append(myMAC)  # this host is obviously already awake
waker.wakeAndWait(dbMAC,dbIP)
if str(dbMAC) not in wakeList:
    wakeList.append(str(dbMAC))
# Now the candidate host is up -- ask it whether it is really the master DB.
result=connection.socketCall(dbIP,setting.LOCAL_PORT,'wake_up_database',['{socket_connection}'])
if result!='OK':
    # Not the real master database.
    if len(result)==17:
        # A 17-char reply is taken to be the MAC address of the host that the
        # remote side recommends instead -- wake it via wake-on-LAN.
        # NOTE(review): relies on MAC string length alone; verify callers.
        subprocess.Popen(shlex.split("ether-wake -i br0 %s"%(str(result))))
        print "You may see a host that just opened automatically, try again at that host"
    else:
        print "Sorry, data on this host is obsolete. Try again at another host."
    sys.exit()
print "Bingo, you found the appropiate host to restore cloud."
print "Please wait..."
# The master database is confirmed up -- open a connection for the
# restore steps that follow.
db = MySQLdb.connect(dbIP, setting.DB_USERNAME, setting.DB_PASSWORD, setting.DB_NAME )
cursor = db.cursor()
def template_create_from_guest(taskID, detail):
    '''
    Create a new template by duplicating an existing guest's disk image.
    detail = a dictionary with 'sourceGuestID' and 'description'
    Returns "OK" on success, otherwise a human-readable error string.
    '''
    # get parameters
    sourceGuestID = detail['sourceGuestID']
    description = detail['description']
    # start the work
    infoHost = cacheFile.getDatabaseIP()
    db = MySQLdb.connect(infoHost, setting.DB_USERNAME, setting.DB_PASSWORD,
                         setting.DB_NAME)
    cursor = db.cursor()
    # Fetch the source guest together with its template metadata.
    # Parameterized to avoid SQL injection (detail comes from the task queue).
    cursor.execute(
        """SELECT `templates`.`size`, `templates`.`minimumMemory`,
                  `templates`.`maximumMemory`, `templates`.`OS`,
                  `guests`.`volumeFileName`, `guests`.`status`,
                  `guests`.`activity`
           FROM `templates`
           INNER JOIN `guests`
               ON `templates`.`templateID`=`guests`.`templateID`
           WHERE `guests`.`guestID`=%s""", (sourceGuestID,))
    tmpData = cursor.fetchone()
    if tmpData == None:
        db.close()  # close on every early return (original leaked the handle)
        return 'sourceGuestID not found'
    if tmpData[5] != 0 or tmpData[6] != 0:
        db.close()
        return 'source guest is busy'
    imageSize = int(tmpData[0])  # unit is byte
    # find storageHolder
    cursor.execute(
        "SELECT `hostID`, `IPAddress` FROM `hosts` WHERE `isStorageHolder`=1")
    storageHolder = cursor.fetchone()
    # check storage on the holder (monitor reports free space in Kbyte)
    storageInfo = connection.socketCall('localhost', setting.MONITOR_PORT,
                                        'get_current_storage_info',
                                        [str(storageHolder[1])])
    storageData = json.loads(storageInfo)  # parse once, not twice
    if storageData == []:
        db.close()
        return "Data is not ready, please try again later."
    freeSpace = int(storageData[0]['storage_info']['free'])  # unit is Kbyte
    if imageSize > freeSpace * 1024:
        db.close()
        return "Do not have enough space for duplicate this image"
    # Pick a template file name that does not collide with existing ones.
    cursor.execute("SELECT `fileName` FROM `templates`")
    usedImageNames = [row[0] for row in cursor.fetchall()]
    # BUG FIX: the collision branch referenced an undefined name `guestName`;
    # derive every candidate from the source volume's base name instead.
    baseName = tmpData[4].split('.')[0] + '_template'
    volumeFileName = baseName + '.img'
    count = 2
    while volumeFileName in usedImageNames:
        volumeFileName = baseName + '(' + str(count) + ').img'
        count += 1
    cursor.execute(
        """INSERT INTO `templates`
           (`fileName`, `OS`, `size`, `description`, `minimumMemory`,
            `maximumMemory`, `activity`)
           VALUES (%s, %s, %s, %s, %s, %s, %s)""",
        (volumeFileName, tmpData[3], tmpData[0], description, tmpData[1],
         tmpData[2], 1))  # activity 1 = cloning
    templateID = cursor.lastrowid
    # mark the source guest busy (activity 6 = duplicating) while copying
    cursor.execute("UPDATE `guests` SET `activity`=6 WHERE `guestID`=%s",
                   (sourceGuestID,))
    result = connection.socketCall(
        str(storageHolder[1]), setting.LOCAL_PORT, 'template_create',
        [str(tmpData[4]), volumeFileName, str(templateID), str(taskID)])
    cursor.execute("UPDATE `guests` SET `activity`=0 WHERE `guestID`=%s",
                   (sourceGuestID,))
    db.close()
    if result != 'OK':
        return 'something error'
    content = '<template templateID="%s" />' % (str(templateID))
    if not shortcut.storeFinishMessage(taskID, content):
        return "cannot storeFinishMessage"
    return "OK"
import subprocess,shlex from util import connection,network import setting result = subprocess.Popen(shlex.split('''mkdir -p %s'''%(setting.TEST_LOG_PATH)), stdout=subprocess.PIPE) result.wait() startIP="158.108.34.85" stopIP="158.108.34.85" for product in range(network.IPAddr(startIP).getProduct(),network.IPAddr(stopIP).getProduct()+1): currentIP=str(network.IPAddr(product)) errorFlag=False try: content=connection.socketCall(currentIP,setting.LOCAL_PORT,'get_time_log_file') if content==None or content=="File not found": errorFlag=True except: errorFlag=True print currentIP, (not errorFlag) if errorFlag==False: aFile=open(setting.TEST_LOG_PATH+currentIP+'.log','w') aFile.write(content) aFile.close()