def run(self):
    """Register a placeholder work node in Zookeeper, start the module-level
    run loop in a daemon thread, then dump Celery worker inspection data.

    NOTE(review): ``target=run`` resolves to a module-level ``run`` function,
    not this method -- confirm that is intentional and not a recursion bug.
    """
    registerClientEvent()
    # Register this host as a work node with placeholder port/status values.
    work = Work()
    work.hostName = getHostName()
    work.port = "1024"
    work.status = "aa"
    self.kazooClient = ZookeeperClientHolder.getClient()
    self.store = ZookeeperStore()
    # Watch the jobs subtree so jobChildWatch fires on job changes.
    self.kazooClient.addChildListener("/cabbage/jobs", jobChildWatch)
    self.store.saveWork(work)
    # Run the server loop in the background; daemon so it dies with the process.
    self.t1 = threading.Thread(target=run)
    self.t1.setDaemon(True)
    self.t1.start()
    time.sleep(5)
    self.app = CabbageHolder.getCabbage().getApp()
    # clientDir = ConfigHolder.getConfig().getProperty(BASE,CLIENT_FILE_DIRECTORY)
    # if os.path.exists(clientDir) is False:
    #     os.mkdir(clientDir)
    time.sleep(10)
    # Give workers time to come up, then print inspection results (Python 2 print).
    i = self.app.control.inspect()
    result = i.stats()
    print result
    print i.registered_tasks()
    print self.app.events.State().workers.items()
def stop(self):
    """Stop the Celery worker, mark this host's work record as removed in
    the store, then halt the tornado IO loop."""
    CabbageControlHolder.getCabbageControl().stopCelery()
    removed = Work()
    removed.hostName = HOST_NAME
    removed.status = REMOVE
    self.status = removed.status
    with storeFactory.store() as store:
        store.updateWorkStatus(removed)
    tornado.ioloop.IOLoop.current().stop()
def test_status(self): work=Work() work.hostName=getHostName() workContorl = CeleryWorkContorl(work) workContorl.stopService() time.sleep(10) workContorl.startService() print workContorl.serviceIsAlive() time.sleep(20) workContorl.stop()
def getWork(self, hostName):
    """Load the Work record for *hostName* from Zookeeper.

    The required children (port, ip, status) are read unconditionally;
    the optional ones (broker server, service status, queues) default to
    None / [] when their node does not exist.
    """
    base = "/" + CABBAGE + "/" + WORKS + "/" + hostName

    def node(leaf):
        # Build the full path of a child node under this work entry.
        return base + "/" + leaf

    port = self.client.getData(node(PORT))
    ip = self.client.getData(node(IP))
    status = self.client.getData(node(STATUS))

    brokerServer = None
    if self.client.isExistPath(node(BROKER_SERVER)):
        brokerServer = self.client.getData(node(BROKER_SERVER))

    serviceStatus = None
    if self.client.isExistPath(node(SERVICE_STATUS)):
        serviceStatus = self.client.getData(node(SERVICE_STATUS))

    queues = []
    if self.client.isExistPath(node(QUEUES)):
        queues = self.client.getChildren(node(QUEUES))

    return Work(port=port, ip=ip, status=status, hostName=hostName,
                brokerServer=brokerServer, queues=queues,
                serviceStatus=serviceStatus)
def setUp(self):
    """Register a placeholder work node, launch the module-level run loop
    in a daemon thread, and capture the Celery app for the tests."""
    registerClientEvent()
    node = Work()
    node.hostName = getHostName()
    node.port = "1024"
    node.status = "aa"
    self.kazooClient = ZookeeperClientHolder.getClient()
    self.store = ZookeeperStore()
    self.kazooClient.addChildListener("/cabbage/jobs", jobChildWatch)
    self.store.saveWork(node)
    # Daemon thread so the background loop does not outlive the test run.
    self.t1 = threading.Thread(target=run)
    self.t1.setDaemon(True)
    self.t1.start()
    time.sleep(5)
    self.app = CabbageHolder.getCabbage().getApp()
def getJob(self, jobId):
    """Assemble a full Job object for *jobId* from its Zookeeper subtree.

    Mandatory fields are read directly; optional nodes (result backend,
    tasks, broker server and queue) fall back to None / [] when missing.
    Attached files and assigned works are expanded into File / Work lists.
    """
    base = "/" + CABBAGE + "/" + JOBS + "/" + jobId

    def path(leaf):
        # Full path of a child node under this job entry.
        return base + "/" + leaf

    # log.debug(self.client)
    jobName = self.client.getData(path(JOB_NAME))
    log.debug(base)
    filePath = self.client.getData(path(FILE_PATH))
    fileName = self.client.getData(path(FILE_NAME))
    fileType = self.client.getData(path(FILE_TYPE))
    status = self.client.getData(path(STATUS))
    auditStatus = self.client.getData(path(AUDIT_STATUS))
    runStrategy = self.client.getData(path(RUN_STRATEGY))

    resultBackend = None
    if self.client.isExistPath(path(REULST_BACKEND)):
        resultBackend = self.client.getData(path(REULST_BACKEND))

    files = self.client.getChildren(path(ATTACH_FILES))
    log.debug(path(WORKS) + "/" + LIST)
    ws = self.client.getChildren(path(WORKS) + "/" + LIST)

    attachFiles = []
    for name in files:
        # The node's data is the file path; a FILE_TYPE child holds its type.
        fp = self.client.getData(path(ATTACH_FILES) + "/" + name)
        ft = self.client.getData(path(ATTACH_FILES) + "/" + name + "/" + FILE_TYPE)
        attachFiles.append(File(fileName=name, filePath=fp, fileType=ft))

    works = []
    for host in ws:
        # Each work child's data is the port it listens on.
        pt = self.client.getData(path(WORKS) + "/" + LIST + "/" + host)
        works.append(Work(hostName=host, port=pt))

    tasks = []
    if self.client.isExistPath(path(TASKS)):
        tasks = self.client.getChildren(path(TASKS))

    brokerServer = None
    if self.client.isExistPath(path(BROKER_SERVER)):
        brokerServer = self.client.getData(path(BROKER_SERVER))

    brokerQueue = None
    if self.client.isExistPath(path(QUEUE)):
        brokerQueue = self.client.getData(path(QUEUE))

    return Job(jobId=jobId, jobName=jobName, filePath=filePath, status=status,
               auditStatus=auditStatus, fileType=fileType, fileName=fileName,
               attachFiles=attachFiles, works=works, runStrategy=runStrategy,
               brokerServer=brokerServer, brokerQueue=brokerQueue, tasks=tasks,
               resultBackend=resultBackend)
def test_status(self):
    """Cycle this host's work record through OFF_LINE -> ON_LINE -> REMOVE."""
    self.store = StoreHolder.getStore()
    record = Work()
    record.hostName = getHostName()
    # (status, seconds to wait after the update) in the order exercised.
    for state, pause in ((OFF_LINE, 10), (ON_LINE, 20), (REMOVE, 0)):
        record.status = state
        self.store.updateWorkStatus(record)
        if pause:
            time.sleep(pause)
def save_job(self):
    """Persist a dummy Job carrying one attached file and one work entry."""
    job = Job(jobName="12121212", filePath="1212", auditStatus="a1212",
              fileType="python", fileName="1212", status="1212",
              attachFiles=None, works=None, runStrategy="one")
    job.attachFiles = [
        File(fileName="1212", jobId=job.jobId, jobName=job.jobName,
             filePath="121212", fileType="test"),
    ]
    job.works = [Work(ip="1212", port="1212", status="aaa")]
    self.store.saveJob(job)
def start(self):
    """Register this host's work node (creating it or re-using the stored
    one), start the Celery server and the background scheduler thread,
    then block on the tornado IO loop.
    """
    work = Work()
    work.hostName = HOST_NAME
    work.port = "1024"
    work.ip = LOCAL_IP
    work.status = READY
    with storeFactory.store() as store:
        if not store.isExistWork(work):
            # First start on this host: persist and cache the fresh record.
            store.saveWork(work)
            CacheHolder.getCache().put(HOST_NAME, work, WORKS)
        else:
            # Known host: cache the stored record and refresh its status.
            CacheHolder.getCache().put(HOST_NAME, store.getWork(HOST_NAME), WORKS)
            store.updateWorkStatus(work)
        self.runCeleryServer(work, store)
    # Daemon scheduler thread dies with the process.
    t2 = threading.Thread(target=scheduler)
    t2.setDaemon(True)
    t2.start()
    # Blocks until the IO loop is stopped elsewhere (see stop()).
    tornado.ioloop.IOLoop.current().start()
def test_base_entity(self): work = Work(ip=1,port=2,status=3) print work.asDict()
def onLine(self):
    """Bring this host back online by (re)starting its Celery server."""
    me = Work()
    me.hostName = HOST_NAME
    me.status = ON_LINE
    with storeFactory.store() as store:
        self.runCeleryServer(me, store)
def save_work(self):
    """Persist a fixed dummy work record through the store."""
    dummy = Work(ip="192.168.108.211", port="1024", status="status")
    self.store.saveWork(dummy)