def updateProgress(self, sysdate):
    """Sync MAD job progress back into the Mongo round-robin queue.

    For every activity session that started after ``trunc(sysdate)`` and is
    still in status "Running": collect the session's queued job ids, ask the
    session's MAD db instance for their progress, and write the returned
    status code and completion fraction onto the matching ``rrqueue`` docs.

    Parameters
    ----------
    sysdate : current business date; ``trunc(sysdate)`` is the day's
        lower bound for the activity query.
    """
    running = self.meteor.activity.find(
        {"start_time": {"$gt": trunc(sysdate)}, "status": "Running"})
    for doc in running:
        dbinstance = doc["maddb"]
        # select_progress expects a single comma-separated id string.
        jobids = ",".join(
            str(row["job_id"])
            for row in self.meteor.rrqueue.find(
                {"session_id": doc["session_id"]},
                {"_id": 0, "job_id": 1}))
        if jobids:  # skip sessions with no queued jobs
            df = self.mad.select_progress(dbinstance, jobids)
            # One Mongo update per progress row returned by MAD.
            df.apply(
                lambda x: self.meteor.rrqueue.update(
                    {"job_id": x.JOB_ID},
                    {"$set": {"sts_cod": x.STS_COD,
                              "progress": x.PROPORTION_DONE}}),
                axis=1)
# NOTE(review): this collapsed line BEGINS mid-method — the enclosing "def"
# and the loop that binds v / baseDt / inception are not visible in this
# chunk, so the fragment is left byte-identical rather than reformatted.
# What is visible, in order:
#   1. Tail of a refresh method: for each scene/batch in v["scenes"],
#      call self.mongo.refreshQueue(...), then rebuild the reglib index.
#   2. livefeed(): runs PriceChangeLive() and pushes its rows to Mongo as
#      JSON records (df.to_json(orient="records") round-tripped via json.loads).
#   3. Script entry point: start a tornado HTTPServer on port 8080, schedule
#      app.rollsysdate once after (delta.seconds + 7h) — presumably a
#      timezone/rollover offset, TODO confirm — then register periodic
#      callbacks: jobqueueRefresh every 5 min, livefeed every 1 min,
#      rrqueueRefresh every 1 s, and start the IOLoop.
farm = v["mad"] for scene, vv in v["scenes"].items(): for batch, jobs in vv["monitor"].items(): self.mongo.refreshQueue(farm, baseDt, batch, jobs, scene, inception) self.mongo.rebuildIndex(self.reglib) def livefeed(self): logger.info("PriceChangeLive") df = PriceChangeLive() self.mongo.updateLiveFeed(json.loads(df.to_json(orient="records"))) if __name__ == "__main__": app = Application() http_server = tornado.httpserver.HTTPServer(app) http_server.listen(8080) #app.listen(443) delta = trunc(dateoffset(app.sysdate, 1))-app.sysdate tornado.ioloop.IOLoop.instance().add_timeout(timedelta(seconds=delta.seconds+7*60*60), app.rollsysdate) app.jobqueueRefresh(True) tornado.ioloop.PeriodicCallback(app.jobqueueRefresh, 5*1000*60).start() app.livefeed() tornado.ioloop.PeriodicCallback(app.livefeed, 1*1000*60).start() tornado.ioloop.PeriodicCallback(app.rrqueueRefresh, 1000*1).start() logger.info("interstellar started") tornado.ioloop.IOLoop.instance().start()
def query_session(self, session_id, sysdate):
    """Return the book and a compact job list for one session.

    Parameters
    ----------
    session_id : str
        Session identifier, formatted "<username>_<suffix>".
    sysdate : current business date. Unused after removing the dead
        summary query (see NOTE below); kept for interface compatibility.

    Returns
    -------
    dict
        {"book": <session's book>,
         "joblist": ";"-joined "job_id,job_name,progress" triples,
                    progress rendered with two decimals}
    """
    # NOTE(review): the original also built a (session_id, status) summary of
    # the user's activity since trunc(sysdate) but never used it — a dead
    # Mongo round-trip per call. Removed; restore if it was meant for the
    # return value.
    # find_one instead of find(...)[0]: cursor indexing was dropped in
    # PyMongo 4 and the intent here is a single document anyway.
    session = self.meteor.activity.find_one({"session_id": session_id})
    book = session["book"]
    jobs = [
        "%d,%s,%.2f" % (t["job_id"], t["job_name"], t["progress"])
        for t in self.meteor.rrqueue.find({"session_id": session_id},
                                          {"_id": 0})
    ]
    return {"book": book, "joblist": ";".join(jobs)}