def test4():
    app = InfluxQueryClient()
    start, stop = MyTool.getStartStopTS(days=1)
    rlt = app.getNodeJobProcData('worker5057', 951025, start)
    with open("tmp.out", "w") as f:   # open for writing so json.dump can write each item
        for item in rlt:
            print("{}\n".format(item))
            json.dump(item, f)

def test6():
    node, jid = 'workergpu45', 952296
    start_time, stop_time = MyTool.getStartStopTS(days=1)
    app = InfluxQueryClient()
    t1 = time.time()
    # jid is int type in node_proc_mon
    query = "select * from one_month.node_proc_mon where hostname='" + node + "' and jid=" + str(jid)
    query = app.extendQuery(query, start_time, None)
    results = app.query(query)
    print("Influx query took {} seconds".format(time.time() - t1))
    count = 0
    with open("tmp.out", "w") as f:
        for point in results.get_points():
            json.dump(point, f)
            count += 1
    print("\treturn {} points".format(count))

def test1(user):
    print("Test user {}'s history".format(user))
    start, stop = MyTool.getStartStopTS(days=30)
    uid = MyTool.getUid(user)
    print(gendata_user(uid, start, stop))

def test5():
    app = InfluxQueryClient()
    start, stop = MyTool.getStartStopTS(days=3, setStop=False)
    rlt = app.getPendingCount(start, stop)
    print(rlt)

def savJobRequestHistory(self, filename='jobRequestHistory', days=7):
    st, et = MyTool.getStartStopTS(days=days)
    rltSet = self.getJobRequestHistory(st, et)
    with open('./data/{}_{}.pickle'.format(filename, days), 'wb') as f:
        pickle.dump(rltSet, f)

def savNodeHistory(self, filename='nodeHistory', days=7):
    st, et = MyTool.getStartStopTS(days=days)
    rltSet = self.getNodeHistory(st, et)
    with open('./{}_{}.pickle'.format(filename, days), 'wb') as f:
        pickle.dump(rltSet, f)

def test3():
    st, stp = MyTool.getStartStopTS(days=30)
    st, stp, df1, df2, df3, df4 = SlurmDBQuery.getClusterJobHistory('slurm', st, stp)
    print("{}-{}: df4={}".format(st, stp, df4))
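

# Minimal driver sketch, not part of the original file: it assumes the module-level
# test functions above are meant to be run directly from the command line. The user
# name passed to test1 is a hypothetical placeholder.
if __name__ == '__main__':
    test1('jdoe')   # hypothetical user name
    test3()
    test4()
    test5()
    test6()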