def njobs_slurmdb_jobs_get(start, end, uid=None):
    """Count the number of jobs reported by slurmdb.

    Args:
        start: start-time string (encoded to UTF-8 for pyslurm).
        end: end-time string in the same format.
        uid: optional numeric user id; when given, only that user's
            jobs are counted.

    Returns:
        int: the number of matching jobs.
    """
    # Build the keyword arguments once instead of duplicating the whole
    # slurmdb_jobs().get(...) call in both branches.
    kwargs = {
        "starttime": start.encode('utf-8'),
        "endtime": end.encode('utf-8'),
    }
    if uid is not None:
        kwargs["userids"] = [uid]
    return len(pyslurm.slurmdb_jobs().get(**kwargs))
def test_slurmdb_jobs_get_steps():
    """ Slurmdb: Get jobs with steps for all users """
    # Submit a small batch job that produces two srun steps.
    wrap_script = """
srun hostname
srun sleep 1
"""
    submitted_id = pyslurm.job().submit_batch_job({
        "wrap": wrap_script,
        "job_name": "pyslurm_test_job_steps",
        "ntasks": 1,
        "cpus_per_task": 1,
    })
    # wait for job to finish
    time.sleep(10)
    # query `sacct`-style accounting over a +/- one-day window
    window_start = (datetime.datetime.now() -
                    datetime.timedelta(days=1)).strftime("%Y-%m-%dT00:00:00")
    window_end = (datetime.datetime.now() +
                  datetime.timedelta(days=1)).strftime("%Y-%m-%dT00:00:00")
    accounted = pyslurm.slurmdb_jobs().get(
        starttime=window_start.encode('utf-8'),
        endtime=window_end.encode('utf-8'))
    # results must serialize to valid json
    assert json.dumps(accounted, sort_keys=True, indent=4)
    # the submitted job must appear in the results
    assert accounted.get(submitted_id, None)
    # and it must carry step records
    assert accounted[submitted_id]["steps"]
    # exactly 3 steps: 1 batch + 2 srun
    assert 3 == len(accounted[submitted_id]["steps"])
def __init__(self, user, time=31):
    """Collect slurmdb jobs and index them by user, group, and account.

    Queries slurmdb for jobs from `time` days ago up to 31 days in the
    future, then builds:
      - self.job_table:        the given user's jobs (job id -> job dict)
      - self.group_table:      usernames sharing the user's account
      - self.group_job_table:  username -> {job id -> job dict} for the group
      - self.full_table:       account -> username -> {job id -> job dict}
    """
    self.user = user
    self.user_id = None
    self.group_id = None
    self.job_table = {}
    self.group_table = set()
    self.group_job_table = {}
    self.all_jobs = None
    self.full_table = {}
    # Query window end: a fixed 31 days ahead of now.
    # NOTE(review): hard-codes 31 instead of using the `time` parameter —
    # confirm this is intentional.
    end = datetime.now() + timedelta(days=31)
    end = end.strftime("%m%d%y")
    t_delta = timedelta(days=time)
    start = datetime.now() - t_delta
    start = start.strftime("%m%d%y")
    self.all_jobs = pyslurm.slurmdb_jobs().get(
        starttime=start.encode('utf-8'), endtime=end.encode('utf-8'))
    # Find the first job belonging to `user` to learn their id and account.
    # NOTE(review): user_id is filled from the 'gid' field, which looks like
    # a group id rather than a user id — verify against the caller.
    for i in self.all_jobs:
        if self.all_jobs[i]['user'] == self.user:
            self.user_id = self.all_jobs[i]['gid']
            self.group_id = self.all_jobs[i]['account']
            break
    for j in self.all_jobs:
        # Collect usernames that share the user's account.
        if self.all_jobs[j]['account'] == self.group_id:
            self.group_table.add(self.all_jobs[j]['user'])
        # Collect the user's own jobs.
        if self.all_jobs[j]['user'] == self.user:
            self.job_table.update({j: self.all_jobs[j]})
        # Maintain the nested account -> user -> job-id mapping, creating
        # each intermediate level on first sight.
        if self.all_jobs[j]['account'] not in self.full_table.keys():
            self.full_table.update({
                self.all_jobs[j]['account']: {
                    self.all_jobs[j]['user']: {
                        j: self.all_jobs[j]
                    }
                }
            })
        elif self.all_jobs[j]['user'] not in self.full_table[
                self.all_jobs[j]['account']].keys():
            self.full_table[self.all_jobs[j]['account']].update(
                {self.all_jobs[j]['user']: {
                    j: self.all_jobs[j]
                }})
        else:
            self.full_table[self.all_jobs[j]['account']][
                self.all_jobs[j]['user']].update({j: self.all_jobs[j]})
    self.group_table = list(self.group_table)
    # One (initially empty) job table per group member; the user's own
    # entry is then replaced with the already-built job_table.
    for i in self.group_table:
        self.group_job_table[i] = {}
    self.group_job_table.update({self.user: self.job_table})
    for k in self.all_jobs:
        if self.all_jobs[k]['user'] in self.group_table:
            if self.all_jobs[k]['user'] != self.user:
                self.group_job_table[self.all_jobs[k]['user']].update(
                    {k: self.all_jobs[k]})
def get_prod_time(jobids=None):
    """Return the total elapsed time, in hours, for a set of slurmdb jobs.

    Args:
        jobids: optional list of job ids to query; defaults to the original
            hard-coded production job ids for backward compatibility.

    Returns:
        float: sum of the jobs' 'elapsed' values (seconds) divided by 3600.
    """
    if jobids is None:
        # Historical production job ids (previously hard-coded).
        jobids = [16478, 16498, 16549, 16569, 16591, 16626, 16673]
    jdct = pyslurm.slurmdb_jobs().get(jobids=jobids)
    # Sum directly instead of building an intermediate list.
    return sum(v['elapsed'] for v in jdct.values()) / 3600
def CleanUp(request):
    '''
    Collect the personal job queue: find all recent jobs belonging to the
    requesting user and build a 2D matrix (header row + one row per job)
    that is passed to the template for rendering.
    '''
    username = request.user.username
    JobQueue = []
    jobs = pyslurm.job().get()
    AllJobs = pyslurm.slurmdb_jobs().get()
    fields = [
        "job_id", "user", "name", "job_state", "run_time_str", "num_nodes",
        "nodes", "start_time", "submit_time"
    ]
    JobQueue.append(fields)
    times = ["start_time", "submit_time"]
    for key, value in jobs.items():
        JobInQ = []
        jobid = value["job_id"]
        for field in fields:
            if field in times:
                temp_time = float(value[field])
                # Check for uninitialized time (added 4-20-19).
                if temp_time < 10000:
                    JobInQ.append("0000-00-00 00:00:00")
                else:
                    JobInQ.append(
                        datetime.utcfromtimestamp(temp_time).strftime(
                            '%Y-%m-%d %H:%M:%S'))
            elif field == "user":
                # Prefer the accounting record keyed by job id; otherwise
                # fall back to matching the scheduler's user_id against
                # slurmdb's 'gid' field.
                if jobid in AllJobs:
                    JobInQ.append(AllJobs[jobid]['user'])
                else:
                    for jobid2 in AllJobs:
                        if value['user_id'] == AllJobs[jobid2]['gid']:
                            JobInQ.append(AllJobs[jobid2]['user'])
                            break
                    else:
                        # BUG FIX: previously nothing was appended when no
                        # accounting record matched, which shifted every
                        # later column of the row out of alignment.
                        JobInQ.append(str(value['user_id']))
            else:
                JobInQ.append(value[field])
        # Column 1 is the "user" field; keep only the requester's jobs.
        EntryUser = JobInQ[1]
        if EntryUser == username:
            JobQueue.append(JobInQ)
    return render(request, 'ScriptGen/queue.html', {'queue': JobQueue})
def MajorUsers(request):
    """Render a pie chart of the users who submitted the most jobs.

    Users responsible for more than 2% of all submissions get their own
    slice; everyone else is aggregated into 'other'. Admin-only view.
    """
    user = request.user.username
    AdminAccess = IsAdmin(user)
    print("about to do admin access function")
    if not AdminAccess:
        return render(request, 'error_pages/403.html')
    AllJobs = pyslurm.slurmdb_jobs().get()
    # Count submissions per user.
    userSubDict = {}
    for jobid in AllJobs:
        submitter = AllJobs[jobid]['user']
        userSubDict[submitter] = userSubDict.get(submitter, 0) + 1
    totalSub = sum(userSubDict.values())
    # Loop-invariant threshold hoisted out of the loop: 2% of all jobs.
    twoPercent = totalSub / 100 * 2
    newUserDict = {'other': 0}
    for submitter, jobs in userSubDict.items():
        if submitter is None:
            # HACK: jobs with no recorded user are attributed to 'Cameron'
            # (behaviour preserved from the original) — confirm intent.
            newUserDict['Cameron'] = jobs
        elif jobs > twoPercent:
            newUserDict[submitter] = jobs
        else:
            newUserDict['other'] += jobs
    labels = [name for name in newUserDict]
    sizes = [newUserDict[name] for name in newUserDict]
    fig, ax = plt.subplots()
    ax.pie(sizes, autopct='%1.0f%%', startangle=90)
    ax.axis('equal')
    plt.title("Major Job Submitters by percentage")
    plt.legend(labels)
    g = mpld3.fig_to_html(fig)
    return render(request, 'stats/graphic.html', {'graph': g})
def getSlurmDBJob(jid, req_fields=DEF_REQ_FLD):
    """Fetch one job record from slurmdb and fill in requested fields.

    Looks the job up by id, adds 'user_id' via MyTool.getUid, then for every
    field in req_fields that slurmdb does not provide directly, derives it
    through PyslurmQuery.MAP_JOB2DBJ.  A map entry is either a plain slurmdb
    field name (straight copy) or a [field_name, convert_fn] pair.

    Returns:
        The job dict, or None when the job cannot be found.
    """
    job = pyslurm.slurmdb_jobs().get(jobids=[jid]).get(jid, None)
    if not job:  # cannot find
        return None
    job['user_id'] = MyTool.getUid(job['user'])
    for f in req_fields:
        if f in job:
            continue
        if f not in PyslurmQuery.MAP_JOB2DBJ:  # cannot be converted
            logger.error(
                "Cannot find/map requested job field {} in job {}".format(
                    f, job))
            continue
        mapping = PyslurmQuery.MAP_JOB2DBJ[f]
        # isinstance instead of type(...) comparison; a list entry bundles
        # the slurmdb field name with a converter function.
        if isinstance(mapping, list):
            db_fld, cvtFunc = mapping
            job[f] = cvtFunc(job[db_fld])
        else:
            job[f] = job[mapping]
    return job
"""
List all jobs in Slurm, similar to `sacct`
"""
import time
import datetime

import pyslurm


def job_display(job):
    """Format output"""
    for key, value in job.items():
        print("\t{}={}".format(key, value))


if __name__ == "__main__":
    try:
        # Accounting window: midnight yesterday through midnight tomorrow.
        one_day = datetime.timedelta(days=1)
        start = (datetime.datetime.utcnow() -
                 one_day).strftime("%Y-%m-%dT00:00:00")
        end = (datetime.datetime.utcnow() +
               one_day).strftime("%Y-%m-%dT00:00:00")
        accounting = pyslurm.slurmdb_jobs()
        jobs_dict = accounting.get(starttime=start.encode('utf-8'),
                                   endtime=end.encode('utf-8'))
        if not jobs_dict:
            print("No job found")
        else:
            for key, value in jobs_dict.items():
                print("{} Job: {}".format("{", key))
                job_display(value)
                print("}")
    except ValueError as job_exception:
        print("Error:{}".format(job_exception.args[0]))
#!/usr/bin/env python
"""List slurmdb jobs from the last 30 days, similar to `sacct`."""
import time

import pyslurm


def job_display(job):
    """Print every field of a job dict, one per line."""
    for key, value in job.items():
        print("\t{}={}".format(key, value))


if __name__ == "__main__":
    try:
        # Query window: the last 30 days, expressed as epoch seconds.
        end = time.time()
        start = end - (30 * 24 * 60 * 60)
        print("start={}, end={}".format(start, end))
        jobs = pyslurm.slurmdb_jobs()
        jobs_dict = jobs.get(starttime=start, endtime=end)
        # Truthiness instead of len(): an empty dict is already falsy.
        if jobs_dict:
            for key, value in jobs_dict.items():
                print("{} Job: {}".format('{', key))
                job_display(value)
                print("}")
        else:
            print("No job found")
    except ValueError as e:
        print("Error:{}".format(e.args[0]))
def JobSubStats(request):
    """Render a line chart of jobs submitted per day (admin-only view).

    Pulls every job from slurmdb, buckets start times by calendar date,
    and plots the per-day counts with matplotlib/mpld3.
    """
    user = request.user.username
    AdminAccess = IsAdmin(user)
    print("about to do admin access function")
    if not AdminAccess:
        return render(request, 'error_pages/403.html')
    print("finished ")
    AllJobs = pyslurm.slurmdb_jobs().get()
    DateDict = {}
    for jobid in AllJobs:
        startTime = AllJobs[jobid]['start']
        # BUG FIX: validate the timestamp BEFORE converting it — the old
        # code called fromtimestamp() on possibly-unset (0/negative) start
        # times, which can raise on some platforms.
        if int(startTime) > 0:
            date = datetime.fromtimestamp(startTime).date()
            DateDict[date] = DateDict.get(date, 0) + 1
    # Sort chronologically before plotting.
    pairs = sorted(DateDict.items())
    xDates = [pair[0] for pair in pairs]
    y = [pair[1] for pair in pairs]
    fig, ax = plt.subplots()
    plt.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
    plt.gca().xaxis.set_major_locator(mdates.AutoDateLocator())
    ax.xaxis_date()
    ax.plot(xDates, y)
    plt.ylabel("Jobs Submitted")
    plt.title("Jobs Submitted In 2019")
    plt.gcf().autofmt_xdate()
    g = mpld3.fig_to_html(fig)
    return render(request, 'stats/graphic.html', {'graph': g})
def JobFailure(request):
    """Render a pie chart of failed jobs (non-zero exit code) per user.

    Users with more than 2% of all failures get their own slice; the rest
    are aggregated under 'other'. Admin-only view.
    """
    user = request.user.username
    AdminAccess = IsAdmin(user)
    print("about to do admin access function")
    if not AdminAccess:
        return render(request, 'error_pages/403.html')
    AllJobs = pyslurm.slurmdb_jobs().get()
    # Count failures per user.
    ErrorDict = {}
    for jobid in AllJobs:
        owner = AllJobs[jobid]['user']
        errorCode = AllJobs[jobid]['exit_code']
        if int(errorCode) != 0:
            ErrorDict[owner] = ErrorDict.get(owner, 0) + 1
    TotalErrors = sum(ErrorDict.values())
    # Loop-invariant threshold hoisted out of the loop: 2% of all failures.
    twoPercent = TotalErrors / 100 * 2
    NewErrorDict = {'other': 0}
    for owner, jobs in ErrorDict.items():
        if jobs > twoPercent:
            NewErrorDict[owner] = jobs
        else:
            NewErrorDict['other'] += jobs
    labels = [name for name in NewErrorDict]
    sizes = [NewErrorDict[name] for name in NewErrorDict]
    fig, ax = plt.subplots()
    ax.pie(sizes, autopct='%1.0f%%', startangle=90)
    ax.axis('equal')
    plt.title("Failed Jobs by user")
    plt.legend(labels)
    g = mpld3.fig_to_html(fig)
    return render(request, 'stats/graphic.html', {'graph': g})
def __init__(self, user, time=31):
    """Collect slurmdb accounting data for a user and their group.

    Args:
        user: username whose jobs (and group peers' jobs) are collected.
        time: look-back window in days for which data will be available.

    Builds:
      - self.job_table:        the given user's jobs (job id -> job dict)
      - self.group_table:      usernames sharing the user's account
      - self.group_job_table:  username -> {job id -> job dict} for the group
      - self.full_table:       account -> username -> {job id -> job dict}
    """
    self.user = user
    self.user_id = None
    self.group_id = None
    self.job_table = {}
    self.group_table = set()
    self.group_job_table = {}
    self.all_jobs = None
    self.full_table = {}
    # Query window end: a fixed 31 days ahead of now.
    # NOTE(review): hard-codes 31 instead of the `time` parameter — confirm.
    end = datetime.now() + timedelta(days=31)
    end = end.strftime("%m%d%y")
    t_delta = timedelta(days=time)
    start = datetime.now() - t_delta
    start = start.strftime("%m%d%y")
    # Initial call to pyslurm for all accounting records in the window.
    self.all_jobs = pyslurm.slurmdb_jobs().get(
        starttime=start.encode('utf-8'), endtime=end.encode('utf-8'))
    # Search for user information matching the supplied username.
    # NOTE(review): user_id is filled from the 'gid' field, which looks
    # like a group id rather than a user id — verify.
    for i in self.all_jobs:
        if self.all_jobs[i]['user'] == self.user:
            self.user_id = self.all_jobs[i]['gid']
            self.group_id = self.all_jobs[i]['account']
            break
    # Assemble the table of users in the supplied username's group and an
    # organized (account -> user -> job) version of all available data.
    for j in self.all_jobs:
        if self.all_jobs[j]['account'] == self.group_id:
            self.group_table.add(self.all_jobs[j]['user'])
        if self.all_jobs[j]['user'] == self.user:
            self.job_table.update({j: self.all_jobs[j]})
        # Create each nesting level of full_table on first sight.
        if self.all_jobs[j]['account'] not in self.full_table.keys():
            self.full_table.update({
                self.all_jobs[j]['account']: {
                    self.all_jobs[j]['user']: {
                        j: self.all_jobs[j]
                    }
                }
            })
        elif self.all_jobs[j]['user'] not in self.full_table[
                self.all_jobs[j]['account']].keys():
            self.full_table[self.all_jobs[j]['account']].update(
                {self.all_jobs[j]['user']: {
                    j: self.all_jobs[j]
                }})
        else:
            self.full_table[self.all_jobs[j]['account']][
                self.all_jobs[j]['user']].update({j: self.all_jobs[j]})
    self.group_table = list(self.group_table)
    for i in self.group_table:
        self.group_job_table[i] = {}
    self.group_job_table.update({self.user: self.job_table})
    # Add group members' data to the separate per-user table.
    for k in self.all_jobs:
        if self.all_jobs[k]['user'] in self.group_table:
            if self.all_jobs[k]['user'] != self.user:
                self.group_job_table[self.all_jobs[k]['user']].update(
                    {k: self.all_jobs[k]})