def harvesterWorkList(request):
    valid, response = initRequest(request)
    if not valid:
        return response
    query, extra, LAST_N_HOURS_MAX = setupView(request, hours=24 * 3, wildCardExt=True)

    statusDefined = False
    if 'status__in' in query:
        statusDefined = True

    tquery = {}
    # final worker states are limited to the selected time window
    if statusDefined:
        tquery['status__in'] = list(set(query['status__in']).intersection(
            ['missed', 'submitted', 'idle', 'finished', 'failed', 'cancelled']))
    else:
        tquery['status__in'] = ['missed', 'submitted', 'idle', 'finished', 'failed', 'cancelled']
    tquery['lastupdate__range'] = query['modificationtime__range']

    workerslist = []
    if len(tquery['status__in']) > 0:
        workerslist.extend(HarvesterWorkers.objects.values(
            'computingsite', 'status', 'submittime', 'harvesterid', 'workerid').filter(**tquery).extra(where=[extra]))

    # active worker states are taken regardless of the time window
    if statusDefined:
        tquery['status__in'] = list(set(query['status__in']).intersection(['ready', 'running']))
    else:
        tquery['status__in'] = ['ready', 'running']
    del tquery['lastupdate__range']
    if len(tquery['status__in']) > 0:
        workerslist.extend(HarvesterWorkers.objects.values(
            'computingsite', 'status', 'submittime', 'harvesterid', 'workerid').filter(**tquery).extra(where=[extra]))

    data = {
        'workerslist': workerslist,
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    endSelfMonitor(request)
    response = render_to_response('harvworkerslist.html', data, content_type='text/html')
    return response
def harvesterWorkersDash(request):
    valid, response = initRequest(request)
    if not valid:
        return response

    hours = 24 * 3
    if 'days' in request.session['requestParams']:
        days = int(request.session['requestParams']['days'])
        hours = days * 24
    query = setupView(request, hours=hours, wildCardExt=False)

    tquery = {}
    tquery['status__in'] = ['missed', 'submitted', 'idle', 'finished', 'failed', 'cancelled']
    tquery['lastupdate__range'] = query['modificationtime__range']
    if 'harvesterid__in' in query:
        tquery['harvesterid__in'] = query['harvesterid__in']

    harvesterWorkers = []
    harvesterWorkers.extend(HarvesterWorkers.objects.values('computingsite', 'status').filter(
        **tquery).annotate(Count('status')).order_by('computingsite'))

    # intermediate states are excluded from the time window
    tquery['status__in'] = ['ready', 'running']
    del tquery['lastupdate__range']
    harvesterWorkers.extend(HarvesterWorkers.objects.values('computingsite', 'status').filter(
        **tquery).annotate(Count('status')).order_by('computingsite'))

    statusesSummary = OrderedDict()
    for harvesterWorker in harvesterWorkers:
        if harvesterWorker['computingsite'] not in statusesSummary:
            statusesSummary[harvesterWorker['computingsite']] = OrderedDict()
            for harvWorkStatus in harvWorkStatuses:
                statusesSummary[harvesterWorker['computingsite']][harvWorkStatus] = 0
        statusesSummary[harvesterWorker['computingsite']][harvesterWorker['status']] = harvesterWorker['status__count']

    # SELECT computingsite, status, workerid, LASTUPDATE,
    #     row_number() over (partition by workerid, computingsite ORDER BY LASTUPDATE ASC) partid
    # FROM ATLAS_PANDA.HARVESTER_WORKERS /*GROUP BY WORKERID ORDER BY COUNT(WORKERID) DESC*/

    data = {
        'statusesSummary': statusesSummary,
        'harvWorkStatuses': harvWorkStatuses,
        'request': request,
        'hours': hours,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    endSelfMonitor(request)
    response = render_to_response('harvworksummarydash.html', data, content_type='text/html')
    return response
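# --- illustration (not part of the view code) ---------------------------------
# harvesterWorkersDash pivots rows produced by Django's values() +
# annotate(Count('status')) idiom, which groups by the values() fields and
# attaches a status__count to each row. The sketch below shows that row shape
# and the pivot; the site names and counts are invented for the example.
def _statuses_summary_example():
    # shape of rows returned by
    #   HarvesterWorkers.objects.values('computingsite', 'status').annotate(Count('status'))
    rows = [
        {'computingsite': 'CERN-PROD', 'status': 'finished', 'status__count': 120},
        {'computingsite': 'CERN-PROD', 'status': 'failed', 'status__count': 7},
        {'computingsite': 'BNL-OSG2', 'status': 'running', 'status__count': 42},
    ]
    # the view folds these into a per-site summary table
    summary = {}
    for r in rows:
        summary.setdefault(r['computingsite'], {})[r['status']] = r['status__count']
    assert summary['CERN-PROD']['failed'] == 7
    return summary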
def globalshares(request):
    valid, response = initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData
        # render the cached page
        response = render_to_response('globalshares.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    if not valid:
        return response
    setupView(request, hours=180 * 24, limit=9999999)

    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}  # e.g. {'Upgrade': 130049, 'Reprocessing default': 568841, 'Data Derivations': 202962, 'Event Index': 143}
    for shareName, shareValue in gs.iteritems():
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        shareValue['used'] = shareValue['ratio'] if 'ratio' in shareValue else None
        gsPlotData[str(shareName)] = int(shareValue['executing'])
    for shareValue in tablerows:
        shareValue['used'] = shareValue['ratio'] * Decimal(shareValue['value']) / 100 if 'ratio' in shareValue else None

    # build the level1/level2/level3 hierarchy of shares
    ordtablerows = {}
    ordtablerows['childlist'] = []
    level1 = ''
    level2 = ''
    level3 = ''
    for shareValue in tablerows:
        if len(shareValue['level1']) != 0:
            level1 = shareValue['level1']
            ordtablerows[level1] = {}
            ordtablerows['childlist'].append(level1)
            ordtablerows[level1]['childlist'] = []
        if len(shareValue['level2']) != 0:
            level2 = shareValue['level2']
            ordtablerows[level1][level2] = {}
            ordtablerows[level1]['childlist'].append(level2)
            ordtablerows[level1][level2]['childlist'] = []
        if len(shareValue['level3']) != 0:
            level3 = shareValue['level3']
            ordtablerows[level1][level2][level3] = {}
            ordtablerows[level1][level2]['childlist'].append(level3)

    resources_list, resources_dict = get_resources_gshare()
    newTablesRow = []
    for ordValueLevel1 in sorted(ordtablerows['childlist']):
        for shareValue in tablerows:
            if ordValueLevel1 in shareValue['level1']:
                ord1Short = re.sub('\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                shareValue['level'] = 'level1'
                shareValue['gshare'] = ord1Short.replace(' ', '_')
                newTablesRow.append(shareValue)
                tablerows.remove(shareValue)
                if len(ordtablerows[ordValueLevel1]['childlist']) == 0:
                    add_resources(ord1Short, newTablesRow, resources_list, shareValue['level'])
                else:
                    childsgsharelist = []
                    get_child_elements(ordtablerows[ordValueLevel1], childsgsharelist)
                    resources_dict = get_child_sumstats(childsgsharelist, resources_dict, ord1Short)
                    short_resource_list = resourcesDictToList(resources_dict)
                    add_resources(ord1Short, newTablesRow, short_resource_list, shareValue['level'])
                break
        for ordValueLevel2 in sorted(ordtablerows[ordValueLevel1]['childlist']):
            for shareValue in tablerows:
                if ordValueLevel2 in shareValue['level2']:
                    if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) == 0:
                        ord1Short = re.sub('\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                        ord2Short = re.sub('\[(.*)\]', '', ordValueLevel2).rstrip().lower()
                        link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord2Short)
                        shareValue['link'] = link
                        shareValue['level'] = 'level2'
                        shareValue['gshare'] = ord2Short.replace(' ', '_')
                    newTablesRow.append(shareValue)
                    tablerows.remove(shareValue)
                    if 'level' in shareValue:
                        add_resources(ord2Short, newTablesRow, resources_list, shareValue['level'])
                    break
            for ordValueLevel3 in sorted(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']):
                for shareValue in tablerows:
                    if ordValueLevel3 in shareValue['level3']:
                        if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) > 0:
                            ord1Short = re.sub('\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                            ord3Short = re.sub('\[(.*)\]', '', ordValueLevel3).rstrip().lower()
                            link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord3Short)
                            shareValue['link'] = link
                            shareValue['level'] = 'level3'
                            shareValue['gshare'] = ord3Short.replace(' ', '_')
                        newTablesRow.append(shareValue)
                        tablerows.remove(shareValue)
                        if 'level' in shareValue:
                            add_resources(ord3Short, newTablesRow, resources_list, shareValue['level'])
                        break
    tablerows = newTablesRow

    del request.session['TFIRST']
    del request.session['TLAST']
    ## self monitor
    endSelfMonitor(request)
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json'))) and (
            'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData': gsPlotData,
            'tablerows': tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        response = render_to_response('globalshares.html', data, content_type='text/html')
        setCacheEntry(request, "globalshares", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # gs may contain Decimal values, which the default JSON encoder rejects
        return HttpResponse(DecimalEncoder().encode(gs), content_type='text/html')
def errorsScatteringDetailed(request, cloud, reqid):
    valid, response = initRequest(request)
    if not valid:
        return response

    # Here we try to get cached data
    data = getCacheEntry(request, "errorsScatteringDetailed")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('errorsScatteringDetailed.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    grouping = []
    homeCloud = {}
    cloudsDict = {}
    sflist = ('siteid', 'site', 'status', 'cloud', 'tier', 'comment_field', 'objectstore', 'catchall', 'corepower')
    sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
    for site in sites:
        homeCloud[site['siteid']] = site['cloud']
        if site['cloud'] not in cloudsDict:
            cloudsDict[site['cloud']] = []
        cloudsDict[site['cloud']].append(site['siteid'])

    # order sites alphabetically within alphabetically ordered clouds
    sitesDictForOrdering = {}
    i = 0
    for cloudname in sorted(cloudsDict.keys()):
        for sitename in sorted(cloudsDict[cloudname]):
            sitesDictForOrdering[sitename] = i
            i += 1

    clouds = sorted(list(set(homeCloud.values())))
    condition = '(1=1)'
    if cloud == '' or len(cloud) == 0:
        return HttpResponse("No cloud supplied", content_type='text/html')
    elif cloud == 'ALL':
        grouping.append('reqid')
    elif cloud not in clouds:
        return HttpResponse("The provided cloud name does not exist", content_type='text/html')
    if reqid == '' or len(reqid) == 0:
        return HttpResponse("No request ID supplied", content_type='text/html')
    elif reqid == 'ALL':
        grouping.append('cloud')
    else:
        try:
            reqid = int(reqid)
        except:
            return HttpResponse("The provided request ID is not valid", content_type='text/html')

    view = None
    if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'queues':
        view = 'queues'
    if len(grouping) == 2 and view != 'queues':
        return redirect('/errorsscat/')

    limit = 100000
    if 'hours' in request.session['requestParams']:
        try:
            hours = int(request.session['requestParams']['hours'])
        except:
            hours = 8
    else:
        hours = 8

    isExcludeScouts = False
    if 'scouts' in request.session['requestParams']:
        if request.session['requestParams']['scouts'] == 'exclude':
            isExcludeScouts = True
        try:
            del request.session['requestParams']['scouts']
        except:
            pass

    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task',
                                                           wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    # exclude paused tasks
    wildCardExtension += ' AND STATUS != \'paused\''
    if reqid != 'ALL':
        query['reqid'] = reqid
        request.session['requestParams']['reqid'] = reqid
    if cloud != 'ALL':
        request.session['requestParams']['region'] = cloud
        cloudstr = ''
        for sn, cn in homeCloud.iteritems():
            if cn == cloud:
                cloudstr += "\'%s\'," % (str(sn))
        if cloudstr.endswith(','):
            cloudstr = cloudstr[:-1]
        condition = "COMPUTINGSITE in ( %s )" % (str(cloudstr))

    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid", "reqid")
    print 'tasks found %i' % (len(tasks))

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"

    taskListByReq = {}
    transactionKey = random.randrange(1000000)
    executionData = []
    for id in tasks:
        executionData.append((id['jeditaskid'], transactionKey))
        # fill the list of jeditaskids for each reqid to put into cache for consistency with jobList
        if id['reqid'] not in taskListByReq:
            taskListByReq[id['reqid']] = ''
        taskListByReq[id['reqid']] += str(id['jeditaskid']) + ','

    new_cur = connection.cursor()
    insquery = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(insquery, executionData)
    connection.commit()

    jcondition = '(1=1)'
    if isExcludeScouts:
        jcondition = """specialhandling NOT LIKE '%%sj'"""

    querystr = """
        SELECT SUM(FINISHEDC) as FINISHEDC, SUM(FAILEDC) as FAILEDC, SUM(ALLC) as ALLC, REQID, JEDITASKID, COMPUTINGSITE, sc.cloud as CLOUD from (
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC,
                SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,
                COMPUTINGSITE, REQID, JEDITASKID
            FROM ATLAS_PANDA.JOBSARCHIVED4
            WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
                AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
            group by COMPUTINGSITE, JEDITASKID, REQID
            UNION
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC,
                SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,
                COMPUTINGSITE, REQID, JEDITASKID
            FROM ATLAS_PANDAARCH.JOBSARCHIVED
            WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
                AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
            group by COMPUTINGSITE, JEDITASKID, REQID
        ) j, (
            select siteid, cloud from ATLAS_PANDAMETA.SCHEDCONFIG
        ) sc
        where j.computingsite = sc.siteid AND j.ALLC > 0 AND %s
        group by jeditaskid, COMPUTINGSITE, REQID, cloud
        """ % (tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition,
               tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition, condition)
    new_cur.execute(querystr)
    errorsRaw = dictfetchall(new_cur)
    # new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    computingSites = []
    tasksErrorsList = []
    taskserrors = {}
    reqErrorsList = []
    reqerrors = {}
    successrateIntervals = {'green': [80, 100], 'yellow': [50, 79], 'red': [0, 49]}
    statsParams = ['percent', 'finishedc', 'failedc', 'allc']

    if len(grouping) == 0 or (len(grouping) == 1 and 'reqid' in grouping and view == 'queues'):
        # single request: tasks vs computing sites
        for errorEntry in errorsRaw:
            jeditaskid = errorEntry['JEDITASKID']
            if jeditaskid not in taskserrors:
                taskentry = {}
                taskserrors[jeditaskid] = taskentry
                taskserrors[jeditaskid]['jeditaskid'] = jeditaskid
                taskserrors[jeditaskid]['columns'] = {}
                taskserrors[jeditaskid]['totalstats'] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['totalstats'][param] = 0
            if errorEntry['COMPUTINGSITE'] not in taskserrors[jeditaskid]['columns']:
                taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']][param] = 0
            taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['allc'] = \
                errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['percent'] = int(math.ceil(
                errorEntry['FINISHEDC'] * 100. / taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['allc'])) if \
                taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['allc'] > 0 else 0
            taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['finishedc'] = errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]['failedc'] = errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['totalstats']['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        ### calculate totalstats
        for jeditaskid, taskEntry in taskserrors.iteritems():
            taskserrors[jeditaskid]['totalstats']['percent'] = int(math.ceil(
                taskEntry['totalstats']['finishedc'] * 100. / taskEntry['totalstats']['allc'])) if \
                taskEntry['totalstats']['allc'] > 0 else 0

        # clean up tasks that have no finished/failed jobs at all
        tasksToDel = []
        for jeditaskid, taskEntry in taskserrors.iteritems():
            if taskEntry['totalstats']['allc'] == 0:
                tasksToDel.append(jeditaskid)
        for taskToDel in tasksToDel:
            del taskserrors[taskToDel]

        for jeditaskid, taskentry in taskserrors.iteritems():
            for sitename, siteval in taskentry['columns'].iteritems():
                computingSites.append(sitename)
        computingSites = sorted(set(computingSites), key=lambda x: sitesDictForOrdering.get(x))

        ### fill in missing sites with zero stats
        for jeditaskid, taskentry in taskserrors.iteritems():
            for computingSite in computingSites:
                if computingSite not in taskentry['columns']:
                    taskserrors[jeditaskid]['columns'][computingSite] = {}
                    for param in statsParams:
                        taskserrors[jeditaskid]['columns'][computingSite][param] = 0

        ### calculate stats for columns
        columnstats = {}
        for cn in computingSites:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0
        for jeditaskid, taskEntry in taskserrors.iteritems():
            for cs in computingSites:
                for cname, cEntry in taskEntry['columns'].iteritems():
                    if cs == cname:
                        columnstats[cs]['finishedc'] += cEntry['finishedc']
                        columnstats[cs]['failedc'] += cEntry['failedc']
                        columnstats[cs]['allc'] += cEntry['allc']
        for csn, stats in columnstats.iteritems():
            columnstats[csn]['percent'] = int(math.ceil(
                columnstats[csn]['finishedc'] * 100. / columnstats[csn]['allc'])) if \
                columnstats[csn]['allc'] > 0 else 0

        ### transform the columns dict to a list for sorting on the template
        for jeditaskid, taskEntry in taskserrors.iteritems():
            columnlist = []
            for columnname, stats in taskEntry['columns'].iteritems():
                stats['computingsite'] = columnname
                columnlist.append(stats)
            taskEntry['columns'] = sorted(columnlist, key=lambda x: sitesDictForOrdering.get(x['computingsite']))
        for jeditaskid, taskEntry in taskserrors.iteritems():
            tasksErrorsList.append(taskEntry)
        tasksErrorsList = sorted(tasksErrorsList, key=lambda x: x['totalstats']['percent'])

    elif len(grouping) == 1 and 'reqid' in grouping:
        # single request: tasks vs clouds
        clouderrors = {}
        for errorEntry in errorsRaw:
            jeditaskid = errorEntry['JEDITASKID']
            if jeditaskid not in taskserrors:
                taskentry = {}
                taskserrors[jeditaskid] = taskentry
                taskserrors[jeditaskid]['jeditaskid'] = jeditaskid
                taskserrors[jeditaskid]['columns'] = {}
                taskserrors[jeditaskid]['totalstats'] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['totalstats'][param] = 0
            if errorEntry['CLOUD'] not in taskserrors[jeditaskid]['columns']:
                taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']][param] = 0
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['totalstats']['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            if errorEntry['CLOUD'] not in clouderrors:
                clouderrors[errorEntry['CLOUD']] = {}
            if errorEntry['COMPUTINGSITE'] not in clouderrors[errorEntry['CLOUD']]:
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']] = {}
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] = 0
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] = 0
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] = 0
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] += (errorEntry['FINISHEDC'] + errorEntry['FAILEDC'])

        ### calculate totalstats
        for jeditaskid, taskEntry in taskserrors.iteritems():
            taskserrors[jeditaskid]['totalstats']['percent'] = int(math.ceil(
                taskEntry['totalstats']['finishedc'] * 100. / taskEntry['totalstats']['allc'])) if \
                taskEntry['totalstats']['allc'] > 0 else 0

        # clean up tasks that have no finished/failed jobs at all
        tasksToDel = []
        for jeditaskid, taskEntry in taskserrors.iteritems():
            if taskEntry['totalstats']['allc'] == 0:
                tasksToDel.append(jeditaskid)
        for taskToDel in tasksToDel:
            del taskserrors[taskToDel]

        for jeditaskid, taskentry in taskserrors.iteritems():
            for c in clouds:
                if not c in taskentry['columns']:
                    taskentry['columns'][c] = {}
                    for param in statsParams:
                        taskentry['columns'][c][param] = 0
                else:
                    taskentry['columns'][c]['percent'] = int(math.ceil(
                        taskentry['columns'][c]['finishedc'] * 100. / taskentry['columns'][c]['allc'])) if \
                        taskentry['columns'][c]['allc'] > 0 else 0

        ### calculate stats for columns
        columnstats = {}
        for cn in clouds:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0
            columnstats[cns]['minpercent'] = 100
            for color, srint in successrateIntervals.items():
                columnstats[cns][color + 'c'] = 0
        for cloudname, sites in clouderrors.iteritems():
            for sitename, sstats in sites.iteritems():
                columnstats[cloudname]['finishedc'] += sstats['finishedc']
                columnstats[cloudname]['failedc'] += sstats['failedc']
                columnstats[cloudname]['allc'] += sstats['allc']
                srpct = int(sstats['finishedc'] * 100. / sstats['allc'])
                for color, srint in successrateIntervals.items():
                    columnstats[cloudname][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
            columnstats[cloudname]['minpercent'] = min(
                int(cstats['finishedc'] * 100. / cstats['allc']) for cstats in sites.values())
        for cn, stats in columnstats.iteritems():
            columnstats[cn]['percent'] = int(math.ceil(
                columnstats[cn]['finishedc'] * 100. / columnstats[cn]['allc'])) if \
                columnstats[cn]['allc'] > 0 else 0

        ### transform the taskserrors dict to a list for sorting on the template
        for jeditaskid, taskEntry in taskserrors.iteritems():
            tasksErrorsList.append(taskEntry)
        tasksErrorsList = sorted(tasksErrorsList, key=lambda x: x['totalstats']['percent'])

    elif 'cloud' in grouping or view == 'queues':
        # all requests vs computing sites
        print '%s starting data aggregation' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        for errorEntry in errorsRaw:
            rid = errorEntry['REQID']
            if rid not in reqerrors:
                reqentry = {}
                reqerrors[rid] = reqentry
                reqerrors[rid]['columns'] = {}
                reqerrors[rid]['reqid'] = rid
                reqerrors[rid]['totalstats'] = {}
                reqerrors[rid]['totalstats']['greenc'] = 0
                reqerrors[rid]['totalstats']['yellowc'] = 0
                reqerrors[rid]['totalstats']['redc'] = 0
                reqerrors[rid]['tasks'] = {}
                for param in statsParams:
                    reqerrors[rid]['totalstats'][param] = 0
            if errorEntry['COMPUTINGSITE'] not in reqerrors[rid]['columns']:
                reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']] = {}
                for param in statsParams:
                    reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']][param] = 0
            if errorEntry['JEDITASKID'] not in reqerrors[rid]['tasks']:
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']] = {}
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] = 0
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] = 0
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] += errorEntry['FINISHEDC']
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            reqerrors[rid]['totalstats']['finishedc'] += reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['finishedc']
            reqerrors[rid]['totalstats']['failedc'] += reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['failedc']
            reqerrors[rid]['totalstats']['allc'] += reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['allc']

        for rid, reqentry in reqerrors.iteritems():
            reqerrors[rid]['totalstats']['percent'] = int(math.ceil(
                reqerrors[rid]['totalstats']['finishedc'] * 100. / reqerrors[rid]['totalstats']['allc'])) if \
                reqerrors[rid]['totalstats']['allc'] > 0 else 0
            reqerrors[rid]['totalstats']['minpercent'] = min(
                int(tstats['finishedc'] * 100. / tstats['allc']) for tstats in reqentry['tasks'].values())
            for tstats in reqentry['tasks'].values():
                srpct = int(tstats['finishedc'] * 100. / tstats['allc'])
                for color, srint in successrateIntervals.items():
                    reqerrors[rid]['totalstats'][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0

        print '%s starting cleaning of non-erroneous requests' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        # clean up requests that have no finished/failed jobs at all
        reqsToDel = []
        for rid, reqentry in reqerrors.iteritems():
            if reqentry['totalstats']['allc'] == 0:
                reqsToDel.append(rid)
        for reqToDel in reqsToDel:
            del reqerrors[reqToDel]

        print '%s starting calculation of row average stats' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        for rid, reqentry in reqerrors.iteritems():
            for sn, sv in reqentry['columns'].iteritems():
                computingSites.append(str(sn))
        computingSites = sorted(set(computingSites), key=lambda x: sitesDictForOrdering.get(x))
        for rid, reqentry in reqerrors.iteritems():
            for s in computingSites:
                if not s in reqentry['columns']:
                    reqentry['columns'][s] = {}
                    for param in statsParams:
                        reqentry['columns'][s][param] = 0
                else:
                    reqentry['columns'][s]['percent'] = int(math.ceil(
                        reqentry['columns'][s]['finishedc'] * 100. / reqentry['columns'][s]['allc'])) if \
                        reqentry['columns'][s]['allc'] > 0 else 0

        print '%s starting calculation of columns average stats' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        ### calculate stats for columns
        columnstats = {}
        for cn in computingSites:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0
        for rid, reqEntry in reqerrors.iteritems():
            for cn in computingSites:
                for cname, cEntry in reqEntry['columns'].iteritems():
                    if cn == cname:
                        columnstats[cn]['finishedc'] += cEntry['finishedc']
                        columnstats[cn]['failedc'] += cEntry['failedc']
                        columnstats[cn]['allc'] += cEntry['allc']
        for cn, stats in columnstats.iteritems():
            columnstats[cn]['percent'] = int(math.ceil(
                columnstats[cn]['finishedc'] * 100. / columnstats[cn]['allc'])) if \
                columnstats[cn]['allc'] > 0 else 0

        print '%s starting set unique cache for each request' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        ### Introducing a unique tk for each reqid
        for rid, reqentry in reqerrors.iteritems():
            if rid in taskListByReq and len(taskListByReq[rid]) > 0:
                tk = setCacheData(request, lifetime=60 * 20, jeditaskid=taskListByReq[rid][:-1])
                reqentry['tk'] = tk

        print '%s starting transform dict to list' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        ### transform the reqerrors dict to a list for sorting on the template
        for rid, reqEntry in reqerrors.iteritems():
            columnlist = []
            for columnname, stats in reqEntry['columns'].iteritems():
                stats['computingsite'] = columnname
                columnlist.append(stats)
            reqEntry['columns'] = sorted(columnlist, key=lambda x: sitesDictForOrdering.get(x['computingsite']))
        reqErrorsList = []
        for rid, reqEntry in reqerrors.iteritems():
            reqErrorsList.append(reqEntry)
        reqErrorsList = sorted(reqErrorsList, key=lambda x: x['totalstats']['percent'])

    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'cloud': cloud,
        'reqid': reqid,
        'grouping': grouping,
        'view': view,
        'computingSites': computingSites,
        'clouds': clouds,
        'columnstats': columnstats,
        'taskserrors': tasksErrorsList,
        'reqerrors': reqErrorsList,
        'scouts': 'exclude' if isExcludeScouts else 'include',
        'nrows': max(len(tasksErrorsList), len(reqErrorsList)),
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    print '%s starting rendering of the page' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    ## self monitor
    endSelfMonitor(request)
    setCacheEntry(request, "errorsScatteringDetailed", json.dumps(data, cls=DateEncoder), 60 * 20)
    response = render_to_response('errorsScatteringDetailed.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
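# --- sketch (assumed implementation) -------------------------------------------
# The raw-SQL views above convert cursor results with dictfetchall. The helper is
# defined elsewhere in the project; this sketch follows the standard recipe from
# the Django documentation and is an assumption about the actual implementation.
# With Oracle, cursor.description carries uppercase column names, which is why
# the views index rows like errorEntry['JEDITASKID'].
def dictfetchall(cursor):
    # map each fetched row onto the column names reported by the cursor
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]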
def tasksErrorsScattering(request):
    initRequest(request)
    limit = 100000
    hours = 4
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task',
                                                           wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid")

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"

    transactionKey = random.randrange(1000000)
    executionData = []
    for id in tasks:
        executionData.append((id['jeditaskid'], transactionKey))

    new_cur = connection.cursor()
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    connection.commit()

    query = """
        SELECT SUM(FAILEDC) / SUM(ALLC) as FPERC, COMPUTINGSITE, JEDITASKID, SUM(FAILEDC) as FAILEDC from (
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(1) as ALLC, COMPUTINGSITE, JEDITASKID
            FROM ATLAS_PANDA.JOBSARCHIVED4
            WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
            group by COMPUTINGSITE, JEDITASKID
            UNION
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(1) as ALLC, COMPUTINGSITE, JEDITASKID
            FROM ATLAS_PANDAARCH.JOBSARCHIVED
            WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
            group by COMPUTINGSITE, JEDITASKID
        )
        group by COMPUTINGSITE, JEDITASKID
        """ % (tmpTableName, transactionKey, tmpTableName, transactionKey)
    new_cur.execute(query)
    errorsRaw = dictfetchall(new_cur)
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    computingSites = []
    taskserrors = {}
    # build a per-task dict of "failed% (failed count)" labels per site
    for errorEntry in errorsRaw:
        jeditaskid = errorEntry['JEDITASKID']
        if jeditaskid not in taskserrors:
            taskentry = {}
            taskserrors[jeditaskid] = taskentry
        labelForLink = (str(int(errorEntry['FPERC'] * 100)) + "%" + " (" + str(int(errorEntry['FAILEDC'])) + ")") if \
            errorEntry['FPERC'] else " "
        taskserrors[jeditaskid][errorEntry['COMPUTINGSITE']] = labelForLink

    # clean up tasks that have no errors at any site
    tasksToDel = []
    for jeditaskid, taskentry in taskserrors.iteritems():
        notNone = False
        for sitename, siteval in taskentry.iteritems():
            if siteval != " ":
                notNone = True
        if not notNone:
            tasksToDel.append(jeditaskid)
    for taskToDel in tasksToDel:
        del taskserrors[taskToDel]

    for jeditaskid, taskentry in taskserrors.iteritems():
        for sitename, siteval in taskentry.iteritems():
            computingSites.append(sitename)
    computingSites = set(computingSites)

    # pad missing sites so every task row has the same columns
    for jeditaskid, taskentry in taskserrors.iteritems():
        for computingSite in computingSites:
            if not computingSite in taskentry:
                taskentry[computingSite] = ' '

    data = {
        'request': request,
        'computingSites': computingSites,
        'taskserrors': taskserrors,
    }
    response = render_to_response('tasksscatteringmatrix.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
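# --- illustration (not part of the view code) ----------------------------------
# tasksErrorsScattering, errorsScattering and errorsScatteringDetailed all stage
# their task IDs in a scratch table under a random TRANSACTIONKEY, join against
# "SELECT ID FROM <tmp> WHERE TRANSACTIONKEY=<tk>" in raw SQL, and delete the
# rows afterwards. The context manager below sketches that pattern as a reusable
# helper, assuming the same (ID, TRANSACTIONKEY) table layout; the helper name
# and form are illustrative only, the views above inline this logic.
from contextlib import contextmanager

@contextmanager
def temporary_id_list(ids, tmp_table="TMP_IDS1"):
    tk = random.randrange(1000000)
    cur = connection.cursor()
    cur.executemany("INSERT INTO " + tmp_table + "(ID,TRANSACTIONKEY) VALUES (%s, %s)",
                    [(i, tk) for i in ids])
    connection.commit()
    try:
        yield tk
    finally:
        # clean up the staged IDs even if the main query fails
        cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmp_table, tk))

# usage sketch:
#   with temporary_id_list([t['jeditaskid'] for t in tasks]) as tk:
#       new_cur.execute(querystr_referencing(tk))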
def errorsScattering(request):
    initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "errorsScattering")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('errorsScattering.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    limit = 100000
    if 'hours' in request.session['requestParams']:
        try:
            hours = int(request.session['requestParams']['hours'])
        except:
            hours = 8
    else:
        hours = 8

    isExcludeScouts = False
    if 'scouts' in request.session['requestParams']:
        if request.session['requestParams']['scouts'] == 'exclude':
            isExcludeScouts = True
        try:
            del request.session['requestParams']['scouts']
        except:
            pass

    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task',
                                                           wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    # exclude paused tasks
    wildCardExtension += ' AND STATUS != \'paused\''
    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid", "reqid")
    # print ('tasks found %i') % len(tasks)

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"

    taskListByReq = {}
    transactionKey = random.randrange(1000000)
    executionData = []
    for id in tasks:
        executionData.append((id['jeditaskid'], transactionKey))
        # fill the list of jeditaskids for each reqid to put into cache for consistency with jobList
        if id['reqid'] not in taskListByReq:
            taskListByReq[id['reqid']] = ''
        taskListByReq[id['reqid']] += str(id['jeditaskid']) + ','

    new_cur = connection.cursor()
    ins_query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(ins_query, executionData)
    connection.commit()

    jcondition = '(1=1)'
    if isExcludeScouts:
        jcondition = """specialhandling NOT LIKE '%%sj'"""

    querystr = """
        SELECT j.FINISHEDC, j.REQID, j.FAILEDC, sc.cloud as CLOUD, j.jeditaskid, j.COMPUTINGSITE from (
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC,
                SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,
                COMPUTINGSITE, REQID, JEDITASKID
            FROM ATLAS_PANDA.JOBSARCHIVED4
            WHERE JEDITASKID != REQID AND JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
                AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
            group by COMPUTINGSITE, REQID, JEDITASKID
            UNION
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC,
                SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC,
                SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,
                COMPUTINGSITE, REQID, JEDITASKID
            FROM ATLAS_PANDAARCH.JOBSARCHIVED
            WHERE JEDITASKID != REQID AND JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i)
                AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
            group by COMPUTINGSITE, REQID, JEDITASKID
        ) j, (
            select siteid, cloud from ATLAS_PANDAMETA.SCHEDCONFIG
        ) sc
        where j.computingsite = sc.siteid and j.ALLC > 0
        """ % (tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition,
               tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition)
    new_cur.execute(querystr)
    errorsRaw = dictfetchall(new_cur)
    # new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    homeCloud = {}
    sflist = ('siteid', 'site', 'status', 'cloud', 'tier', 'comment_field', 'objectstore', 'catchall', 'corepower')
    sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
    for site in sites:
        homeCloud[site['siteid']] = site['cloud']
    clouds = sorted(list(set(homeCloud.values())))

    reqerrors = {}
    clouderrors = {}
    successrateIntervals = {'green': [80, 100], 'yellow': [50, 79], 'red': [0, 49]}

    # we fill here the dict
    for errorEntry in errorsRaw:
        rid = errorEntry['REQID']
        if rid not in reqerrors:
            reqentry = {}
            reqerrors[rid] = reqentry
            reqerrors[rid]['reqid'] = rid
            reqerrors[rid]['totalstats'] = {}
            reqerrors[rid]['totalstats']['percent'] = 0
            reqerrors[rid]['totalstats']['minpercent'] = 100
            reqerrors[rid]['totalstats']['finishedc'] = 0
            reqerrors[rid]['totalstats']['failedc'] = 0
            reqerrors[rid]['totalstats']['allc'] = 0
            reqerrors[rid]['totalstats']['greenc'] = 0
            reqerrors[rid]['totalstats']['yellowc'] = 0
            reqerrors[rid]['totalstats']['redc'] = 0
            reqerrors[rid]['tasks'] = {}
            for cloudname in clouds:
                reqerrors[rid][cloudname] = {}
                reqerrors[rid][cloudname]['percent'] = 0
                reqerrors[rid][cloudname]['finishedc'] = 0
                reqerrors[rid][cloudname]['failedc'] = 0
                reqerrors[rid][cloudname]['allc'] = 0
        if errorEntry['JEDITASKID'] not in reqerrors[rid]['tasks']:
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']] = {}
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] = 0
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] = 0
        reqerrors[rid][errorEntry['CLOUD']]['finishedc'] += errorEntry['FINISHEDC']
        reqerrors[rid][errorEntry['CLOUD']]['failedc'] += errorEntry['FAILEDC']
        reqerrors[rid][errorEntry['CLOUD']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
        reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] += errorEntry['FINISHEDC']
        reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
        reqerrors[rid]['totalstats']['finishedc'] += reqerrors[rid][errorEntry['CLOUD']]['finishedc']
        reqerrors[rid]['totalstats']['failedc'] += reqerrors[rid][errorEntry['CLOUD']]['failedc']
        reqerrors[rid]['totalstats']['allc'] += reqerrors[rid][errorEntry['CLOUD']]['allc']
        if errorEntry['CLOUD'] not in clouderrors:
            clouderrors[errorEntry['CLOUD']] = {}
        if errorEntry['COMPUTINGSITE'] not in clouderrors[errorEntry['CLOUD']]:
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']] = {}
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] = 0
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] = 0
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] = 0
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] += (errorEntry['FINISHEDC'] + errorEntry['FAILEDC'])

    for rid, reqentry in reqerrors.iteritems():
        reqerrors[rid]['totalstats']['percent'] = int(math.ceil(
            reqerrors[rid]['totalstats']['finishedc'] * 100. / reqerrors[rid]['totalstats']['allc'])) if \
            reqerrors[rid]['totalstats']['allc'] > 0 else 0
        reqerrors[rid]['totalstats']['minpercent'] = min(
            int(tstats['finishedc'] * 100. / tstats['allc']) for tstats in reqentry['tasks'].values())
        for tstats in reqentry['tasks'].values():
            srpct = int(tstats['finishedc'] * 100. / tstats['allc'])
            for color, srint in successrateIntervals.items():
                reqerrors[rid]['totalstats'][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
        for cloudname, stats in reqentry.iteritems():
            if cloudname not in ('reqid', 'totalstats', 'tasks'):
                reqerrors[rid][cloudname]['percent'] = int(
                    stats['finishedc'] * 100. / stats['allc']) if stats['allc'] > 0 else -1

    # clean up requests that either have no finished/failed jobs or are fully successful
    reqsToDel = []
    for rid, reqentry in reqerrors.iteritems():
        notNone = False
        if reqentry['totalstats']['allc'] != 0 and reqentry['totalstats']['allc'] != reqentry['totalstats']['finishedc']:
            notNone = True
        # for cname, cval in reqentry.iteritems():
        #     if cval['allc'] != 0:
        #         notNone = True
        if not notNone:
            reqsToDel.append(rid)
    for reqToDel in reqsToDel:
        del reqerrors[reqToDel]

    ### calculate stats for clouds
    columnstats = {}
    for cn in clouds:
        cns = str(cn)
        columnstats[cns] = {}
        columnstats[cns]['percent'] = 0
        columnstats[cns]['finishedc'] = 0
        columnstats[cns]['failedc'] = 0
        columnstats[cns]['allc'] = 0
        columnstats[cns]['minpercent'] = 100
        for color, srint in successrateIntervals.items():
            columnstats[cns][color + 'c'] = 0
    for cloudname, sites in clouderrors.iteritems():
        for sitename, sstats in sites.iteritems():
            columnstats[cloudname]['finishedc'] += sstats['finishedc']
            columnstats[cloudname]['failedc'] += sstats['failedc']
            columnstats[cloudname]['allc'] += sstats['allc']
            srpct = int(sstats['finishedc'] * 100. / sstats['allc'])
            for color, srint in successrateIntervals.items():
                columnstats[cloudname][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
        columnstats[cloudname]['minpercent'] = min(
            int(cstats['finishedc'] * 100. / cstats['allc']) for cstats in sites.values())
    for cn, stats in columnstats.iteritems():
        columnstats[cn]['percent'] = int(math.ceil(
            columnstats[cn]['finishedc'] * 100. / columnstats[cn]['allc'])) if columnstats[cn]['allc'] > 0 else 0

    ### Introducing a unique tk for each reqid
    for rid, reqentry in reqerrors.iteritems():
        if rid in taskListByReq and len(taskListByReq[rid]) > 0:
            tk = setCacheData(request, lifetime=60 * 20, jeditaskid=taskListByReq[rid][:-1])
            reqentry['tk'] = tk

    ### transform the reqerrors dict to a list for sorting on the template
    reqErrorsList = []
    for rid, reqEntry in reqerrors.iteritems():
        reqErrorsList.append(reqEntry)
    reqErrorsList = sorted(reqErrorsList, key=lambda x: x['totalstats']['percent'])

    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'clouds': clouds,
        'columnstats': columnstats,
        'reqerrors': reqErrorsList,
        'scouts': 'exclude' if isExcludeScouts else 'include',
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    ## self monitor
    endSelfMonitor(request)
    setCacheEntry(request, "errorsScattering", json.dumps(data, cls=DateEncoder), 60 * 20)
    response = render_to_response('errorsScattering.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
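# --- illustration (not part of the view code) ----------------------------------
# The scattering views bucket success rates into the traffic-light intervals of
# successrateIntervals via the inline "+= 1 if lo <= pct <= hi else 0" idiom.
# Factored out, the classification reads as below; the function name is
# illustrative and the interval table is assumed to stay as defined above.
def successrate_color(finishedc, allc,
                      intervals={'green': [80, 100], 'yellow': [50, 79], 'red': [0, 49]}):
    if allc == 0:
        return None  # the views drop entries with no finished/failed jobs
    srpct = int(finishedc * 100. / allc)
    for color, srint in intervals.items():
        if srint[0] <= srpct <= srint[1]:
            return color
    return None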
def runningMCProdTasks(request):
    # redirect to the united runningProdTasks page
    return redirect('/runningprodtasks/?preset=MC')

    # NOTE: everything below is unreachable because of the redirect above;
    # it is the original standalone implementation, kept for reference.
    valid, response = initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "runningMCProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningMCProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')

    tquery, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=0, limit=9999999, querytype='task',
                                                            wildCardExt=True)
    tasks = RunningMCProductionTasks.objects.filter(**tquery).extra(where=[wildCardExtension]).values()
    ntasks = len(tasks)

    slots = 0
    ages = []
    neventsAFIItasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}
    neventsFStasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}
    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] > 0 else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = (datetime.now() - task['creationdate']).days
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        task['datasetname'] = task['taskname'].split('.')[1]
        # the last tag of the task name separates fast simulation ('a' tag) from full simulation
        rtag = task['taskname'].split("_")[-1]
        if "." in rtag:
            rtag = rtag.split(".")[-1]
        if 'a' in rtag:
            task['simtype'] = 'AFII'
            neventsAFIItasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0
        else:
            task['simtype'] = 'FS'
            neventsFStasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0

    plotageshistogram = 1
    if sum(ages) == 0:
        plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status', 'processingtype', 'simtype'])

    # map each sortable column to the task field it sorts on;
    # unrecognized values fall back to age ascending
    sortFields = {
        'campaign': 'campaign', 'reqid': 'reqid', 'jeditaskid': 'jeditaskid', 'rjobs': 'rjobs',
        'status': 'status', 'processingtype': 'processingtype', 'nevents': 'totev',
        'neventsused': 'neventsused', 'neventstobeused': 'totevrem', 'percentage': 'percentage',
        'nfilesfailed': 'nfilesfailed', 'priority': 'currentpriority', 'simtype': 'simtype',
        'age': 'age', 'corecount': 'corecount', 'username': 'username', 'datasetname': 'datasetname',
    }
    sortby = request.session['requestParams'].get('sortby', 'age-asc')
    field, _, direction = sortby.partition('-')
    if field not in sortFields or direction not in ('asc', 'desc'):
        sortby, field, direction = 'age-asc', 'age', 'asc'
    tasks = sorted(tasks, key=lambda x: x[sortFields[field]], reverse=(direction == 'desc'))

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        dump = json.dumps(tasks, cls=DateEncoder)
        ## self monitor
        endSelfMonitor(request)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'neventsAFIItasksSum': neventsAFIItasksSum,
            'neventsFStasksSum': neventsFStasksSum,
            'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ## self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "runningMCProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('runningMCProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
def runningProdTasks(request): valid, response = initRequest(request) if ('dt' in request.session['requestParams'] and 'tk' in request.session['requestParams']): tk = request.session['requestParams']['tk'] data = getCacheEntry(request, tk, isData=True) return HttpResponse(data, content_type='text/html') # Here we try to get cached data data = getCacheEntry(request, "runningProdTasks") # data = None if data is not None: data = json.loads(data) data['request'] = request if 'ages' in data: data['ages'] = preparePlotData(data['ages']) if 'neventsFStasksSum' in data: data['neventsFStasksSum'] = preparePlotData(data['neventsFStasksSum']) if 'neventsAFIItasksSum' in data: data['neventsAFIItasksSum'] = preparePlotData(data['neventsAFIItasksSum']) if 'neventsByProcessingType' in data: data['neventsByProcessingType'] = preparePlotData(data['neventsByProcessingType']) if 'aslotsByType' in data: data['aslotsByType'] = preparePlotData(data['aslotsByType']) if 'neventsByTaskStatus' in data: data['neventsByTaskStatus'] = preparePlotData(data['neventsByTaskStatus']) if 'neventsByTaskPriority' in data: data['neventsByTaskPriority'] = preparePlotData(data['neventsByTaskPriority']) if 'neventsByStatus' in data: data['neventsByStatus'] = preparePlotData(data['neventsByStatus']) response = render_to_response('runningProdTasks.html', data, content_type='text/html') patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60) endSelfMonitor(request) return response # xurl = extensibleURL(request) xurl = request.get_full_path() if xurl.find('?') > 0: xurl += '&' else: xurl += '?' nosorturl = removeParam(xurl, 'sortby', mode='extensible') nohashtagurl = removeParam(xurl, 'hashtags', mode='extensible') exquery = {} productiontype = '' if 'preset' in request.session['requestParams']: if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'MC': productiontype = 'MC' if 'workinggroup' not in request.session['requestParams']: request.session['requestParams']['workinggroup'] = '!AP_REPR,!AP_VALI,!GP_PHYS,!GP_THLT' if 'processingtype' not in request.session['requestParams']: request.session['requestParams']['processingtype'] = 'evgen|pile|simul|recon' if 'campaign' not in request.session['requestParams']: request.session['requestParams']['campaign'] = 'mc*' if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'DPD': productiontype = 'DPD' if 'workinggroup' not in request.session['requestParams']: request.session['requestParams']['workinggroup'] = 'GP_*' if 'processingtype' not in request.session['requestParams']: request.session['requestParams']['processingtype'] = 'merge|deriv' if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'DATA': productiontype = 'DATA' if 'workinggroup' not in request.session['requestParams']: request.session['requestParams']['workinggroup'] = 'AP_REPR' if 'processingtype' not in request.session['requestParams']: request.session['requestParams']['processingtype'] = 'reprocessing' tquery, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=0, limit=9999999, querytype='task', wildCardExt=True) if 'workinggroup' in tquery and 'preset' in request.session['requestParams'] and request.session['requestParams']['preset'] == 'MC' and ',' in tquery['workinggroup']: # excludeWGList = list(str(wg[1:]) for wg in request.session['requestParams']['workinggroup'].split(',')) # exquery['workinggroup__in'] = excludeWGList try: 
del tquery['workinggroup'] except: pass if 'status' in request.session['requestParams'] and request.session['requestParams']['status'] == '': try: del tquery['status'] except: pass if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] == 'hpc': try: del tquery['site'] except: pass exquery['site__isnull'] = True if 'hashtags' in request.session['requestParams']: wildCardExtension += ' AND (' wildCards = request.session['requestParams']['hashtags'].split(',') currentCardCount = 1 countCards = len(wildCards) for card in wildCards: if '*' not in card: card = '*' + card + '*' elif card.startswith('*'): card = card + '*' elif card.endswith('*'): card = '*' + card wildCardExtension += preprocessWildCardString(card, 'hashtags') if (currentCardCount < countCards): wildCardExtension += ' AND ' currentCardCount += 1 wildCardExtension += ')' if 'sortby' in request.session['requestParams'] and '-' in request.session['requestParams']['sortby'] : sortby = request.session['requestParams']['sortby'] else: sortby = 'creationdate-desc' oquery = '-' + sortby.split('-')[0] if sortby.split('-')[1].startswith('d') else sortby.split('-')[0] # if "((UPPER(status) LIKE UPPER('all')))" in wildCardExtension and tquery['eventservice'] == 1: if 'eventservice' in tquery and tquery['eventservice'] == 1 and 'days' in request.session['requestParams']: setupView(request) if 'status__in' in tquery: del tquery['status__in'] excludedTimeQuery = copy.deepcopy(tquery) if ('days' in request.GET) and (request.GET['days']): days = int(request.GET['days']) hours = 24 * days startdate = timezone.now() - timedelta(hours=hours) startdate = startdate.strftime(defaultDatetimeFormat) enddate = timezone.now().strftime(defaultDatetimeFormat) tquery['modificationtime__range'] = [startdate, enddate] if "((UPPER(status) LIKE UPPER('all')))" in wildCardExtension: wildCardExtension = wildCardExtension.replace("((UPPER(status) LIKE UPPER('all')))", "(1=1)") tasks = [] tasks.extend(RunningProdTasksModel.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension]).exclude( **exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery)[:]) tasks.extend(FrozenProdTasksModel.objects.filter(**tquery).extra(where=[wildCardExtension]).exclude( **exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery)[:]) else: tasks = RunningProdTasksModel.objects.filter(**tquery).extra(where=[wildCardExtension]).exclude(**exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery) qtime = datetime.now() task_list = [t for t in tasks] ntasks = len(tasks) slots = 0 aslots = 0 ages = [] neventsAFIItasksSum = {} neventsFStasksSum = {} neventsByProcessingType = {} neventsByTaskStatus = {} neventsByTaskPriority = {} aslotsByType = {} neventsTotSum = 0 neventsUsedTotSum = 0 neventsToBeUsedTotSum = 0 neventsRunningTotSum = 0 rjobs1coreTot = 0 rjobs8coreTot = 0 for task in task_list: task['rjobs'] = 0 if task['rjobs'] is None else task['rjobs'] task['percentage'] = round(100 * task['percentage'],1) neventsTotSum += task['nevents'] if task['nevents'] is not None else 0 neventsUsedTotSum += task['neventsused'] if 'neventsused' in task and task['neventsused'] is not None else 0 neventsToBeUsedTotSum += task['neventstobeused'] if 'neventstobeused' in task and task['neventstobeused'] is not None else 0 neventsRunningTotSum += task['neventsrunning'] if 'neventsrunning' in task and task['neventsrunning'] is not None else 0 slots 
    slots = 0
    aslots = 0
    ages = []
    neventsAFIItasksSum = {}
    neventsFStasksSum = {}
    neventsByProcessingType = {}
    neventsByTaskStatus = {}
    neventsByTaskPriority = {}
    aslotsByType = {}
    neventsTotSum = 0
    neventsUsedTotSum = 0
    neventsToBeUsedTotSum = 0
    neventsRunningTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    # Per-task pass: accumulate totals, breakdowns by type/status/priority,
    # and derive the display fields used by the template
    for task in task_list:
        task['rjobs'] = 0 if task['rjobs'] is None else task['rjobs']
        task['percentage'] = round(100 * task['percentage'], 1)
        neventsTotSum += task['nevents'] if task['nevents'] is not None else 0
        neventsUsedTotSum += task['neventsused'] if 'neventsused' in task and task['neventsused'] is not None else 0
        neventsToBeUsedTotSum += task['neventstobeused'] if 'neventstobeused' in task and task['neventstobeused'] is not None else 0
        neventsRunningTotSum += task['neventsrunning'] if 'neventsrunning' in task and task['neventsrunning'] is not None else 0
        slots += task['slots'] if task['slots'] else 0
        aslots += task['aslots'] if task['aslots'] else 0
        if str(task['processingtype']) not in aslotsByType:
            aslotsByType[str(task['processingtype'])] = 0
        aslotsByType[str(task['processingtype'])] += task['aslots'] if task['aslots'] else 0
        if str(task['status']) not in neventsByTaskStatus:
            neventsByTaskStatus[str(task['status'])] = 0
        neventsByTaskStatus[str(task['status'])] += task['nevents'] if task['nevents'] is not None else 0
        if task['priority'] not in neventsByTaskPriority:
            neventsByTaskPriority[task['priority']] = 0
        neventsByTaskPriority[task['priority']] += task['nevents'] if task['nevents'] is not None else 0
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = round((datetime.now() - task['creationdate']).days
                            + (datetime.now() - task['creationdate']).seconds / 3600. / 24, 1)
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        if 'reqid' in task and 'jeditaskid' in task and task['reqid'] == task['jeditaskid']:
            task['reqid'] = None
        if 'runnumber' in task:
            task['inputdataset'] = task['runnumber']
        else:
            task['inputdataset'] = None
        if task['inputdataset'] and task['inputdataset'].startswith('00'):
            task['inputdataset'] = task['inputdataset'][2:]
        task['outputtypes'] = ''
        if 'outputdatasettype' in task:
            outputtypes = task['outputdatasettype'].split(',')
        else:
            outputtypes = []
        for outputtype in outputtypes:
            task['outputtypes'] += outputtype.split('_')[1] + ' ' if '_' in outputtype else ''
        # For MC production, split event counts by simulation type (AFII vs FS)
        if productiontype == 'MC':
            if task['simtype'] == 'AFII':
                if str(task['processingtype']) not in neventsAFIItasksSum:
                    neventsAFIItasksSum[str(task['processingtype'])] = 0
                neventsAFIItasksSum[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
            elif task['simtype'] == 'FS':
                if str(task['processingtype']) not in neventsFStasksSum:
                    neventsFStasksSum[str(task['processingtype'])] = 0
                neventsFStasksSum[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
        else:
            if str(task['processingtype']) not in neventsByProcessingType:
                neventsByProcessingType[str(task['processingtype'])] = 0
            neventsByProcessingType[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
        if 'hashtags' in task and len(task['hashtags']) > 1:
            task['hashtaglist'] = task['hashtags'].split(',')

    neventsByStatus = {
        'done': neventsUsedTotSum,
        'running': neventsRunningTotSum,
        'waiting': neventsToBeUsedTotSum - neventsRunningTotSum,
    }
    plotageshistogram = 0 if sum(ages) == 0 else 1
    sumd = taskSummaryDict(request, task_list, ['status', 'workinggroup', 'cutcampaign', 'processingtype'])

    # Put the list of tasks into the cache separately for the dataTables plugin
    transactionKey = random.randrange(100000000)
    setCacheEntry(request, transactionKey, json.dumps(task_list, cls=DateEncoder), 60 * 30, isData=True)
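    # The cached copy above lets a follow-up request (e.g. the dataTables data
    # endpoint) fetch exactly the same task snapshot by sending the transaction
    # key back. A hypothetical reader-side sketch, assuming getCacheEntry
    # mirrors the isData flag of setCacheEntry (the 'tk' parameter name is an
    # illustration only, not part of this view):
    #
    #   tk = request.session['requestParams']['tk']
    #   task_list = json.loads(getCacheEntry(request, tk, isData=True))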
    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
            'json' in request.session['requestParams']):
        # self monitor
        endSelfMonitor(request)
        if 'snap' in request.session['requestParams']:
            snapdata = prepareNeventsByProcessingType(task_list)
            if saveNeventsByProcessingType(snapdata, qtime):
                data = {'message': 'success'}
            else:
                data = {'message': 'fail'}
            dump = json.dumps(data, cls=DateEncoder)
            return HttpResponse(dump, content_type='text/html')
        dump = json.dumps(task_list, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'nohashtagurl': nohashtagurl,
            'tasks': task_list,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'aslots': aslots,
            'aslotsByType': aslotsByType,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'neventsWaitingTotSum': round((neventsToBeUsedTotSum - neventsRunningTotSum) / 1000000., 1),
            'neventsRunningTotSum': round(neventsRunningTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'neventsAFIItasksSum': neventsAFIItasksSum,
            'neventsFStasksSum': neventsFStasksSum,
            'neventsByProcessingType': neventsByProcessingType,
            'neventsByTaskStatus': neventsByTaskStatus,
            'neventsByTaskPriority': neventsByTaskPriority,
            'neventsByStatus': neventsByStatus,
            'plotageshistogram': plotageshistogram,
            'productiontype': json.dumps(productiontype),
            'built': datetime.now().strftime("%H:%M:%S"),
            'transKey': transactionKey,
            'qtime': qtime,
        }
        # self monitor
        endSelfMonitor(request)
        response = render_to_response('runningProdTasks.html', data, content_type='text/html')
        setCacheEntry(request, "runningProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
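# json.dumps(..., cls=DateEncoder) is used throughout this view because the
# task rows carry datetime columns that the stock JSONEncoder rejects. The
# real DateEncoder is imported elsewhere in this codebase; a minimal sketch of
# such an encoder (the class name below is a hypothetical stand-in, and the
# output format is an assumption) would be:
#
#   class DateEncoderSketch(json.JSONEncoder):
#       def default(self, obj):
#           # Render datetimes as strings; defer everything else to the base class
#           if isinstance(obj, datetime):
#               return obj.isoformat()
#           return json.JSONEncoder.default(self, obj)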