Example #1
def art(request):
    valid, response = initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "artMain")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('artMainPage.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    tquery = {}
    tquery['platform__endswith'] = 'opt'
    packages = ARTTests.objects.filter(**tquery).values('package').distinct().order_by('package')
    branches = ARTTests.objects.filter(**tquery).values('nightly_release_short', 'platform','project').annotate(branch=Concat('nightly_release_short', V('/'), 'project', V('/'), 'platform')).values('branch').distinct().order_by('-branch')
    ntags = ARTTests.objects.values('nightly_tag').annotate(nightly_tag_date=Substr('nightly_tag', 1, 10)).values('nightly_tag_date').distinct().order_by('-nightly_tag_date')[:5]


    data = {
        'viewParams': request.session['viewParams'],
        'packages':[p['package'] for p in packages],
        'branches':[b['branch'] for b in branches],
        'ntags':[t['nightly_tag_date'] for t in ntags]
    }
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
                'json' not in request.session['requestParams'])):
        response = render_to_response('artMainPage.html', data, content_type='text/html')
    else:
        response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
    # cache for 20 minutes (assumed lifetime; no cache_timeout is defined in this view)
    setCacheEntry(request, "artMain", json.dumps(data, cls=DateEncoder), 60 * 20)
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
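
Note on the HTTP_ACCEPT check above: a one-element tuple needs a trailing comma, i.e. ('application/json',). Without it the parentheses are just grouping, and `in` degenerates to a substring test against the string itself:

# Without the trailing comma the right-hand side is a plain string,
# so `in` performs a substring search rather than a membership test.
assert ('json' in ('application/json')) is True     # substring match (surprising)
assert ('json' in ('application/json',)) is False   # tuple membership (intended)
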
Example #2
def getPlots(request):
    valid, response = initRequest(request)
    if not valid:
        return response

    if 'pandaid' not in request.session['requestParams']:
        data = {
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            "errormessage": "No pandaid provided!",
        }
        return render_to_response('errorPage.html', data, content_type='text/html')
    else:
        pandaid = request.session['requestParams']['pandaid']
        try:
            pandaid = int(pandaid)
        except (ValueError, TypeError):
            data = {
                'viewParams': request.session['viewParams'],
                'requestParams': request.session['requestParams'],
                "errormessage": "Illegal value {} for pandaid provided! Check the URL please!".format(pandaid),
            }
            return render_to_response('errorPage.html', data, content_type='text/html')

    return collectData(pandaid)
Example #4
def monitorJson(request):
    notcachedRemoteAddress = ['188.184.185.129', '188.185.80.72','188.185.165.248']
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        ip = x_forwarded_for.split(',')[0]
    else:
        ip = request.META.get('REMOTE_ADDR')
    if ip in notcachedRemoteAddress:
        valid, response = initRequest(request)
        test = False
        if 'test' in request.GET:
            test = True
        totalSessionCount = 0
        totalActiveSessionCount = 0
        sesslist = ('num_active_sess','num_sess','machine','program')
        sessions = AtlasDBA.objects.filter().values(*sesslist)
        for session in sessions:
            totalSessionCount += session['num_sess']
            totalActiveSessionCount += session['num_active_sess']
        if totalSessionCount>=50 or test:
            logger = logging.getLogger('bigpandamon-error')
            message = 'Internal Server Error: Attention!!! Total session count: ' + str(totalSessionCount) + ', total active session count: ' + str(totalActiveSessionCount)
            logger.error(message)
        data = list(sessions)
    #url = "https://atlas-service-dbmonitor.web.cern.ch/atlas-service-dbmonitor/dashboard/show_sessions.php?user=ATLAS_PANDABIGMON_R&db=ADCR"
    #page = urllib2.urlopen(url)
    #from bs4 import BeautifulSoup
    #soup = BeautifulSoup(page)
    #all_tables = soup.find_all('table')
        response = HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
        return response
    return HttpResponse(json.dumps({'message':'Forbidden!'}), content_type='text/html')
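
The client-IP extraction at the top of monitorJson is a common Django pattern. Factored into a helper it reads as below (a sketch; the name get_client_ip is ours, and X-Forwarded-For is only trustworthy when a proxy you control sets it):

def get_client_ip(request):
    # First hop of X-Forwarded-For when behind a trusted proxy,
    # otherwise the direct peer address
    x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR')
    if x_forwarded_for:
        return x_forwarded_for.split(',')[0].strip()
    return request.META.get('REMOTE_ADDR')
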
Example #5
def harvesterWorkList(request):
    valid, response = initRequest(request)
    query, extra, LAST_N_HOURS_MAX = setupView(request,
                                               hours=24 * 3,
                                               wildCardExt=True)

    statusDefined = False
    if 'status__in' in query:
        statusDefined = True

    tquery = {}

    if statusDefined:
        tquery['status__in'] = list(
            set(query['status__in']).intersection([
                'missed', 'submitted', 'idle', 'finished', 'failed',
                'cancelled'
            ]))
    else:
        tquery['status__in'] = [
            'missed', 'submitted', 'idle', 'finished', 'failed', 'cancelled'
        ]

    tquery['lastupdate__range'] = query['modificationtime__range']

    workerslist = []
    if len(tquery['status__in']) > 0:
        workerslist.extend(
            HarvesterWorkers.objects.values(
                'computingsite', 'status', 'submittime', 'harvesterid',
                'workerid').filter(**tquery).extra(where=[extra]))

    if statusDefined:
        tquery['status__in'] = list(
            set(query['status__in']).intersection(['ready', 'running']))

    del tquery['lastupdate__range']
    if len(tquery['status__in']) > 0:
        workerslist.extend(
            HarvesterWorkers.objects.values(
                'computingsite', 'status', 'submittime', 'harvesterid',
                'workerid').filter(**tquery).extra(where=[extra]))

    data = {
        'workerslist': workerslist,
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    endSelfMonitor(request)
    response = render_to_response('harvworkerslist.html',
                                  data,
                                  content_type='text/html')
    return response
Example #6
def harvesterWorkersDash(request):
    valid, response = initRequest(request)

    hours = 24 * 3
    if 'days' in request.session['requestParams']:
        days = int(request.session['requestParams']['days'])
        hours = days * 24
    query = setupView(request, hours=hours, wildCardExt=False)

    tquery = {}
    tquery['status__in'] = [
        'missed', 'submitted', 'idle', 'finished', 'failed', 'cancelled'
    ]
    tquery['lastupdate__range'] = query['modificationtime__range']
    if 'harvesterid__in' in query:
        tquery['harvesterid__in'] = query['harvesterid__in']

    harvesterWorkers = []
    harvesterWorkers.extend(
        HarvesterWorkers.objects.values('computingsite', 'status').filter(
            **tquery).annotate(Count('status')).order_by('computingsite'))

    # Exclude intermediate states from the time window
    tquery['status__in'] = ['ready', 'running']
    del tquery['lastupdate__range']
    harvesterWorkers.extend(
        HarvesterWorkers.objects.values('computingsite', 'status').filter(
            **tquery).annotate(Count('status')).order_by('computingsite'))

    statusesSummary = OrderedDict()
    for harvesterWorker in harvesterWorkers:
        if not harvesterWorker['computingsite'] in statusesSummary:
            statusesSummary[harvesterWorker['computingsite']] = OrderedDict()
            for harwWorkStatus in harvWorkStatuses:
                statusesSummary[
                    harvesterWorker['computingsite']][harwWorkStatus] = 0
        statusesSummary[harvesterWorker['computingsite']][
            harvesterWorker['status']] = harvesterWorker['status__count']

    # SELECT computingsite,status, workerid, LASTUPDATE, row_number() over (partition by workerid, computingsite ORDER BY LASTUPDATE ASC) partid FROM ATLAS_PANDA.HARVESTER_WORKERS /*GROUP BY WORKERID ORDER BY COUNT(WORKERID) DESC*/

    data = {
        'statusesSummary': statusesSummary,
        'harvWorkStatuses': harvWorkStatuses,
        'request': request,
        'hours': hours,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    endSelfMonitor(request)
    response = render_to_response('harvworksummarydash.html',
                                  data,
                                  content_type='text/html')
    return response
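
The summary loop above pre-seeds every known worker status with 0 so the template can render a dense per-site table. The same bookkeeping as a standalone sketch (the helper name summarize_workers is ours):

from collections import OrderedDict

def summarize_workers(workers, statuses):
    # Per-site status counts, dense over the full status list so
    # missing statuses appear as 0 instead of being absent
    summary = OrderedDict()
    for w in workers:
        site = summary.setdefault(w['computingsite'],
                                  OrderedDict((s, 0) for s in statuses))
        site[w['status']] = w['status__count']
    return summary
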
Example #7
def harvesterWorkerInfo(request):
    valid, response = initRequest(request)
    harvesterid = None
    workerid = None
    workerinfo = {}

    if 'harvesterid' in request.session['requestParams']:
        harvesterid = escapeInput(
            request.session['requestParams']['harvesterid'])
    if 'workerid' in request.session['requestParams']:
        workerid = int(request.session['requestParams']['workerid'])

    workerslist = []
    error = None
    if harvesterid and workerid:
        tquery = {}
        tquery['harvesterid'] = harvesterid
        tquery['workerid'] = workerid
        workerslist.extend(
            HarvesterWorkers.objects.filter(**tquery).values(
                'harvesterid', 'workerid', 'lastupdate', 'status', 'batchid',
                'nodeid', 'queuename', 'computingsite', 'submittime',
                'starttime', 'endtime', 'ncore', 'errorcode', 'stdout',
                'stderr', 'batchlog'))

        if len(workerslist) > 0:
            corrJobs = []
            corrJobs.extend(
                HarvesterRelJobsWorkers.objects.filter(
                    **tquery).values('pandaid'))
            workerinfo = workerslist[0]
            workerinfo['corrJobs'] = []
            for corrJob in corrJobs:
                workerinfo['corrJobs'].append(corrJob['pandaid'])
        else:
            workerinfo = None
    else:
        error = "Harvesterid + Workerid is not specified"

    data = {
        'request': request,
        'error': error,
        'workerinfo': workerinfo,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'built': datetime.now().strftime("%H:%M:%S"),
    }

    endSelfMonitor(request)
    response = render_to_response('harvworkerinfo.html',
                                  data,
                                  content_type='text/html')
    return response
Example #8
def adMain(request):
    valid, response = initRequest(request)
    if not valid: return response

    valid, response = login(request)
    if not valid: return response

    data = {
        'request': request,
        'user': request.session['username'],
        'url': request.path,
    }

    return render_to_response('adMain.html', data, RequestContext(request))
Example #10
def clearComparison(request):
    valid, response = initRequest(request)
    if not valid: return response

    # NB: 'object' shadows the builtin; the name is kept to match the request API
    object = None
    if 'object' in request.session['requestParams']:
        object = request.session['requestParams']['object']

    result = None
    if request.user.is_authenticated():
        userid = request.user.id
        result = clear_comparison_list(object, userid)

    data = {'result': result}
    dump = json.dumps(data, cls=DateEncoder)
    ##self monitor
    endSelfMonitor(request)
    return HttpResponse(dump, content_type='text/html')
Example #12
def deleteFromComparison(request):
    valid, response = initRequest(request)
    if not valid: return response

    object = None
    if 'object' in request.session['requestParams']:
        object = request.session['requestParams']['object']
    value = None
    if 'value' in request.session['requestParams']:
        value = request.session['requestParams']['value']

    newList = []
    if request.user.is_authenticated():
        userid = request.user.id
        newList = delete_from_comparison(object, userid, value)

    data = {'newList': newList}
    dump = json.dumps(data, cls=DateEncoder)
    ##self monitor
    endSelfMonitor(request)
    return HttpResponse(dump, content_type='text/html')
Example #14
def getJobSubResults(request):
    valid, response = initRequest(request)


    guid = request.session['requestParams']['guid'] if 'guid' in request.session['requestParams'] else ''
    lfn = request.session['requestParams']['lfn'] if 'lfn' in request.session['requestParams'] else ''
    scope = request.session['requestParams']['scope'] if 'scope' in request.session['requestParams'] else ''
    pandaid = request.session['requestParams']['pandaid'] if 'pandaid' in request.session['requestParams'] else None
    jeditaskid = request.session['requestParams']['jeditaskid'] if 'jeditaskid' in request.session['requestParams'] else None
    data = getJobReport(guid, lfn, scope)
    results = getARTjobSubResults(data)
    # if len(results) > 0:
    #     saveJobSubResults(results,jeditaskid, pandaid)

    data = {
        'requestParams': request.session['requestParams'],
        'viewParams': request.session['viewParams'],
        'jobSubResults': results
    }
    response = render_to_response('artJobSubResults.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    endSelfMonitor(request)
    return response
Example #15
def updateARTJobList(request):
    valid, response = initRequest(request)
    query = setupView(request, 'job')
    starttime = datetime.now()

    ### Getting full list of jobs
    cur = connection.cursor()
    cur.execute("SELECT taskid, ntag, pandaid, guid, scope, lfn, taskstatus, status as jobstatus, testname, taskmodificationtime, jobmodificationtime  FROM table(ATLAS_PANDABIGMON.ARTTESTS_1('%s','%s','%s')) WHERE pandaid is not NULL" % (query['ntag_from'], query['ntag_to'], query['strcondition']))
    jobs = cur.fetchall()
    cur.close()

    artJobsNames = ['jeditaskid', 'ntag', 'pandaid', 'guid', 'scope', 'lfn', 'taskstatus', 'jobstatus', 'testname', 'taskmodificationtime', 'jobmodificationtime']
    fulljoblist = [dict(zip(artJobsNames, row)) for row in jobs]
    ntagslist = list(sorted(set([x['ntag'] for x in fulljoblist])))

    i = 0
    ci = 0
    ii = 0
    if len(fulljoblist) > 0:
        for j in fulljoblist:
            i += 1
            get_query = {}
            get_query['jeditaskid'] = j['jeditaskid']
            get_query['testname'] = j['testname']

            # Skip rows that are currently (or were recently) being updated
            # by another worker, to avoid rerunning the same jobs
            blockedRowsConditions = Q(lock_time__gt=(datetime.now() - timedelta(minutes=30)))

            is_result_update = False
            existedRow = None

            try:
                existedRow = ARTResults.objects.filter(**get_query).exclude(blockedRowsConditions).get()
            except Exception:
                # Check whether the test is really missing rather than blocked by an update
                if ARTResults.objects.filter(**get_query).count() == 0:
                    if getjflag(j) == 1:
                        sqlRequest = "SELECT ATLAS_PANDABIGMON.ART_RESULTS_SEQ.NEXTVAL as my_req_token FROM dual;"
                        cur = connection.cursor()
                        cur.execute(sqlRequest)
                        requestToken = cur.fetchall()
                        cur.close()
                        newRowID = requestToken[0][0]

                        insertRow = ARTResults.objects.create(row_id=newRowID, jeditaskid=j['jeditaskid'], pandaid=j['pandaid'],
                                               is_task_finished=None,
                                               is_job_finished=None, testname=j['testname'],
                                               task_flag_updated=None,
                                               job_flag_updated=None,
                                               result=None,
                                               is_locked = 1,
                                               lock_time = datetime.now())

                        results = getARTjobSubResults(getJobReport(j['guid'], j['lfn'], j['scope'])) if getjflag(j) == 1 else {}

                        #updateLockedRow =  ARTResults.objects.get(row_id=insertRow.row_id)
                        insertRow.result = json.dumps(results)
                        insertRow.is_locked = 0
                        insertRow.lock_time = datetime.now()
                        insertRow.save(update_fields=['result', 'is_locked','lock_time'])
                        # insertRow = ARTResults(jeditaskid=j['jeditaskid'], pandaid=j['pandaid'], is_task_finished=gettflag(j),
                        #                        is_job_finished=getjflag(j), testname=j['testname'],
                        #                        task_flag_updated=datetime.now(),
                        #                        job_flag_updated=datetime.now(),
                        #                        result=json.dumps(results),
                        #                        is_locked=0,
                        #                        lock_time = None)
                    else:
                        insertRow = ARTResults(jeditaskid=j['jeditaskid'], pandaid=j['pandaid'],
                                               is_task_finished=gettflag(j),
                                               is_job_finished=getjflag(j), testname=j['testname'],
                                               task_flag_updated=datetime.now(),
                                               job_flag_updated=datetime.now(),
                                               result=None,
                                               lock_time=datetime.now())
                        insertRow.save()
                    # if getjflag(j) == 1:
                    #     insertRow.save(update_fields=['pandaid','is_job_finished','task_flag_updated','job_flag_updated','result','is_locked','lock_time'])
                    # else:
                    #     insertRow.save()

                    ii += 1
                    print ('%s row inserted (%s out of %s)' % (ii, i, len(fulljoblist)))

            if existedRow is not None:
                try:
                    existedResult = json.loads(existedRow.result)
                except (TypeError, ValueError):
                    existedResult = None
                ### check whether a job was retried
                if j['pandaid'] != existedRow.pandaid:
                    ### update pandaid -> it is needed to load json
                    existedRow.pandaid = j['pandaid']
                    if getjflag(j) == 1:
                        is_result_update = True
                    else:
                        existedRow.result = None
                        existedRow.save(update_fields=['pandaid','result'])
                elif existedResult is None:
                    ### no result in table, check whether a job finished already
                    if existedRow.is_job_finished < gettflag(j) or getjflag(j) == 1:
                        is_result_update = True
                else:
                    ### result is not empty, check whether a job was updated
                    if existedRow.job_flag_updated and j['jobmodificationtime'] > existedRow.job_flag_updated:
                        ### job state was updated, results need to be updated too
                        is_result_update = True

                if is_result_update:
                    existedRow.is_locked = 1
                    existedRow.lock_time = datetime.now()
                    existedRow.save(update_fields=['is_locked','lock_time'])
                    results = getARTjobSubResults(getJobReport(j['guid'], j['lfn'], j['scope']))
                    existedRow.is_job_finished = getjflag(j)
                    existedRow.is_task_finished = gettflag(j)
                    existedRow.job_flag_updated = datetime.now()
                    existedRow.result = json.dumps(results)
                    existedRow.is_locked = 0
                    existedRow.lock_time = datetime.now()
                    existedRow.save(update_fields=['pandaid', 'is_task_finished','is_job_finished', 'job_flag_updated', 'result', 'is_locked','lock_time'])

                    ci += 1
                    print ('%s row updated (%s out of %s)' % (ci,i,len(fulljoblist)))

    # ### Getting list of existed jobs
    # extra = 'jeditaskid in ( '
    # fulljoblistdict = {}
    # for job in fulljoblist:
    #     if job['jeditaskid'] not in fulljoblistdict.keys():
    #         fulljoblistdict[job['jeditaskid']] = {}
    #         extra +=  str(job['jeditaskid']) + ','
    #     fulljoblistdict[job['jeditaskid']][job['pandaid']] = []
    # if extra.endswith(','):
    #     extra = extra[:-1]
    # if extra.endswith('( '):
    #     extra = ' ( 1=1'
    # extra += ' ) '

    # existedjoblist = ARTResults.objects.extra(where=[extra]).values()
    # existedjobdict = {}
    # if len(existedjoblist) > 0:
    #     for job in existedjoblist:
    #         if job['jeditaskid'] not in existedjobdict.keys():
    #             existedjobdict[job['jeditaskid']] = {}
    #         if job['testname'] not in existedjobdict[job['jeditaskid']].keys():
    #             existedjobdict[job['jeditaskid']][job['testname']] = {}
    #         existedjobdict[job['jeditaskid']][job['testname']][job['pandaid']] = job
    #
    # tableName = 'ATLAS_PANDABIGMON.ART_RESULTS'
    # ###
    # insertData = []
    # updateData = []
    # # updateResultsData = []
    # if len(existedjoblist) > 0:
    #     print ('to be filtered')
    #
    #     for j in fulljoblist:
    #         print ('%s rows to insert' % (len(insertData)))
    #         print ('%s rows to update' % (len(updateData)))
    #         if j['jeditaskid'] in existedjobdict:
    #             ### check whether a job was retried
    #             if j['pandaid'] not in existedjobdict[j['jeditaskid']][j['testname']]:
    #                 ### add to update list
    #                 results = getARTjobSubResults(getJobReport(j['guid'], j['lfn'], j['scope']))
    #                 updateData.append((j['pandaid'], gettflag(j), getjflag(j), datetime.now().strftime(defaultDatetimeFormat), json.dumps(results), j['jeditaskid'], j['testname']))
    #             elif existedjobdict[j['jeditaskid']][j['testname']][j['pandaid']]['result'] is None or len(existedjobdict[j['jeditaskid']][j['testname']][j['pandaid']]['result']) == 0:
    #                 ### no result in table, check whether a job finished already
    #                 if existedjobdict[j['jeditaskid']][j['testname']][j['pandaid']]['is_job_finished'] < gettflag(j):
    #                     ### job state was updated results needs to be updated too
    #                     results = getARTjobSubResults(getJobReport(j['guid'],j['lfn'],j['scope']))
    #                     updateData.append((j['pandaid'], gettflag(j), getjflag(j), datetime.now().strftime(defaultDatetimeFormat), json.dumps(results), j['jeditaskid'], j['testname']))
    #             else:
    #                 ### result is not empty, check whether a job was updated
    #                 if j['jobmodificationtime'] > existedjobdict[j['jeditaskid']][j['testname']][j['pandaid']]['job_flag_updated']:
    #                     ### job state was updated results needs to be updated too
    #                     results = getARTjobSubResults(getJobReport(j['guid'],j['lfn'],j['scope']))
    #                     updateData.append((j['pandaid'], gettflag(j), getjflag(j), datetime.now().strftime(defaultDatetimeFormat), json.dumps(results), j['jeditaskid'], j['testname']))
    #         else:
    #             ### a new task that needs to be added to insert list
    #             if getjflag(j) == 1:
    #                 results = getARTjobSubResults(getJobReport(j['guid'],j['lfn'],j['scope']))
    #             else:
    #                 results = {}
    #             insertData.append((j['taskid'], j['pandaid'], gettflag(j), getjflag(j), j['testname'],
    #                                    datetime.now().strftime(defaultDatetimeFormat),
    #                                    datetime.now().strftime(defaultDatetimeFormat), json.dumps(results)))
    #
    #
    # else:
    #     print ('preparing data to insert into artresults table')
    #     for j in fulljoblist:
    #         print ('%s rows to insert' % (len(insertData)))
    #         if j['pandaid'] is not None and j['jeditaskid'] is not None:
    #             if getjflag(j) == 1:
    #                 results = getARTjobSubResults(getJobReport(j['guid'],j['lfn'],j['scope']))
    #             else:
    #                 results = {}
    #             insertData.append((j['jeditaskid'], j['pandaid'], gettflag(j), getjflag(j), j['testname'], datetime.now().strftime(defaultDatetimeFormat), datetime.now().strftime(defaultDatetimeFormat),json.dumps(results)))
    #
    #
    # if len(insertData) > 0:
    #     new_cur = connection.cursor()
    #     insert_query = """INSERT INTO """ + tableName + """(JEDITASKID,PANDAID,IS_TASK_FINISHED,IS_JOB_FINISHED,TESTNAME,TASK_FLAG_UPDATED,JOB_FLAG_UPDATED,RESULT_JSON ) VALUES (%s, %s, %s, %s, %s, TO_TIMESTAMP( %s , 'YYYY-MM-DD HH24:MI:SS' ), TO_TIMESTAMP( %s , 'YYYY-MM-DD HH24:MI:SS' ), %s)"""
    #     new_cur.executemany(insert_query, insertData)
    # print ('data inserted (%s)' % (len(insertData)))
    #
    # if len(updateData) > 0:
    #     new_cur = connection.cursor()
    #     update_query = """UPDATE """ + tableName + """ SET PANDAID = %s, IS_TASK_FINISHED = %s ,IS_JOB_FINISHED = %s , JOB_FLAG_UPDATED = TO_TIMESTAMP( %s , 'YYYY-MM-DD HH24:MI:SS' ), RESULT_JSON = %s WHERE JEDITASKID = %s AND TESTNAME = %s """
    #     new_cur.executemany(update_query, updateData)
    # print ('data updated (%s rows updated)' % (len(updateData)))

    result = True
    data = {
        'result': result,
        'strt': starttime,
        'endt': datetime.now()
    }
    return HttpResponse(json.dumps(data, cls=DateEncoder), content_type='text/html')
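
The update path above uses is_locked/lock_time as an optimistic lock: rows touched by another worker within the last 30 minutes are skipped. The guard in isolation, as a sketch against the same ARTResults model (the helper name fetch_unlocked is ours):

from datetime import datetime, timedelta
from django.db.models import Q

def fetch_unlocked(get_query, window_minutes=30):
    # Return the matching row only if no other worker locked it within
    # the staleness window; None means the row is absent or blocked
    blocked = Q(lock_time__gt=datetime.now() - timedelta(minutes=window_minutes))
    try:
        return ARTResults.objects.filter(**get_query).exclude(blocked).get()
    except ARTResults.DoesNotExist:
        return None
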
Example #16
def listReqPlot(request):
    valid, response = initRequest(request)
    if not valid: return response

    valid, response = login(request)
    if not valid: return response

    sortby = 'id'
    if 'sortby' in request.GET:
        sortby = request.GET['sortby']

    LAST_N_HOURS_MAX = 7 * 24
    limit = 5000
    if 'hours' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['hours'])
    if 'days' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['days']) * 24

    if u'display_limit' in request.session['requestParams']:
        display_limit = int(request.session['requestParams']['display_limit'])
    else:
        display_limit = 1000
    nmax = display_limit

    # histogram bin width in hours, depending on the size of the time window
    if LAST_N_HOURS_MAX >= 168:
        flag = 12
    elif LAST_N_HOURS_MAX >= 48:
        flag = 6
    else:
        flag = 2

    startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
    enddate = timezone.now()

    query = {'qtime__range': [startdate.strftime(defaultDatetimeFormat), enddate.strftime(defaultDatetimeFormat)]}

    values = ('urls', 'qtime', 'remote', 'qduration', 'duration')
    reqs = RequestStat.objects.filter(**query).order_by(sortby).reverse().values(*values)

    reqHist = {}
    drHist = []

    mons = []
    for req in reqs:
        mon = {}
        mon['duration'] = req['duration']
        mon['urls'] = req['urls']
        mon['remote'] = req['remote']
        mon['qduration'] = req['qduration'].strftime('%Y-%m-%d %H:%M:%S')
        mon['qtime'] = req['qtime'].strftime('%Y-%m-%d %H:%M:%S')
        mons.append(mon)

        ## plot: requests per time bin
        tm = req['qtime']
        tm = tm - timedelta(hours=tm.hour % flag, minutes=tm.minute, seconds=tm.second, microseconds=tm.microsecond)
        if tm not in reqHist: reqHist[tm] = 0
        reqHist[tm] += 1

        ## plot: view duration
        dr = int(mon['duration'])
        drHist.append(dr)

    kys = sorted(reqHist.keys())
    reqHists = []
    for k in kys:
        reqHists.append([k, reqHist[k]])

    drcount = [[x, drHist.count(x)] for x in set(drHist)]
    drcount.sort()

    # do paging
    paginator = Paginator(mons, 200)
    page = request.GET.get('page')
    try:
        reqPages = paginator.page(page)
    except PageNotAnInteger:
        reqPages = paginator.page(1)
    except EmptyPage:
        reqPages = paginator.page(paginator.num_pages)

    url = request.get_full_path()
    if url.count('?') > 0:
        url += '&'
    else:
        url += '?'

    data = {
        'mons': mons[:nmax],
        'nmax': nmax,
        'request': request,
        'user': request.session['username'],
        'reqPages': reqPages,
        'url': url,
        'drHist': drcount,
        'reqHist': reqHists,
    }

    return render_to_response('req_plot.html', data, RequestContext(request))
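
The request histogram in listReqPlot floors each qtime to a 2-, 6- or 12-hour bin depending on the window size. The bucketing arithmetic in isolation (the helper name bucket_start is ours):

from datetime import datetime, timedelta

def bucket_start(tm, width_hours):
    # Floor a timestamp to the start of its width_hours-wide bin
    return tm - timedelta(hours=tm.hour % width_hours, minutes=tm.minute,
                          seconds=tm.second, microseconds=tm.microsecond)

assert bucket_start(datetime(2018, 3, 2, 17, 42), 6) == datetime(2018, 3, 2, 12, 0)
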
Example #17
def compareJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    pandaidstr = None
    if 'pandaid' in request.session['requestParams']:
        pandaidstr = request.session['requestParams']['pandaid'].split('|')
    else:
        query = {}
        query['userid'] = request.user.id
        query['object'] = 'job'
        try:
            jobsComparison = ObjectsComparison.objects.get(**query)
            pandaidstr = json.loads(jobsComparison.comparisonlist)
        except ObjectsComparison.DoesNotExist:
            pandaidstr = None

    if not pandaidstr:
        return render_to_response(
            'errorPage.html',
            {'errormessage': 'No pandaids for comparison provided'},
            content_type='text/html')

    pandaids = []
    for pid in pandaidstr:
        try:
            pandaids.append(int(pid))
        except (ValueError, TypeError):
            pass
    maxNJobs = 5
    if len(pandaids) > maxNJobs:
        pandaids = pandaids[:maxNJobs]

    jobInfoJSON = []

    # Looking for a job in cache
    pandaidsToBeLoad = []
    for pandaid in pandaids:
        data = getCacheEntry(request,
                             "compareJob_" + str(pandaid),
                             isData=True)
        # data = None
        if data is not None:
            jobInfoJSON.append(json.loads(data))
        else:
            pandaidsToBeLoad.append(pandaid)

    #Loading jobs info in parallel
    nprocesses = maxNJobs
    if len(pandaidsToBeLoad) > 0:
        url_params = [('?json=1&pandaid=' + str(pid))
                      for pid in pandaidsToBeLoad]
        pool = multiprocessing.Pool(processes=nprocesses)
        jobInfoJSON.extend(pool.map(job_info_getter, url_params))
        pool.close()
        pool.join()

    #Put loaded jobs info to cache
    for job in jobInfoJSON:
        setCacheEntry(request,
                      "compareJob_" + str(job.keys()[0]),
                      json.dumps(job.values()[0], cls=DateEncoder),
                      60 * 30,
                      isData=True)

    compareParamNames = {
        'produsername': '******',
        'reqid': 'Request ID',
        'jeditaskid': 'Task ID',
        'jobstatus': 'Status',
        'attemptnr': 'Attempt',
        'creationtime': 'Created',
        'waittime': 'Time to start',
        'duration': 'Duration',
        'modificationtime': 'Modified',
        'cloud': 'Cloud',
        'computingsite': 'Site',
        'currentpriority': 'Priority',
        'jobname': 'Name',
        'processingtype': 'Type',
        'transformation': 'Transformation',
        'proddblock': 'Input',
        'destinationdblock': 'Output',
        'jobsetid': 'Jobset ID',
        'batchid': 'Batch ID',
        'eventservice': 'Event Service'
    }

    compareParams = [
        'produsername', 'reqid', 'jeditaskid', 'jobstatus', 'attemptnr',
        'creationtime', 'waittime', 'duration', 'modificationtime', 'cloud',
        'computingsite', 'currentpriority', 'jobname', 'processingtype',
        'transformation', 'proddblock', 'destinationdblock', 'jobsetid',
        'batchid', 'eventservice'
    ]

    ### Params excluded because their values are too long ###
    excludedParams = ['metadata', 'metastruct']

    jobsComparisonMain = []
    for param in compareParams:
        row = [{'paramname': compareParamNames[param]}]
        for jobd in jobInfoJSON:
            job = jobd['job']
            if param in job:
                row.append({'value': job[param]})
            else:
                row.append({'value': '-'})
        if len(set([d['value'] for d in row if 'value' in d])) == 1:
            row[0]['mark'] = 'equal'
        jobsComparisonMain.append(row)

    all_params = []
    for jobd in jobInfoJSON:
        all_params.extend(list(jobd['job'].keys()))
    all_params = sorted(set(all_params))

    jobsComparisonAll = []
    for param in all_params:
        if param not in excludedParams:
            row = [{'paramname': param}]
            for jobd in jobInfoJSON:
                job = jobd['job']
                if param in job and job[param] is not None:
                    row.append({'value': job[param]})
                else:
                    row.append({'value': '-'})
            if len(set([d['value'] for d in row if 'value' in d])) == 1:
                row[0]['mark'] = 'equal'
            jobsComparisonAll.append(row)

    xurl = extensibleURL(request)
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'url': request.path,
        'jobsComparisonMain': jobsComparisonMain,
        'jobsComparisonAll': jobsComparisonAll,
        'pandaids': pandaids,
        'xurl': xurl,
        'built': datetime.now().strftime("%H:%M:%S"),
    }

    ##self monitor
    endSelfMonitor(request)
    response = render_to_response('compareJobs.html',
                                  data,
                                  content_type='text/html')
    patch_response_headers(response,
                           cache_timeout=request.session['max_age_minutes'] *
                           60)
    return response
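
compareJobs combines a per-job cache with one parallel fetch of the misses. The cache-aside skeleton it follows, as a sketch (cached_parallel_fetch and fetch_many are our names; getCacheEntry/setCacheEntry are the module's own cache helpers):

def cached_parallel_fetch(request, keys, fetch_many, ttl=60 * 30):
    # Serve what the cache already has, fetch all misses in one
    # parallel batch, then backfill the cache for the next request
    hits, misses = [], []
    for key in keys:
        data = getCacheEntry(request, key, isData=True)
        if data is not None:
            hits.append(json.loads(data))
        else:
            misses.append(key)
    fetched = fetch_many(misses) if misses else []
    for key, obj in zip(misses, fetched):
        setCacheEntry(request, key, json.dumps(obj, cls=DateEncoder), ttl, isData=True)
    return hits + fetched
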
Example #18
def runningMCProdTasks(request):
    # redirect to the united runningProdTasks page;
    # everything below the redirect is unreachable and kept for reference
    return redirect('/runningprodtasks/?preset=MC')
    valid, response = initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "runningMCProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningMCProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response


    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    tquery, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=0, limit=9999999, querytype='task',
                                                           wildCardExt=True)

    tasks = RunningMCProductionTasks.objects.filter(**tquery).extra(where=[wildCardExtension]).values()
    ntasks = len(tasks)
    slots = 0
    ages = []
    neventsAFIItasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}
    neventsFStasksSum = {'evgen': 0, 'pile': 0, 'simul': 0, 'recon': 0}

    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] > 0 else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = (datetime.now() - task['creationdate']).days
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        task['datasetname'] = task['taskname'].split('.')[1]
        ltag = len(task['taskname'].split("_"))
        rtag = task['taskname'].split("_")[ltag - 1]
        if "." in rtag:
            rtag = rtag.split(".")[len(rtag.split(".")) - 1]
        if 'a' in rtag:
            task['simtype'] = 'AFII'
            neventsAFIItasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0
        else:
            task['simtype'] = 'FS'
            neventsFStasksSum[task['processingtype']] += task['totev'] if task['totev'] is not None else 0
    plotageshistogram = 1
    if sum(ages) == 0: plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status', 'processingtype', 'simtype'])

    # Map each sortby key onto the task field it sorts on;
    # the '-asc' / '-desc' suffix selects the direction
    sortFields = {
        'campaign': 'campaign', 'reqid': 'reqid', 'jeditaskid': 'jeditaskid',
        'rjobs': 'rjobs', 'status': 'status', 'processingtype': 'processingtype',
        'nevents': 'totev', 'neventsused': 'neventsused', 'neventstobeused': 'totevrem',
        'percentage': 'percentage', 'nfilesfailed': 'nfilesfailed',
        'priority': 'currentpriority', 'simtype': 'simtype', 'age': 'age',
        'corecount': 'corecount', 'username': 'username', 'datasetname': 'datasetname',
    }
    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
    else:
        sortby = 'age-asc'
    field, _, order = sortby.rpartition('-')
    if field in sortFields:
        tasks = sorted(tasks, key=lambda x: x[sortFields[field]], reverse=(order == 'desc'))

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):

        dump = json.dumps(tasks, cls=DateEncoder)
        ##self monitor
        endSelfMonitor(request)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'neventsAFIItasksSum': neventsAFIItasksSum,
            'neventsFStasksSum': neventsFStasksSum,
            'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        setCacheEntry(request, "runningMCProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        response = render_to_response('runningMCProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
Example #19
def globalshares(request):
    valid, response = initRequest(request)
    if not valid: return response

    # Here we try to get cached data
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData
        response = render_to_response('globalshares.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    setupView(request, hours=180 * 24, limit=9999999)
    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}  # e.g. {'Upgrade': 130049, 'Reprocessing default': 568841, 'Data Derivations': 202962, 'Event Index': 143}

    for shareName, shareValue in gs.iteritems():
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        shareValue['used'] = shareValue['ratio'] if 'ratio' in shareValue else None
        gsPlotData[str(shareName)] = int(shareValue['executing'])


    for shareValue in tablerows:
        shareValue['used'] = shareValue['ratio'] * Decimal(shareValue['value']) / 100 if 'ratio' in shareValue else None

    ordtablerows = {}
    ordtablerows['childlist'] = []
    level1 = ''
    level2 = ''
    level3 = ''

    for shareValue in tablerows:
        if len(shareValue['level1']) != 0:
            level1 = shareValue['level1']
            ordtablerows[level1] = {}
            ordtablerows['childlist'].append(level1)
            ordtablerows[level1]['childlist'] = []
        if len(shareValue['level2']) != 0:
            level2 = shareValue['level2']
            ordtablerows[level1][level2] = {}
            ordtablerows[level1]['childlist'].append(level2)
            ordtablerows[level1][level2]['childlist'] = []
        if len(shareValue['level3']) != 0:
            level3 = shareValue['level3']
            ordtablerows[level1][level2][level3] = {}
            ordtablerows[level1][level2]['childlist'].append(level3)

    resources_list, resources_dict = get_resources_gshare()

    newTablesRow = []
    for ordValueLevel1 in sorted(ordtablerows['childlist']):
        for shareValue in tablerows:
            if ordValueLevel1 in shareValue['level1']:
                ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                shareValue['level'] = 'level1'
                shareValue['gshare'] = ord1Short.replace(' ', '_')
                newTablesRow.append(shareValue)
                tablerows.remove(shareValue)
                if len(ordtablerows[ordValueLevel1]['childlist']) == 0:
                    add_resources(ord1Short, newTablesRow, resources_list, shareValue['level'])
                else:
                    childsgsharelist = []
                    get_child_elements(ordtablerows[ordValueLevel1], childsgsharelist)
                    resources_dict = get_child_sumstats(childsgsharelist, resources_dict, ord1Short)
                    short_resource_list = resourcesDictToList(resources_dict)
                    add_resources(ord1Short, newTablesRow, short_resource_list, shareValue['level'])
                break
        for ordValueLevel2 in sorted(ordtablerows[ordValueLevel1]['childlist']):
            for shareValue in tablerows:
                if ordValueLevel2 in shareValue['level2']:
                    if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) == 0:
                        ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                        ord2Short = re.sub(r'\[(.*)\]', '', ordValueLevel2).rstrip().lower()
                        link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord2Short)
                        shareValue['link'] = link
                        shareValue['level'] = 'level2'
                        shareValue['gshare'] = ord2Short.replace(' ', '_')
                    newTablesRow.append(shareValue)
                    tablerows.remove(shareValue)
                    if 'level' in shareValue:
                        add_resources(ord2Short, newTablesRow, resources_list, shareValue['level'])
                    break
            for ordValueLevel3 in sorted(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']):
                for shareValue in tablerows:
                    if ordValueLevel3 in shareValue['level3']:
                        if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) > 0:
                            ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                            ord3Short = re.sub(r'\[(.*)\]', '', ordValueLevel3).rstrip().lower()
                            link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord3Short)
                            shareValue['link'] = link
                            shareValue['level'] = 'level3'
                            shareValue['gshare'] = ord3Short.replace(' ', '_')
                        newTablesRow.append(shareValue)
                        tablerows.remove(shareValue)
                        if 'level' in shareValue:
                            add_resources(ord3Short, newTablesRow, resources_list, shareValue['level'])
                        break

    tablerows = newTablesRow

    del request.session['TFIRST']
    del request.session['TLAST']
    ##self monitor
    endSelfMonitor(request)
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
                'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData':gsPlotData,
            'tablerows':tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        response = render_to_response('globalshares.html', data, content_type='text/html')
        setCacheEntry(request, "globalshares", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(DecimalEncoder().encode(gs), content_type='text/html')
Ejemplo n.º 20
0
def compareJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    pandaidstr = None
    if 'pandaid' in request.session['requestParams']:
        pandaidstr = request.session['requestParams']['pandaid'].split('|')
    else:
        query = {}
        query['userid'] = request.user.id
        query['object'] = 'job'
        try:
            jobsComparison = ObjectsComparison.objects.get(**query)
            pandaidstr = json.loads(jobsComparison.comparisonlist)
        except ObjectsComparison.DoesNotExist:
            pandaidstr = None


    if not pandaidstr:
        return render_to_response('errorPage.html', {'errormessage': 'No pandaids for comparison provided'}, content_type='text/html')

    pandaids = []
    for pid in pandaidstr:
        try:
            pid = int(pid)
            pandaids.append(pid)
        except:
            pass
    maxNJobs = 5
    if len(pandaids) > maxNJobs:
        pandaids = pandaids[:maxNJobs]


    jobInfoJSON = []

    # Looking for a job in cache
    pandaidsToBeLoad = []
    for pandaid in pandaids:
        data = getCacheEntry(request, "compareJob_" + str(pandaid), isData=True)
        # data = None
        if data is not None:
            jobInfoJSON.append(json.loads(data))
        else:
            pandaidsToBeLoad.append(pandaid)

    #Loading jobs info in parallel
    nprocesses = maxNJobs
    if len(pandaidsToBeLoad) > 0:
        url_params = [('?json=1&pandaid=' + str(pid)) for pid in pandaidsToBeLoad]
        pool = multiprocessing.Pool(processes=nprocesses)
        jobInfoJSON.extend(pool.map(job_info_getter, url_params))
        pool.close()
        pool.join()

    #Put loaded jobs info to cache
    for job in jobInfoJSON:
        setCacheEntry(request, "compareJob_" + str(job.keys()[0]),
                      json.dumps(job.values()[0], cls=DateEncoder), 60 * 30, isData=True)

    compareParamNames = {'produsername': '******', 'reqid': 'Request ID', 'jeditaskid': 'Task ID', 'jobstatus': 'Status',
                     'attemptnr': 'Attempt', 'creationtime': 'Created', 'waittime': 'Time to start', 'duration': 'Duration',
                     'modificationtime': 'Modified', 'cloud': 'Cloud', 'computingsite': 'Site', 'currentpriority': 'Priority',
                     'jobname': 'Name', 'processingtype': 'Type', 'transformation': 'Transformation', 'proddblock': 'Input',
                     'destinationdblock': 'Output', 'jobsetid': 'Jobset ID', 'batchid': 'Batch ID', 'eventservice': 'Event Service'}

    compareParams = ['produsername', 'reqid', 'jeditaskid', 'jobstatus', 'attemptnr','creationtime', 'waittime', 'duration',
                         'modificationtime', 'cloud', 'computingsite','currentpriority',
                         'jobname', 'processingtype', 'transformation','proddblock','destinationdblock', 'jobsetid', 'batchid','eventservice']

    ###Excluded params because of too long values###
    excludedParams = ['metadata', 'metastruct']

    jobsComparisonMain = []
    for param in compareParams:
        row = [{'paramname': compareParamNames[param]}]
        for jobd in jobInfoJSON:
            job = jobd['job']
            if param in job:
                row.append({'value': job[param]})
            else:
                row.append({'value': '-'})
        if len(set([d['value'] for d in row if 'value' in d])) == 1:
            row[0]['mark'] = 'equal'
        jobsComparisonMain.append(row)


    all_params = []
    for jobd in jobInfoJSON:
        all_params.extend(list(jobd['job'].keys()))
    all_params = sorted(set(all_params))

    jobsComparisonAll = []
    for param in all_params:
        if param not in excludedParams:
            row = [{'paramname': param}]
            for jobd in jobInfoJSON:
                job = jobd['job']
                if param in job and job[param] is not None:
                    row.append({'value': job[param]})
                else:
                    row.append({'value': '-'})
            if len(set([d['value'] for d in row if 'value' in d])) == 1:
                row[0]['mark'] = 'equal'
            jobsComparisonAll.append(row)


    xurl = extensibleURL(request)
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'url': request.path,
        'jobsComparisonMain': jobsComparisonMain,
        'jobsComparisonAll': jobsComparisonAll,
        'pandaids': pandaids,
        'xurl': xurl,
        'built': datetime.now().strftime("%H:%M:%S"),
    }

    ##self monitor
    endSelfMonitor(request)
    response = render_to_response('compareJobs.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
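
# Note: job_info_getter is not defined in this excerpt. A minimal sketch of
# what it could look like, assuming it fetches each job's JSON over HTTP with
# the requests library and a hypothetical base URL, returning the payload
# keyed by pandaid the way the caching loop above expects:
import requests

JOB_URL = 'https://bigpanda.cern.ch/job'  # assumed endpoint

def job_info_getter(url_params):
    """Fetch one job's JSON ('?json=1&pandaid=...') and key it by pandaid."""
    pandaid = url_params.split('pandaid=')[-1]
    return {pandaid: requests.get(JOB_URL + url_params).json()}
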
Example No. 21
def errorsScattering(request):
    initRequest(request)

    # Here we try to get cached data
    data = getCacheEntry(request, "errorsScattering")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('errorsScattering.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    limit = 100000
    if 'hours' in request.session['requestParams']:
        try:
            hours = int(request.session['requestParams']['hours'])
        except:
            hours = 8
    else:
        hours = 8

    isExcludeScouts = False
    if 'scouts' in request.session['requestParams']:
        if request.session['requestParams']['scouts'] == 'exclude':
            isExcludeScouts = True
        try:
            del request.session['requestParams']['scouts']
        except:
            pass

    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    # exclude paused tasks
    wildCardExtension += ' AND STATUS != \'paused\''
    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid", "reqid")

    # print ('tasks found %i') % len(tasks)

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"

    taskListByReq = {}
    transactionKey = random.randrange(1000000)
    executionData = []
    for t in tasks:
        executionData.append((t['jeditaskid'], transactionKey))
        # fill the list of jeditaskids for each reqid to put into cache, for consistency with jobList
        if t['reqid'] not in taskListByReq:
            taskListByReq[t['reqid']] = ''
        taskListByReq[t['reqid']] += str(t['jeditaskid']) + ','

    new_cur = connection.cursor()
    ins_query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(ins_query, executionData)
    connection.commit()

    jcondition = '(1=1)'
    if isExcludeScouts:
        jcondition = """specialhandling NOT LIKE '%%sj'"""

    querystr = """
        SELECT j.FINISHEDC, j.REQID, j.FAILEDC, sc.cloud as CLOUD, j.jeditaskid, j.COMPUTINGSITE from (
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, 
                   SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC, 
                   SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC, 
                   COMPUTINGSITE, REQID, JEDITASKID 
              FROM ATLAS_PANDA.JOBSARCHIVED4 WHERE JEDITASKID != REQID AND JEDITASKID in (
                SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
                    group by COMPUTINGSITE, REQID, JEDITASKID
            UNION
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, 
                   SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC, 
                   SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC, 
                   COMPUTINGSITE, REQID, JEDITASKID 
              FROM ATLAS_PANDAARCH.JOBSARCHIVED 
              WHERE JEDITASKID != REQID AND JEDITASKID in (
                  SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
                    group by COMPUTINGSITE, REQID, JEDITASKID
        ) j,
        ( select siteid, cloud from ATLAS_PANDAMETA.SCHEDCONFIG  
        ) sc
        where j.computingsite = sc.siteid and j.ALLC > 0    
    """ % (tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition, tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition)

    new_cur.execute(querystr)

    errorsRaw = dictfetchall(new_cur)
    # NB: the temp-table cleanup is commented out in this view, so rows tagged
    # with this transactionKey are left behind
    # new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    homeCloud = {}
    sflist = ('siteid', 'site', 'status', 'cloud', 'tier', 'comment_field', 'objectstore', 'catchall', 'corepower')
    sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
    for site in sites:
        homeCloud[site['siteid']] = site['cloud']

    clouds = sorted(set(homeCloud.values()))
    reqerrors = {}
    clouderrors = {}
    successrateIntervals = {'green': [80, 100], 'yellow':[50,79], 'red':[0, 49]}

    # fill in the per-request and per-cloud error stats
    for errorEntry in errorsRaw:
        rid = errorEntry['REQID']
        if rid not in reqerrors:
            reqentry = {
                'reqid': rid,
                'totalstats': {'percent': 0, 'minpercent': 100, 'finishedc': 0, 'failedc': 0,
                               'allc': 0, 'greenc': 0, 'yellowc': 0, 'redc': 0},
                'tasks': {},
            }
            for cloudname in clouds:
                reqentry[cloudname] = {'percent': 0, 'finishedc': 0, 'failedc': 0, 'allc': 0}
            reqerrors[rid] = reqentry
        if errorEntry['JEDITASKID'] not in reqerrors[rid]['tasks']:
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']] = {'finishedc': 0, 'allc': 0}
        reqerrors[rid][errorEntry['CLOUD']]['finishedc'] += errorEntry['FINISHEDC']
        reqerrors[rid][errorEntry['CLOUD']]['failedc'] += errorEntry['FAILEDC']
        reqerrors[rid][errorEntry['CLOUD']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] += errorEntry['FINISHEDC']
        reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        # accumulate the per-entry counts directly; adding the running cloud
        # totals here would double-count entries from the same cloud
        reqerrors[rid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
        reqerrors[rid]['totalstats']['failedc'] += errorEntry['FAILEDC']
        reqerrors[rid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        if errorEntry['CLOUD'] not in clouderrors:
            clouderrors[errorEntry['CLOUD']] = {}
        if errorEntry['COMPUTINGSITE'] not in clouderrors[errorEntry['CLOUD']]:
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']] = {'finishedc': 0, 'failedc': 0, 'allc': 0}
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
        clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] += (errorEntry['FINISHEDC'] + errorEntry['FAILEDC'])

    for rid, reqentry in reqerrors.items():
        reqerrors[rid]['totalstats']['percent'] = int(math.ceil(reqerrors[rid]['totalstats']['finishedc']*100./reqerrors[rid]['totalstats']['allc'])) if reqerrors[rid]['totalstats']['allc'] > 0 else 0
        reqerrors[rid]['totalstats']['minpercent'] = min(int(tstats['finishedc'] * 100. / tstats['allc']) for tstats in reqentry['tasks'].values())
        for tstats in reqentry['tasks'].values():
            srpct = int(tstats['finishedc'] * 100. / tstats['allc'])
            for color, srint in successrateIntervals.items():
                reqerrors[rid]['totalstats'][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
        for cloudname, stats in reqentry.items():
            if cloudname not in ('reqid', 'totalstats', 'tasks'):
                reqerrors[rid][cloudname]['percent'] = int(stats['finishedc'] * 100. / stats['allc']) if stats['allc'] > 0 else -1

    reqsToDel = []

    # clean up requests that show no errors at all
    for rid, reqentry in reqerrors.items():
        notNone = False
        if reqentry['totalstats']['allc'] != 0 and reqentry['totalstats']['allc'] != reqentry['totalstats']['finishedc']:
            notNone = True
        # for cname, cval in reqentry.items():
        #     if cval['allc'] != 0:
        #         notNone = True
        if not notNone:
            reqsToDel.append(rid)

    for reqToDel in reqsToDel:
        del reqerrors[reqToDel]

    # calculate stats for clouds
    columnstats = {}
    for cn in clouds:
        cns = str(cn)
        columnstats[cns] = {'percent': 0, 'finishedc': 0, 'failedc': 0, 'allc': 0, 'minpercent': 100}
        for color, srint in successrateIntervals.items():
            columnstats[cns][color + 'c'] = 0

    for cloudname, sites in clouderrors.items():
        for sitename, sstats in sites.items():
            columnstats[cloudname]['finishedc'] += sstats['finishedc']
            columnstats[cloudname]['failedc'] += sstats['failedc']
            columnstats[cloudname]['allc'] += sstats['allc']
            srpct = int(sstats['finishedc'] * 100. / sstats['allc'])
            for color, srint in successrateIntervals.items():
                columnstats[cloudname][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
        columnstats[cloudname]['minpercent'] = min(int(cstats['finishedc'] * 100. / cstats['allc']) for cstats in sites.values())
    for cn, stats in columnstats.items():
        columnstats[cn]['percent'] = int(math.ceil(columnstats[cn]['finishedc']*100./columnstats[cn]['allc'])) if columnstats[cn]['allc'] > 0 else 0


    # introduce a unique tk for each reqid
    for rid, reqentry in reqerrors.items():
        if rid in taskListByReq and len(taskListByReq[rid]) > 0:
            tk = setCacheData(request, lifetime=60*20, jeditaskid=taskListByReq[rid][:-1])
            reqentry['tk'] = tk

    # transform the request errors dict into a list for sorting in the template
    reqErrorsList = sorted(reqerrors.values(), key=lambda x: x['totalstats']['percent'])

    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'clouds' : clouds,
        'columnstats': columnstats,
        'reqerrors': reqErrorsList,
        'scouts': 'exclude' if isExcludeScouts else 'include',
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    ##self monitor
    endSelfMonitor(request)
    setCacheEntry(request, "errorsScattering", json.dumps(data, cls=DateEncoder), 60 * 20)
    response = render_to_response('errorsScattering.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
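
# Note: dictfetchall is not defined in this excerpt; a common implementation
# is the recipe from the Django documentation, which turns raw cursor rows
# into dicts keyed by column name:
def dictfetchall(cursor):
    """Return all rows from a cursor as a list of dicts."""
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]
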
Example No. 22
def harvesters(request):
    import json
    valid, response = initRequest(request)
    #query, extra, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
    extra = '1=1'
    xurl = extensibleURL(request)

    if 'instance' in request.session['requestParams']:
        instance = request.session['requestParams']['instance']
        # if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
        #     data = getCacheEntry(request, instance,isData=True)
        #     import json
        #     return HttpResponse(data, content_type='text/html')
        data = getCacheEntry(request, "harvester")
        if data is not None:
            data = json.loads(data)
            data['request'] = request
            response = render_to_response('harvesters.html',
                                          data,
                                          content_type='text/html')
            patch_response_headers(
                response,
                cache_timeout=request.session['max_age_minutes'] * 60)
            endSelfMonitor(request)
            return response
        if ('workersstats' in request.session['requestParams']
                and 'instance' in request.session['requestParams']):
            harvesterworkerstats = []
            tquery = {'harvesterid': instance}
            limit = 100
            if 'limit' in request.session['requestParams']:
                limit = request.session['requestParams']['limit']
            harvesterworkerstat = HarvesterWorkerStats.objects.filter(**tquery).values(
                'computingsite', 'resourcetype', 'status', 'nworkers',
                'lastupdate').extra(where=[extra]).order_by('-lastupdate')[:limit]
            old_format = '%Y-%m-%d %H:%M:%S'
            new_format = '%d-%m-%Y %H:%M:%S'
            for stat in harvesterworkerstat:
                stat['lastupdate'] = datetime.strptime(str(stat['lastupdate']), old_format).strftime(new_format)
                harvesterworkerstats.append(stat)
            return HttpResponse(json.dumps(harvesterworkerstats, cls=DateTimeEncoder),
                                content_type='text/html')
        if ('dialogs' in request.session['requestParams']
                and 'instance' in request.session['requestParams']):
            dialogs = []
            tquery = {'harvesterid': instance}
            limit = 100
            if 'limit' in request.session['requestParams']:
                limit = request.session['requestParams']['limit']
            dialogsList = HarvesterDialogs.objects.filter(**tquery).values(
                'creationtime', 'modulename', 'messagelevel',
                'diagmessage').extra(where=[extra]).order_by('-creationtime')[:limit]
            # dialogs.extend(HarvesterDialogs.objects.filter(**tquery).values('creationtime','modulename', 'messagelevel','diagmessage').filter(**tquery).extra(where=[extra]).order_by('-creationtime'))
            old_format = '%Y-%m-%d %H:%M:%S'
            new_format = '%d-%m-%Y %H:%M:%S'
            for dialog in dialogsList:
                dialog['creationtime'] = datetime.strptime(
                    str(dialog['creationtime']),
                    old_format).strftime(new_format)
                dialogs.append(dialog)
            return HttpResponse(json.dumps(dialogs, cls=DateTimeEncoder),
                                content_type='text/html')
        if ('dt' in request.session['requestParams']
                and 'tk' in request.session['requestParams']):
            tk = request.session['requestParams']['tk']
            data = getCacheEntry(request, tk, isData=True)
            return HttpResponse(data, content_type='text/html')
        lastupdateCache = ''
        workersListCache = []

        data = {}
        setCacheEntry(request,
                      instance,
                      json.dumps(data, cls=DateEncoder),
                      1,
                      isData=True)

        workersListIsEmpty = True
        if ('status' not in request.session['requestParams']
                and 'computingsite' not in request.session['requestParams']
                and 'days' not in request.session['requestParams']):
            data = getCacheEntry(request, instance, isData=True)
            if data is not None and data != "null":
                if 'lastupdate' in data:
                    data = json.loads(data)
                    lastupdateCache = data['lastupdate'].replace('T', ' ')
                    lastupdateCache = """ AND "wrklastupdate" >= to_date('%s','yyyy-mm-dd hh24:mi:ss')""" % (
                        lastupdateCache)
                    workersListCache = data['workersList']
                    workersListIsEmpty = False

                    # iterate over a copy of the keys since entries get deleted
                    tmpworkerList = list(data['workersList'].keys())
                    for worker in tmpworkerList:
                        if datetime.strptime(
                                data['workersList'][worker]['wrklastupdate'],
                                '%d-%m-%Y %H:%M:%S'
                        ) < datetime.now() - timedelta(days=60):
                            del data['workersList'][worker]
        else:
            lastupdateCache = ''
            workersListCache = []

        status = ''
        computingsite = ''
        workerid = ''
        days = ''
        if 'status' in request.session['requestParams']:
            status = """AND status like '%s'""" % (str(
                request.session['requestParams']['status']))
        if 'computingsite' in request.session['requestParams']:
            computingsite = """AND computingsite like '%s'""" % (str(
                request.session['requestParams']['computingsite']))
        if 'workerid' in request.session['requestParams']:
            workerid = """AND workerid in (%s)""" % (
                request.session['requestParams']['workerid'])
        if 'days' in request.session['requestParams']:
            days = """AND to_date("wrklastupdate", 'dd-mm-yyyy hh24:mi:ss') > sysdate - %s """ % (
                request.session['requestParams']['days'])
        sqlquery = """
        select * from (SELECT
        ff.harvester_id,
        ff.description,
        to_char(ff.starttime, 'dd-mm-yyyy hh24:mi:ss') as "insstarttime",
        ff.owner,
        ff.hostname,
        ff.sw_version,
        ff.commit_stamp,
        gg.workerid,
        to_char((select max(lastupdate) from atlas_panda.harvester_workers where harvesterid like '%s'), 'dd-mm-yyyy hh24:mi:ss') as "inslastupdate",
        gg.status,
        gg.batchid,
        gg.nodeid,
        gg.queuename,
        gg.computingsite,
        to_char(gg.submittime, 'dd-mm-yyyy hh24:mi:ss') as "submittime",
        to_char(gg.lastupdate , 'dd-mm-yyyy hh24:mi:ss') as "wrklastupdate",
        to_char(gg.starttime , 'dd-mm-yyyy hh24:mi:ss') as "wrkstarttime",
        to_char(gg.endtime, 'dd-mm-yyyy hh24:mi:ss') as "wrkendtime",
        gg.ncore,
        gg.errorcode,
        gg.stdout,
        gg.stderr,
        gg.batchlog,
        gg.resourcetype,
        gg.nativeexitcode,
        gg.nativestatus,
        gg.diagmessage,
        gg.computingelement,
        gg.njobs
        FROM
        atlas_panda.harvester_workers gg,
        atlas_panda.harvester_instances ff
        WHERE
        ff.harvester_id = gg.harvesterid) where harvester_id like '%s' %s %s %s %s %s
        order by workerid DESC
        """ % (str(instance), str(instance), status, computingsite, workerid,
               lastupdateCache, days)
        workersList = []
        cur = connection.cursor()
        cur.execute(sqlquery)
        columns = [str(i[0]).lower() for i in cur.description]
        workersDictionary = {}

        timeLastUpdate = ''
        if not workersListIsEmpty:
            for worker in cur:
                row = dict(zip(columns, worker))
                workersListCache[int(row['workerid'])] = row
                timeLastUpdate = row['inslastupdate']
            workersList = list(workersListCache.values())
            workersDictionary = workersListCache
        else:
            for worker in cur:
                row = dict(zip(columns, worker))
                workersDictionary[int(row['workerid'])] = row
                workersList.append(row)
                timeLastUpdate = row['inslastupdate']

        # dbCache = {
        #     "workersList": workersDictionary,
        #     "lastupdate": timeLastUpdate
        # }
        # print len(workersListCache)
        # if 'status' not in request.session['requestParams'] and 'computingsite' not in request.session['requestParams'] and 'workerid' not in request.session['requestParams'] :
        #     setCacheEntry(request, instance, json.dumps(dbCache, cls=DateEncoder), 86400, isData=True)

        statuses = {}
        computingsites = {}
        workerIDs = set()
        generalInstanseInfo = {}

        if 'display_limit_workers' in request.session['requestParams']:
            display_limit_workers = int(
                request.session['requestParams']['display_limit_workers'])
        else:
            display_limit_workers = 30000

        generalWorkersFields = [
            'workerid', 'status', 'batchid', 'nodeid', 'queuename',
            'computingsite', 'submittime', 'wrklastupdate', 'wrkstarttime',
            'wrkendtime', 'ncore', 'errorcode', 'stdout', 'stderr', 'batchlog',
            'resourcetype', 'nativeexitcode', 'nativestatus', 'diagmessage',
            'njobs', 'computingelement'
        ]
        generalWorkersList = []

        wrkPandaIDs = {}
        for i, worker in enumerate(workersList):
            row = {}
            computingsites.setdefault(worker['computingsite'],
                                      []).append(worker['workerid'])
            statuses.setdefault(worker['status'],
                                []).append(worker['workerid'])
            # if worker['njobs'] is not None:
            #     wrkPandaIDs[worker['workerid']] = worker['njobs']
            # else: wrkPandaIDs[worker['workerid']] = 0
            # workerIDs.add(worker['workerid'])
            for field in generalWorkersFields:
                if worker[field] is not None:
                    row[field] = worker[field]
                else:
                    row[field] = 0
            generalWorkersList.append(row)
            if i == len(workersList) - 1:
                for computingsite in computingsites.keys():
                    computingsites[computingsite] = len(
                        computingsites[computingsite])
                for status in statuses.keys():
                    statuses[status] = len(statuses[status])
                generalInstanseInfo = {
                    'HarvesterID': worker['harvester_id'],
                    'Description': worker['description'],
                    'Starttime': worker['insstarttime'],
                    'Owner': worker['owner'],
                    'Hostname': worker['hostname'],
                    'Lastupdate': worker['inslastupdate'],
                    'Computingsites': computingsites,
                    'Statuses': statuses,
                    'Software version': worker['sw_version'],
                    'Commit stamp': worker['commit_stamp']
                }
        # 'wrkpandaids': OrderedDict(sorted(wrkPandaIDs.items(), key=lambda x: x[1], reverse=True)[:200])
        transactionKey = random.randrange(1000000)
        data = {
            'generalInstanseInfo': generalInstanseInfo,
            'type': 'workers',
            'instance': instance,
            'xurl': xurl,
            'tk': transactionKey,
            'request': request,
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        setCacheEntry(request,
                      transactionKey,
                      json.dumps(generalWorkersList[:display_limit_workers],
                                 cls=DateEncoder),
                      60 * 60,
                      isData=True)
        setCacheEntry(request, 'harvester', json.dumps(data, cls=DateEncoder),
                      60 * 60)
        endSelfMonitor(request)
        return render_to_response('harvesters.html',
                                  data,
                                  content_type='text/html')

    # elif 'instance' in request.session['requestParams'] and 'workerid' in 'instance' in request.session['requestParams']:
    #     pass
    else:
        sqlquery = """
        select  
        R.harvid,
        count(R.workid) as total,
        (select cnt from   (select harvid, count(*) as cnt from (
        SELECT
        a.harvester_id as harvid, 
        b.workerid as workid,
        to_char(b.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as alldate,
        (SELECT
        to_char(max(O.lastupdate), 'dd-mm-yyyy hh24:mi:ss')
        FROM atlas_panda.harvester_workers O WHERE O.harvesterid = a.harvester_id   Group by O.harvesterid) as recently, 
        a.DESCRIPTION as description
        FROM
        atlas_panda.harvester_workers b,
        atlas_panda.harvester_instances a
        WHERE a.harvester_id = b.harvesterid
        ) WHERE alldate = recently Group by harvid) W WHERE W.harvid=R.harvid) as recent,
        R.recently,
        R.sw_version,
        R.commit_stamp,
        R.lastupdate,
        R.description
        FROM (SELECT
        a.harvester_id as harvid, 
        b.workerid as workid,
        to_char(b.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as alldate,
        (SELECT
        to_char(max(O.lastupdate), 'dd-mm-yyyy hh24:mi:ss')
        FROM atlas_panda.harvester_rel_jobs_workers O where  O.harvesterid = a.harvester_id   Group by O.harvesterid) as recently,
        a.sw_version,
        a.commit_stamp,
        to_char(a.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as lastupdate, 
        a.DESCRIPTION as description
        FROM
        atlas_panda.harvester_workers b,
        atlas_panda.harvester_instances a
        WHERE a.harvester_id = b.harvesterid) R group by harvid,recently,sw_version,commit_stamp,lastupdate,description
        """
        instanceDictionary = []
        cur = connection.cursor()
        cur.execute(sqlquery)

        for instance in cur:
            instanceDictionary.append({
                'instance': instance[0],
                'total': instance[1],
                'recently': instance[2],
                'when': instance[3],
                'sw_version': instance[4],
                'commit_stamp': instance[5],
                'lastupdate': instance[6],
                'descr': instance[7]
            })

        data = {
            'instances': instanceDictionary,
            'type': 'instances',
            'xurl': xurl,
            'request': request,
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams']
        }
        #data =json.dumps(data,cls=DateEncoder)
        response = render_to_response('harvesters.html',
                                      data,
                                      content_type='text/html')
    return response
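
# Note: getCacheEntry / setCacheEntry are not defined in this excerpt. A
# minimal sketch, assuming they wrap Django's low-level cache API and that
# isData=True entries are keyed independently of the request path (the real
# helpers may derive keys differently, e.g. from the full query string):
from django.core.cache import cache

def setCacheEntry(request, key, data, timeout, isData=False):
    prefix = 'data' if isData else request.path
    cache.set('%s_%s' % (prefix, key), data, timeout)

def getCacheEntry(request, key, isData=False):
    prefix = 'data' if isData else request.path
    return cache.get('%s_%s' % (prefix, key))
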
Example No. 23
def runningProdRequests(request):
    valid, response = initRequest(request)

    if ('dt' in request.session['requestParams'] and 'tk' in request.session['requestParams']):
        tk = request.session['requestParams']['tk']
        data = getCacheEntry(request, tk, isData=True)
        return HttpResponse(data, content_type='text/html')

    # Here we try to get cached data
    data = getCacheEntry(request, "runningProdRequests")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        # if 'ages' in data:
        #     data['ages'] = preparePlotData(data['ages'])
        # if 'neventsFStasksSum' in data:
        #     data['neventsFStasksSum'] = preparePlotData(data['neventsFStasksSum'])
        # if 'neventsAFIItasksSum' in data:
        #     data['neventsAFIItasksSum'] = preparePlotData(data['neventsAFIItasksSum'])
        # if 'neventsByProcessingType' in data:
        #     data['neventsByProcessingType'] = preparePlotData(data['neventsByProcessingType'])
        # if 'aslotsByType' in data:
        #     data['aslotsByType'] = preparePlotData(data['aslotsByType'])
        response = render_to_response('runningProdRequests.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response



    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    exquery = {}

    rquery = {}
    if 'fullcampaign' in request.session['requestParams']:
        if ':' in request.session['requestParams']['fullcampaign']:
            rquery['campaign'] = request.session['requestParams']['fullcampaign'].split(':')[0]
            rquery['subcampaign'] = request.session['requestParams']['fullcampaign'].split(':')[1]
        else:
            rquery['campaign'] = request.session['requestParams']['fullcampaign']

    if 'group' in request.session['requestParams'] and '_' in request.session['requestParams']['group']:
        rquery['provenance'] = request.session['requestParams']['group'].split('_')[0]
        rquery['physgroup'] = request.session['requestParams']['group'].split('_')[1]

    if 'requesttype' in request.session['requestParams']:
        rquery['requesttype'] = request.session['requestParams']['requesttype']

    if 'status' in request.session['requestParams']:
        rquery['status'] = request.session['requestParams']['status']


    rrequests = RunningProdRequestsModel.objects.filter(**rquery).values()

    request_list = list(rrequests)
    nrequests = len(request_list)
    slots = 0
    aslots = 0
    # ages = []
    neventsTotSum = 0
    neventsUsedTotSum = 0
    # rjobs1coreTot = 0
    # rjobs8coreTot = 0
    for req in request_list:
        neventsTotSum += req['nevents'] if req['nevents'] is not None else 0
        neventsUsedTotSum += req['neventsused']
        slots += req['slots'] if req['slots'] else 0
        aslots += req['aslots'] if req['aslots'] else 0
        req['fullcampaign'] = req['campaign'] + ':' + req['subcampaign'] if req['subcampaign'] is not None and len(req['subcampaign']) > 0 else req['campaign']
        req['group'] = req['provenance'] + '_' + req['physgroup']

        # ages.append(req['age'])


    plotageshistogram = 0
    # if sum(ages) == 0: plotageshistogram = 0
    # sumd = taskSummaryDict(request, task_list, ['status','workinggroup','cutcampaign', 'processingtype'])

    ### Putting list of requests to cache separately for dataTables plugin
    transactionKey = random.randrange(100000000)
    setCacheEntry(request, transactionKey, json.dumps(request_list, cls=DateEncoder), 60 * 30, isData=True)

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        ##self monitor
        endSelfMonitor(request)
        dump = json.dumps(request_list, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'requests': request_list,
            'nrequests': nrequests,
            # 'ages': ages,
            'slots': slots,
            'aslots': aslots,
            # 'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            # 'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
            'transKey': transactionKey,
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('runningProdRequests.html', data, content_type='text/html')
        setCacheEntry(request, "runningProdRequests", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
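
# Note: removeParam is not defined in this excerpt. It strips one query
# parameter from a URL so a template can re-append its own value; a minimal
# sketch under that assumption ('extensible' mode keeps a trailing separator
# so more parameters can be concatenated, matching how nosorturl is used):
def removeParam(url, parname, mode='complete'):
    base, _, qs = url.partition('?')
    pairs = [p for p in qs.split('&') if p and not p.startswith(parname + '=')]
    tail = '&'.join(pairs)
    if mode == 'extensible':
        return base + '?' + (tail + '&' if tail else '')
    return base + ('?' + tail if tail else '')
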
Example No. 24
def tasksErrorsScattering(request):
    initRequest(request)
    limit = 100000
    hours = 4
    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid")

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"

    transactionKey = random.randrange(1000000)
    executionData = []
    for t in tasks:
        executionData.append((t['jeditaskid'], transactionKey))

    new_cur = connection.cursor()
    query = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(query, executionData)
    connection.commit()

    query = """

        SELECT SUM(FAILEDC) / SUM(ALLC) as FPERC, COMPUTINGSITE, JEDITASKID, SUM(FAILEDC) as FAILEDC from (

            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, SUM(1) as ALLC, COMPUTINGSITE, JEDITASKID 
                FROM ATLAS_PANDA.JOBSARCHIVED4 
                WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) 
                group by COMPUTINGSITE, JEDITASKID
            UNION
            SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, SUM(1) as ALLC, COMPUTINGSITE, JEDITASKID 
                FROM ATLAS_PANDAARCH.JOBSARCHIVED 
                WHERE JEDITASKID in (SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) 
                group by COMPUTINGSITE, JEDITASKID
        ) group by COMPUTINGSITE, JEDITASKID
    """ % (tmpTableName, transactionKey, tmpTableName, transactionKey)

    new_cur.execute(query)

    errorsRaw = dictfetchall(new_cur)
    new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    computingSites = []
    taskserrors = {}


    # fill in the per-task error labels
    for errorEntry in errorsRaw:
        jeditaskid = errorEntry['JEDITASKID']
        if jeditaskid not in taskserrors:
            taskentry = {}
            taskserrors[jeditaskid] = taskentry
        labelForLink = (str(int(errorEntry['FPERC'] * 100)) + "%" + " ("+str(int(errorEntry['FAILEDC']))+")") if errorEntry['FPERC'] else " "
        taskserrors[jeditaskid][errorEntry['COMPUTINGSITE']] = labelForLink

    tasksToDel = []

    # clean up tasks that show no errors at all
    for jeditaskid, taskentry in taskserrors.items():
        notNone = False
        for sitename, siteval in taskentry.items():
            if siteval != " ":
                notNone = True
        if not notNone:
            tasksToDel.append(jeditaskid)

    for taskToDel in tasksToDel:
        del taskserrors[taskToDel]

    for jeditaskid, taskentry in taskserrors.items():
        for sitename, siteval in taskentry.items():
            computingSites.append(sitename)

    computingSites = set(computingSites)

    # make sure every task row has a cell for every computing site
    for jeditaskid, taskentry in taskserrors.items():
        for computingSite in computingSites:
            if computingSite not in taskentry:
                taskentry[computingSite] = ' '


    data = {
        'request': request,
        'computingSites': computingSites,
        'taskserrors':taskserrors,
    }

    response = render_to_response('tasksscatteringmatrix.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
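
# Note: both scattering views share the same temp-table idiom: tag the IDs
# with a random TRANSACTIONKEY, bulk-insert them into a scratch table, join
# against that key in raw SQL, then delete the tagged rows. A condensed
# sketch of the pattern (table and column names as used above; the real
# queries interpolate the table name and key several times):
import random
from django.db import connection

def query_with_id_list(ids, sql_template, tmp_table='TMP_IDS1'):
    """Run sql_template, which expects (table, key) slots, against ids."""
    tk = random.randrange(1000000)
    cur = connection.cursor()
    cur.executemany(
        'INSERT INTO ' + tmp_table + '(ID,TRANSACTIONKEY) VALUES (%s, %s)',
        [(i, tk) for i in ids])
    cur.execute(sql_template % (tmp_table, tk))
    columns = [col[0] for col in cur.description]
    rows = [dict(zip(columns, row)) for row in cur.fetchall()]
    cur.execute('DELETE FROM %s WHERE TRANSACTIONKEY=%i' % (tmp_table, tk))
    return rows
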
Example No. 25
def listReqPlot(request):
    valid, response = initRequest(request)
    if not valid: return response

    valid, response = login(request)
    if not valid: return response

    sortby = 'id'
    if 'sortby' in request.GET:
        sortby = request.GET['sortby']

    LAST_N_HOURS_MAX = 7 * 24
    limit = 5000
    if 'hours' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['hours'])
    if 'days' in request.session['requestParams']:
        LAST_N_HOURS_MAX = int(request.session['requestParams']['days']) * 24

    if u'display_limit' in request.session['requestParams']:
        display_limit = int(request.session['requestParams']['display_limit'])
    else:
        display_limit = 1000
    nmax = display_limit

    if LAST_N_HOURS_MAX >= 168:
        flag = 12
    elif LAST_N_HOURS_MAX >= 48:
        flag = 6
    else:
        flag = 2

    startdate = timezone.now() - timedelta(hours=LAST_N_HOURS_MAX)
    enddate = timezone.now()

    query = {
        'qtime__range': [
            startdate.strftime(defaultDatetimeFormat),
            enddate.strftime(defaultDatetimeFormat)
        ]
    }

    values = 'urls', 'qtime', 'remote', 'qduration', 'duration'
    reqs = RequestStat.objects.filter(
        **query).order_by(sortby).reverse().values(*values)

    reqHist = {}
    drHist = []

    mons = []
    for req in reqs:
        mon = {}
        #mon['duration'] = (req['qduration'] - req['qtime']).seconds
        mon['duration'] = req['duration']
        mon['urls'] = req['urls']
        mon['remote'] = req['remote']
        mon['qduration'] = req['qduration'].strftime('%Y-%m-%d %H:%M:%S')
        mon['qtime'] = req['qtime'].strftime('%Y-%m-%d %H:%M:%S')
        mons.append(mon)

        ##plot
        tm = req['qtime']
        tm = tm - timedelta(hours=tm.hour % flag,
                            minutes=tm.minute,
                            seconds=tm.second,
                            microseconds=tm.microsecond)
        if tm not in reqHist:
            reqHist[tm] = 0
        reqHist[tm] += 1

        ##plot -view duration
        dr = int(mon['duration'])
        drHist.append(dr)

    # use sorted() rather than list.sort(): dict views are not sortable in place on Python 3
    reqHists = []
    for k in sorted(reqHist.keys()):
        reqHists.append([k, reqHist[k]])

    drcount = [[x, drHist.count(x)] for x in set(drHist)]
    drcount.sort()

    # do paging

    paginator = Paginator(mons, 200)
    page = request.GET.get('page')
    try:
        reqPages = paginator.page(page)
    except PageNotAnInteger:
        reqPages = paginator.page(1)
    except EmptyPage:
        reqPages = paginator.page(paginator.num_pages)

    url = request.get_full_path()
    if url.count('?') > 0:
        url += '&'
    else:
        url += '?'

    data = {
        'mons': mons[:nmax],
        'nmax': nmax,
        'request': request,
        'user': request.session['username'],
        'reqPages': reqPages,
        'url': url,
        'drHist': drcount,
        'reqHist': reqHists,
    }

    return render_to_response('req_plot.html', data, RequestContext(request))
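
# Note: the qtime bucketing above rounds each request timestamp down to the
# start of its flag-hour bucket (12, 6 or 2 hours depending on the window),
# which is what groups the request-rate histogram bars. A worked example:
from datetime import datetime, timedelta

def bucket(tm, flag):
    """Round a timestamp down to the start of its flag-hour bucket."""
    return tm - timedelta(hours=tm.hour % flag, minutes=tm.minute,
                          seconds=tm.second, microseconds=tm.microsecond)

print(bucket(datetime(2018, 5, 3, 17, 42, 10), 6))  # -> 2018-05-03 12:00:00
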
Example No. 26
def runningProdTasks(request):
    valid, response = initRequest(request)

    if ('dt' in request.session['requestParams'] and 'tk' in request.session['requestParams']):
        tk = request.session['requestParams']['tk']
        data = getCacheEntry(request, tk, isData=True)
        return HttpResponse(data, content_type='text/html')
    # Here we try to get cached data
    data = getCacheEntry(request, "runningProdTasks")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if 'ages' in data:
            data['ages'] = preparePlotData(data['ages'])
        if 'neventsFStasksSum' in data:
            data['neventsFStasksSum'] = preparePlotData(data['neventsFStasksSum'])
        if 'neventsAFIItasksSum' in data:
            data['neventsAFIItasksSum'] = preparePlotData(data['neventsAFIItasksSum'])
        if 'neventsByProcessingType' in data:
            data['neventsByProcessingType'] = preparePlotData(data['neventsByProcessingType'])
        if 'aslotsByType' in data:
            data['aslotsByType'] = preparePlotData(data['aslotsByType'])
        if 'neventsByTaskStatus' in data:
            data['neventsByTaskStatus'] = preparePlotData(data['neventsByTaskStatus'])
        if 'neventsByTaskPriority' in data:
            data['neventsByTaskPriority'] = preparePlotData(data['neventsByTaskPriority'])
        if 'neventsByStatus' in data:
            data['neventsByStatus'] = preparePlotData(data['neventsByStatus'])
        response = render_to_response('runningProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response


    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    nohashtagurl = removeParam(xurl, 'hashtags', mode='extensible')
    exquery = {}

    productiontype = ''
    if 'preset' in request.session['requestParams']:
        if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'MC':
            productiontype = 'MC'
            if 'workinggroup' not in request.session['requestParams']:
                request.session['requestParams']['workinggroup'] = '!AP_REPR,!AP_VALI,!GP_PHYS,!GP_THLT'
            if 'processingtype' not in request.session['requestParams']:
                request.session['requestParams']['processingtype'] = 'evgen|pile|simul|recon'
            if 'campaign' not in request.session['requestParams']:
                request.session['requestParams']['campaign'] = 'mc*'
        if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'DPD':
            productiontype = 'DPD'
            if 'workinggroup' not in request.session['requestParams']:
                request.session['requestParams']['workinggroup'] = 'GP_*'
            if 'processingtype' not in request.session['requestParams']:
                request.session['requestParams']['processingtype'] = 'merge|deriv'
        if request.session['requestParams']['preset'] and request.session['requestParams']['preset'].upper() == 'DATA':
            productiontype = 'DATA'
            if 'workinggroup' not in request.session['requestParams']:
                request.session['requestParams']['workinggroup'] = 'AP_REPR'
            if 'processingtype' not in request.session['requestParams']:
                request.session['requestParams']['processingtype'] = 'reprocessing'

    tquery, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=0, limit=9999999, querytype='task',
                                                            wildCardExt=True)

    if 'workinggroup' in tquery and 'preset' in request.session['requestParams'] and request.session['requestParams']['preset'] == 'MC' and ',' in tquery['workinggroup']:
        #     excludeWGList = list(str(wg[1:]) for wg in request.session['requestParams']['workinggroup'].split(','))
        #     exquery['workinggroup__in'] = excludeWGList
        try:
            del tquery['workinggroup']
        except:
            pass
    if 'status' in request.session['requestParams'] and request.session['requestParams']['status'] == '':
        try:
            del tquery['status']
        except:
            pass
    if 'site' in request.session['requestParams'] and request.session['requestParams']['site'] == 'hpc':
        try:
            del tquery['site']
        except:
            pass
        exquery['site__isnull'] = True
    if 'hashtags' in request.session['requestParams']:
        wildCardExtension += ' AND ('
        wildCards = request.session['requestParams']['hashtags'].split(',')
        currentCardCount = 1
        countCards = len(wildCards)
        for card in wildCards:
            if '*' not in card:
                card = '*' + card + '*'
            elif card.startswith('*'):
                card = card + '*'
            elif card.endswith('*'):
                card = '*' + card
            wildCardExtension += preprocessWildCardString(card, 'hashtags')
            if (currentCardCount < countCards): wildCardExtension += ' AND '
            currentCardCount += 1
        wildCardExtension += ')'
    if 'sortby' in request.session['requestParams'] and '-' in request.session['requestParams']['sortby']:
        sortby = request.session['requestParams']['sortby']
    else:
        sortby = 'creationdate-desc'
    oquery = '-' + sortby.split('-')[0] if sortby.split('-')[1].startswith('d') else sortby.split('-')[0]

#    if "((UPPER(status)  LIKE UPPER('all')))" in wildCardExtension and tquery['eventservice'] == 1:
    if 'eventservice' in tquery and tquery['eventservice'] == 1 and 'days' in request.session['requestParams']:

        setupView(request)
        if 'status__in' in tquery:
            del tquery['status__in']
        excludedTimeQuery = copy.deepcopy(tquery)

        if ('days' in request.GET) and (request.GET['days']):
            days = int(request.GET['days'])
            hours = 24 * days
            startdate = timezone.now() - timedelta(hours=hours)
            startdate = startdate.strftime(defaultDatetimeFormat)
            enddate = timezone.now().strftime(defaultDatetimeFormat)
            tquery['modificationtime__range'] = [startdate, enddate]

        if "((UPPER(status)  LIKE UPPER('all')))" in wildCardExtension:
            wildCardExtension = wildCardExtension.replace("((UPPER(status)  LIKE UPPER('all')))", "(1=1)")
        tasks = []
        tasks.extend(RunningProdTasksModel.objects.filter(**excludedTimeQuery).extra(where=[wildCardExtension]).exclude(
            **exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery)[:])
        tasks.extend(FrozenProdTasksModel.objects.filter(**tquery).extra(where=[wildCardExtension]).exclude(
            **exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery)[:])
    else:
        tasks = RunningProdTasksModel.objects.filter(**tquery).extra(where=[wildCardExtension]).exclude(**exquery).values().annotate(nonetoend=Count(sortby.split('-')[0])).order_by('-nonetoend', oquery)

    qtime = datetime.now()
    task_list = [t for t in tasks]
    ntasks = len(tasks)
    slots = 0
    aslots = 0
    ages = []
    neventsAFIItasksSum = {}
    neventsFStasksSum = {}
    neventsByProcessingType = {}
    neventsByTaskStatus = {}
    neventsByTaskPriority = {}
    aslotsByType = {}
    neventsTotSum = 0
    neventsUsedTotSum = 0
    neventsToBeUsedTotSum = 0
    neventsRunningTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    for task in task_list:
        task['rjobs'] = 0 if task['rjobs'] is None else task['rjobs']
        task['percentage'] = round(100 * task['percentage'],1)
        neventsTotSum += task['nevents'] if task['nevents'] is not None else 0
        neventsUsedTotSum += task['neventsused'] if 'neventsused' in task and task['neventsused'] is not None else 0
        neventsToBeUsedTotSum += task['neventstobeused'] if 'neventstobeused' in task and task['neventstobeused'] is not None else 0
        neventsRunningTotSum += task['neventsrunning'] if 'neventsrunning' in task and task['neventsrunning'] is not None else 0
        slots += task['slots'] if task['slots'] else 0
        aslots += task['aslots'] if task['aslots'] else 0
        if task['processingtype'] not in aslotsByType:
            aslotsByType[str(task['processingtype'])] = 0
        aslotsByType[str(task['processingtype'])] += task['aslots'] if task['aslots'] else 0

        if task['status'] not in neventsByTaskStatus:
            neventsByTaskStatus[str(task['status'])] = 0
        neventsByTaskStatus[str(task['status'])] += task['nevents'] if task['nevents'] is not None else 0

        if task['priority'] not in neventsByTaskPriority:
            neventsByTaskPriority[task['priority']] = 0
        neventsByTaskPriority[task['priority']] += task['nevents'] if task['nevents'] is not None else 0

        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = round(
            (datetime.now() - task['creationdate']).days + (datetime.now() - task['creationdate']).seconds / 3600. / 24,
            1)
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        if 'reqid' in task and 'jeditaskid' in task and task['reqid'] == task['jeditaskid']:
            task['reqid'] = None
        if 'runnumber' in task:
            task['inputdataset'] = task['runnumber']
        else:
            task['inputdataset'] = None

        if task['inputdataset'] and task['inputdataset'].startswith('00'):
            task['inputdataset'] = task['inputdataset'][2:]
        task['outputtypes'] = ''

        if 'outputdatasettype' in task:
            outputtypes = task['outputdatasettype'].split(',')
        else:
            outputtypes = []
        if len(outputtypes) > 0:
            for outputtype in outputtypes:
                task['outputtypes'] += outputtype.split('_')[1] + ' ' if '_' in outputtype else ''
        if productiontype == 'MC':
            if task['simtype'] == 'AFII':
                if task['processingtype'] not in neventsAFIItasksSum:
                    neventsAFIItasksSum[str(task['processingtype'])] = 0
                neventsAFIItasksSum[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
            elif task['simtype'] == 'FS':
                if task['processingtype'] not in neventsFStasksSum:
                    neventsFStasksSum[str(task['processingtype'])] = 0
                neventsFStasksSum[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
        else:
            if task['processingtype'] not in neventsByProcessingType:
                neventsByProcessingType[str(task['processingtype'])] = 0
            neventsByProcessingType[str(task['processingtype'])] += task['nevents'] if task['nevents'] is not None else 0
        if 'hashtags' in task and len(task['hashtags']) > 1:
            task['hashtaglist'] = task['hashtags'].split(',')

    neventsByStatus = {
        'done': neventsUsedTotSum,
        'running': neventsRunningTotSum,
        'waiting': neventsToBeUsedTotSum - neventsRunningTotSum,
    }

    plotageshistogram = 0 if sum(ages) == 0 else 1
    sumd = taskSummaryDict(request, task_list, ['status','workinggroup','cutcampaign', 'processingtype'])

    ### Putting list of tasks to cache separately for dataTables plugin
    transactionKey = random.randrange(100000000)
    setCacheEntry(request, transactionKey, json.dumps(task_list, cls=DateEncoder), 60 * 30, isData=True)

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        ##self monitor
        endSelfMonitor(request)
        if 'snap' in request.session['requestParams']:
            snapdata = prepareNeventsByProcessingType(task_list)
            if saveNeventsByProcessingType(snapdata, qtime):
                data = {'message': 'success'}
            else:
                data = {'message': 'fail'}
            dump = json.dumps(data, cls=DateEncoder)
            return HttpResponse(dump, content_type='text/html')
        dump = json.dumps(task_list, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'nohashtagurl': nohashtagurl,
            'tasks': task_list,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'aslots': aslots,
            'aslotsByType' : aslotsByType,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'neventsWaitingTotSum': round((neventsToBeUsedTotSum - neventsRunningTotSum)/1000000., 1),
            'neventsRunningTotSum': round(neventsRunningTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'neventsAFIItasksSum': neventsAFIItasksSum,
            'neventsFStasksSum': neventsFStasksSum,
            'neventsByProcessingType': neventsByProcessingType,
            'neventsByTaskStatus': neventsByTaskStatus,
            'neventsByTaskPriority': neventsByTaskPriority,
            'neventsByStatus' : neventsByStatus,
            'plotageshistogram': plotageshistogram,
            'productiontype' : json.dumps(productiontype),
            'built': datetime.now().strftime("%H:%M:%S"),
            'transKey': transactionKey,
            'qtime': qtime,
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('runningProdTasks.html', data, content_type='text/html')
        setCacheEntry(request, "runningProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
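
# Note: the annotate(nonetoend=Count(...)).order_by('-nonetoend', oquery)
# idiom above is a NULLs-last sort: Count over the sort column is 0 for rows
# where it is NULL and 1 otherwise, so ordering by -nonetoend first pushes
# NULL values to the end whatever the sort direction. A minimal sketch with
# a hypothetical queryset:
from django.db.models import Count

def order_nulls_last(qs, column, descending=False):
    order = ('-' + column) if descending else column
    return qs.values().annotate(nonetoend=Count(column)).order_by('-nonetoend', order)
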
Example No. 27
def runningDPDProdTasks(request):
    # this view now just redirects; everything below the early return is unreachable
    return redirect('/runningprodtasks/?preset=DPD')
    valid, response = initRequest(request)

    data = getCacheEntry(request, "runningDPDProdTasks")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('runningDPDProdTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response


    # xurl = extensibleURL(request)
    xurl = request.get_full_path()
    if xurl.find('?') > 0:
        xurl += '&'
    else:
        xurl += '?'
    nosorturl = removeParam(xurl, 'sortby', mode='extensible')
    tquery = {}
    if 'campaign' in request.session['requestParams']:
        tquery['campaign__contains'] = request.session['requestParams']['campaign']
    if 'corecount' in request.session['requestParams']:
        tquery['corecount'] = request.session['requestParams']['corecount']
    if 'status' in request.session['requestParams']:
        tquery['status'] = request.session['requestParams']['status']
    if 'reqid' in request.session['requestParams']:
        tquery['reqid'] = request.session['requestParams']['reqid']
    if 'inputdataset' in request.session['requestParams']:
        tquery['taskname__contains'] = request.session['requestParams']['inputdataset']
    tasks = RunningDPDProductionTasks.objects.filter(**tquery).values()
    ntasks = len(tasks)
    slots = 0
    ages = []

    neventsTotSum = 0
    neventsUsedTotSum = 0
    rjobs1coreTot = 0
    rjobs8coreTot = 0
    for task in tasks:
        if task['rjobs'] is None:
            task['rjobs'] = 0
        task['neventsused'] = task['totev'] - task['totevrem'] if task['totev'] is not None and task['totevrem'] is not None else 0
        task['percentage'] = round(100. * task['neventsused'] / task['totev'], 1) if task['totev'] else 0.
        neventsTotSum += task['totev'] if task['totev'] is not None else 0
        neventsUsedTotSum += task['neventsused']
        slots += task['rjobs'] * task['corecount']
        if task['corecount'] == 1:
            rjobs1coreTot += task['rjobs']
        if task['corecount'] == 8:
            rjobs8coreTot += task['rjobs']
        task['age'] = round(
            (datetime.now() - task['creationdate']).days + (datetime.now() - task['creationdate']).seconds / 3600. / 24,
            1)
        ages.append(task['age'])
        if len(task['campaign'].split(':')) > 1:
            task['cutcampaign'] = task['campaign'].split(':')[1]
        else:
            task['cutcampaign'] = task['campaign'].split(':')[0]
        task['inputdataset'] = task['taskname'].split('.')[1]
        if task['inputdataset'].startswith('00'):
            task['inputdataset'] = task['inputdataset'][2:]
        task['tid'] = task['outputtype'].split('_tid')[1].split('_')[0] if '_tid' in task['outputtype'] else None
        task['outputtypes'] = ''
        outputtypes = task['outputtype'].split(',')
        for outputtype in outputtypes:
            task['outputtypes'] += outputtype.split('_')[1].split('_p')[0] + ' ' if '_' in outputtype else ''
        task['ptag'] = task['outputtype'].split('_')[2] if '_' in task['outputtype'] else ''
    plotageshistogram = 1
    if sum(ages) == 0: plotageshistogram = 0
    sumd = taskSummaryDict(request, tasks, ['status'])

    if 'sortby' in request.session['requestParams']:
        sortby = request.session['requestParams']['sortby']
        if sortby == 'campaign-asc':
            tasks = sorted(tasks, key=lambda x: x['campaign'])
        elif sortby == 'campaign-desc':
            tasks = sorted(tasks, key=lambda x: x['campaign'], reverse=True)
        elif sortby == 'reqid-asc':
            tasks = sorted(tasks, key=lambda x: x['reqid'])
        elif sortby == 'reqid-desc':
            tasks = sorted(tasks, key=lambda x: x['reqid'], reverse=True)
        elif sortby == 'jeditaskid-asc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'])
        elif sortby == 'jeditaskid-desc':
            tasks = sorted(tasks, key=lambda x: x['jeditaskid'], reverse=True)
        elif sortby == 'rjobs-asc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'])
        elif sortby == 'rjobs-desc':
            tasks = sorted(tasks, key=lambda x: x['rjobs'], reverse=True)
        elif sortby == 'status-asc':
            tasks = sorted(tasks, key=lambda x: x['status'])
        elif sortby == 'status-desc':
            tasks = sorted(tasks, key=lambda x: x['status'], reverse=True)
        elif sortby == 'nevents-asc':
            tasks = sorted(tasks, key=lambda x: x['totev'])
        elif sortby == 'nevents-desc':
            tasks = sorted(tasks, key=lambda x: x['totev'], reverse=True)
        elif sortby == 'neventsused-asc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'])
        elif sortby == 'neventsused-desc':
            tasks = sorted(tasks, key=lambda x: x['neventsused'], reverse=True)
        elif sortby == 'neventstobeused-asc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'])
        elif sortby == 'neventstobeused-desc':
            tasks = sorted(tasks, key=lambda x: x['totevrem'], reverse=True)
        elif sortby == 'percentage-asc':
            tasks = sorted(tasks, key=lambda x: x['percentage'])
        elif sortby == 'percentage-desc':
            tasks = sorted(tasks, key=lambda x: x['percentage'], reverse=True)
        elif sortby == 'nfilesfailed-asc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'])
        elif sortby == 'nfilesfailed-desc':
            tasks = sorted(tasks, key=lambda x: x['nfilesfailed'], reverse=True)
        elif sortby == 'priority-asc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'])
        elif sortby == 'priority-desc':
            tasks = sorted(tasks, key=lambda x: x['currentpriority'], reverse=True)
        elif sortby == 'ptag-asc':
            tasks = sorted(tasks, key=lambda x: x['ptag'])
        elif sortby == 'ptag-desc':
            tasks = sorted(tasks, key=lambda x: x['ptag'], reverse=True)
        elif sortby == 'outputtype-asc':
            tasks = sorted(tasks, key=lambda x: x['outputtypes'])
        elif sortby == 'outputtype-desc':
            tasks = sorted(tasks, key=lambda x: x['outputtypes'], reverse=True)
        elif sortby == 'age-asc':
            tasks = sorted(tasks, key=lambda x: x['age'])
        elif sortby == 'age-desc':
            tasks = sorted(tasks, key=lambda x: x['age'], reverse=True)
        elif sortby == 'corecount-asc':
            tasks = sorted(tasks, key=lambda x: x['corecount'])
        elif sortby == 'corecount-desc':
            tasks = sorted(tasks, key=lambda x: x['corecount'], reverse=True)
        elif sortby == 'username-asc':
            tasks = sorted(tasks, key=lambda x: x['username'])
        elif sortby == 'username-desc':
            tasks = sorted(tasks, key=lambda x: x['username'], reverse=True)
        elif sortby == 'inputdataset-asc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'])
        elif sortby == 'inputdataset-desc':
            tasks = sorted(tasks, key=lambda x: x['inputdataset'], reverse=True)
    else:
        sortby = 'age-asc'
        tasks = sorted(tasks, key=lambda x: x['age'])

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):
        ##self monitor
        endSelfMonitor(request)

        dump = json.dumps(tasks, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'xurl': xurl,
            'nosorturl': nosorturl,
            'tasks': tasks,
            'ntasks': ntasks,
            'sortby': sortby,
            'ages': ages,
            'slots': slots,
            'sumd': sumd,
            'neventsUsedTotSum': round(neventsUsedTotSum / 1000000., 1),
            'neventsTotSum': round(neventsTotSum / 1000000., 1),
            'rjobs1coreTot': rjobs1coreTot,
            'rjobs8coreTot': rjobs8coreTot,
            'plotageshistogram': plotageshistogram,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        ##self monitor
        endSelfMonitor(request)
        response = render_to_response('runningDPDProdTasks.html', data, content_type='text/html')
        setCacheEntry(request, "runningDPDProdTasks", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
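The sortby elif ladder above maps a '<field>-asc|desc' parameter onto a key of the task dicts; the same dispatch can be written as a lookup table, which is easier to keep in sync with the template. A sketch using the field-to-key pairs from the chain above:

SORT_FIELDS = {
    'campaign': 'campaign', 'reqid': 'reqid', 'jeditaskid': 'jeditaskid',
    'rjobs': 'rjobs', 'status': 'status', 'nevents': 'totev',
    'neventsused': 'neventsused', 'neventstobeused': 'totevrem',
    'percentage': 'percentage', 'nfilesfailed': 'nfilesfailed',
    'priority': 'currentpriority', 'ptag': 'ptag', 'outputtype': 'outputtypes',
    'age': 'age', 'corecount': 'corecount', 'username': 'username',
    'inputdataset': 'inputdataset',
}

def sort_tasks(tasks, sortby='age-asc'):
    # 'percentage-desc' -> field 'percentage', descending order
    field, _, order = sortby.rpartition('-')
    key = SORT_FIELDS.get(field, 'age')
    return sorted(tasks, key=lambda t: t[key], reverse=(order == 'desc'))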
Example No. 28
0
def errorsScatteringDetailed(request, cloud, reqid):
    valid, response = initRequest(request)
    if not valid: return response

    # Here we try to get cached data
    data = getCacheEntry(request, "errorsScatteringDetailed")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        response = render_to_response('errorsScatteringDetailed.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    grouping = []

    homeCloud = {}
    cloudsDict = {}
    sflist = ('siteid', 'site', 'status', 'cloud', 'tier', 'comment_field', 'objectstore', 'catchall', 'corepower')
    sites = Schedconfig.objects.filter().exclude(cloud='CMS').values(*sflist)
    for site in sites:
        homeCloud[site['siteid']] = site['cloud']

        if site['cloud'] not in cloudsDict:
            cloudsDict[site['cloud']] = []
        cloudsDict[site['cloud']].append(site['siteid'])

    sitesDictForOrdering = {}
    i = 0
    for cloudname in sorted(cloudsDict.keys()):
        for sitename in sorted(cloudsDict[cloudname]):
            sitesDictForOrdering[sitename] = i
            i += 1

    clouds = sorted(list(set(homeCloud.values())))
    condition = '(1=1)'
    if not cloud:
        return HttpResponse("No cloud supplied", content_type='text/html')
    elif cloud == 'ALL':
        grouping.append('reqid')
    elif cloud not in clouds:
        return HttpResponse("The provided cloud name does not exist", content_type='text/html')

    if not reqid:
        return HttpResponse("No request ID supplied", content_type='text/html')
    elif reqid == 'ALL':
        grouping.append('cloud')
    else:
        try:
            reqid = int(reqid)
        except:
            return HttpResponse("The provided request ID is not valid", content_type='text/html')
    view = None
    if 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'queues':
        view = 'queues'

    if len(grouping) == 2 and view != 'queues':
        return redirect('/errorsscat/')

    limit = 100000
    if 'hours' in request.session['requestParams']:
        try:
            hours = int(request.session['requestParams']['hours'])
        except:
            hours = 8
    else:
        hours = 8

    isExcludeScouts = False
    if 'scouts' in request.session['requestParams']:
        if request.session['requestParams']['scouts'] == 'exclude':
            isExcludeScouts = True
        try:
            del request.session['requestParams']['scouts']
        except:
            pass


    query, wildCardExtension, LAST_N_HOURS_MAX = setupView(request, hours=hours, limit=9999999, querytype='task', wildCardExt=True)
    query['tasktype'] = 'prod'
    query['superstatus__in'] = ['submitting', 'running']
    # exclude paused tasks
    wildCardExtension += ' AND STATUS != \'paused\''
    if reqid != 'ALL':
        query['reqid'] = reqid
        request.session['requestParams']['reqid'] = reqid
    if cloud != 'ALL':
        request.session['requestParams']['region'] = cloud
        cloudstr = ''
        for sn, cn in homeCloud.iteritems():
            if cn == cloud:
                cloudstr += "\'%s\'," % (str(sn))
        if cloudstr.endswith(','):
            cloudstr = cloudstr[:-1]
        condition = "COMPUTINGSITE in ( %s )" % (str(cloudstr))


    tasks = JediTasksOrdered.objects.filter(**query).extra(where=[wildCardExtension])[:limit].values("jeditaskid", "reqid")

    print 'tasks found %i' % (len(tasks))

    random.seed()
    if dbaccess['default']['ENGINE'].find('oracle') >= 0:
        tmpTableName = "ATLAS_PANDABIGMON.TMP_IDS1DEBUG"
    else:
        tmpTableName = "TMP_IDS1"


    taskListByReq = {}
    transactionKey = random.randrange(1000000)
    executionData = []
    for t in tasks:
        executionData.append((t['jeditaskid'], transactionKey))
        # fill the list of jeditaskids for each reqid to put into cache for consistency with jobList
        if t['reqid'] not in taskListByReq:
            taskListByReq[t['reqid']] = ''
        taskListByReq[t['reqid']] += str(t['jeditaskid']) + ','

    new_cur = connection.cursor()
    insquery = """INSERT INTO """ + tmpTableName + """(ID,TRANSACTIONKEY) VALUES (%s, %s)"""
    new_cur.executemany(insquery, executionData)
    connection.commit()

    jcondition = '(1=1)'
    if isExcludeScouts:
        jcondition = """specialhandling NOT LIKE '%%sj'"""

    querystr = """
            SELECT SUM(FINISHEDC) as FINISHEDC, 
                   SUM(FAILEDC) as FAILEDC,
                   SUM(ALLC) as ALLC,  
                   REQID, JEDITASKID, COMPUTINGSITE, sc.cloud as CLOUD from (
                        SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, 
                               SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC, 
                               SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,  
                               COMPUTINGSITE, REQID, JEDITASKID 
                        FROM ATLAS_PANDA.JOBSARCHIVED4 WHERE JEDITASKID in (
                            SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
                                group by COMPUTINGSITE, JEDITASKID, REQID
                        UNION
                        SELECT SUM(case when JOBSTATUS = 'failed' then 1 else 0 end) as FAILEDC, 
                               SUM(case when JOBSTATUS = 'finished' then 1 else 0 end) as FINISHEDC,  
                               SUM(case when JOBSTATUS in ('finished', 'failed') then 1 else 0 end) as ALLC,
                               COMPUTINGSITE, REQID, JEDITASKID 
                        FROM ATLAS_PANDAARCH.JOBSARCHIVED WHERE JEDITASKID in (
                              SELECT ID FROM %s WHERE TRANSACTIONKEY=%i) AND modificationtime > TO_DATE('%s', 'YYYY-MM-DD HH24:MI:SS') AND %s
                                group by COMPUTINGSITE, JEDITASKID, REQID
            ) j,
            ( select siteid, cloud from ATLAS_PANDAMETA.SCHEDCONFIG  
            ) sc
            where j.computingsite = sc.siteid AND j.ALLC > 0  AND %s
            group by jeditaskid, COMPUTINGSITE, REQID, cloud
    """ % (tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition, tmpTableName, transactionKey, query['modificationtime__castdate__range'][0], jcondition, condition)

    new_cur.execute(querystr)

    errorsRaw = dictfetchall(new_cur)
    # new_cur.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (tmpTableName, transactionKey))

    computingSites = []
    tasksErrorsList = []
    taskserrors = {}
    reqErrorsList = []
    reqerrors = {}

    successrateIntervals = {'green': [80, 100], 'yellow': [50, 79], 'red': [0, 49]}

    statsParams = ['percent', 'finishedc', 'failedc', 'allc']

    if len(grouping) == 0 or (len(grouping) == 1 and 'reqid' in grouping and view == 'queues'):

        # we fill here the dict
        for errorEntry in errorsRaw:
            jeditaskid = errorEntry['JEDITASKID']
            if jeditaskid not in taskserrors:
                taskentry = {}
                taskserrors[jeditaskid] = taskentry
                taskserrors[jeditaskid]['jeditaskid'] = jeditaskid
                taskserrors[jeditaskid]['columns'] = {}
                taskserrors[jeditaskid]['totalstats'] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['totalstats'][param] = 0
            if errorEntry['COMPUTINGSITE'] not in taskserrors[jeditaskid]['columns']:
                taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']][param] = 0
            # accumulate with += because the UNION over JOBSARCHIVED4 and
            # JOBSARCHIVED can return more than one row per (task, site) pair
            sitecols = taskserrors[jeditaskid]['columns'][errorEntry['COMPUTINGSITE']]
            sitecols['finishedc'] += errorEntry['FINISHEDC']
            sitecols['failedc'] += errorEntry['FAILEDC']
            sitecols['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            sitecols['percent'] = int(math.ceil(
                sitecols['finishedc'] * 100. / sitecols['allc'])) if sitecols['allc'] > 0 else 0
            taskserrors[jeditaskid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['totalstats']['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        ### calculate totalstats
        for jeditaskid, taskEntry in taskserrors.iteritems():
            taskserrors[jeditaskid]['totalstats']['percent'] = int(math.ceil(
                taskEntry['totalstats']['finishedc']*100./taskEntry['totalstats']['allc'])) if taskEntry['totalstats']['allc'] > 0 else 0



        tasksToDel = []

        # drop tasks that have no finished or failed jobs at all
        for jeditaskid, taskEntry in taskserrors.iteritems():
            if taskEntry['totalstats']['allc'] == 0:
                tasksToDel.append(jeditaskid)

        for taskToDel in tasksToDel:
            del taskserrors[taskToDel]

        for jeditaskid, taskentry in taskserrors.iteritems():
            for sitename, siteval in taskentry['columns'].iteritems():
                computingSites.append(sitename)

        computingSites = sorted(set(computingSites), key=lambda x: sitesDictForOrdering.get(x))

        ### fill
        for jeditaskid, taskentry in taskserrors.iteritems():
            for computingSite in computingSites:
                if computingSite not in taskentry['columns']:
                    taskserrors[jeditaskid]['columns'][computingSite] = {}
                    for param in statsParams:
                        taskserrors[jeditaskid]['columns'][computingSite][param] = 0

        ### calculate stats for column
        columnstats = {}
        for cn in computingSites:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0
        for jeditaskid, taskEntry in taskserrors.iteritems():
            for cs in computingSites:
                for cname, cEntry in taskEntry['columns'].iteritems():
                    if cs == cname:
                        columnstats[cs]['finishedc'] += cEntry['finishedc']
                        columnstats[cs]['failedc'] += cEntry['failedc']
                        columnstats[cs]['allc'] += cEntry['allc']
        for csn, stats in columnstats.iteritems():
            columnstats[csn]['percent'] = int(
                math.ceil(columnstats[csn]['finishedc'] * 100. / columnstats[csn]['allc'])) if \
                    columnstats[csn]['allc'] > 0 else 0


        ### transform requesterrors dict to list for sorting on template
        for jeditaskid, taskEntry in taskserrors.iteritems():
            columnlist = []
            for columnname, stats in taskEntry['columns'].iteritems():
                stats['computingsite'] = columnname
                columnlist.append(stats)
            taskEntry['columns'] = sorted(columnlist, key=lambda x: sitesDictForOrdering.get(x['computingsite']))

        for jeditaskid, taskEntry in taskserrors.iteritems():
            tasksErrorsList.append(taskEntry)

        tasksErrorsList = sorted(tasksErrorsList, key=lambda x: x['totalstats']['percent'])

    elif len(grouping) == 1 and 'reqid' in grouping:

        clouderrors = {}
        # we fill here the dict
        for errorEntry in errorsRaw:
            jeditaskid = errorEntry['JEDITASKID']
            if jeditaskid not in taskserrors:
                taskentry = {}
                taskserrors[jeditaskid] = taskentry
                taskserrors[jeditaskid]['jeditaskid'] = jeditaskid
                taskserrors[jeditaskid]['columns'] = {}
                taskserrors[jeditaskid]['totalstats'] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['totalstats'][param] = 0
            if errorEntry['CLOUD'] not in taskserrors[jeditaskid]['columns']:
                taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']] = {}
                for param in statsParams:
                    taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']][param] = 0
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['columns'][errorEntry['CLOUD']]['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
            taskserrors[jeditaskid]['totalstats']['failedc'] += errorEntry['FAILEDC']
            taskserrors[jeditaskid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

            if errorEntry['CLOUD'] not in clouderrors:
                clouderrors[errorEntry['CLOUD']] = {}
            if errorEntry['COMPUTINGSITE'] not in clouderrors[errorEntry['CLOUD']]:
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']] = {}
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] = 0
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] = 0
                clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] = 0
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
            clouderrors[errorEntry['CLOUD']][errorEntry['COMPUTINGSITE']]['allc'] += (errorEntry['FINISHEDC'] + errorEntry['FAILEDC'])

        ### calculate totalstats
        for jeditaskid, taskEntry in taskserrors.iteritems():
            taskserrors[jeditaskid]['totalstats']['percent'] = int(
                math.ceil(taskEntry['totalstats']['finishedc'] * 100. / taskEntry['totalstats']['allc'])) if \
                    taskEntry['totalstats']['allc'] > 0 else 0

        tasksToDel = []

        # drop tasks that have no finished or failed jobs at all
        for jeditaskid, taskEntry in taskserrors.iteritems():
            if taskEntry['totalstats']['allc'] == 0:
                tasksToDel.append(jeditaskid)

        for taskToDel in tasksToDel:
            del taskserrors[taskToDel]


        for jeditaskid, taskentry in taskserrors.iteritems():
            for c in clouds:
                if not c in taskentry['columns']:
                    taskentry['columns'][c] = {}
                    for param in statsParams:
                        taskentry['columns'][c][param] = 0
                else:
                    taskentry['columns'][c]['percent'] = int(math.ceil(taskentry['columns'][c]['finishedc']*100./taskentry['columns'][c]['allc'])) if \
                        taskentry['columns'][c]['allc'] > 0 else 0

        ### calculate stats for columns
        columnstats = {}
        for cn in clouds:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0

            columnstats[cns]['minpercent'] = 100
            for color, srint in successrateIntervals.items():
                columnstats[cns][color + 'c'] = 0

        for cloudname, sites in clouderrors.iteritems():
            for sitename, sstats in sites.iteritems():
                columnstats[cloudname]['finishedc'] += sstats['finishedc']
                columnstats[cloudname]['failedc'] += sstats['failedc']
                columnstats[cloudname]['allc'] += sstats['allc']
                srpct = int(sstats['finishedc'] * 100. / sstats['allc'])
                for color, srint in successrateIntervals.items():
                    columnstats[cloudname][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0
            columnstats[cloudname]['minpercent'] = min(
                int(cstats['finishedc'] * 100. / cstats['allc']) for cstats in sites.values())

        for cn, stats in columnstats.iteritems():
            columnstats[cn]['percent'] = int(
                math.ceil(columnstats[cn]['finishedc'] * 100. / columnstats[cn]['allc'])) if \
                    columnstats[cn]['allc'] > 0 else 0

        ### transform requesterrors dict to list for sorting on template
        for jeditaskid, taskEntry in taskserrors.iteritems():
            tasksErrorsList.append(taskEntry)

        tasksErrorsList = sorted(tasksErrorsList, key=lambda x: x['totalstats']['percent'])

    elif 'cloud' in grouping or view == 'queues':

        print '%s starting data aggregation' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        # we fill here the dict
        for errorEntry in errorsRaw:
            rid = errorEntry['REQID']
            if rid not in reqerrors:
                reqentry = {}
                reqerrors[rid] = reqentry
                reqerrors[rid]['columns'] = {}
                reqerrors[rid]['reqid'] = rid
                reqerrors[rid]['totalstats'] = {}
                reqerrors[rid]['totalstats']['greenc'] = 0
                reqerrors[rid]['totalstats']['yellowc'] = 0
                reqerrors[rid]['totalstats']['redc'] = 0
                reqerrors[rid]['tasks'] = {}
                for param in statsParams:
                    reqerrors[rid]['totalstats'][param] = 0
            if errorEntry['COMPUTINGSITE'] not in reqerrors[rid]['columns']:
                reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']] = {}
                for param in statsParams:
                    reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']][param] = 0
            if errorEntry['JEDITASKID'] not in reqerrors[rid]['tasks']:
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']] = {}
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] = 0
                reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] = 0
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['finishedc'] += errorEntry['FINISHEDC']
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['failedc'] += errorEntry['FAILEDC']
            reqerrors[rid]['columns'][errorEntry['COMPUTINGSITE']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['finishedc'] += errorEntry['FINISHEDC']
            reqerrors[rid]['tasks'][errorEntry['JEDITASKID']]['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

            # add this row's counts directly; adding the running per-site totals
            # would double count sites that appear in several rows
            reqerrors[rid]['totalstats']['finishedc'] += errorEntry['FINISHEDC']
            reqerrors[rid]['totalstats']['failedc'] += errorEntry['FAILEDC']
            reqerrors[rid]['totalstats']['allc'] += errorEntry['FINISHEDC'] + errorEntry['FAILEDC']

        for rid, reqentry in reqerrors.iteritems():
            reqerrors[rid]['totalstats']['percent'] = int(
                math.ceil(reqerrors[rid]['totalstats']['finishedc'] * 100. / reqerrors[rid]['totalstats']['allc'])) if \
                    reqerrors[rid]['totalstats']['allc'] > 0 else 0
            reqerrors[rid]['totalstats']['minpercent'] = min(
                int(tstats['finishedc'] * 100. / tstats['allc']) for tstats in reqentry['tasks'].values())
            for tstats in reqentry['tasks'].values():
                srpct = int(tstats['finishedc'] * 100. / tstats['allc'])
                for color, srint in successrateIntervals.items():
                    reqerrors[rid]['totalstats'][color + 'c'] += 1 if (srpct >= srint[0] and srpct <= srint[1]) else 0


        print '%s starting cleaning of non-erroneous requests' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        reqsToDel = []

        # drop requests that have no finished or failed jobs at all
        for rid, reqentry in reqerrors.iteritems():
            if reqentry['totalstats']['allc'] == 0:
                reqsToDel.append(rid)

        for reqToDel in reqsToDel:
            del reqerrors[reqToDel]

        print '%s starting calculation of row average stats' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        for rid, reqentry in reqerrors.iteritems():
            for sn, sv in reqentry['columns'].iteritems():
                computingSites.append(str(sn))

        computingSites = sorted(set(computingSites), key=lambda x: sitesDictForOrdering.get(x))

        for rid, reqentry  in reqerrors.iteritems():
            for s in computingSites:
                if not s in reqentry['columns']:
                    reqentry['columns'][s] = {}
                    for param in statsParams:
                        reqentry['columns'][s][param] = 0
                else:
                    reqentry['columns'][s]['percent'] = int(math.ceil(reqentry['columns'][s]['finishedc']*100./reqentry['columns'][s]['allc'])) if \
                        reqentry['columns'][s]['allc'] > 0 else 0

        print '%s starting calculation of columns average stats' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        ### calculate stats for columns
        columnstats = {}
        for cn in computingSites:
            cns = str(cn)
            columnstats[cns] = {}
            for param in statsParams:
                columnstats[cns][param] = 0
        for rid, reqEntry in reqerrors.iteritems():
            for cn in computingSites:
                for cname, cEntry in reqEntry['columns'].iteritems():
                    if cn == cname:
                        columnstats[cn]['finishedc'] += cEntry['finishedc']
                        columnstats[cn]['failedc'] += cEntry['failedc']
                        columnstats[cn]['allc'] += cEntry['allc']
        for cn, stats in columnstats.iteritems():
            columnstats[cn]['percent'] = int(
                math.ceil(columnstats[cn]['finishedc'] * 100. / columnstats[cn]['allc'])) if \
                    columnstats[cn]['allc'] > 0 else 0

        print '%s starting set unique cache for each request' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

        ### Introducing unique tk for each reqid
        for rid, reqentry in reqerrors.iteritems():
            if rid in taskListByReq and len(taskListByReq[rid]) > 0:
                tk = setCacheData(request, lifetime=60*20, jeditaskid=taskListByReq[rid][:-1])
                reqentry['tk'] = tk

        print '%s starting transform dict to list' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        ### transform requesterrors dict to list for sorting on template
        for rid, reqEntry in reqerrors.iteritems():
            columnlist = []
            for columnname, stats in reqEntry['columns'].iteritems():
                stats['computingsite'] = columnname
                columnlist.append(stats)
            reqEntry['columns'] = sorted(columnlist, key=lambda x: sitesDictForOrdering.get(x['computingsite']))
        reqErrorsList = []
        for rid, reqEntry in reqerrors.iteritems():
            reqErrorsList.append(reqEntry)
        reqErrorsList = sorted(reqErrorsList, key=lambda x: x['totalstats']['percent'])

    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'cloud': cloud,
        'reqid': reqid,
        'grouping': grouping,
        'view': view,
        'computingSites': computingSites,
        'clouds': clouds,
        'columnstats': columnstats,
        'taskserrors': tasksErrorsList,
        'reqerrors': reqErrorsList,
        'scouts': 'exclude' if isExcludeScouts else 'include',
        'nrows': max(len(tasksErrorsList), len(reqErrorsList)),
        'built': datetime.now().strftime("%H:%M:%S"),
    }
    print '%s starting rendering of the page' % (datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    ##self monitor
    endSelfMonitor(request)
    setCacheEntry(request, "errorsScatteringDetailed", json.dumps(data, cls=DateEncoder), 60 * 20)
    response = render_to_response('errorsScatteringDetailed.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
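The temporary-table handshake in errorsScatteringDetailed is worth isolating: the selected jeditaskids are staged under a random transaction key so the big UNION query can join back to them, and the same key can later clean the rows up (the commented-out DELETE above). A stripped-down sketch, assuming a Django connection cursor and the same two-column TMP table layout:

import random

def stage_ids(cursor, ids, table_name):
    # stage (id, transaction_key) pairs; the caller filters the main
    # query with WHERE TRANSACTIONKEY = <returned key>
    tk = random.randrange(1000000)
    cursor.executemany(
        "INSERT INTO " + table_name + " (ID, TRANSACTIONKEY) VALUES (%s, %s)",
        [(i, tk) for i in ids])
    return tk

def unstage_ids(cursor, table_name, tk):
    # remove the staged rows once the query has run
    cursor.execute("DELETE FROM %s WHERE TRANSACTIONKEY=%i" % (table_name, tk))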
Example No. 29
0
def prodNeventsTrend(request):
    """
    The view presents the historical trend of nevents in different states for various processing types.
    Default time window: 1 week.
    """
    valid, response = initRequest(request)
    defaultdays = 7
    equery = {}
    if 'days' in request.session['requestParams'] and request.session[
            'requestParams']['days']:
        try:
            days = int(request.session['requestParams']['days'])
        except:
            days = defaultdays
        if days > defaultdays:
            days = defaultdays
        starttime = datetime.now() - timedelta(days=days)
        endtime = datetime.now()
        request.session['requestParams']['days'] = days
    else:
        starttime = datetime.now() - timedelta(days=defaultdays)
        endtime = datetime.now()
        request.session['requestParams']['days'] = defaultdays
    equery['timestamp__range'] = [starttime, endtime]

    if 'processingtype' in request.session['requestParams'] and request.session[
            'requestParams']['processingtype']:
        if '|' not in request.session['requestParams']['processingtype']:
            equery['processingtype'] = request.session['requestParams'][
                'processingtype']
        else:
            pts = request.session['requestParams']['processingtype'].split('|')
            equery['processingtype__in'] = pts

    events = ProdNeventsHistory.objects.filter(**equery).values()

    timeline = set([ev['timestamp'] for ev in events])
    timelinestr = [
        datetime.strftime(ts, defaultDatetimeFormat) for ts in timeline
    ]

    if 'view' in request.session['requestParams'] and request.session[
            'requestParams']['view'] and request.session['requestParams'][
                'view'] == 'separated':
        view = request.session['requestParams']['view']
    else:
        view = 'joint'

    plot_data = []

    if view == 'joint':
        ev_states = ['running', 'used', 'waiting']

        data = {}
        for es in ev_states:
            data[es] = {}
            for ts in timelinestr:
                data[es][ts] = 0
        for ev in events:
            for es in ev_states:
                data[es][datetime.strftime(
                    ev['timestamp'],
                    defaultDatetimeFormat)] += ev['nevents' + str(es)]

    else:
        processingtypes = set([ev['processingtype'] for ev in events])
        ev_states = ['running', 'waiting']
        lines = []
        for prtype in processingtypes:
            for evst in ev_states:
                lines.append(str(prtype + '_' + evst))
        if len(processingtypes) > 1:
            lines.append('total_running')
            lines.append('total_waiting')

        data = {}
        for l in lines:
            data[l] = {}
            for ts in timelinestr:
                data[l][ts] = 0
        for ev in events:
            for l in lines:
                if ev['processingtype'] in l:
                    data[l][datetime.strftime(
                        ev['timestamp'],
                        defaultDatetimeFormat)] += ev['nevents' +
                                                      str(l.split('_')[1])]
                if l.startswith('total'):
                    data[l][datetime.strftime(
                        ev['timestamp'],
                        defaultDatetimeFormat)] += ev['nevents' +
                                                      str(l.split('_')[1])]

    for key, value in data.iteritems():
        newDict = {'state': key, 'values': []}
        for ts, nevents in value.iteritems():
            newDict['values'].append({'timestamp': ts, 'nevents': nevents})
        newDict['values'] = sorted(newDict['values'],
                                   key=lambda k: k['timestamp'])
        plot_data.append(newDict)

    if (('HTTP_ACCEPT' in request.META) and
        (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))
        ) or ('json' in request.session['requestParams']):
        ##self monitor
        endSelfMonitor(request)

        dump = json.dumps(plot_data, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'built': datetime.now().strftime("%H:%M:%S"),
            'plotData': json.dumps(plot_data)
        }

        endSelfMonitor(request)
        response = render_to_response('prodNeventsTrend.html',
                                      data,
                                      content_type='text/html')
        setCacheEntry(request, "prodNeventsTrend",
                      json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(
            response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
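The JSON-versus-HTML decision repeated in these views (an Accept header check plus an explicit ?json parameter) fits in a small predicate; a sketch against the same request.session layout used above:

def wants_json(request):
    # mirrors the HTTP_ACCEPT / 'json' request-parameter checks above
    accept = request.META.get('HTTP_ACCEPT', '')
    if accept in ('text/json', 'application/json'):
        return True
    return 'json' in request.session.get('requestParams', {})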
Example No. 30
0
def registerARTTest(request):
    """
    API to register ART tests
    Example of curl command:
    curl -X POST -d "pandaid=XXX" -d "testname=test_XXXXX.sh" http://bigpanda.cern.ch/art/registerarttest/?json
    """
    valid,response = initRequest(request)
    pandaid = -1
    testname = ''
    nightly_release_short = None
    platform = None
    project = None
    package = None
    nightly_tag = None

    ### Checking whether params were provided
    if 'requestParams' in request.session and 'pandaid' in request.session['requestParams'] and 'testname' in request.session['requestParams']:
        pandaid = request.session['requestParams']['pandaid']
        testname = request.session['requestParams']['testname']
    else:
        data = {'exit_code': -1, 'message': "There were not recieved any pandaid and testname"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    if 'nightly_release_short' in request.session['requestParams']:
        nightly_release_short = request.session['requestParams']['nightly_release_short']
    else:
        data = {'exit_code': -1, 'message': "No nightly_release_short provided"}
        return HttpResponse(json.dumps(data), content_type='text/html')
    if 'platform' in request.session['requestParams']:
        platform = request.session['requestParams']['platform']
    else:
        data = {'exit_code': -1, 'message': "No platform provided"}
        return HttpResponse(json.dumps(data), content_type='text/html')
    if 'project' in request.session['requestParams']:
        project = request.session['requestParams']['project']
    else:
        data = {'exit_code': -1, 'message': "No project provided"}
        return HttpResponse(json.dumps(data), content_type='text/html')
    if 'package' in request.session['requestParams']:
        package = request.session['requestParams']['package']
    else:
        data = {'exit_code': -1, 'message': "No package provided"}
        return HttpResponse(json.dumps(data), content_type='text/html')
    if 'nightly_tag' in request.session['requestParams']:
        nightly_tag = request.session['requestParams']['nightly_tag']
    else:
        data = {'exit_code': -1, 'message': "No nightly_tag provided"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    ### Checking whether params are valid
    try:
        pandaid = int(pandaid)
    except:
        data = {'exit_code': -1, 'message': "Illegal pandaid was received"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    if pandaid < 0:
        data = {'exit_code': -1, 'message': "Illegal pandaid was recieved"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    if not str(testname).startswith('test_'):
        data = {'exit_code': -1, 'message': "Illegal test name was recieved"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    ### Checking if provided pandaid exists in panda db
    query={}
    query['pandaid'] = pandaid
    values = 'pandaid', 'jeditaskid', 'jobname'
    jobs = []
    jobs.extend(CombinedWaitActDefArch4.objects.filter(**query).values(*values))
    if len(jobs) == 0:
        # check archived table
        jobs.extend(Jobsarchived.objects.filter(**query).values(*values))
    try:
        job = jobs[0]
    except:
        data = {'exit_code': -1, 'message': "Provided pandaid does not exists"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    ### Checking whether provided pandaid is an ART job
    if 'jobname' in job and not job['jobname'].startswith('user.artprod'):
        data = {'exit_code': -1, 'message': "Provided pandaid is not an ART job"}
        return HttpResponse(json.dumps(data), content_type='text/html')

    ### Preparing params to register art job

    jeditaskid = job['jeditaskid']

    ### table columns:
    # pandaid
    # testname
    # nightly_release_short
    # platform
    # project
    # package
    # nightly_tag
    # jeditaskid

    ### Check whether the pandaid has been registered already
    if ARTTests.objects.filter(pandaid=pandaid).count() == 0:

        ## INSERT ROW
        try:
            insertRow = ARTTests.objects.create(pandaid=pandaid,
                                                jeditaskid=jeditaskid,
                                                testname=testname,
                                                nightly_release_short=nightly_release_short,
                                                nightly_tag=nightly_tag,
                                                project=project,
                                                platform=platform,
                                                package=package
                                                )
            insertRow.save()
            data = {'exit_code': 0, 'message': "Provided pandaid has been successfully registered"}
        except:
            data = {'exit_code': 0, 'message': "Provided pandaid is already registered (pk violated)"}
    else:
        data = {'exit_code': 0, 'message': "Provided pandaid is already registered"}


    return HttpResponse(json.dumps(data), content_type='text/html')
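The curl command in the docstring translates directly to Python; the values below are placeholders, not a real test registration:

import requests

payload = {
    'pandaid': 1234567890,                # placeholder PanDA job id
    'testname': 'test_example.sh',        # must start with 'test_'
    'nightly_release_short': '21.0',      # placeholder release
    'platform': 'x86_64-slc6-gcc62-opt',  # placeholder platform
    'project': 'Athena',                  # placeholder project
    'package': 'Tier0ChainTests',         # placeholder package
    'nightly_tag': '2018-02-25T2154',     # placeholder nightly tag
}
r = requests.post('http://bigpanda.cern.ch/art/registerarttest/?json', data=payload)
print(r.json())  # {'exit_code': 0, 'message': ...} on success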
Example No. 31
0
def artTasks(request):
    valid, response = initRequest(request)
    query = setupView(request, 'job')

    # Here we try to get cached data
    data = getCacheEntry(request, "artTasks")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if 'ntaglist' in data:
            if len(data['ntaglist']) > 0:
                ntags = []
                for ntag in data['ntaglist']:
                    try:
                        ntags.append(datetime.strptime(ntag, artdateformat))
                    except:
                        pass
                if len(ntags) > 1 and 'requestParams' in data:
                    data['requestParams']['ntag_from'] = min(ntags)
                    data['requestParams']['ntag_to'] = max(ntags)
                elif len(ntags) == 1:
                    data['requestParams']['ntag'] = ntags[0]
        response = render_to_response('artTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    cur = connection.cursor()
    if datetime.strptime(query['ntag_from'], '%Y-%m-%d') < datetime.strptime('2018-03-20', '%Y-%m-%d'):
        query_raw = """SELECT package, branch, ntag, taskid, status, result FROM table(ATLAS_PANDABIGMON.ARTTESTS('%s','%s','%s'))""" % (query['ntag_from'], query['ntag_to'], query['strcondition'])
    else:
        query_raw = """SELECT package, branch, ntag, taskid, status, result FROM table(ATLAS_PANDABIGMON.ARTTESTS_1('%s','%s','%s'))""" % (query['ntag_from'], query['ntag_to'], query['strcondition'])

    cur.execute(query_raw)
    tasks_raw = cur.fetchall()
    cur.close()

    artJobs = ['package', 'branch', 'ntag', 'task_id', 'jobstatus', 'result']
    jobs = [dict(zip(artJobs, row)) for row in tasks_raw]

    # tasks = ARTTasks.objects.filter(**query).values('package','branch','task_id', 'ntag', 'nfilesfinished', 'nfilesfailed')
    ntagslist = list(sorted(set([x['ntag'] for x in jobs])))
    statestocount = ['finished', 'failed', 'active']
    arttasksdict = {}
    if not 'view' in request.session['requestParams'] or ('view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'packages'):
        for job in jobs:
            if job['package'] not in arttasksdict.keys():
                arttasksdict[job['package']] = {}
            if job['branch'] not in arttasksdict[job['package']].keys():
                arttasksdict[job['package']][job['branch']] = {}
                for n in ntagslist:
                    arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)] = {}
                    arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    for state in statestocount:
                        arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)][state] = 0
            if job['ntag'].strftime(artdateformat) in arttasksdict[job['package']][job['branch']]:
                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)
                arttasksdict[job['package']][job['branch']][job['ntag'].strftime(artdateformat)][finalresult] += 1
    elif 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'branches':
        for job in jobs:
            if job['branch'] not in arttasksdict.keys():
                arttasksdict[job['branch']] = {}
            if job['package'] not in arttasksdict[job['branch']].keys():
                arttasksdict[job['branch']][job['package']] = {}
                for n in ntagslist:
                    arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)] = {}
                    arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    for state in statestocount:
                        arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)][state] = 0
            if job['ntag'].strftime(artdateformat) in arttasksdict[job['branch']][job['package']]:
                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)
                arttasksdict[job['branch']][job['package']][job['ntag'].strftime(artdateformat)][finalresult] += 1

    xurl = extensibleURL(request)
    noviewurl = removeParam(xurl, 'view', mode='extensible')

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):

        data = {
            'arttasks' : arttasksdict,
        }

        dump = json.dumps(data, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'arttasks' : arttasksdict,
            'noviewurl': noviewurl,
            'ntaglist': [ntag.strftime(artdateformat) for ntag in ntagslist],
        }

        setCacheEntry(request, "artTasks", json.dumps(data, cls=DateEncoder), 60 * cache_timeout)
        response = render_to_response('artTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
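For reference, the arttasksdict assembled above is a package -> branch -> ntag nesting of per-state counters (or branch -> package when view=branches); one entry has roughly this shape, with all values invented for illustration:

arttasksdict = {
    'Tier0ChainTests': {                        # package
        '21.0/Athena/x86_64-slc6-gcc62-opt': {  # branch
            '2018-03-21': {                     # ntag, artdateformat
                'ntag_hf': '21 Mar 2018',       # humandateformat (assumed)
                'finished': 10,
                'failed': 2,
                'active': 1,
            },
        },
    },
}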
Example No. 32
0
def prodNeventsTrend(request):
    """
    The view presents the historical trend of nevents in different states for various processing types.
    Default time window: 1 week.
    """
    valid, response = initRequest(request)
    defaultdays = 7
    equery = {}
    if 'days' in request.session['requestParams'] and request.session['requestParams']['days']:
        try:
            days = int(request.session['requestParams']['days'])
        except:
            days = defaultdays
        if days > defaultdays:
            days = defaultdays
        starttime = datetime.now() - timedelta(days=days)
        endtime = datetime.now()
        request.session['requestParams']['days'] = days
    else:
        starttime = datetime.now() - timedelta(days=defaultdays)
        endtime = datetime.now()
        request.session['requestParams']['days'] = defaultdays
    equery['timestamp__range'] = [starttime, endtime]

    if 'processingtype' in request.session['requestParams'] and request.session['requestParams']['processingtype']:
        if '|' not in request.session['requestParams']['processingtype']:
            equery['processingtype'] = request.session['requestParams']['processingtype']
        else:
            pts = request.session['requestParams']['processingtype'].split('|')
            equery['processingtype__in'] = pts

    events = ProdNeventsHistory.objects.filter(**equery).values()

    timeline = set([ev['timestamp'] for ev in events])
    timelinestr = [datetime.strftime(ts, defaultDatetimeFormat) for ts in timeline]
    ev_states = ['running', 'used', 'waiting']

    data = {}
    for es in ev_states:
        data[es] = {}
        for ts in timelinestr:
            data[es][ts] = 0
    for ev in events:
        for es in ev_states:
            data[es][datetime.strftime(ev['timestamp'], defaultDatetimeFormat)] += ev['nevents' + str(es)]

    plot_data = []
    for key, value in data.iteritems():
        newDict = {'state': key, 'values':[]}
        for ts, nevents in value.iteritems():
            newDict['values'].append({'timestamp': ts, 'nevents':nevents})
        newDict['values'] = sorted(newDict['values'], key=lambda k: k['timestamp'])
        plot_data.append(newDict)


    data = {
        'request': request,
        'requestParams': request.session['requestParams'],
        'plotData': json.dumps(plot_data)
    }

    endSelfMonitor(request)
    response = render_to_response('prodNeventsTrend.html', data, content_type='text/html')
    setCacheEntry(request, "prodNeventsTrend", json.dumps(data, cls=DateEncoder), 60 * 20)
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
Example No. 33
0
def globalshares(request):
    valid, response = initRequest(request)
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData
        response = render_to_response('globalshares.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    if not valid: return response
    setupView(request, hours=180 * 24, limit=9999999)
    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}  # e.g. {'Upgrade': 130049, 'Reprocessing default': 568841, 'Data Derivations': 202962, 'Event Index': 143}

    for shareName, shareValue in gs.iteritems():
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        shareValue[
            'used'] = shareValue['ratio'] if 'ratio' in shareValue else None
        gsPlotData[str(shareName)] = int(shareValue['executing'])

    for shareValue in tablerows:
        shareValue['used'] = shareValue['ratio'] * Decimal(
            shareValue['value']) / 100 if 'ratio' in shareValue else None
    ordtablerows = {}
    ordtablerows['childlist'] = []
    level1 = ''
    level2 = ''
    level3 = ''

    for shareValue in tablerows:
        if len(shareValue['level1']) != 0:
            level1 = shareValue['level1']
            ordtablerows[level1] = {}
            ordtablerows['childlist'].append(level1)
            ordtablerows[level1]['childlist'] = []
        if len(shareValue['level2']) != 0:
            level2 = shareValue['level2']
            ordtablerows[level1][level2] = {}
            ordtablerows[level1]['childlist'].append(level2)
            ordtablerows[level1][level2]['childlist'] = []
        if len(shareValue['level3']) != 0:
            level3 = shareValue['level3']
            ordtablerows[level1][level2][level3] = {}
            ordtablerows[level1][level2]['childlist'].append(level3)

    resources_list, resources_dict = get_resources_gshare()

    newTablesRow = []
    for ordValueLevel1 in sorted(ordtablerows['childlist']):
        for shareValue in tablerows:
            if ordValueLevel1 in shareValue['level1']:
                ord1Short = re.sub('\[(.*)\]', '',
                                   ordValueLevel1).rstrip().lower()
                shareValue['level'] = 'level1'
                shareValue['gshare'] = ord1Short.replace(' ', '_')
                newTablesRow.append(shareValue)
                tablerows.remove(shareValue)
                if len(ordtablerows[ordValueLevel1]['childlist']) == 0:
                    add_resources(ord1Short, newTablesRow, resources_list,
                                  shareValue['level'])
                else:
                    childsgsharelist = []
                    get_child_elements(ordtablerows[ordValueLevel1],
                                       childsgsharelist)
                    resources_dict = get_child_sumstats(
                        childsgsharelist, resources_dict, ord1Short)
                    short_resource_list = resourcesDictToList(resources_dict)
                    add_resources(ord1Short, newTablesRow, short_resource_list,
                                  shareValue['level'])
                break
        for ordValueLevel2 in sorted(
                ordtablerows[ordValueLevel1]['childlist']):
            for shareValue in tablerows:
                if ordValueLevel2 in shareValue['level2']:
                    if len(ordtablerows[ordValueLevel1][ordValueLevel2]
                           ['childlist']) == 0:
                        ord1Short = re.sub('\[(.*)\]', '',
                                           ordValueLevel1).rstrip().lower()
                        ord2Short = re.sub('\[(.*)\]', '',
                                           ordValueLevel2).rstrip().lower()
                        link = "?jobtype=%s&display_limit=100&gshare=%s" % (
                            ord1Short, ord2Short)
                        shareValue['link'] = link
                        shareValue['level'] = 'level2'
                        shareValue['gshare'] = ord2Short.replace(' ', '_')
                    newTablesRow.append(shareValue)
                    tablerows.remove(shareValue)
                    if 'level' in shareValue:
                        add_resources(ord2Short, newTablesRow, resources_list,
                                      shareValue['level'])
                    break
            for ordValueLevel3 in sorted(
                    ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']):
                for shareValue in tablerows:
                    if ordValueLevel3 in shareValue['level3']:
                        if len(ordtablerows[ordValueLevel1][ordValueLevel2]
                               ['childlist']) > 0:
                            ord1Short = re.sub(
                                '\[(.*)\]', '',
                                ordValueLevel1).rstrip().lower()
                            ord3Short = re.sub(
                                '\[(.*)\]', '',
                                ordValueLevel3).rstrip().lower()
                            link = "?jobtype=%s&display_limit=100&gshare=%s" % (
                                ord1Short, ord3Short)
                            shareValue['link'] = link
                            shareValue['level'] = 'level3'
                            shareValue['gshare'] = ord3Short.replace(' ', '_')
                        newTablesRow.append(shareValue)
                        tablerows.remove(shareValue)
                        if 'level' in shareValue:
                            add_resources(ord3Short, newTablesRow,
                                          resources_list, shareValue['level'])
                        break

    tablerows = newTablesRow

    del request.session['TFIRST']
    del request.session['TLAST']
    ##self monitor
    endSelfMonitor(request)
    if (not (('HTTP_ACCEPT' in request.META) and
             (request.META.get('HTTP_ACCEPT') in ('application/json',)))
            and ('json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData': gsPlotData,
            'tablerows': tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        response = render_to_response('globalshares.html',
                                      data,
                                      content_type='text/html')
        setCacheEntry(request, "globalshares", json.dumps(data,
                                                          cls=DateEncoder),
                      60 * 20)
        patch_response_headers(
            response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(json.dumps(gs), content_type='text/html')
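
The JSON-versus-HTML dispatch above (inspect HTTP_ACCEPT, then fall back to a 'json' request parameter) is repeated almost verbatim across these views. A small helper could centralize it; a minimal sketch, assuming only the Django request object and the session 'requestParams' dict used throughout these examples (the helper name is hypothetical):

def wants_json(request):
    """Hypothetical helper: True when the caller asked for JSON via the
    Accept header or a 'json' request parameter."""
    accept = request.META.get('HTTP_ACCEPT', '')
    if accept in ('text/json', 'application/json'):
        return True
    return 'json' in request.session.get('requestParams', {})

With it, the branching above reduces to "if wants_json(request): return HttpResponse(dump, content_type='text/html')".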
Example No. 34
0
def artJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    # Here we try to get cached data
    data = getCacheEntry(request, "artJobs")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if 'ntaglist' in data:
            if len(data['ntaglist']) > 0:
                ntags = []
                for ntag in data['ntaglist']:
                    try:
                        ntags.append(datetime.strptime(ntag, artdateformat))
                    except (TypeError, ValueError):
                        pass
                # guard requestParams in both branches before writing to it
                if 'requestParams' in data:
                    if len(ntags) > 1:
                        data['requestParams']['ntag_from'] = min(ntags)
                        data['requestParams']['ntag_to'] = max(ntags)
                    elif len(ntags) == 1:
                        data['requestParams']['ntag'] = ntags[0]
        response = render_to_response('artJobs.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response


    # cache miss: rebuild the page data from the database
    query = setupView(request, 'job')

    cur = connection.cursor()
    # choose the DB-side function by date threshold: ARTTESTS_1 presumably
    # supersedes ARTTESTS for nightly tags from 2018-03-20 onwards
    if datetime.strptime(query['ntag_from'], '%Y-%m-%d') < datetime.strptime('2018-03-20', '%Y-%m-%d'):
        cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.ARTTESTS('%s','%s','%s'))" % (query['ntag_from'], query['ntag_to'], query['strcondition']))
    else:
        cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.ARTTESTS_1('%s','%s','%s'))" % (query['ntag_from'], query['ntag_to'], query['strcondition']))
    jobs = cur.fetchall()
    cur.close()

    # column names returned by the ARTTESTS functions, in positional order
    artJobsNames = ['taskid', 'package', 'branch', 'ntag', 'nightly_tag',
                    'testname', 'jobstatus', 'origpandaid', 'computingsite',
                    'endtime', 'starttime', 'maxvmem', 'cpuconsumptiontime',
                    'guid', 'scope', 'lfn', 'taskstatus',
                    'taskmodificationtime', 'jobmodificationtime', 'result']
    jobs = [dict(zip(artJobsNames, row)) for row in jobs]

    # i=0
    # for job in jobs:
    #     i+=1
    #     print 'registering %i out of %i jobs' % (i, len(jobs))
    #     x = ArtTest(job['origpandaid'], job['testname'], job['branch'].split('/')[0], job['branch'].split('/')[1],job['branch'].split('/')[2], job['package'], job['nightly_tag'])
    #     if x.registerArtTest():
    #         print '%i job registered successfully out of %i' % (i, len(jobs))

    ntagslist = sorted(set(x['ntag'] for x in jobs))
    jeditaskids = sorted(set(x['taskid'] for x in jobs))

    # nested dict: package -> branch -> testname -> ntag -> list of jobs
    # (branch-first when view=branches)
    artjobsdict = {}
    if 'view' not in request.session['requestParams'] or (
            request.session['requestParams'].get('view') == 'packages'):
        for job in jobs:
            if job['package'] not in artjobsdict.keys():
                artjobsdict[job['package']] = {}
            if job['branch'] not in artjobsdict[job['package']].keys():
                artjobsdict[job['package']][job['branch']] = {}
            if job['testname'] not in artjobsdict[job['package']][job['branch']].keys():
                artjobsdict[job['package']][job['branch']][job['testname']] = {}
                for n in ntagslist:
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)] = {}
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)]['jobs'] = []
            if job['ntag'].strftime(artdateformat) in artjobsdict[job['package']][job['branch']][job['testname']]:
                jobdict = {}
                jobdict['jobstatus'] = job['jobstatus']
                jobdict['origpandaid'] = job['origpandaid']
                # testname minus its 3-character extension (e.g. '.sh')
                jobdict['linktext'] = job['branch'] + '/' + job['nightly_tag'] + '/' + job['package'] + '/' + job['testname'][:-3]
                jobdict['computingsite'] = job['computingsite']
                jobdict['guid'] = job['guid']
                jobdict['scope'] = job['scope']
                jobdict['lfn'] = job['lfn']
                jobdict['jeditaskid'] = job['taskid']
                jobdict['maxvmem'] = round(job['maxvmem']*1.0/1000,1) if job['maxvmem'] is not None else '---'
                jobdict['cpuconsumptiontime'] = job['cpuconsumptiontime'] if job['jobstatus'] in ('finished', 'failed') else '---'
                if job['jobstatus'] in ('finished', 'failed'):
                    jobdict['duration'] = job['endtime'] - job['starttime']
                else:
                    jobdict['duration'] = str(datetime.now() - job['starttime']).split('.')[0] if job['starttime'] is not None else "---"
                try:
                    # extract the 6-digit tar index from the log LFN
                    jobdict['tarindex'] = int(re.search('.([0-9]{6}).log.', job['lfn']).group(1))
                except (AttributeError, TypeError, ValueError):
                    jobdict['tarindex'] = ''

                # overall ART verdict, test exit code, per-step results and
                # the test output directory for this job
                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)

                jobdict['finalresult'] = finalresult
                jobdict['testexitcode'] = testexitcode
                jobdict['testresult'] = subresults
                jobdict['testdirectory'] = testdirectory

                artjobsdict[job['package']][job['branch']][job['testname']][job['ntag'].strftime(artdateformat)]['jobs'].append(jobdict)

    elif 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'branches':
        for job in jobs:
            if job['branch'] not in artjobsdict.keys():
                artjobsdict[job['branch']] = {}
            if job['package'] not in artjobsdict[job['branch']].keys():
                artjobsdict[job['branch']][job['package']] = {}
            if job['testname'] not in artjobsdict[job['branch']][job['package']].keys():
                artjobsdict[job['branch']][job['package']][job['testname']] = {}
                for n in ntagslist:
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)] = {}
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)]['jobs'] = []
            if job['ntag'].strftime(artdateformat) in artjobsdict[job['branch']][job['package']][job['testname']]:
                jobdict = {}
                jobdict['jobstatus'] = job['jobstatus']
                jobdict['origpandaid'] = job['origpandaid']
                jobdict['linktext'] = job['branch'] + '/' + job['nightly_tag'] + '/' + job['package'] + '/' + job['testname'][:-3]
                jobdict['computingsite'] = job['computingsite']
                jobdict['guid'] = job['guid']
                jobdict['scope'] = job['scope']
                jobdict['lfn'] = job['lfn']
                jobdict['jeditaskid'] = job['taskid']
                jobdict['maxvmem'] = round(job['maxvmem'] * 1.0 / 1000, 1) if job['maxvmem'] is not None else '---'
                jobdict['cpuconsumptiontime'] = job['cpuconsumptiontime'] if job['jobstatus'] in ('finished', 'failed') else '---'
                if job['jobstatus'] in ('finished', 'failed'):
                    jobdict['duration'] = job['endtime'] - job['starttime']
                else:
                    jobdict['duration'] = str(datetime.now() - job['starttime']).split('.')[0] if job['starttime'] is not None else "---"
                try:
                    jobdict['tarindex'] = int(re.search('.([0-9]{6}).log.', job['lfn']).group(1))
                except (AttributeError, TypeError, ValueError):
                    jobdict['tarindex'] = ''

                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)

                jobdict['finalresult'] = finalresult
                jobdict['testexitcode'] = testexitcode
                jobdict['testresult'] = subresults
                jobdict['testdirectory'] = testdirectory
                artjobsdict[job['branch']][job['package']][job['testname']][job['ntag'].strftime(artdateformat)]['jobs'].append(jobdict)
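
    # For reference, a sketch of the nested shape both branches build
    # (package/branch/test names below are invented for illustration):
    # {
    #     'SomePackage': {
    #         '21.0/Athena/x86_64-slc6-gcc62-opt': {
    #             'test_example.sh': {
    #                 '2018-04-01': {'ntag_hf': '01 Apr 2018',
    #                                'jobs': [jobdict, ...]},
    #             },
    #         },
    #     },
    # }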


    xurl = extensibleURL(request)
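    # strip the 'view' parameter so template links can toggle between views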
    noviewurl = removeParam(xurl, 'view', mode='extensible')

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):

        data = {
            'artjobs': artjobsdict,
        }

        dump = json.dumps(data, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'artjobs': artjobsdict,
            'noviewurl': noviewurl,
            'ntaglist': [ntag.strftime(artdateformat) for ntag in ntagslist],
            'taskids': jeditaskids,
        }
        setCacheEntry(request, "artJobs", json.dumps(data, cls=DateEncoder), 60 * cache_timeout)
        response = render_to_response('artJobs.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
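
Both views above serialize their cache payloads with a DateEncoder class that is referenced but not defined in this snippet. A minimal sketch of what such an encoder might look like, assuming it only needs to handle the datetime, date, and timedelta values these views place in their payloads (a reconstruction, not the original class):

import json
from datetime import date, datetime, timedelta

class DateEncoder(json.JSONEncoder):
    """Hypothetical reconstruction: render date-like values as strings
    so cache payloads survive a json.dumps/json.loads round trip."""
    def default(self, obj):
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        if isinstance(obj, timedelta):
            return str(obj)
        return super(DateEncoder, self).default(obj)

This also explains why the cache-hit path in artJobs re-parses its ntag strings with datetime.strptime: dates and times come back from the cache as plain strings.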