Example No. 1
def harvesters(request):
    import json
    valid, response = initRequest(request)
    # query, extra, LAST_N_HOURS_MAX = setupView(request, wildCardExt=True)
    extra = '1=1'  # always-true WHERE fragment used by the .extra() calls below
    xurl = extensibleURL(request)

    if 'instance' in request.session['requestParams']:
        instance = request.session['requestParams']['instance']
        # if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or ('json' in request.session['requestParams']):
        #     data = getCacheEntry(request, instance,isData=True)
        #     import json
        #     return HttpResponse(data, content_type='text/html')
        data = getCacheEntry(request, "harvester")
        if data is not None:
            data = json.loads(data)
            data['request'] = request
            response = render_to_response('harvesters.html',
                                          data,
                                          content_type='text/html')
            patch_response_headers(
                response,
                cache_timeout=request.session['max_age_minutes'] * 60)
            endSelfMonitor(request)
            return response
        if ('workersstats' in request.session['requestParams']
                and 'instance' in request.session['requestParams']):
            harvesterworkerstats = []
            tquery = {'harvesterid': instance}
            limit = 100
            if 'limit' in request.session['requestParams']:
                limit = request.session['requestParams']['limit']
            harvesterworkerstat = HarvesterWorkerStats.objects.filter(
                **tquery).values(
                    'computingsite', 'resourcetype', 'status', 'nworkers',
                    'lastupdate').extra(
                        where=[extra]).order_by('-lastupdate')[:limit]
            # Reformat timestamps to dd-mm-yyyy for display
            old_format = '%Y-%m-%d %H:%M:%S'
            new_format = '%d-%m-%Y %H:%M:%S'
            for stat in harvesterworkerstat:
                stat['lastupdate'] = datetime.strptime(str(
                    stat['lastupdate']), old_format).strftime(new_format)
                harvesterworkerstats.append(stat)
            return HttpResponse(json.dumps(harvesterworkerstats,
                                           cls=DateTimeEncoder),
                                content_type='text/html')
        if ('dialogs' in request.session['requestParams']
                and 'instance' in request.session['requestParams']):
            dialogs = []
            tquery = {'harvesterid': instance}
            limit = 100
            if 'limit' in request.session['requestParams']:
                limit = request.session['requestParams']['limit']
            dialogsList = HarvesterDialogs.objects.filter(**tquery).values(
                'creationtime', 'modulename', 'messagelevel',
                'diagmessage').extra(
                    where=[extra]).order_by('-creationtime')[:limit]
            old_format = '%Y-%m-%d %H:%M:%S'
            new_format = '%d-%m-%Y %H:%M:%S'
            for dialog in dialogsList:
                dialog['creationtime'] = datetime.strptime(
                    str(dialog['creationtime']),
                    old_format).strftime(new_format)
                dialogs.append(dialog)
            return HttpResponse(json.dumps(dialogs, cls=DateTimeEncoder),
                                content_type='text/html')
        if ('dt' in request.session['requestParams']
                and 'tk' in request.session['requestParams']):
            tk = request.session['requestParams']['tk']
            data = getCacheEntry(request, tk, isData=True)
            return HttpResponse(data, content_type='text/html')
        lastupdateCache = ''
        workersListCache = []

        data = {}
        setCacheEntry(request,
                      instance,
                      json.dumps(data, cls=DateEncoder),
                      1,
                      isData=True)

        workersListIsEmpty = True
        if ('status' not in request.session['requestParams']
                and 'computingsite' not in request.session['requestParams']
                and 'days' not in request.session['requestParams']):
            data = getCacheEntry(request, instance, isData=True)
            if data is not None and data != "null":
                if 'lastupdate' in data:  # substring test on the raw JSON string
                    data = json.loads(data)
                    lastupdateCache = data['lastupdate'].replace('T', ' ')
                    lastupdateCache = """ AND "wrklastupdate" >= to_date('%s','yyyy-mm-dd hh24:mi:ss')""" % (
                        lastupdateCache)
                    workersListCache = data['workersList']
                    workersListIsEmpty = False

                    # Drop cached workers not updated within the last 60 days
                    tmpworkerList = list(data['workersList'].keys())
                    for worker in tmpworkerList:
                        if datetime.strptime(
                                data['workersList'][worker]['wrklastupdate'],
                                '%d-%m-%Y %H:%M:%S'
                        ) < datetime.now() - timedelta(days=60):
                            del data['workersList'][worker]
        else:
            lastupdateCache = ''
            workersListCache = []

        # Optional filters; note these splice request values straight into the
        # SQL (a bind-parameter alternative is sketched after this example)
        status = ''
        computingsite = ''
        workerid = ''
        days = ''
        if 'status' in request.session['requestParams']:
            status = """AND status like '%s'""" % (str(
                request.session['requestParams']['status']))
        if 'computingsite' in request.session['requestParams']:
            computingsite = """AND computingsite like '%s'""" % (str(
                request.session['requestParams']['computingsite']))
        if 'workerid' in request.session['requestParams']:
            workerid = """AND workerid in (%s)""" % (
                request.session['requestParams']['workerid'])
        if 'days' in request.session['requestParams']:
            days = """AND to_date("wrklastupdate", 'dd-mm-yyyy hh24:mi:ss') > sysdate - %s """ % (
                request.session['requestParams']['days'])
        sqlquery = """
        select * from (SELECT
        ff.harvester_id,
        ff.description,
        to_char(ff.starttime, 'dd-mm-yyyy hh24:mi:ss') as "insstarttime",
        ff.owner,
        ff.hostname,
        ff.sw_version,
        ff.commit_stamp,
        gg.workerid,
        to_char((select max(lastupdate) from atlas_panda.harvester_workers where harvesterid like '%s'), 'dd-mm-yyyy hh24:mi:ss') as "inslastupdate",
        gg.status,
        gg.batchid,
        gg.nodeid,
        gg.queuename,
        gg.computingsite,
        to_char(gg.submittime, 'dd-mm-yyyy hh24:mi:ss') as "submittime",
        to_char(gg.lastupdate , 'dd-mm-yyyy hh24:mi:ss') as "wrklastupdate",
        to_char(gg.starttime , 'dd-mm-yyyy hh24:mi:ss') as "wrkstarttime",
        to_char(gg.endtime, 'dd-mm-yyyy hh24:mi:ss') as "wrkendtime",
        gg.ncore,
        gg.errorcode,
        gg.stdout,
        gg.stderr,
        gg.batchlog,
        gg.resourcetype,
        gg.nativeexitcode,
        gg.nativestatus,
        gg.diagmessage,
        gg.computingelement,
        gg.njobs
        FROM
        atlas_panda.harvester_workers gg,
        atlas_panda.harvester_instances ff
        WHERE
        ff.harvester_id = gg.harvesterid) where harvester_id like '%s' %s %s %s %s %s
        order by workerid DESC
        """ % (str(instance), str(instance), status, computingsite, workerid,
               lastupdateCache, days)
        workersList = []
        cur = connection.cursor()
        cur.execute(sqlquery)
        columns = [str(i[0]).lower() for i in cur.description]
        workersDictionary = {}

        timeLastUpdate = ''
        if not workersListIsEmpty:
            # A cached workers list exists: merge the fresh rows into it
            for row in cur:
                worker = dict(zip(columns, row))
                workersListCache[int(worker['workerid'])] = worker
                timeLastUpdate = worker['inslastupdate']
            workersList = workersListCache.values()
            workersDictionary = workersListCache

        else:
            for row in cur:
                worker = dict(zip(columns, row))
                workersDictionary[int(worker['workerid'])] = worker
                workersList.append(worker)
                timeLastUpdate = worker['inslastupdate']

        # dbCache = {
        #     "workersList": workersDictinoary,
        #     "lastupdate": timeLastUpdate
        # }
        # print len(workersListCache)
        # if 'status' not in request.session['requestParams'] and 'computingsite' not in request.session['requestParams'] and 'workerid' not in request.session['requestParams'] :
        #     setCacheEntry(request, instance, json.dumps(dbCache, cls=DateEncoder), 86400, isData=True)

        statuses = {}
        computingsites = {}
        workerIDs = set()
        generalInstanseInfo = {}

        if 'display_limit_workers' in request.session['requestParams']:
            display_limit_workers = int(
                request.session['requestParams']['display_limit_workers'])
        else:
            display_limit_workers = 30000

        generalWorkersFields = [
            'workerid', 'status', 'batchid', 'nodeid', 'queuename',
            'computingsite', 'submittime', 'wrklastupdate', 'wrkstarttime',
            'wrkendtime', 'ncore', 'errorcode', 'stdout', 'stderr', 'batchlog',
            'resourcetype', 'nativeexitcode', 'nativestatus', 'diagmessage',
            'njobs', 'computingelement'
        ]
        generalWorkersList = []

        wrkPandaIDs = {}
        for i, worker in enumerate(workersList):
            workerEntry = {}
            computingsites.setdefault(worker['computingsite'],
                                      []).append(worker['workerid'])
            statuses.setdefault(worker['status'],
                                []).append(worker['workerid'])
            # if worker['njobs'] is not None:
            #     wrkPandaIDs[worker['workerid']] = worker['njobs']
            # else: wrkPandaIDs[worker['workerid']] = 0
            # workerIDs.add(worker['workerid'])
            # Replace NULLs with 0 so the datatable gets uniform values
            for field in generalWorkersFields:
                if worker[field] is not None:
                    workerEntry[field] = worker[field]
                else:
                    workerEntry[field] = 0
            generalWorkersList.append(workerEntry)
            if i == len(workersList) - 1:
                # Last row: collapse the per-site and per-status id lists
                # into counts and take instance-level fields from this row
                for computingsite in computingsites.keys():
                    computingsites[computingsite] = len(
                        computingsites[computingsite])
                for status in statuses.keys():
                    statuses[status] = len(statuses[status])
                generalInstanseInfo = {
                    'HarvesterID': worker['harvester_id'],
                    'Description': worker['description'],
                    'Starttime': worker['insstarttime'],
                    'Owner': worker['owner'],
                    'Hostname': worker['hostname'],
                    'Lastupdate': worker['inslastupdate'],
                    'Computingsites': computingsites,
                    'Statuses': statuses,
                    'Software version': worker['sw_version'],
                    'Commit stamp': worker['commit_stamp']
                }
        # 'wrkpandaids': OrderedDict(sorted(wrkPandaIDs.items(), key=lambda x: x[1], reverse=True)[:200])
        transactionKey = random.randrange(1000000)
        data = {
            'generalInstanseInfo': generalInstanseInfo,
            'type': 'workers',
            'instance': instance,
            'xurl': xurl,
            'tk': transactionKey,
            'request': request,
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        setCacheEntry(request,
                      transactionKey,
                      json.dumps(generalWorkersList[:display_limit_workers],
                                 cls=DateEncoder),
                      60 * 60,
                      isData=True)
        setCacheEntry(request, 'harvester', json.dumps(data, cls=DateEncoder),
                      60 * 60)
        endSelfMonitor(request)
        return render_to_response('harvesters.html',
                                  data,
                                  content_type='text/html')

    # elif 'instance' in request.session['requestParams'] and 'workerid' in 'instance' in request.session['requestParams']:
    #     pass
    else:
        sqlquery = """
        select  
        R.harvid,
        count(R.workid) as total,
        (select cnt from   (select harvid, count(*) as cnt from (
        SELECT
        a.harvester_id as harvid, 
        b.workerid as workid,
        to_char(b.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as alldate,
        (SELECT
        to_char(max(O.lastupdate), 'dd-mm-yyyy hh24:mi:ss')
        FROM atlas_panda.harvester_workers O WHERE O.harvesterid = a.harvester_id   Group by O.harvesterid) as recently, 
        a.DESCRIPTION as description
        FROM
        atlas_panda.harvester_workers b,
        atlas_panda.harvester_instances a
        WHERE a.harvester_id = b.harvesterid
        ) WHERE alldate = recently Group by harvid) W WHERE W.harvid=R.harvid) as recent,
        R.recently,
        R.sw_version,
        R.commit_stamp,
        R.lastupdate,
        R.description
        FROM (SELECT
        a.harvester_id as harvid, 
        b.workerid as workid,
        to_char(b.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as alldate,
        (SELECT
        to_char(max(O.lastupdate), 'dd-mm-yyyy hh24:mi:ss')
        FROM atlas_panda.harvester_rel_jobs_workers O where  O.harvesterid = a.harvester_id   Group by O.harvesterid) as recently,
        a.sw_version,
        a.commit_stamp,
        to_char(a.lastupdate, 'dd-mm-yyyy hh24:mi:ss') as lastupdate, 
        a.DESCRIPTION as description
        FROM
        atlas_panda.harvester_workers b,
        atlas_panda.harvester_instances a
        WHERE a.harvester_id = b.harvesterid) R group by harvid,recently,sw_version,commit_stamp,lastupdate,description
        """
        instanceList = []
        cur = connection.cursor()
        cur.execute(sqlquery)

        for row in cur:
            instanceList.append({
                'instance': row[0],
                'total': row[1],
                'recently': row[2],
                'when': row[3],
                'sw_version': row[4],
                'commit_stamp': row[5],
                'lastupdate': row[6],
                'descr': row[7]
            })

        data = {
            'instances': instanceList,
            'type': 'instances',
            'xurl': xurl,
            'request': request,
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams']
        }
        #data =json.dumps(data,cls=DateEncoder)
        response = render_to_response('harvesters.html',
                                      data,
                                      content_type='text/html')
    return response
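A note on the raw SQL above: the filter fragments (status, computingsite, workerid, days) are spliced into the query with % formatting, so any request parameter reaches the SQL verbatim. Below is a minimal sketch of the core lookup rewritten with DB-API bind parameters; the table and column names come from the view above, the rest is illustrative.

# Sketch only: the same worker lookup with bound parameters instead of
# string interpolation. `connection` is the Django DB connection used above.
def fetch_workers(connection, instance, status_pattern):
    cur = connection.cursor()
    cur.execute(
        "SELECT workerid, status, computingsite "
        "FROM atlas_panda.harvester_workers "
        "WHERE harvesterid = %s AND status LIKE %s "
        "ORDER BY workerid DESC",
        [instance, status_pattern],  # values are bound, never spliced in
    )
    columns = [str(col[0]).lower() for col in cur.description]
    return [dict(zip(columns, row)) for row in cur.fetchall()]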
Example No. 2
def globalshares(request):
    valid, response = initRequest(request)
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData

    if not valid: return response
    setupView(request, hours=180 * 24, limit=9999999)
    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}  # e.g. {'Upgrade': 130049, 'Reprocessing default': 568841, 'Data Derivations': 202962, 'Event Index': 143}

    for shareName, shareValue in gs.items():
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        shareValue['used'] = shareValue['ratio'] if 'ratio' in shareValue else None
        gsPlotData[str(shareName)] = int(shareValue['executing'])

    for shareValue in tablerows:
        shareValue['used'] = shareValue['ratio'] * Decimal(
            shareValue['value']) / 100 if 'ratio' in shareValue else None
    ordtablerows = {}
    ordtablerows['childlist'] = []
    level1 = ''
    level2 = ''
    level3 = ''

    for shareValue in tablerows:
        if len(shareValue['level1']) != 0:
            level1 = shareValue['level1']
            ordtablerows[level1] = {}
            ordtablerows['childlist'].append(level1)
            ordtablerows[level1]['childlist'] = []
        if len(shareValue['level2']) != 0:
            level2 = shareValue['level2']
            ordtablerows[level1][level2] = {}
            ordtablerows[level1]['childlist'].append(level2)
            ordtablerows[level1][level2]['childlist'] = []
        if len(shareValue['level3']) != 0:
            level3 = shareValue['level3']
            ordtablerows[level1][level2][level3] = {}
            ordtablerows[level1][level2]['childlist'].append(level3)

    resources_list, resources_dict = get_resources_gshare()

    newTablesRow = []
    for ordValueLevel1 in sorted(ordtablerows['childlist']):
        for shareValue in tablerows:
            if ordValueLevel1 in shareValue['level1']:
                ord1Short = re.sub(r'\[(.*)\]', '',
                                   ordValueLevel1).rstrip().lower()
                shareValue['level'] = 'level1'
                shareValue['gshare'] = ord1Short.replace(' ', '_')
                newTablesRow.append(shareValue)
                tablerows.remove(shareValue)
                if len(ordtablerows[ordValueLevel1]['childlist']) == 0:
                    add_resources(ord1Short, newTablesRow, resources_list,
                                  shareValue['level'])
                else:
                    childsgsharelist = []
                    get_child_elements(ordtablerows[ordValueLevel1],
                                       childsgsharelist)
                    resources_dict = get_child_sumstats(
                        childsgsharelist, resources_dict, ord1Short)
                    short_resource_list = resourcesDictToList(resources_dict)
                    add_resources(ord1Short, newTablesRow, short_resource_list,
                                  shareValue['level'])
                break
        for ordValueLevel2 in sorted(
                ordtablerows[ordValueLevel1]['childlist']):
            for shareValue in tablerows:
                if ordValueLevel2 in shareValue['level2']:
                    if len(ordtablerows[ordValueLevel1][ordValueLevel2]
                           ['childlist']) == 0:
                        ord1Short = re.sub(r'\[(.*)\]', '',
                                           ordValueLevel1).rstrip().lower()
                        ord2Short = re.sub(r'\[(.*)\]', '',
                                           ordValueLevel2).rstrip().lower()
                        link = "?jobtype=%s&display_limit=100&gshare=%s" % (
                            ord1Short, ord2Short)
                        shareValue['link'] = link
                        shareValue['level'] = 'level2'
                        shareValue['gshare'] = ord2Short.replace(' ', '_')
                    newTablesRow.append(shareValue)
                    tablerows.remove(shareValue)
                    if 'level' in shareValue:
                        add_resources(ord2Short, newTablesRow, resources_list,
                                      shareValue['level'])
                    break
            for ordValueLevel3 in sorted(
                    ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']):
                for shareValue in tablerows:
                    if ordValueLevel3 in shareValue['level3']:
                        if len(ordtablerows[ordValueLevel1][ordValueLevel2]
                               ['childlist']) > 0:
                            ord1Short = re.sub(
                                r'\[(.*)\]', '',
                                ordValueLevel1).rstrip().lower()
                            ord3Short = re.sub(
                                r'\[(.*)\]', '',
                                ordValueLevel3).rstrip().lower()
                            link = "?jobtype=%s&display_limit=100&gshare=%s" % (
                                ord1Short, ord3Short)
                            shareValue['link'] = link
                            shareValue['level'] = 'level3'
                            shareValue['gshare'] = ord3Short.replace(' ', '_')
                        newTablesRow.append(shareValue)
                        tablerows.remove(shareValue)
                        if 'level' in shareValue:
                            add_resources(ord3Short, newTablesRow,
                                          resources_list, shareValue['level'])
                        break

    tablerows = newTablesRow

    del request.session['TFIRST']
    del request.session['TLAST']
    ##self monitor
    endSelfMonitor(request)
    # ('application/json',) is a one-element tuple: with a bare string the
    # 'in' test would be a substring check
    if (not (('HTTP_ACCEPT' in request.META) and
             (request.META.get('HTTP_ACCEPT') in ('application/json',)))
            and ('json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData': gsPlotData,
            'tablerows': tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        response = render_to_response('globalshares.html',
                                      data,
                                      content_type='text/html')
        setCacheEntry(request, "globalshares", json.dumps(data,
                                                          cls=DateEncoder),
                      60 * 20)
        patch_response_headers(
            response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        # gs carries Decimal values that plain json.dumps rejects; use the
        # project's DecimalEncoder as the Example No. 4 variant does
        return HttpResponse(json.dumps(gs, cls=DecimalEncoder), content_type='text/html')
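The Accept-header dispatch at the end of this view recurs, with small variations, in most of the views in this section. A small helper along these lines would centralize the check; this is a sketch, not a helper that exists in the project:

def wants_json(request):
    # True when the client asked for JSON, either through the Accept header
    # or through a ?json request parameter (mirrors the checks used above).
    accept = request.META.get('HTTP_ACCEPT', '')
    return (accept in ('text/json', 'application/json')
            or 'json' in request.session['requestParams'])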
Example No. 3
def compareJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    pandaidstr = None
    if 'pandaid' in request.session['requestParams']:
        pandaidstr = request.session['requestParams']['pandaid'].split('|')
    else:
        query = {}
        query['userid'] = request.user.id
        query['object'] = 'job'
        try:
            jobsComparison = ObjectsComparison.objects.get(**query)
            pandaidstr = json.loads(jobsComparison.comparisonlist)
        except ObjectsComparison.DoesNotExist:
            pandaidstr = None

    if not pandaidstr:
        return render_to_response(
            'errorPage.html',
            {'errormessage': 'No pandaids for comparison provided'},
            content_type='text/html')

    pandaids = []
    for pid in pandaidstr:
        try:
            pandaids.append(int(pid))
        except ValueError:  # skip non-numeric ids
            pass
    maxNJobs = 5
    if len(pandaids) > maxNJobs:
        pandaids = pandaids[:maxNJobs]

    jobInfoJSON = []

    # Looking for a job in cache
    pandaidsToBeLoad = []
    for pandaid in pandaids:
        data = getCacheEntry(request,
                             "compareJob_" + str(pandaid),
                             isData=True)
        # data = None
        if data is not None:
            jobInfoJSON.append(json.loads(data))
        else:
            pandaidsToBeLoad.append(pandaid)

    #Loading jobs info in parallel
    nprocesses = maxNJobs
    if len(pandaidsToBeLoad) > 0:
        url_params = [('?json=1&pandaid=' + str(pid))
                      for pid in pandaidsToBeLoad]
        pool = multiprocessing.Pool(processes=nprocesses)
        jobInfoJSON.extend(pool.map(job_info_getter, url_params))
        pool.close()
        pool.join()

    #Put loaded jobs info to cache
    for job in jobInfoJSON:
        key = list(job.keys())[0]
        setCacheEntry(request,
                      "compareJob_" + str(key),
                      json.dumps(job[key], cls=DateEncoder),
                      60 * 30,
                      isData=True)

    compareParamNames = {
        'produsername': '******',
        'reqid': 'Request ID',
        'jeditaskid': 'Task ID',
        'jobstatus': 'Status',
        'attemptnr': 'Attempt',
        'creationtime': 'Created',
        'waittime': 'Time to start',
        'duration': 'Duration',
        'modificationtime': 'Modified',
        'cloud': 'Cloud',
        'computingsite': 'Site',
        'currentpriority': 'Priority',
        'jobname': 'Name',
        'processingtype': 'Type',
        'transformation': 'Transformation',
        'proddblock': 'Input',
        'destinationdblock': 'Output',
        'jobsetid': 'Jobset ID',
        'batchid': 'Batch ID',
        'eventservice': 'Event Service'
    }

    compareParams = [
        'produsername', 'reqid', 'jeditaskid', 'jobstatus', 'attemptnr',
        'creationtime', 'waittime', 'duration', 'modificationtime', 'cloud',
        'computingsite', 'currentpriority', 'jobname', 'processingtype',
        'transformation', 'proddblock', 'destinationdblock', 'jobsetid',
        'batchid', 'eventservice'
    ]

    # Params excluded because their values are too long
    excludedParams = ['metadata', 'metastruct']

    jobsComparisonMain = []
    for param in compareParams:
        row = [{'paramname': compareParamNames[param]}]
        for jobd in jobInfoJSON:
            job = jobd['job']
            if param in job:
                row.append({'value': job[param]})
            else:
                row.append({'value': '-'})
        if len(set([d['value'] for d in row if 'value' in d])) == 1:
            row[0]['mark'] = 'equal'
        jobsComparisonMain.append(row)

    all_params = []
    for jobd in jobInfoJSON:
        all_params.extend(list(jobd['job'].keys()))
    all_params = sorted(set(all_params))

    jobsComparisonAll = []
    for param in all_params:
        if param not in excludedParams:
            row = [{'paramname': param}]
            for jobd in jobInfoJSON:
                job = jobd['job']
                if param in job and job[param] is not None:
                    row.append({'value': job[param]})
                else:
                    row.append({'value': '-'})
            if len(set([d['value'] for d in row if 'value' in d])) == 1:
                row[0]['mark'] = 'equal'
            jobsComparisonAll.append(row)

    xurl = extensibleURL(request)
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'url': request.path,
        'jobsComparisonMain': jobsComparisonMain,
        'jobsComparisonAll': jobsComparisonAll,
        'pandaids': pandaids,
        'xurl': xurl,
        'built': datetime.now().strftime("%H:%M:%S"),
    }

    ##self monitor
    endSelfMonitor(request)
    response = render_to_response('compareJobs.html',
                                  data,
                                  content_type='text/html')
    patch_response_headers(response,
                           cache_timeout=request.session['max_age_minutes'] *
                           60)
    return response
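The two comparison tables above are built by the same row-building pattern. A condensed sketch of that pattern, operating on the plain job dicts (jobd['job'] in the view):

def comparison_rows(params, jobs, missing='-'):
    # One row per parameter: a label cell followed by one value cell per
    # job. Rows where every job agrees get marked so the template can
    # de-emphasize them.
    rows = []
    for param in params:
        row = [{'paramname': param}]
        row += [{'value': job.get(param, missing)} for job in jobs]
        if len(set(cell['value'] for cell in row if 'value' in cell)) == 1:
            row[0]['mark'] = 'equal'  # all jobs share the same value
        rows.append(row)
    return rows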
Example No. 4
def globalshares(request):
    valid, response = initRequest(request)
    data = getCacheEntry(request, "globalshares")
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        gsPlotData = {}
        oldGsPlotData = data['gsPlotData']
        for shareName, shareValue in oldGsPlotData.iteritems():
            gsPlotData[str(shareName)] = int(shareValue)
        data['gsPlotData'] = gsPlotData

    if not valid: return response
    setupView(request, hours=180 * 24, limit=9999999)
    gs, tablerows = __get_hs_leave_distribution()
    gsPlotData = {}  # e.g. {'Upgrade': 130049, 'Reprocessing default': 568841, 'Data Derivations': 202962, 'Event Index': 143}

    for shareName, shareValue in gs.items():
        shareValue['delta'] = shareValue['executing'] - shareValue['pledged']
        shareValue['used'] = shareValue['ratio'] if 'ratio' in shareValue else None
        gsPlotData[str(shareName)] = int(shareValue['executing'])


    for shareValue in tablerows:
        shareValue['used'] = shareValue['ratio'] * Decimal(shareValue['value']) / 100 if 'ratio' in shareValue else None
    ordtablerows = {}
    ordtablerows['childlist'] = []
    level1 = ''
    level2 = ''
    level3 = ''

    for shareValue in tablerows:
        if len(shareValue['level1']) != 0:
            level1 = shareValue['level1']
            ordtablerows[level1] = {}
            ordtablerows['childlist'].append(level1)
            ordtablerows[level1]['childlist'] = []
        if len(shareValue['level2']) != 0:
            level2 = shareValue['level2']
            ordtablerows[level1][level2] = {}
            ordtablerows[level1]['childlist'].append(level2)
            ordtablerows[level1][level2]['childlist'] = []
        if len(shareValue['level3']) != 0:
            level3 = shareValue['level3']
            ordtablerows[level1][level2][level3] = {}
            ordtablerows[level1][level2]['childlist'].append(level3)

    resources_list, resources_dict = get_resources_gshare()

    newTablesRow = []
    for ordValueLevel1 in sorted(ordtablerows['childlist']):
        for shareValue in tablerows:
            if ordValueLevel1 in shareValue['level1']:
                ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                shareValue['level'] = 'level1'
                shareValue['gshare'] = ord1Short.replace(' ', '_')
                newTablesRow.append(shareValue)
                tablerows.remove(shareValue)
                if len(ordtablerows[ordValueLevel1]['childlist']) == 0:
                    add_resources(ord1Short, newTablesRow, resources_list, shareValue['level'])
                else:
                    childsgsharelist = []
                    get_child_elements(ordtablerows[ordValueLevel1], childsgsharelist)
                    resources_dict = get_child_sumstats(childsgsharelist, resources_dict, ord1Short)
                    short_resource_list = resourcesDictToList(resources_dict)
                    add_resources(ord1Short, newTablesRow, short_resource_list, shareValue['level'])
                break
        for ordValueLevel2 in sorted(ordtablerows[ordValueLevel1]['childlist']):
            for shareValue in tablerows:
                if ordValueLevel2 in shareValue['level2']:
                    if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) == 0:
                        ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                        ord2Short = re.sub(r'\[(.*)\]', '', ordValueLevel2).rstrip().lower()
                        link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord2Short)
                        shareValue['link'] = link
                        shareValue['level'] = 'level2'
                        shareValue['gshare'] = ord2Short.replace(' ', '_')
                    newTablesRow.append(shareValue)
                    tablerows.remove(shareValue)
                    if 'level' in shareValue:
                        add_resources(ord2Short, newTablesRow, resources_list, shareValue['level'])
                    break
            for ordValueLevel3 in sorted(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']):
                for shareValue in tablerows:
                    if ordValueLevel3 in shareValue['level3']:
                        if len(ordtablerows[ordValueLevel1][ordValueLevel2]['childlist']) > 0:
                            ord1Short = re.sub(r'\[(.*)\]', '', ordValueLevel1).rstrip().lower()
                            ord3Short = re.sub(r'\[(.*)\]', '', ordValueLevel3).rstrip().lower()
                            link = "?jobtype=%s&display_limit=100&gshare=%s" % (ord1Short, ord3Short)
                            shareValue['link'] = link
                            shareValue['level'] = 'level3'
                            shareValue['gshare'] = ord3Short.replace(' ', '_')
                        newTablesRow.append(shareValue)
                        tablerows.remove(shareValue)
                        if 'level' in shareValue:
                            add_resources(ord3Short, newTablesRow, resources_list, shareValue['level'])
                        break

    tablerows = newTablesRow

    del request.session['TFIRST']
    del request.session['TLAST']
    ##self monitor
    endSelfMonitor(request)
    # ('application/json',) is a one-element tuple: with a bare string the
    # 'in' test would be a substring check
    if (not (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('application/json',))) and (
                'json' not in request.session['requestParams'])):
        data = {
            'request': request,
            'viewParams': request.session['viewParams'],
            'requestParams': request.session['requestParams'],
            'globalshares': gs,
            'xurl': extensibleURL(request),
            'gsPlotData': gsPlotData,
            'tablerows': tablerows,
            'built': datetime.now().strftime("%H:%M:%S"),
        }
        response = render_to_response('globalshares.html', data, content_type='text/html')
        setCacheEntry(request, "globalshares", json.dumps(data, cls=DateEncoder), 60 * 20)
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        return response
    else:
        return HttpResponse(DecimalEncoder().encode(gs), content_type='text/html')
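This variant of globalshares differs from Example No. 2 mainly in its JSON branch, which serializes with DecimalEncoder because the share statistics carry Decimal values that plain json.dumps rejects. DecimalEncoder is defined elsewhere in the project; a minimal sketch of such an encoder (an assumption about its actual implementation):

import json
from decimal import Decimal

class DecimalEncoder(json.JSONEncoder):
    # Render Decimal values as floats; everything else falls through to the
    # default encoder. The project's real encoder may differ in detail.
    def default(self, obj):
        if isinstance(obj, Decimal):
            return float(obj)
        return super(DecimalEncoder, self).default(obj)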
Example No. 5
def compareJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    pandaidstr = None
    if 'pandaid' in request.session['requestParams']:
        pandaidstr = request.session['requestParams']['pandaid'].split('|')
    else:
        query = {}
        query['userid'] = request.user.id
        query['object'] = 'job'
        try:
            jobsComparison = ObjectsComparison.objects.get(**query)
            pandaidstr = json.loads(jobsComparison.comparisonlist)
        except ObjectsComparison.DoesNotExist:
            pandaidstr = None


    if not pandaidstr:
        return render_to_response('errorPage.html', {'errormessage': 'No pandaids for comparison provided'}, content_type='text/html')

    pandaids = []
    for pid in pandaidstr:
        try:
            pandaids.append(int(pid))
        except ValueError:  # skip non-numeric ids
            pass
    maxNJobs = 5
    if len(pandaids) > maxNJobs:
        pandaids = pandaids[:maxNJobs]


    jobInfoJSON = []

    # Looking for a job in cache
    pandaidsToBeLoad = []
    for pandaid in pandaids:
        data = getCacheEntry(request, "compareJob_" + str(pandaid), isData=True)
        # data = None
        if data is not None:
            jobInfoJSON.append(json.loads(data))
        else:
            pandaidsToBeLoad.append(pandaid)

    #Loading jobs info in parallel
    nprocesses = maxNJobs
    if len(pandaidsToBeLoad) > 0:
        url_params = [('?json=1&pandaid=' + str(pid)) for pid in pandaidsToBeLoad]
        pool = multiprocessing.Pool(processes=nprocesses)
        jobInfoJSON.extend(pool.map(job_info_getter, url_params))
        pool.close()
        pool.join()

    #Put loaded jobs info to cache
    for job in jobInfoJSON:
        key = list(job.keys())[0]
        setCacheEntry(request, "compareJob_" + str(key),
                      json.dumps(job[key], cls=DateEncoder), 60 * 30, isData=True)

    compareParamNames = {'produsername': '******', 'reqid': 'Request ID', 'jeditaskid': 'Task ID', 'jobstatus': 'Status',
                         'attemptnr': 'Attempt', 'creationtime': 'Created', 'waittime': 'Time to start', 'duration': 'Duration',
                         'modificationtime': 'Modified', 'cloud': 'Cloud', 'computingsite': 'Site', 'currentpriority': 'Priority',
                         'jobname': 'Name', 'processingtype': 'Type', 'transformation': 'Transformation', 'proddblock': 'Input',
                         'destinationdblock': 'Output', 'jobsetid': 'Jobset ID', 'batchid': 'Batch ID', 'eventservice': 'Event Service'}

    compareParams = ['produsername', 'reqid', 'jeditaskid', 'jobstatus', 'attemptnr', 'creationtime', 'waittime', 'duration',
                     'modificationtime', 'cloud', 'computingsite', 'currentpriority',
                     'jobname', 'processingtype', 'transformation', 'proddblock', 'destinationdblock', 'jobsetid', 'batchid', 'eventservice']

    # Params excluded because their values are too long
    excludedParams = ['metadata', 'metastruct']

    jobsComparisonMain = []
    for param in compareParams:
        row = [{'paramname': compareParamNames[param]}]
        for jobd in jobInfoJSON:
            job = jobd['job']
            if param in job:
                row.append({'value': job[param]})
            else:
                row.append({'value': '-'})
        if len(set([d['value'] for d in row if 'value' in d])) == 1:
            row[0]['mark'] = 'equal'
        jobsComparisonMain.append(row)


    all_params = []
    for jobd in jobInfoJSON:
        all_params.extend(list(jobd['job'].keys()))
    all_params = sorted(set(all_params))

    jobsComparisonAll = []
    for param in all_params:
        if param not in excludedParams:
            row = [{'paramname': param}]
            for jobd in jobInfoJSON:
                job = jobd['job']
                if param in job and job[param] is not None:
                    row.append({'value': job[param]})
                else:
                    row.append({'value': '-'})
            if len(set([d['value'] for d in row if 'value' in d])) == 1:
                row[0]['mark'] = 'equal'
            jobsComparisonAll.append(row)


    xurl = extensibleURL(request)
    data = {
        'request': request,
        'viewParams': request.session['viewParams'],
        'requestParams': request.session['requestParams'],
        'url': request.path,
        'jobsComparisonMain': jobsComparisonMain,
        'jobsComparisonAll': jobsComparisonAll,
        'pandaids': pandaids,
        'xurl': xurl,
        'built': datetime.now().strftime("%H:%M:%S"),
    }

    ##self monitor
    endSelfMonitor(request)
    response = render_to_response('compareJobs.html', data, content_type='text/html')
    patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
    return response
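Both compareJobs variants serialize cached entries with DateEncoder which, like DecimalEncoder, is defined elsewhere in the project. A plausible sketch, assuming it simply makes datetime values JSON-safe:

import json
from datetime import date, datetime

class DateEncoder(json.JSONEncoder):
    # Render dates and datetimes as ISO strings so json.dumps accepts them;
    # the project's real DateEncoder may pick a different format.
    def default(self, obj):
        if isinstance(obj, (datetime, date)):
            return obj.isoformat()
        return super(DateEncoder, self).default(obj)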
Example No. 6
def artJobs(request):
    valid, response = initRequest(request)
    if not valid: return response

    # Here we try to get cached data
    data = getCacheEntry(request, "artJobs")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if 'ntaglist' in data:
            if len(data['ntaglist']) > 0:
                ntags = []
                for ntag in data['ntaglist']:
                    try:
                        ntags.append(datetime.strptime(ntag, artdateformat))
                    except ValueError:
                        pass
                if len(ntags) > 1 and 'requestParams' in data:
                    data['requestParams']['ntag_from'] = min(ntags)
                    data['requestParams']['ntag_to'] = max(ntags)
                elif len(ntags) == 1:
                    data['requestParams']['ntag'] = ntags[0]
        response = render_to_response('artJobs.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response


    query = setupView(request, 'job')

    cur = connection.cursor()
    if datetime.strptime(query['ntag_from'], '%Y-%m-%d') < datetime.strptime('2018-03-20', '%Y-%m-%d'):
        cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.ARTTESTS('%s','%s','%s'))" % (query['ntag_from'], query['ntag_to'], query['strcondition']))
    else:
        cur.execute("SELECT * FROM table(ATLAS_PANDABIGMON.ARTTESTS_1('%s','%s','%s'))" % (query['ntag_from'], query['ntag_to'], query['strcondition']))
    jobs = cur.fetchall()
    cur.close()

    artJobsNames = ['taskid','package', 'branch', 'ntag', 'nightly_tag', 'testname', 'jobstatus', 'origpandaid', 'computingsite', 'endtime', 'starttime' , 'maxvmem', 'cpuconsumptiontime', 'guid', 'scope', 'lfn', 'taskstatus', 'taskmodificationtime', 'jobmodificationtime', 'result']
    jobs = [dict(zip(artJobsNames, row)) for row in jobs]

    # i=0
    # for job in jobs:
    #     i+=1
    #     print 'registering %i out of %i jobs' % (i, len(jobs))
    #     x = ArtTest(job['origpandaid'], job['testname'], job['branch'].split('/')[0], job['branch'].split('/')[1],job['branch'].split('/')[2], job['package'], job['nightly_tag'])
    #     if x.registerArtTest():
    #         print '%i job registered sucessfully out of %i' % (i, len(jobs))

    ntagslist = list(sorted(set([x['ntag'] for x in jobs])))
    jeditaskids = list(sorted(set([x['taskid'] for x in jobs])))

    artjobsdict = {}
    if 'view' not in request.session['requestParams'] or (
            'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'packages'):
        for job in jobs:
            if job['package'] not in artjobsdict.keys():
                artjobsdict[job['package']] = {}
            if job['branch'] not in artjobsdict[job['package']].keys():
                artjobsdict[job['package']][job['branch']] = {}
            if job['testname'] not in artjobsdict[job['package']][job['branch']].keys():
                artjobsdict[job['package']][job['branch']][job['testname']] = {}
                for n in ntagslist:
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)] = {}
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    artjobsdict[job['package']][job['branch']][job['testname']][n.strftime(artdateformat)]['jobs'] = []
            if job['ntag'].strftime(artdateformat) in artjobsdict[job['package']][job['branch']][job['testname']]:
                jobdict = {}
                jobdict['jobstatus'] = job['jobstatus']
                jobdict['origpandaid'] = job['origpandaid']
                jobdict['linktext'] = job['branch'] + '/' + job['nightly_tag'] + '/' + job['package'] + '/' + job['testname'][:-3]
                jobdict['computingsite'] = job['computingsite']
                jobdict['guid'] = job['guid']
                jobdict['scope'] = job['scope']
                jobdict['lfn'] = job['lfn']
                jobdict['jeditaskid'] = job['taskid']
                jobdict['maxvmem'] = round(job['maxvmem']*1.0/1000,1) if job['maxvmem'] is not None else '---'
                jobdict['cpuconsumptiontime'] = job['cpuconsumptiontime'] if job['jobstatus'] in ('finished', 'failed') else '---'
                if job['jobstatus'] in ('finished', 'failed'):
                    jobdict['duration'] = job['endtime'] - job['starttime']
                else:
                    jobdict['duration'] = str(datetime.now() - job['starttime']).split('.')[0] if job['starttime'] is not None else "---"
                try:
                    jobdict['tarindex'] = int(re.search('.([0-9]{6}).log.', job['lfn']).group(1))
                except Exception:  # no tar index found in the lfn
                    jobdict['tarindex'] = ''

                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)

                jobdict['finalresult'] = finalresult
                jobdict['testexitcode'] = testexitcode
                jobdict['testresult'] = subresults
                jobdict['testdirectory'] = testdirectory

                artjobsdict[job['package']][job['branch']][job['testname']][job['ntag'].strftime(artdateformat)]['jobs'].append(jobdict)

    elif 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'branches':
        for job in jobs:
            if job['branch'] not in artjobsdict.keys():
                artjobsdict[job['branch']] = {}
            if job['package'] not in artjobsdict[job['branch']].keys():
                artjobsdict[job['branch']][job['package']] = {}
            if job['testname'] not in artjobsdict[job['branch']][job['package']].keys():
                artjobsdict[job['branch']][job['package']][job['testname']] = {}
                for n in ntagslist:
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)] = {}
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    artjobsdict[job['branch']][job['package']][job['testname']][n.strftime(artdateformat)]['jobs'] = []
            if job['ntag'].strftime(artdateformat) in artjobsdict[job['branch']][job['package']][job['testname']]:
                jobdict = {}
                jobdict['jobstatus'] = job['jobstatus']
                jobdict['origpandaid'] = job['origpandaid']
                jobdict['linktext'] = job['branch'] + '/' + job['nightly_tag'] + '/' + job['package'] + '/' + job['testname'][:-3]
                jobdict['computingsite'] = job['computingsite']
                jobdict['guid'] = job['guid']
                jobdict['scope'] = job['scope']
                jobdict['lfn'] = job['lfn']
                jobdict['jeditaskid'] = job['taskid']
                jobdict['maxvmem'] = round(job['maxvmem'] * 1.0 / 1000, 1) if job['maxvmem'] is not None else '---'
                jobdict['cpuconsumptiontime'] = job['cpuconsumptiontime'] if job['jobstatus'] in ('finished', 'failed') else '---'
                if job['jobstatus'] in ('finished', 'failed'):
                    jobdict['duration'] = job['endtime'] - job['starttime']
                else:
                    jobdict['duration'] = str(datetime.now() - job['starttime']).split('.')[0] if job['starttime'] is not None else "---"
                try:
                    jobdict['tarindex'] = int(re.search('.([0-9]{6}).log.', job['lfn']).group(1))
                except Exception:  # no tar index found in the lfn
                    jobdict['tarindex'] = ''

                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)

                jobdict['finalresult'] = finalresult
                jobdict['testexitcode'] = testexitcode
                jobdict['testresult'] = subresults
                jobdict['testdirectory'] = testdirectory
                artjobsdict[job['branch']][job['package']][job['testname']][job['ntag'].strftime(artdateformat)]['jobs'].append(jobdict)


    xurl = extensibleURL(request)
    noviewurl = removeParam(xurl, 'view', mode='extensible')

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):

        data = {
            'artjobs': artjobsdict,
        }

        dump = json.dumps(data, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'artjobs': artjobsdict,
            'noviewurl': noviewurl,
            'ntaglist': [ntag.strftime(artdateformat) for ntag in ntagslist],
            'taskids': jeditaskids,
        }
        setCacheEntry(request, "artJobs", json.dumps(data, cls=DateEncoder), 60 * cache_timeout)
        response = render_to_response('artJobs.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
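The packages and branches views in artJobs differ only in the order of the first two nesting keys. The repeated membership checks that build the package/branch/testname/ntag tree could be folded into one helper; a sketch, assuming artdateformat and humandateformat are the module-level format strings used above:

def ensure_test_slot(tree, key1, key2, testname, ntagslist,
                     artdateformat, humandateformat):
    # Create (once) the nested slots the view fills in: one entry per
    # nightly tag, holding the human-readable tag and an empty jobs list.
    tests = tree.setdefault(key1, {}).setdefault(key2, {})
    if testname not in tests:
        tests[testname] = {
            n.strftime(artdateformat): {
                'ntag_hf': n.strftime(humandateformat),
                'jobs': [],
            }
            for n in ntagslist
        }
    return tests[testname]

With it, each loop body reduces to fetching the slot for the current job and appending jobdict to slot[job['ntag'].strftime(artdateformat)]['jobs'].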
Example No. 7
def artTasks(request):
    valid, response = initRequest(request)
    query = setupView(request, 'job')

    # Here we try to get cached data
    data = getCacheEntry(request, "artTasks")
    # data = None
    if data is not None:
        data = json.loads(data)
        data['request'] = request
        if 'ntaglist' in data:
            if len(data['ntaglist']) > 0:
                ntags = []
                for ntag in data['ntaglist']:
                    try:
                        ntags.append(datetime.strptime(ntag, artdateformat))
                    except ValueError:
                        pass
                if len(ntags) > 1 and 'requestParams' in data:
                    data['requestParams']['ntag_from'] = min(ntags)
                    data['requestParams']['ntag_to'] = max(ntags)
                elif len(ntags) == 1:
                    data['requestParams']['ntag'] = ntags[0]
        response = render_to_response('artTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response

    cur = connection.cursor()
    if datetime.strptime(query['ntag_from'], '%Y-%m-%d') < datetime.strptime('2018-03-20', '%Y-%m-%d'):
        query_raw = """SELECT package, branch, ntag, taskid, status, result FROM table(ATLAS_PANDABIGMON.ARTTESTS('%s','%s','%s'))""" % (query['ntag_from'], query['ntag_to'], query['strcondition'])
    else:
        query_raw = """SELECT package, branch, ntag, taskid, status, result FROM table(ATLAS_PANDABIGMON.ARTTESTS_1('%s','%s','%s'))""" % (query['ntag_from'], query['ntag_to'], query['strcondition'])

    cur.execute(query_raw)
    tasks_raw = cur.fetchall()
    cur.close()

    artJobsNames = ['package', 'branch', 'ntag', 'task_id', 'jobstatus', 'result']
    jobs = [dict(zip(artJobsNames, row)) for row in tasks_raw]

    # tasks = ARTTasks.objects.filter(**query).values('package','branch','task_id', 'ntag', 'nfilesfinished', 'nfilesfailed')
    ntagslist = list(sorted(set([x['ntag'] for x in jobs])))
    statestocount = ['finished', 'failed', 'active']
    arttasksdict = {}
    if 'view' not in request.session['requestParams'] or ('view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'packages'):
        for job in jobs:
            if job['package'] not in arttasksdict.keys():
                arttasksdict[job['package']] = {}
            if job['branch'] not in arttasksdict[job['package']].keys():
                arttasksdict[job['package']][job['branch']] = {}
                for n in ntagslist:
                    arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)] = {}
                    arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    for state in statestocount:
                        arttasksdict[job['package']][job['branch']][n.strftime(artdateformat)][state] = 0
            if job['ntag'].strftime(artdateformat) in arttasksdict[job['package']][job['branch']]:
                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)
                arttasksdict[job['package']][job['branch']][job['ntag'].strftime(artdateformat)][finalresult] += 1
    elif 'view' in request.session['requestParams'] and request.session['requestParams']['view'] == 'branches':
        for job in jobs:
            if job['branch'] not in arttasksdict.keys():
                arttasksdict[job['branch']] = {}
            if job['package'] not in arttasksdict[job['branch']].keys():
                arttasksdict[job['branch']][job['package']] = {}
                for n in ntagslist:
                    arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)] = {}
                    arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)]['ntag_hf'] = n.strftime(humandateformat)
                    for state in statestocount:
                        arttasksdict[job['branch']][job['package']][n.strftime(artdateformat)][state] = 0
            if job['ntag'].strftime(artdateformat) in arttasksdict[job['branch']][job['package']]:
                finalresult, testexitcode, subresults, testdirectory = getFinalResult(job)
                arttasksdict[job['branch']][job['package']][job['ntag'].strftime(artdateformat)][finalresult] += 1

    xurl = extensibleURL(request)
    noviewurl = removeParam(xurl, 'view', mode='extensible')

    if (('HTTP_ACCEPT' in request.META) and (request.META.get('HTTP_ACCEPT') in ('text/json', 'application/json'))) or (
        'json' in request.session['requestParams']):

        data = {
            'arttasks': arttasksdict,
        }

        dump = json.dumps(data, cls=DateEncoder)
        return HttpResponse(dump, content_type='text/html')
    else:
        data = {
            'requestParams': request.session['requestParams'],
            'viewParams': request.session['viewParams'],
            'arttasks': arttasksdict,
            'noviewurl': noviewurl,
            'ntaglist': [ntag.strftime(artdateformat) for ntag in ntagslist],
        }

        setCacheEntry(request, "artTasks", json.dumps(data, cls=DateEncoder), 60 * cache_timeout)
        response = render_to_response('artTasks.html', data, content_type='text/html')
        patch_response_headers(response, cache_timeout=request.session['max_age_minutes'] * 60)
        endSelfMonitor(request)
        return response
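The per-cell counting in artTasks reduces to tallying final results per (group, subgroup, ntag). A standalone sketch of that reduction, where get_final_state stands in for the first element of getFinalResult's return tuple:

def tally_results(jobs, get_final_state,
                  states=('finished', 'failed', 'active'),
                  dateformat='%Y-%m-%d'):
    # Count final test states per (package, branch, ntag) cell, mirroring
    # the statestocount loop above; get_final_state is assumed to return
    # one of the listed states for a job row.
    counts = {}
    for job in jobs:
        key = (job['package'], job['branch'], job['ntag'].strftime(dateformat))
        cell = counts.setdefault(key, dict.fromkeys(states, 0))
        cell[get_final_state(job)] += 1
    return counts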