Example #1
            #extrap.SetRange()
            extrap.Draw("same")
            a = extrap.GetParameter(0)
            b = extrap.GetParameter(1)
            if b != 0:
                eta = (Nexpected - a) / b
                print "ETA is:", eta, "weeks", eta * 7., "days"
            else:
                print "cannot compute ETA"
            # block forever so the plot stays on screen
            while True:
                time.sleep(1)
    else:
        print one, "still has no entries"

if __name__ == "__main__":
    i = Interface('http://cms-pdmv-golem.cern.ch:5984/stats')

    allDocs = i.get_all_files()
    docs = [doc['id'] for doc in allDocs['rows']]

    #test='vlimant_Winter532012BMuOniaParked_ASGCPrio1_537p5_130122_195107_1235'
    test='nnazirid_BPH-Summer12_DR53X-00091_T1_FR_CCIN2P3_MSS_000_v1__130121_143546_6312'
    #test='nnazirid_HIG-Summer12_DR53X-00956_T1_FR_CCIN2P3_MSS_000_v1__130121_153018_5646'
    
    for one in docs:
        if one != test: continue
        
        thisDoc = i.get_file_info_withrev(one)
        plotGrowth(thisDoc, i, wait=True)
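
The ETA logic above solves the fitted line N(t) = a + b*t for the time at which N reaches Nexpected. A minimal standalone sketch of that calculation, without the ROOT dependency and with made-up values standing in for extrap.GetParameter(0) and extrap.GetParameter(1):

def eta_weeks(n_expected, a, b):
    """Solve n_expected = a + b*t for t (in weeks); None if the slope is zero."""
    if b == 0:
        return None  # flat growth: the target is never reached
    return (n_expected - a) / float(b)

# hypothetical fit results: intercept a, slope b in events per week
eta = eta_weeks(1000000., 250000., 75000.)
if eta is not None:
    print "ETA is:", eta, "weeks", eta * 7., "days"
else:
    print "cannot compute ETA"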
    
Example #2
def main_do(options):
    logger.info("Running main")
    if options.check:
        logger.info('Check')
        # check whether this script is already running with the same parameters
        checks = ['ps -f -u $USER']
        for arg in sys.argv[1:]:
            checks.append('grep "%s"' % (arg.split('/')[-1].replace('--', '')))
        checks.append('grep -v grep')
        check = filter(None, os.popen("|".join(checks)).read().split('\n'))
        if len(check) != 1:
            logger.error("Already running with that exact setting")
            logger.info(check)
            sys.exit(1)
        else:
            logger.info("ok to operate")

    start_time = time.asctime()
    global statsCouch, docs, FORCE
    # interface to the couchDB
    statsCouch = Interface(options.db + ':5984/stats')

    # get from stats couch the list of requests
    view = 'yearAgo' if options.do == 'update' else 'all'
    # in case we want to force update even older workflows
    if options.force:
        view = 'all'

    logger.info("Getting all stats ...")
    allDocs = statsCouch.get_view(view)
    docs = set([doc['id'] for doc in allDocs['rows']])
    # remove the _design/stats
    if view == 'all':
        docs = set(filter(lambda doc: not doc.startswith('_'), docs))

    logger.info("... done")

    nproc = 4
    limit = None
    if options.test:
        limit = 10

    if options.do == 'insert':
        logger.info('do = insert')
        # get from wm couch
        from statsMonitoring import parallel_test, get_requests_list
        logger.info("Getting all req ...")
        req_list = get_requests_list()
        logger.info("... done")

        # insert new requests, not already in stats couch into stats couch
        # insertAll(req_list,docs,options.search,limit)

        logger.info('Will filter')
        if options.search:
            req_list = filter(lambda req: options.search in req["request_name"], req_list)
            logger.info('%d requests after search' % (len(req_list)))

        # print "req_list: " % (req_list)
        # skip malformed ones
        req_list = filter(lambda req: "status" in req, req_list)
        logger.info('%d requests after skipping malformed' % (len(req_list)))

        # keep only the ones not already in stats couch
        req_list = filter(lambda req: req["request_name"] not in docs, req_list)
        logger.info('%d after filtering out those already in stats' % (len(req_list)))

        # skip trying to insert aborted and rejected or failed
        # req_list = filter( lambda req : not req["status"] in ['aborted','rejected','failed','aborted-archived','rejected-archived','failed-archived'], req_list )
        req_list = filter(lambda req: req["status"] not in ['aborted', 'rejected', 'failed', None], req_list)
        logger.info('%d after skipping aborted, rejected, failed and None' % (len(req_list)))

        # do not update TaskChain request statuses
        # req_list = filter( lambda req : 'type' in req and req['type']!='TaskChain', req_list)
        # logger.info('Requests %d' % (len(req_list)))

        if limit:
            req_list = req_list[0:limit]
            logger.info('%d after limiting' % (len(req_list)))

        logger.info('Dispatching %d requests to %d processes' % (len(req_list), nproc))
        pool = multiprocessing.Pool(nproc)
        results = pool.map(insertOne, req_list)
        logger.info('End dispatching')

        results = filter(lambda item: item is not False, results)
        logger.info('%d inserted' % (len(results)))
        logger.info(str(results))
        """
        showme=''
        for r in results:
            showme+='\t'+r+'\n'
        print showme
        """
    elif options.do == 'kill' or options.do == 'list':
        logger.info('do = kill OR do = list')
        # get from wm couch
        from statsMonitoring import parallel_test, get_requests_list
        logger.info("Getting all req ...")
        req_list = get_requests_list()
        logger.info("... done")

        removed = []
        if options.search:
            req_list = filter(lambda req: options.search in req["request_name"], req_list)
            for r in req_list:
                logger.info("Found %s in status %s?" % (r['request_name'], (r['status'] if 'status' in r else 'undef')))
                if options.do == 'kill':
                    # print "killing",r['request_name'],"in status",(r['status'] if 'status' in r else 'undef'),"?"
                    docid = r['request_name']
                    if docid in docs and docid not in removed:
                        thisDoc = statsCouch.get_file_info(docid)
                        logger.info("Removing record for docid %s" % (docid))
                        statsCouch.delete_file_info(docid, thisDoc['_rev'])
                        removed.append(docid)
                    else:
                        logger.info("Nothing to kill")

    elif options.do == 'update':
        logger.info('do = update')
        __newest = True
        if options.search:
            __newest = False
        # get from wm couch
        from statsMonitoring import get_requests_list
        logger.info("Getting all req ...")
        req_list = get_requests_list(not_in_wmstats=options.nowmstats, newest=__newest)
        logger.info("... done")

        cookie_path = '/home/pdmvserv/private/prod_cookie.txt'
        if options.mcm:
            sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/')
            from rest import restful
            mcm = restful(dev=False, cookie=cookie_path)
            rs = mcm.getA('requests', query='status=submitted')
            rids = map(lambda d: d['prepid'], rs)

            logger.info("Got %d to update from mcm" % (len(rids)))
            # print len(docs),len(req_list)
            # print map( lambda docid : any( map(lambda rid : rid in doc, rids)), docs)
            docs = filter(lambda docid: any(map(lambda rid: rid in docid, rids)), docs)
            if len(docs):
                # req_list = filter(lambda req: any(map(lambda rid: rid in req["request_name"], rids)), req_list)
                req_list = filter(lambda req: req['request_name'] in docs, req_list)

        if options.search:
            if options.force:
                FORCE = True
            docs = filter(lambda docid: options.search in docid, docs)
            if len(docs):
                # req_list = filter(lambda req: options.search in req["request_name"], req_list)
                req_list = filter(lambda req: req['request_name'] in docs, req_list)
                if len(req_list):
                    pprint.pprint(req_list)

        if limit:
            req_list = req_list[0:limit]

        request_dict = {}
        for request in req_list:
            if request['request_name'] in request_dict:
                request_dict[request['request_name']].append(request)
                logger.info('APPEND! %s' % (request['request_name']))
            else:
                request_dict[request['request_name']] = [request]

        logger.info("Dispaching %d requests to %d processes..." % (len(request_dict), nproc))
        pool = multiprocessing.Pool(nproc)
        results = pool.map(updateOneIt, request_dict.iteritems())

        logger.info("End dispatching")

        if options.search:
            dump = dumpSome(docs, limit)
            logger.info("Result from update with search")
            pprint.pprint(dump)

        results = filter(lambda item: item is not False, results)
        logger.info('%d updated' % (len(results)))
        logger.info(str(results))

        print "\n\n"
        # for r in results:
        #     try:
        #         withRevisions = statsCouch.get_file_info_withrev(r)
        #         # we shouldn't trigger McM for ReRecos or RelVals, which don't exist there
        #         if any(el in withRevisions['pdmv_prep_id'].lower() for el in ['relval', 'rereco']):
        #             logger.info("NOT bothering McM for rereco or relval: %s" % (withRevisions['pdmv_prep_id']))
        #             continue

        #         # here we should trigger a McM update if the request is done,
        #         # because inspection doesn't exist for done requests.
        #         if (withRevisions['pdmv_type'] != 'Resubmission' and
        #                 withRevisions['pdmv_prep_id'].strip() not in ['No-Prepid-Found', '', 'None', '_'] and
        #                 withRevisions['pdmv_status_from_reqmngr'] == "normal-archived"):
        #             # we should trigger this only if events_in_das was updated for done
        #             logger.info("Triggering McM completed_evts syncing for a done request %s" % (r))
        #             update_comm = 'curl -s -k -L --cookie %s https://cms-pdmv.cern.ch/mcm/restapi/requests/fetch_stats_by_wf/%s' % (cookie_path, r)
        #             os.system(update_comm)
        #         else:
        #             logger.info('%s type (%s) is either Resubmission OR prepid (%s) is bad OR it\'s not normal-archived (%s)' % (r,
        #                                                                                                                          withRevisions['pdmv_type'],
        #                                                                                                                          withRevisions['pdmv_prep_id'],
        #                                                                                                                          withRevisions['pdmv_status_from_reqmngr']))
        #     except:
        #         logger.error("failed to update growth for %s" % (r))
        #         logger.error(str(traceback.format_exc()))

        print "\n\n"
        # write a timestamp to the log file;
        # it serves as the force-update marker
        logger.info("start time: %s" % str(start_time))
        logger.info("logging updating time: %s" % str(time.asctime()))
        log_file = open('stats.log', 'a')
        log_file.write(time.asctime() + '\n')
        log_file.close()
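
The options.check guard above builds the shell pipeline ps -f -u $USER | grep "<arg>" | ... | grep -v grep and expects exactly one surviving line: the current process itself. A minimal standalone sketch of the same idea, using subprocess instead of os.popen (the pipeline and the one-line convention are taken from the examples; everything else is illustrative):

import subprocess
import sys

def already_running():
    """True if another process matches this script's command-line arguments."""
    pipeline = ['ps -f -u $USER']
    for arg in sys.argv[1:]:
        # strip any path prefix and leading dashes, as the examples do
        pipeline.append('grep "%s"' % arg.split('/')[-1].replace('--', ''))
    pipeline.append('grep -v grep')  # drop the grep processes themselves
    proc = subprocess.Popen(' | '.join(pipeline), shell=True, stdout=subprocess.PIPE)
    out, _ = proc.communicate()
    lines = [line for line in out.split('\n') if line]
    # one surviving line is this very process; more means a duplicate run
    return len(lines) != 1

if already_running():
    print "already running with that exact setting"
    sys.exit(1)
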
Example #3
def main_do(options):

    if options.check:
        # check whether this script is already running with the same parameters
        checks = ['ps -f -u $USER']
        for arg in sys.argv[1:]:
            checks.append('grep "%s"' % (arg.split('/')[-1].replace('--', '')))
        checks.append('grep -v grep')
        c = " | ".join(checks)
        check = filter(None, os.popen(c).read().split('\n'))
        if len(check) != 1:
            print "already running with that exact setting"
            print check
            sys.exit(1)
        else:
            print "ok to operate"

    start_time = time.asctime()
    global statsCouch, docs, FORCE
    # interface to the couchDB
    statsCouch = Interface(options.db + ':5984/stats')


    ## get from stats couch the list of requests
    print "Getting all stats ..."
    allDocs = statsCouch.get_view('all')
    docs = [doc['id'] for doc in allDocs['rows']]
    #remove the _design/stats
    docs = filter(lambda doc: not doc.startswith('_'), docs)
    print "... done"

    nproc = 5
    limit = None
    if options.test:
        limit = 10

    if options.do == 'insert':
        ## get from wm couch
        from statsMonitoring import parallel_test, get_requests_list
        print "Getting all req ..."
        req_list = get_requests_list()
        print "... done"

        ## insert new requests, not already in stats couch into stats couch
        #insertAll(req_list,docs,options.search,limit)

        if options.search:
            req_list = filter(lambda req: options.search in req["request_name"], req_list)
            #print len(req_list)

        #skip malformed ones
        req_list = filter(lambda req: "status" in req, req_list)
        #print len(req_list)

        #take only the ones not already in there
        req_list = filter(lambda req: req["request_name"] not in docs, req_list)
        #print len(req_list)

        #skip trying to insert aborted and rejected or failed
        #req_list = filter( lambda req : not req["status"] in ['aborted','rejected','failed','aborted-archived','rejected-archived','failed-archived'], req_list )
        req_list = filter(lambda req: req["status"] not in ['aborted', 'rejected', 'failed'], req_list)
        #print len(req_list)

        #do not update TaskChain request statuses
        #req_list = filter( lambda req : 'type' in req and req['type']!='TaskChain', req_list)
        #print len(req_list)

        pprint.pprint(req_list)

        if limit:
            req_list = req_list[0:limit]
            #print len(req_list)

        print "Dispatching", len(req_list), "requests to", str(nproc), "processes..."
        pool = multiprocessing.Pool(nproc)
        results = pool.map(insertOne, req_list)
        print "End dispatching!"

        results = filter(lambda item: item is not False, results)
        print len(results), "inserted"
        print str(results)
        """
        showme=''
        for r in results:
            showme+='\t'+r+'\n'
        print showme
        """
    elif options.do == 'kill' or options.do == 'list':
        ## get from wm couch
        from statsMonitoring import parallel_test, get_requests_list
        print "Getting all req ..."
        req_list = get_requests_list()
        print "... done"

        removed = []
        if options.search:
            req_list = filter(lambda req: options.search in req["request_name"], req_list)
            for r in req_list:
                print "Found", r['request_name'], "in status", (r['status'] if 'status' in r else 'undef'), "?"
                if options.do == 'kill':
                    #print "killing",r['request_name'],"in status",(r['status'] if 'status' in r else 'undef'),"?"
                    docid = r['request_name']
                    if docid in docs and docid not in removed:
                        thisDoc = statsCouch.get_file_info(docid)
                        print "removing record for docid", docid
                        statsCouch.delete_file_info(docid, thisDoc['_rev'])
                        removed.append(docid)
                    else:
                        print "nothing to kill"

    elif options.do == 'update':
        __newest = True
        if options.search:
            __newest = False
        ## get from wm couch
        from statsMonitoring import parallel_test, get_requests_list
        print "Getting all req ..."
        req_list = get_requests_list(not_in_wmstats=options.nowmstats, newest=__newest)
        print "... done"

        ## unthreaded
        #updateSeveral(docs,req_list,pattern=None)

        if options.mcm:
            sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/')
            from rest import restful
            mcm = restful(dev=False, cookie='/afs/cern.ch/user/p/pdmvserv/private/prod-cookie.txt')
            rs = mcm.getA('requests', query='status=submitted')
            rids = map(lambda d: d['prepid'], rs)

            print "Got", len(rids), "to update from mcm"
            #print len(docs),len(req_list)
            #print map( lambda docid : any( map(lambda rid : rid in doc, rids)), docs)
            docs = filter(lambda docid: any(map(lambda rid: rid in docid, rids)), docs)
            if not len(docs):
                req_list = filter(lambda req: any(map(lambda rid: rid in req["request_name"], rids)), req_list)

        if options.search:
            if options.force:
                FORCE = True
            docs = filter(lambda docid: options.search in docid, docs)
            if not len(docs):
                req_list = filter(lambda req: options.search in req["request_name"], req_list)
                if len(req_list):
                    pprint.pprint(req_list)
        if limit:
            docs = docs[0:limit]


        repeated_req_list = itertools.repeat(req_list, len(docs))

        print "Dispaching", len(docs), "requests to ", str(nproc), "processes..."
        pool = multiprocessing.Pool(nproc)
        results = pool.map(updateOneIt, itertools.izip(docs, repeated_req_list))

        print "End dispatching!"

        if options.search:
            dump = dumpSome(docs, limit)
            print "Result from update with search"
            pprint.pprint(dump)

        results = filter(lambda item: item is not False, results)
        print len(results), "updated"
        print results

        print "\n\n"
        ## update the growth plots
        from growth import plotGrowth
        for r in results:
            try:
                withRevisions = statsCouch.get_file_info_withrev(r)
                plotGrowth(withRevisions, statsCouch, force=FORCE)
                ## notify McM of the update
                if (withRevisions['pdmv_prep_id'].strip() not in ['No-Prepid-Found','','None']) and options.inspect and '_' not in withRevisions['pdmv_prep_id']:
                    inspect = 'curl -s -k --cookie ~/private/prod-cookie.txt https://cms-pdmv.cern.ch/mcm/restapi/requests/inspect/%s' % withRevisions['pdmv_prep_id']
                    os.system(inspect)
                ## here we should trigger a McM update if the request is done,
                ## because inspection doesn't exist for done requests.
                if (withRevisions['pdmv_type'] != 'Resubmission' and
                    withRevisions['pdmv_prep_id'].strip() not in ['No-Prepid-Found',
                            '', 'None', '_'] and
                    withRevisions['pdmv_status_from_reqmngr'] == "normal-archived"):
                    ## we should trigger this only if events_in_das was updated for done
                    update_comm = 'curl -s -k --cookie ~/private/prod-cookie.txt https://cms-pdmv.cern.ch/mcm/restapi/requests/update_stats/%s/no_refresh' % withRevisions['pdmv_prep_id']
                    print "Triggering McM completed_evts syncing for a done request %s" % (
                            withRevisions['pdmv_prep_id'])

                    os.system(update_comm)
            except Exception:
                print "failed to update growth for", r
                print traceback.format_exc()


        print "\n\n"
        ## write a timestamp to the log file;
        # it serves as the force-update marker
        print "start time: ", start_time
        print "logging updating time:", time.asctime()
        log_file = open('stats.log', 'a')
        log_file.write(time.asctime() + '\n')
        log_file.close()
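
Both versions fan the work out with multiprocessing.Pool.map, which passes a single argument to the worker; Example #3 therefore pairs every doc with the same shared req_list via itertools.repeat/izip and lets updateOneIt unpack the tuple. A minimal sketch of that dispatch pattern, with a toy worker and toy data standing in for the real updateOneIt and request list:

import itertools
import multiprocessing

def update_one(doc_and_reqs):
    # pool.map passes one argument, so the worker unpacks the (doc, shared list) pair
    doc, req_list = doc_and_reqs
    matched = any(req['request_name'] == doc for req in req_list)
    return doc if matched else False

if __name__ == '__main__':
    docs = ['req_A', 'req_B', 'req_C']
    req_list = [{'request_name': 'req_A'}, {'request_name': 'req_C'}]
    pool = multiprocessing.Pool(2)
    # pair every doc with the same req_list (itertools.izip in the Python 2 original)
    results = pool.map(update_one, zip(docs, itertools.repeat(req_list, len(docs))))
    pool.close()
    pool.join()
    print filter(lambda item: item is not False, results)  # ['req_A', 'req_C']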