# NOTE(review): collapsed / whitespace-mangled fragment — the original line
# breaks were lost. It contains (a) the tail of a plotting function that is not
# visible in this chunk: an ETA extrapolation from fit parameters a (offset) and
# b (slope), printing the estimated weeks/days to reach Nexpected, then an
# idle `while True: time.sleep(1)` loop; and (b) the `if __name__ == "__main__"`
# script entry point that fetches all docs from the stats couch at
# cms-pdmv-golem.cern.ch, filters for one hard-coded test request name (the
# second `test=` assignment overrides the first), and calls plotGrowth on it.
# Restore the original line structure before running; kept verbatim below.
#extrap.SetRange() extrap.Draw("same") a=extrap.GetParameter(0) b=extrap.GetParameter(1) if a!=0: eta=(Nexpected-a)/b print "ETA is:",eta,"weeks",eta*7.,"days" else: print "cannot compute eta" while True: time.sleep(1) else: print one,"has still no entries" if __name__ == "__main__": i=Interface('http://cms-pdmv-golem.cern.ch:5984/stats') allDocs=i.get_all_files() docs = [doc['id'] for doc in allDocs['rows']] test='vlimant_Winter532012BMuOniaParked_ASGCPrio1_537p5_130122_195107_1235' test='nnazirid_BPH-Summer12_DR53X-00091_T1_FR_CCIN2P3_MSS_000_v1__130121_143546_6312' #test='nnazirid_HIG-Summer12_DR53X-00956_T1_FR_CCIN2P3_MSS_000_v1__130121_153018_5646' for one in docs: if one !=test: continue thisDoc=i.get_file_info_withrev(one) plotGrowth(thisDoc,i,wait=True)
def main_do( options ): if options.check: #we check if this script is already running with same parameters# checks=['ps -f -u $USER'] for arg in sys.argv[1:]: checks.append('grep "%s"'%(arg.split('/')[-1].replace('--',''))) checks.append('grep -v grep') c = " | ".join(checks) check=filter(None,os.popen("|".join(checks)).read().split('\n')) if len(check)!=1: print "already running with that exact setting" print check sys.exit(1) else: print "ok to operate" start_time = time.asctime() global statsCouch, docs, FORCE #interface to the couchDB statsCouch = Interface(options.db+':5984/stats') ## get from stats couch the list of requests print "Getting all stats ..." allDocs = statsCouch.get_view('all') docs = [doc['id'] for doc in allDocs['rows']] #remove the _design/stats docs = filter(lambda doc : not doc.startswith('_'), docs) print "... done" nproc = 5 limit = None if options.test: limit = 10 if options.do == 'insert': ## get from wm couch from statsMonitoring import parallel_test,get_requests_list print "Getting all req ..." req_list = get_requests_list() print "... 
done" ## insert new requests, not already in stats couch into stats couch #insertAll(req_list,docs,options.search,limit) if options.search: req_list = filter( lambda req : options.search in req["request_name"], req_list ) #print len(req_list) #skip malformated ones req_list = filter( lambda req : "status" in req, req_list ) #print len(req_list) #take only the ones not already in there req_list = filter( lambda req : req["request_name"] not in docs, req_list ) #print len(req_list) #skip trying to insert aborted and rejected or failed #req_list = filter( lambda req : not req["status"] in ['aborted','rejected','failed','aborted-archived','rejected-archived','failed-archived'], req_list ) req_list = filter( lambda req : not req["status"] in ['aborted','rejected','failed'], req_list ) #print len(req_list) #do not update TaskChain request statuses #req_list = filter( lambda req : 'type' in req and req['type']!='TaskChain', req_list) #print len(req_list) pprint.pprint(req_list) if limit: req_list = req_list[0:limit] #print len(req_list) newentries = 0 print "Dispaching", len(req_list), "requests to", str(nproc), "processes..." pool = multiprocessing.Pool(nproc) results = pool.map(insertOne, req_list) print "End dispatching!" results = filter(lambda item : item != False, results) print len(results), "inserted" print str(results) """ showme='' for r in results: showme+='\t'+r+'\n' print showme """ elif options.do =='kill' or options.do =='list' : ## get from wm couch from statsMonitoring import parallel_test,get_requests_list print "Getting all req ..." req_list = get_requests_list() print "... done" removed = [] if options.search: req_list = filter(lambda req : options.search in req["request_name"], req_list) for r in req_list: print "Found", r['request_name'], "in status", (r['status'] if 'status' in r else 'undef'), "?" if options.do == 'kill': #print "killing",r['request_name'],"in status",(r['status'] if 'status' in r else 'undef'),"?" 
docid = r['request_name'] if docid in docs and not docid in removed: thisDoc = statsCouch.get_file_info(docid) print "removing record for docid" statsCouch.delete_file_info(docid, thisDoc['_rev']) removed.append(docid) else: print "nothing to kill" elif options.do == 'update': __newest = True if options.search: __newest = False ## get from wm couch from statsMonitoring import parallel_test,get_requests_list print "Getting all req ..." req_list = get_requests_list(not_in_wmstats=options.nowmstats, newest=__newest) print "... done" ## unthreaded #updateSeveral(docs,req_list,pattern=None) if options.mcm: sys.path.append('/afs/cern.ch/cms/PPD/PdmV/tools/McM/') from rest import restful mcm = restful(dev=False, cookie='/afs/cern.ch/user/p/pdmvserv/private/prod-cookie.txt') rs = mcm.getA('requests', query='status=submitted') rids = map(lambda d : d['prepid'], rs) print "Got", len(rids), "to update from mcm" #print len(docs),len(req_list) #print map( lambda docid : any( map(lambda rid : rid in doc, rids)), docs) docs = filter(lambda docid : any(map(lambda rid : rid in docid, rids)), docs) if not len(docs): req_list = filter(lambda req : any(map(lambda rid : rid in req["request_name"], rids)), req_list) if options.search: if options.force: FORCE = True docs = filter(lambda docid : options.search in docid, docs) if not len(docs): req_list = filter(lambda req : options.search in req["request_name"], req_list) if len(req_list): pprint.pprint(req_list) if limit: docs = docs[0:limit] repeated_req_list = itertools.repeat(req_list, len(docs)) print "Dispaching", len(docs), "requests to ", str(nproc), "processes..." pool = multiprocessing.Pool(nproc) results = pool.map(updateOneIt, itertools.izip(docs, repeated_req_list)) print "End dispatching!" 
if options.search: dump = dumpSome(docs, limit) print "Result from update with search" pprint.pprint(dump) results = filter( lambda item : item != False, results) print len(results), "updated" print results print "\n\n" ##udpdate the growth plots ??? from growth import plotGrowth for r in results: try: withRevisions = statsCouch.get_file_info_withrev(r) plotGrowth(withRevisions,statsCouch,force=FORCE) ## notify McM for update !! if (withRevisions['pdmv_prep_id'].strip() not in ['No-Prepid-Found','','None']) and options.inspect and '_' not in withRevisions['pdmv_prep_id']: inspect = 'curl -s -k --cookie ~/private/prod-cookie.txt https://cms-pdmv.cern.ch/mcm/restapi/requests/inspect/%s' % withRevisions['pdmv_prep_id'] os.system(inspect) ## he we should trigger McM update if request is in done. ## because inspection on done doesn't exists. if (withRevisions['pdmv_type'] != 'Resubmission' and withRevisions['pdmv_prep_id'].strip() not in ['No-Prepid-Found', '', 'None', '_'] and withRevisions['pdmv_status_from_reqmngr'] == "normal-archived"): ## we should trigger this only if events_in_das was updated for done update_comm = 'curl -s -k --cookie ~/private/prod-cookie.txt https://cms-pdmv.cern.ch/mcm/restapi/requests/update_stats/%s/no_refresh' % withRevisions['pdmv_prep_id'] print "Triggering McM completed_evts syncing for a done request %s" % ( withRevisions['pdmv_prep_id']) os.system(update_comm) except: print "failed to update growth for", r print traceback.format_exc() print "\n\n" ## set in the log file #serves as forceupdated ! print "start time: ", start_time print "logging updating time:", time.asctime() l = open('stats.log','a') l.write(time.asctime()+'\n') l.close()