def rejector(url, specific, options=None): if specific.startswith('/'): pass else: wfo = session.query(Workflow).filter(Workflow.name == specific).first() if not wfo: print "cannot reject", spec return results = [] wfi = workflowInfo(url, wfo.name) if wfi.request['RequestStatus'] in [ 'assignment-approved', 'new', 'completed' ]: #results.append( reqMgrClient.rejectWorkflow(url, wfo.name)) reqMgrClient.rejectWorkflow(url, wfo.name) else: #results.append( reqMgrClient.abortWorkflow(url, wfo.name)) reqMgrClient.abortWorkflow(url, wfo.name) datasets = wfi.request['OutputDatasets'] for dataset in datasets: if options.keep: print "keeping", dataset, "in its current status" else: results.append( setDatasetStatusDBS3.setStatusDBS3( 'https://cmsweb.cern.ch/dbs/prod/global/DBSWriter', dataset, 'INVALID', None)) if all(map(lambda result: result in ['None', None], results)): wfo.status = 'forget' session.commit() print wfo.name, "and", datasets, "are rejected" if options and options.clone: schema = wfi.getSchema() schema['Requestor'] = os.getenv('USER') schema['Group'] = 'DATAOPS' if 'ProcessingVersion' in schema: schema['ProcessingVersion'] += 1 else: schema['ProcessingVersion'] = 2 ##schema.pop('RequestDate') ## ok then, let's not reset the time stamp if options.Memory: schema['Memory'] = options.Memory response = reqMgrClient.submitWorkflow(url, schema) m = re.search("details\/(.*)\'", response) if not m: print "error in cloning", wfo.name print response return newWorkflow = m.group(1) data = reqMgrClient.setWorkflowApproved(url, newWorkflow) print data wfo.status = 'trouble' session.commit() else: print "error in rejecting", wfo.name, results
def main(): #Create option parser usage = "usage: %prog (-w workflow|-f filelist) (-t TASK|--all) [--tesbed]" parser = OptionParser(usage=usage) parser.add_option("-f","--file", dest="file", default=None, help="Text file with a list of workflows") parser.add_option("-w","--workflow", default=None, help="Coma separated list of wf to handle") parser.add_option("-t","--task", default=None, help="Coma separated task to be recovered") parser.add_option("-a","--all", help="Make acdc for all tasks to be recovered",default=False, action='store_true') parser.add_option("-m","--memory", dest="memory", default=None, type=float, help="Memory to override the original request memory") parser.add_option("--testbed", default=False, action="store_true") (options, args) = parser.parse_args() global url url = testbed_url if options.testbed else prod_url wfs = None if options.file: wfs = [l.strip() for l in open(options.file) if l.strip()] elif options.workflow: wfs = options.workflow.split(',') else: parser.error("Either provide a -f filelist or a -w workflow") sys.exit(1) if (not wfs) or (not options.task and not options.all): parser.error("Provide the -w Workflow Name and the -t Task Name or --all") sys.exit(1) for wfname in wfs: wfi = workflowInfo(url, wfname) if options.task == 'all' or options.all: where,how_much,how_much_where = wfi.getRecoveryInfo() tasks = sorted(how_much.keys()) else: tasks = [('/%s/%s'%(wfname,task)).replace('//','/') for task in options.task.split(',')] created = {} print "Workflow:",wfname print "Tasks:",tasks for task in tasks: r = makeACDC(url, wfi, task, options.memory) if not r: print "Error in creating ACDC for",task,"on",wfname break created[task] = r if len(created)!=len(tasks): print "Error in creating all required ACDCs" sys.exit(1) print "Created:" for task in created: print created[task],"for",task
def rejector(url, specific, options=None): up = componentInfo() if specific.startswith('/'): pass else: wfo = session.query(Workflow).filter(Workflow.name == specific).first() if not wfo: print "cannot reject",spec return results=[] wfi = workflowInfo(url, wfo.name) reqMgrClient.invalidateWorkflow(url, wfo.name, current_status=wfi.request['RequestStatus']) #if wfi.request['RequestStatus'] in ['assignment-approved','new','completed']: # #results.append( reqMgrClient.rejectWorkflow(url, wfo.name)) # reqMgrClient.rejectWorkflow(url, wfo.name) #else: # #results.append( reqMgrClient.abortWorkflow(url, wfo.name)) # reqMgrClient.abortWorkflow(url, wfo.name) datasets = wfi.request['OutputDatasets'] for dataset in datasets: if options.keep: print "keeping",dataset,"in its current status" else: results.append( setDatasetStatus(dataset, 'INVALID') ) if all(map(lambda result : result in ['None',None,True],results)): wfo.status = 'forget' session.commit() print wfo.name,"and",datasets,"are rejected" if options and options.clone: schema = wfi.getSchema() schema['Requestor'] = os.getenv('USER') schema['Group'] = 'DATAOPS' schema['OriginalRequestName'] = wfo.name if 'ProcessingVersion' in schema: schema['ProcessingVersion']+=1 else: schema['ProcessingVersion']=2 ##schema.pop('RequestDate') ## ok then, let's not reset the time stamp if options.Memory: schema['Memory'] = options.Memory response = reqMgrClient.submitWorkflow(url, schema) m = re.search("details\/(.*)\'",response) if not m: print "error in cloning",wfo.name print response return newWorkflow = m.group(1) data = reqMgrClient.setWorkflowApproved(url, newWorkflow) print data wfo.status = 'trouble' session.commit() else: print "error in rejecting",wfo.name,results
def invalidator(url, invalid_status='INVALID'):
    """Poll McM for 'new' invalidation objects and act on them: reject or
    abort invalidated workflows in ReqMgr, and set invalidated datasets to
    *invalid_status* in DBS.

    :param url: ReqMgr base url
    :param invalid_status: DBS status to set on invalidated datasets
    """
    mcm = McMClient(dev=False)
    invalids = mcm.getA('invalidations',query='status=new')
    print len(invalids),"Object to be invalidated"
    for invalid in invalids:
        acknowledge= False
        if invalid['type'] == 'request':
            ## a workflow was invalidated in McM
            wfn = invalid['object']
            print "need to invalidate the workflow",wfn
            wfo = session.query(Workflow).filter(Workflow.name == wfn).first()
            if wfo:
                ## set forget of that thing (although checkor will recover from it)
                wfo.status = 'forget'
                session.commit()
            ## NOTE(review): assuming the reject/abort below applies even when
            ## the workflow is not in the local db — confirm original nesting
            wfi = workflowInfo(url, wfn)
            success = "not rejected"
            ## reject while still before assignment (or completed), abort otherwise
            if wfi.request['RequestStatus'] in ['assignment-approved','new','completed']:
                success = reqMgrClient.rejectWorkflow(url, wfn)
                pass
            else:
                success = reqMgrClient.abortWorkflow(url, wfn)
                pass
            print success
            acknowledge= True
        elif invalid['type'] == 'dataset':
            ## a dataset was invalidated in McM
            dataset = invalid['object']
            ## skip placeholder dataset names
            if 'None-' in dataset: continue
            if 'FAKE-' in dataset: continue
            print "setting",dataset,"to",invalid_status
            success = "not invalidated"
            success = setDatasetStatusDBS3.setStatusDBS3('https://cmsweb.cern.ch/dbs/prod/global/DBSWriter', dataset, invalid_status, None)
            print success
            ## make a delete request from everywhere we can find ?
            acknowledge= True
        else:
            print "\t\t",invalid['type']," type not recognized"
        if acknowledge:
            ## acknoldge invalidation in mcm, provided we can have the api
            print "No acknowledgment api yet available"
def releasor(): if duplicateLock() : return SI = siteInfo() CI = campaignInfo() LI = lockInfo() tiers_no_custodial = ['MINIADOSIM'] wfs = [] for fetch in ['done','forget']: wfs.extend( session.query(Workflow).filter(Workflow.status==fetch).all() ) for wfo in wfs: wfi = workflowInfo(url, wfo.name ) announced_log = filter(lambda change : change["Status"] in ["closed-out","normal-archived","announced"],wfi.request['RequestTransition']) if not announced_log: print "Cannot figure out when",wfo.name,"was finished" continue now = time.mktime(time.gmtime()) / (60*60*24.) then = announced_log[-1]['UpdateTime'] / (60.*60.*24.) if (now-then) <2: print "workflow",wfo.name, "finished",now-then,"days ago. Too fresh to clean" continue else: print "workflow",wfo.name,"has finished",now-then,"days ago." (_,primaries,_,secondaries) = wfi.getIO() outputs = wfi.request['OutputDatasets'] datasets_to_check = list(primaries)+list(secondaries)+outputs for dataset in datasets_to_check: (_,_,_,tier) = dataset.split('/') ## check custodial if required if tier not in tiers_no_custodial: ## check not used anymore by anything ## unlock output and input everywhere if so pass
def transferor(url ,specific = None, talk=True, options=None):
    """Feed input data placement: pick workflows in 'considered' status,
    work out how many copies of their primary/secondary input are needed,
    create PhEDEx replica ('prestaging') requests, and move the workflows
    to 'staging'/'staged' in the local database.

    :param url: ReqMgr base url
    :param specific: only consider workflows whose name contains this string
    :param talk: verbose printing of per-workflow decisions
    :param options: optparse-like options; uses .test, .go, .maxworkflows,
                    .maxstaging, .maxtransfer, .maxstagingpersite, .tosites,
                    .chop, .chopsize, .stop
    """
    if userLock(): return
    if duplicateLock(): return

    use_mcm = True
    up = componentInfo(mcm=use_mcm, soft=['mcm'])
    if not up.check(): return
    use_mcm = up.status['mcm']

    ## options.test means dry-run: no replica requests, no db commits
    if options and options.test:
        execute = False
    else:
        execute = True

    SI = siteInfo()
    CI = campaignInfo()
    NLI = newLockInfo()
    mcm = McMClient(dev=False)
    dss = DSS()

    #allowed_secondary = UC.get('')

    ## count how many workflows are already in flight, to enforce the caps below
    print "counting all being handled..."
    being_handled = len(session.query(Workflow).filter(Workflow.status == 'away').all())
    being_handled += len(session.query(Workflow).filter(Workflow.status.startswith('stag')).all())
    being_transfered = len(session.query(Workflow).filter(Workflow.status == 'staging').all())
    being_handled += len(session.query(Workflow).filter(Workflow.status.startswith('assistance-')).all())
    max_to_handle = options.maxworkflows
    max_to_transfer = options.maxstaging

    allowed_to_handle = max(0,max_to_handle - being_handled)
    allowed_to_transfer = max(0,max_to_transfer - being_transfered)
    wf_buffer = 5
    if allowed_to_handle<=wf_buffer: ## buffer for having several wf per transfer
        print "Not allowed to run more than",max_to_handle,"at a time. Currently",being_handled,"and",wf_buffer,"buffer"
    else:
        print being_handled,"already being handled",max_to_handle,"max allowed,",allowed_to_handle,"remaining","and",wf_buffer,"buffer"
    if allowed_to_transfer <= wf_buffer:
        print "Not allowed to transfer more than",max_to_transfer,"at a time. Currently",being_transfered,"and",wf_buffer,"buffer"
    else:
        print being_transfered,"already being transfered",max_to_transfer,"max allowed,",allowed_to_transfer,"remaining","and",wf_buffer,"buffer"
    print "... done"

    ## site -> list of datasets/blocks to be replicated there
    all_transfers=defaultdict(list)
    workflow_dependencies = defaultdict(set) ## list of wf.id per input dataset
    wfs_and_wfh=[]
    print "getting all wf to consider ..."
    ## pre-fetch all assignment-approved requests to avoid one call per workflow
    cache = getWorkflows(url, 'assignment-approved', details=True)
    for wfo in session.query(Workflow).filter(Workflow.status.startswith('considered')).all():
        print "\t",wfo.name
        if specific and not specific in wfo.name: continue
        cache_r =filter(lambda d:d['RequestName']==wfo.name, cache)
        if len(cache_r):
            wfs_and_wfh.append( (wfo, workflowInfo( url, wfo.name, spec=False, request = cache_r[0]) ) )
        else:
            wfs_and_wfh.append( (wfo, workflowInfo( url, wfo.name, spec=False) ) )
    print "... done"

    transfers_per_sites = defaultdict(int)
    input_sizes = {}
    ignored_input_sizes = {}
    input_cput = {}
    input_st = {}
    ## list the size of those in transfer already
    in_transfer_priority=None
    min_transfer_priority=None
    print "getting all wf in staging ..."
    ## datasets whose transfers look stuck: do not count them in the budget
    stucks = json.loads(open('%s/stuck_transfers.json'%monitor_dir).read())
    for wfo in session.query(Workflow).filter(Workflow.status=='staging').all():
        wfh = workflowInfo( url, wfo.name, spec=False)
        #(lheinput,primary,parent,secondary) = wfh.getIO()
        #sites_allowed = getSiteWhiteList( (lheinput,primary,parent,secondary) )
        (lheinput,primary,parent,secondary,sites_allowed) = wfh.getSiteWhiteList()
        for site in sites_allowed: ## we should get the actual transfer destination instead of the full white list
            transfers_per_sites[site] += 1
        #input_cput[wfo.name] = wfh.getComputingTime()
        #input_st[wfo.name] = wfh.getSystemTime()
        for prim in primary:
            ds_s = dss.get( prim )
            if prim in stucks:
                sendLog('transferor', "%s appears stuck, so not counting it %s [GB]"%( prim, ds_s), wfi=wfh)
                ignored_input_sizes[prim] = ds_s
            else:
                input_sizes[prim] = ds_s
                sendLog('transferor', "%s needs %s [GB]"%( wfo.name, ds_s), wfi=wfh)
        ## track the max/min priority of what is already staging
        if in_transfer_priority==None:
            in_transfer_priority = int(wfh.request['RequestPriority'])
        else:
            in_transfer_priority = max(in_transfer_priority, int(wfh.request['RequestPriority']))
        if min_transfer_priority==None:
            min_transfer_priority = int(wfh.request['RequestPriority'])
        else:
            min_transfer_priority = min(min_transfer_priority, int(wfh.request['RequestPriority']))

    if min_transfer_priority==None or in_transfer_priority ==None:
        print "nothing is lining up for transfer"
        sendEmail("no request in staging","no request in staging")
        return
        pass

    ## best-effort summary printing; never fatal
    try:
        print "Ignored input sizes"
        ignored_values = list(ignored_input_sizes.items())
        ignored_values.sort( key = lambda i : i[1] )
        print "\n".join( map(str, ignored_values ) )
        print "Considered input sizes"
        considered_values = list(input_sizes.items())
        considered_values.sort( key = lambda i : i[1] )
        print "\n".join( map(str, considered_values) )
    except Exception as e:
        print "trying to print the summary of input size"
        print str(e)

    print "... done"
    print "Max priority in transfer already",in_transfer_priority
    print "Min priority in transfer already",min_transfer_priority
    print "transfers per sites"
    print json.dumps( transfers_per_sites, indent=2)
    in_transfer_already = sum(input_sizes.values())
    cput_in_transfer_already = sum(input_cput.values())
    st_in_transfer_already = sum(input_st.values())

    ## list the size of all inputs
    primary_input_per_workflow_gb = defaultdict(float)
    print "getting all input sizes ..."
    for (wfo,wfh) in wfs_and_wfh:
        (_,primary,_,_) = wfh.getIO()
        #input_cput[wfo.name] = wfh.getComputingTime()
        #input_st[wfo.name] = wfh.getSystemTime()
        for prim in primary:
            ## do not count it if it appears stalled !
            prim_size = dss.get( prim )
            input_sizes[prim] = prim_size
            primary_input_per_workflow_gb[wfo.name] += prim_size
    print "... done"

    # shuffle first by name
    random.shuffle( wfs_and_wfh )

    # Sort smallest transfers first; allows us to transfer as many as possible workflows.
    def prio_and_size( i, j):
        ## comparator: priority first, then smaller input size first
        if int(i[1].request['RequestPriority']) == int(j[1].request['RequestPriority']):
            return cmp(int(primary_input_per_workflow_gb.get(j[0].name, 0)), int(primary_input_per_workflow_gb.get(i[0].name, 0)) )
        else:
            return cmp(int(i[1].request['RequestPriority']),int(j[1].request['RequestPriority']))

    #wfs_and_wfh.sort(cmp = prio_and_size, reverse=True)
    #wfs_and_wfh.sort(cmp = lambda i,j : cmp(int(primary_input_per_workflow_gb.get(i[0].name, 0)), int(primary_input_per_workflow_gb.get(j[0].name, 0)) ))
    #sort by priority higher first
    wfs_and_wfh.sort(cmp = lambda i,j : cmp(int(i[1].request['RequestPriority']),int(j[1].request['RequestPriority']) ), reverse=True)

    cput_grand_total = sum(input_cput.values())
    cput_to_transfer = cput_grand_total - cput_in_transfer_already
    st_grand_total = sum(input_st.values())
    st_to_transfer = st_grand_total - st_in_transfer_already
    print "%15.4f [CPU h] worth already in transfer"%cput_in_transfer_already
    print "%15.4f [CPU h] worth is the current requested transfer load"%cput_to_transfer
    print "%15.4f [h] worth of absolute system time in transfer"%( cput_in_transfer_already / SI.availableSlots())
    print "%15.4f [h] worth of absolute system time is the current requested transfer load"%( cput_to_transfer / SI.availableSlots())
    print "%15.4f [h] worth of theoritical system time in transfer"%( st_in_transfer_already )
    print "%15.4f [h] worth of theoritical system time is the current requested transfer load"%( st_to_transfer )

    grand_total = sum(input_sizes.values())
    to_transfer = grand_total - in_transfer_already
    grand_transfer_limit = options.maxtransfer
    #grand_transfer_limit = SI.total_disk()*0.25*1024## half of the free sapce in TB->GB
    transfer_limit = grand_transfer_limit - in_transfer_already
    print "%15.4f GB already being transfered"%in_transfer_already
    print "%15.4f GB is the current requested transfer load"%to_transfer
    print "%15.4f GB is the global transfer limit"%grand_transfer_limit
    print "%15.4f GB is the available limit"%transfer_limit

    max_staging_per_site = options.maxstagingpersite

    # the max priority value per dataset.
    max_priority = defaultdict(int)
    needs_transfer=0 ## so that we can count'em
    passing_along = 0
    transfer_sizes={}
    went_over_budget=False
    destination_cache = {}
    no_goes = set()

    max_per_round = UC.get('max_per_round').get('transferor',None)
    ## NOTE(review): 'spec' looks undefined here — probably meant 'specific'; verify
    if max_per_round and not spec:
        wfs_and_wfh = wfs_and_wfh[:max_per_round]

    for (wfo,wfh) in wfs_and_wfh:
        print wfo.name,"to be transfered with priority",wfh.request['RequestPriority']

        if wfh.request['RequestStatus']!='assignment-approved':
            if wfh.request['RequestStatus'] in ['aborted','rejected','rejected-archived','aborted-archived']:
                wfo.status = 'trouble' ## so that we look or a replacement
            else:
                wfo.status = 'away'
            wfh.sendLog('transferor', '%s in status %s, setting %s'%( wfo.name,wfh.request['RequestStatus'],wfo.status))
            continue

        (_,primary,_,_) = wfh.getIO()
        this_load=sum([input_sizes[prim] for prim in primary])
        no_budget = False
        ## global volume budget check
        if ( this_load and (sum(transfer_sizes.values())+this_load > transfer_limit or went_over_budget ) ):
            if went_over_budget:
                wfh.sendLog('transferor', "Transfer has gone over bubget.")
            else:
                wfh.sendLog('transferor', "Transfer will go over bubget.")
            wfh.sendLog('transferor', "%15.4f GB this load, %15.4f GB already this round, %15.4f GB is the available limit"%(this_load, sum(transfer_sizes.values()), transfer_limit))
            #if sum(transfer_sizes.values()) > transfer_limit:
            went_over_budget = True
            if in_transfer_priority!=None and min_transfer_priority!=None:
                if int(wfh.request['RequestPriority']) >= in_transfer_priority and min_transfer_priority!=in_transfer_priority:
                    wfh.sendLog('transferor',"Higher priority sample %s >= %s go-on over budget"%( wfh.request['RequestPriority'], in_transfer_priority))
                else:
                    if not options.go:
                        wfh.sendLog('transferor',"%s minimum priority %s < %s : stop"%( min_transfer_priority,wfh.request['RequestPriority'],in_transfer_priority))
                        no_budget = True

        ## throtlle by campaign go
        no_go = False
        if not wfh.go(log=True) and not options.go:
            no_go = True
            no_goes.add( wfo.name )

        allowed_secondary = set()
        for campaign in wfh.getCampaigns():
            if campaign in CI.campaigns and 'secondaries' in CI.campaigns[campaign]:
                allowed_secondary.update( CI.campaigns[campaign]['secondaries'] )
        ## NOTE(review): 'secondary' here still holds the value from the earlier
        ## staging loop, not this workflow's secondaries (set further below) — verify
        if secondary:
            if (secondary and allowed_secondary) and (set(secondary)&allowed_secondary!=set(secondary)):
                wfh.sendLog('assignor','%s is not an allowed secondary'%(', '.join(set(secondary)-allowed_secondary)))
                no_go = True

        if no_go:
            continue

        ## check if the batch is announced
        def check_mcm(wfn):
            ## returns (announced, is_real) for the enclosing wfo
            ## NOTE(review): queries use the closed-over wfo.name, not the
            ## wfn parameter — presumably equivalent at the call site; verify
            announced=False
            is_real=False
            if not wfn.startswith('pdmvserv'):
                is_real = True
            try:
                for b in mcm.getA('batches',query='contains=%s'% wfo.name):
                    is_real = True
                    if b['status']=='announced':
                        announced=True
                        break
            except:
                ## one retry on a failed mcm call
                try:
                    for b in mcm.getA('batches',query='contains=%s'% wfo.name):
                        is_real = True
                        if b['status']=='announced':
                            announced=True
                            break
                except:
                    print "could not get mcm batch announcement, assuming not real"
            return announced,is_real

        if not use_mcm:
            announced,is_real = False,True
        else:
            if wfh.request['RequestType'] in ['ReReco']:
                announced,is_real = True,True
            else:
                announced,is_real = check_mcm( wfo.name )

        if not announced:
            wfh.sendLog('transferor', "does not look announced.")

        if not is_real:
            wfh.sendLog('transferor', "does not appear to be genuine.")
            ## prevent any duplication. if the wf is not mentioned in any batch, regardless of status
            continue

        ## check on a grace period
        injection_time = time.mktime(time.strptime('.'.join(map(str,wfh.request['RequestDate'])),"%Y.%m.%d.%H.%M.%S")) / (60.*60.)
        now = time.mktime(time.gmtime()) / (60.*60.)
        if float(now - injection_time) < 4.:
            if not options.go and not announced:
                wfh.sendLog('transferor', "It is too soon to start transfer: %3.2fH remaining"%(now - injection_time))
                continue

        ## cap on the number of workflows being handled at once
        if passing_along >= allowed_to_handle:
            #if int(wfh.request['RequestPriority']) >= in_transfer_priority and min_transfer_priority!=in_transfer_priority:
            if in_transfer_priority!=None and min_transfer_priority!=None:
                if int(wfh.request['RequestPriority']) >= in_transfer_priority and int(wfh.request['RequestPriority']) !=min_transfer_priority:
                    ## higher priority, and not only this priority being transfered
                    wfh.sendLog('transferor',"Higher priority sample %s >= %s go-on over %s"%( wfh.request['RequestPriority'], in_transfer_priority, max_to_handle))
                else:
                    wfh.sendLog('transferor'," Not allowed to pass more than %s at a time. Currently %s handled, and adding %s"%( max_to_handle, being_handled, passing_along))
                    if not options.go:
                        ## should not allow to jump that fence
                        break

        ## cap on the number of workflows in transfer at once
        if this_load and needs_transfer >= allowed_to_transfer:
            if in_transfer_priority!=None and min_transfer_priority!=None:
                if int(wfh.request['RequestPriority']) >= in_transfer_priority and int(wfh.request['RequestPriority']) !=min_transfer_priority:
                    ## higher priority, and not only this priority being transfered
                    wfh.sendLog('transferor',"Higher priority sample %s >= %s go-on over %s"%(wfh.request['RequestPriority'], in_transfer_priority,max_to_transfer))
                else:
                    wfh.sendLog('transferor',"Not allowed to transfer more than %s at a time. Currently %s transfering, and adding %s"%( max_to_transfer, being_transfered, needs_transfer))
                    if not options.go:
                        no_budget = True

        if no_budget:
            continue

        ## the site white list considers site, campaign, memory and core information
        (lheinput,primary,parent,secondary,sites_allowed) = wfh.getSiteWhiteList()
        if options and options.tosites:
            sites_allowed = options.tosites.split(',')

        for dataset in list(primary)+list(parent)+list(secondary):
            ## lock everything flat
            NLI.lock( dataset )

        if not sites_allowed:
            wfh.sendLog('transferor',"not possible site to run at")
            #sendEmail("no possible sites","%s has no possible sites to run at"%( wfo.name ))
            sendLog('transferor',"%s has no possible sites to run at"%( wfo.name ),level='critical')
            continue

        blocks = []
        if 'BlockWhitelist' in wfh.request and wfh.request['BlockWhitelist']:
            blocks = wfh.request['BlockWhitelist']
        if 'RunWhitelist' in wfh.request and wfh.request['RunWhitelist']:
            ## augment with run white list
            for dataset in primary:
                blocks = list(set( blocks + getDatasetBlocks( dataset, runs=wfh.request['RunWhitelist'] ) ))
        if 'LumiList' in wfh.request and wfh.request['LumiList']:
            ## augment with the lumi white list
            ## NOTE(review): 'dataset' here leaks from the loop above —
            ## presumably this should also loop over primary; verify
            blocks = list(set( blocks + getDatasetBlocks( dataset, lumis= wfh.request['LumiList'] ) ))

        if blocks:
            print "Reading",len(blocks),"in block whitelist"

        can_go = True
        staging=False
        allowed=True
        primary_destinations = set()
        if primary:
            copies_needed_from_CPUh,CPUh = wfh.getNCopies()
            if talk:
                print wfo.name,'reads',', '.join(primary),'in primary'
            ## chope the primary dataset
            for prim in primary:
                ## keep track of what needs what
                workflow_dependencies[prim].add( wfo.id )
                max_priority[prim] = max(max_priority[prim],int(wfh.request['RequestPriority']))
                wfh.sendLog('transferor',"Would make %s from cpu requirement %s"%( copies_needed_from_CPUh, CPUh))
                copies_needed = copies_needed_from_CPUh
                if 'Campaign' in wfh.request and wfh.request['Campaign'] in CI.campaigns and 'maxcopies' in CI.campaigns[wfh.request['Campaign']]:
                    ## campaign configuration can cap the number of copies
                    copies_needed_from_campaign = CI.campaigns[wfh.request['Campaign']]['maxcopies']
                    copies_needed = min(copies_needed_from_campaign, copies_needed)
                    wfh.sendLog('transferor',"Maxed to %s by campaign configuration %s"%( copies_needed, wfh.request['Campaign']))

                ### new ways of making the whole thing
                destinations,all_block_names = getDatasetDestinations(url, prim, within_sites = [SI.CE_to_SE(site) for site in sites_allowed], only_blocks=blocks )
                print json.dumps(destinations, indent=2)

                ## get where the dataset is in full and completed
                prim_location = [site for (site,info) in destinations.items() if info['completion']==100 and info['data_fraction']==1]
                ## the rest is places it is going to be
                prim_destination = [site for site in destinations.keys() if not site in prim_location]

                if len(prim_location) >= copies_needed:
                    wfh.sendLog('transferor',"The input is all fully in place at %s sites %s"%( len(prim_location), sorted(prim_location)))
                    continue
                copies_needed = max(0,copies_needed - len(prim_location))
                wfh.sendLog('transferor',"not counting existing copies ; now need %s"% copies_needed)
                copies_being_made = [ sum([info['blocks'].keys().count(block) for site,info in destinations.items() if site in prim_destination]) for block in all_block_names]

                ## phedex request ids of transfers we can latch onto
                latching_on_transfers = set()
                [latching_on_transfers.update(info['blocks'].values()) for site,info in destinations.items() if site in prim_destination]
                latching_on_transfers = list(latching_on_transfers)
                #print latching_on_transfers

                ## figure out where all this is going to go
                prim_to_distribute = [site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location]
                prim_to_distribute = [site for site in prim_to_distribute if not SI.CE_to_SE(site) in prim_destination]
                ## take out the ones that cannot receive transfers
                prim_to_distribute = [site for site in prim_to_distribute if not any([osite.startswith(site) for osite in SI.sites_veto_transfer])]
                wfh.sendLog('transferor',"Could be going to: %s"% sorted( prim_to_distribute))
                ## per-site staging slot throttling
                if not prim_to_distribute or any([transfers_per_sites[site] < max_staging_per_site for site in prim_to_distribute]):
                    ## means there is openings let me go
                    print "There are transfer slots available:",[(site,transfers_per_sites[site]) for site in prim_to_distribute]
                    #for site in sites_allowed:
                    #    #increment accross the board, regardless of real destination: could be changed
                    #    transfers_per_sites[site] += 1
                else:
                    if int(wfh.request['RequestPriority']) >= in_transfer_priority and min_transfer_priority!=in_transfer_priority:
                        wfh.sendLog('transferor', "Higher priority sample %s >= %s go-on over transfer slots available"%(wfh.request['RequestPriority'], in_transfer_priority))
                    else:
                        wfh.sendLog('transferor',"Not allowed to transfer more than %s per site at a time. Going overboard for %s"%( max_staging_per_site, sorted([site for site in prim_to_distribute if transfers_per_sites[site]>=max_staging_per_site])))
                        if not options.go:
                            allowed = False
                            break

                ## attach this workflow to the on-going transfers it can reuse
                for latching in latching_on_transfers:
                    tfo = session.query(Transfer).filter(Transfer.phedexid == int(latching)).first()
                    if not tfo:
                        tfo = session.query(Transfer).filter(Transfer.phedexid == -int(latching)).first()
                    if not tfo:
                        tfo = Transfer( phedexid = latching)
                        tfo.workflows_id = []
                        session.add(tfo)
                    else:
                        tfo.phedexid = latching ## make it positive ever

                    if not wfo.id in tfo.workflows_id:
                        print "adding",wfo.id,"to",tfo.id,"with phedexid",latching
                        l = copy.deepcopy( tfo.workflows_id )
                        l.append( wfo.id )
                        tfo.workflows_id = l
                    if not options.test:
                        session.commit()
                    else:
                        session.flush() ## regardless of commit later on, we need to let the next wf feeding on this transfer to see it in query
                    can_go = False
                    transfer_sizes[prim] = input_sizes[prim]
                    staging = True

                # reduce the number of copies required by the on-going full transfer : how do we bootstrap on waiting for them ??
                #copies_needed = max(0,copies_needed - len(prim_destination))
                copies_needed = max(0,copies_needed - min(copies_being_made))
                wfh.sendLog('transferor', "Not counting the copies being made ; then need %s"% copies_needed)
                if copies_needed == 0:
                    wfh.sendLog('transferor', "The output is either fully in place or getting in full somewhere with %s"% latching_on_transfers)
                    can_go = True
                    continue
                elif len(prim_to_distribute)==0:
                    wfh.sendLog('transferor', "We are going to need extra copies, but no destinations seems available")
                    ## relax the destination veto to find somewhere to put the extra copies
                    prim_to_distribute = [site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location]
                    prim_to_distribute = [site for site in prim_to_distribute if not any([osite.startswith(site) for osite in SI.sites_veto_transfer])]

                if len(prim_to_distribute)>0: ## maybe that a parameter we can play with to limit the
                    if not options or options.chop:
                        ### hard include the tape disk andpoint ?
                        #tapes = [site for site in getDatasetPresence( url, prim, vetos=['T0','T2','T3','Disk']) if site.endswith('MSS')]
                        chops,sizes = getDatasetChops(prim, chop_threshold = options.chopsize, only_blocks=blocks)
                        spreading = distributeToSites( chops, prim_to_distribute, n_copies = copies_needed, weights=SI.cpu_pledges, sizes=sizes)
                        transfer_sizes[prim] = sum(sizes)
                        if not spreading:
                            sendLog('transferor','cannot send %s to any site, it cannot fit anywhere'% prim, level='critical')
                            wfh.sendLog('transferor', "cannot send to any site. %s cannot seem to fit anywhere"%(prim))
                            staging=False
                            can_go = False
                    else:
                        ## no chopping: send the whole dataset (or block list) everywhere
                        spreading = {}
                        for site in prim_to_distribute:
                            if blocks:
                                spreading[site]=blocks
                            else:
                                spreading[site]=[prim]
                        transfer_sizes[prim] = input_sizes[prim] ## this is approximate if blocks are specified
                        can_go = False
                    wfh.sendLog('transferor', "selected CE destinations %s"%(sorted( spreading.keys())))
                    for (site,items) in spreading.items():
                        all_transfers[site].extend( items )
                        transfers_per_sites[site] += 1
                        primary_destinations.add( site )

        if not allowed:
            wfh.sendLog('transferor', "Not allowed to move on with")
            continue

        if secondary:
            ## campaign can pin the pile-up to specific locations
            override_sec_destination = []
            if 'SecondaryLocation' in CI.campaigns[wfh.request['Campaign']]:
                override_sec_destination = CI.campaigns[wfh.request['Campaign']]['SecondaryLocation']

            print wfo.name,'reads',', '.join(secondary),'in secondary'
            for sec in secondary:
                workflow_dependencies[sec].add( wfo.id )

                if True:
                    ## new style, failing on minbias
                    if not sec in destination_cache:
                        ## this is barbbaric, and does not show the correct picture on workflow by workflow with different whitelist
                        destination_cache[sec],_ = getDatasetDestinations(url, sec) ## NO SITE WHITE LIST ADDED
                        #destination_cache[sec],_ = getDatasetDestinations(url, sec, within_sites = [SI.CE_to_SE(site) for site in sites_allowed])
                    ## limit to the site whitelist NOW
                    se_allowed = [SI.CE_to_SE(site) for site in sites_allowed]
                    ## NOTE(review): the condition uses 'site' (leaked loop
                    ## variable) instead of the comprehension key 'k' — verify
                    destinations = dict([(k,v) for (k,v) in destination_cache[sec].items() if site in se_allowed])
                    ## truncate location/destination to those making up for >90% of the dataset
                    bad_destinations = [destinations.pop(site) for (site,info) in destinations.items() if info['data_fraction']<0.9]
                    sec_location = [site for (site,info) in destinations.items() if info['completion']>=95]
                    sec_destination = [site for site in destinations.keys() if not site in sec_location]
                else:
                    ## old style
                    presence = getDatasetPresence( url, sec )
                    sec_location = [site for site,pres in presence.items() if pres[1]>90.] ## more than 90% of the minbias at sites
                    subscriptions = listSubscriptions( url ,sec )
                    sec_destination = [site for site in subscriptions]

                ## sites that have neither the data nor a pending subscription
                sec_to_distribute = [site for site in sites_allowed if not any([osite.startswith(site) for osite in sec_location])]
                sec_to_distribute = [site for site in sec_to_distribute if not any([osite.startswith(site) for osite in sec_destination])]
                sec_to_distribute = [site for site in sec_to_distribute if not any([osite.startswith(site) for osite in SI.sites_veto_transfer])]

                if override_sec_destination:
                    ## intersect with where we want the PU to be
                    not_needed_anymore = list(set(sec_to_distribute) - set(override_sec_destination))
                    #sendEmail("secondary superfluous","the dataset %s could be removed from %s"%( sec, not_needed_anymore ))
                    sendLog('transferor', "the dataset %s could be removed from %s"%( sec, not_needed_anymore ))
                    sec_to_distribute = list(set(sec_to_distribute) & set(override_sec_destination))

                if len( sec_to_distribute )>0:
                    print "secondary could go to",sorted(sec_to_distribute)
                    sec_size = dss.get( sec )
                    for site in sec_to_distribute:
                        site_se =SI.CE_to_SE(site)
                        ## only send the secondary where it fits on disk
                        if (SI.disk[site_se]*1024.) > sec_size:
                            all_transfers[site].append( sec )
                            can_go = False
                        else:
                            print "could not send the secondary input to",site_se,"because it is too big for the available disk",SI.disk[site_se]*1024,"GB need",sec_size
                            if primary_destinations and site in primary_destinations:
                                #sendEmail('secondary input too big','%s is too big (%s) for %s (%s)'%( sec, sec_size, site_se, SI.disk[site_se]*1024))
                                sendLog('transferor', '%s is too big (%s) for %s (%s)'%( sec, sec_size, site_se, SI.disk[site_se]*1024), level='critical')
                else:
                    print "the secondary input does not have to be send to site"

        ## is that possible to do something more
        if can_go:
            ## no explicit transfer required this time
            if staging:
                ## but using existing ones
                wfh.sendLog('transferor', "latches on existing transfers, and nothing else, settin staging")
                wfo.status = 'staging'
                needs_transfer+=1
            else:
                wfh.sendLog('transferor', "should just be assigned now to %s"%sorted(sites_allowed))
                wfo.status = 'staged'
            passing_along+=1
            wfh.sendLog('transferor', "setting status to %s"%wfo.status)
            session.commit()
            continue
        else:
            ## there is an explicit transfer required
            if staging:
                ## and also using an existing one
                wfh.sendLog('transferor', "latches on existing transfers")
                if not options.test:
                    wfo.status = 'staging'
                    wfh.sendLog('transferor', "setting status to %s"%wfo.status)
                    session.commit()
            wfh.sendLog('transferor',"needs a transfer")
            needs_transfer+=1
            passing_along+=1

    if no_goes:
        #sendEmail("no go for managing","No go for \n"+"\n".join( no_goes ))
        sendLog('transferor', "No go for \n"+"\n".join( no_goes ), level='critical')

    print "accumulated transfers"
    print json.dumps(all_transfers, indent=2)
    fake_id=-1
    wf_id_in_prestaging=set()

    ## now issue one replica request per destination site
    for (site,items_to_transfer) in all_transfers.iteritems():
        items_to_transfer = list(set(items_to_transfer))

        ## convert to storage element
        site_se = SI.CE_to_SE(site)

        ## site that do not want input datasets
        if site in SI.sites_veto_transfer:
            print site,"does not want transfers"
            continue

        ## throttle the transfer size to T2s ? we'd be screwed by a noPU sample properly configured.

        ## massage a bit the items
        blocks = [it for it in items_to_transfer if '#' in it]
        block_datasets = list(set([it.split('#')[0] for it in blocks]))
        datasets = [it for it in items_to_transfer if not '#' in it]

        details_text = "Making a replica to %s (CE) %s (SE) for"%( site, site_se)

        #print "\t",len(blocks),"blocks"
        ## remove blocks if full dataset is send out
        blocks = [block for block in blocks if not block.split('#')[0] in datasets]
        #print "\t",len(blocks),"needed blocks for",list(set([block.split('#')[0] for block in blocks]))
        #print "\t",len(datasets),"datasets"
        #print "\t",datasets
        details_text += '\n\t%d blocks'%len(blocks)
        details_text += '\n\t%d needed blocks for %s'%( len(blocks), sorted(list(set([block.split('#')[0] for block in blocks]))))
        details_text += '\n\t%d datasets'% len(datasets)
        details_text += '\n\t%s'%sorted(datasets)

        items_to_transfer = blocks + datasets

        if execute:
            sendLog('transferor', details_text)
        else:
            print "Would make a replica to",site,"(CE)",site_se,"(SE) for"
            print details_text

        ## operate the transfer
        if options and options.stop:
            ## ask to move-on
            answer = raw_input('Continue with that ?')
            if not answer.lower() in ['y','yes','go']:
                continue

        if execute:
            priority = 'normal'
            cds = [ds for ds in datasets+block_datasets if ds in max_priority]
            ## dead branch, deliberately disabled with 'and False'
            if cds and False:
                ## I don't think this is working. subscription should be updated on the fly and regularly for raising the priority if needed
                ## decide on an overall priority : that's a bit too large though
                if any([max_priority[ds]>=90000 for ds in cds]):
                    priority = 'high'
                elif all([max_priority[ds]<80000 for ds in cds]):
                    priority = 'low'
            result = makeReplicaRequest(url, site_se, items_to_transfer, 'prestaging', priority=priority)
        else:
            ## dry-run: fabricate an empty phedex response
            result= {'phedex':{'request_created' : []}}
            fake_id-=1

        if not result:
            print "ERROR Could not make a replica request for",site,items_to_transfer,"pre-staging"
            continue
        for phedexid in [o['id'] for o in result['phedex']['request_created']]:
            ## record the new transfer in the local db (phedexid may have been
            ## stored negative to flag it as inactive)
            new_transfer = session.query(Transfer).filter(Transfer.phedexid == int(phedexid)).first()
            if not new_transfer:
                new_transfer = session.query(Transfer).filter(Transfer.phedexid == -int(phedexid)).first()
            print phedexid,"transfer created"
            if not new_transfer:
                new_transfer = Transfer( phedexid = phedexid)
                session.add( new_transfer )
            else:
                new_transfer.phedexid = phedexid ## make it positive again
            new_transfer.workflows_id = set()
            for transfering in list(set(map(lambda it : it.split('#')[0], items_to_transfer))):
                new_transfer.workflows_id.update( workflow_dependencies[transfering] )
            new_transfer.workflows_id = list(new_transfer.workflows_id)
            wf_id_in_prestaging.update(new_transfer.workflows_id)
            session.commit()
            ## auto approve it
            if execute:
                approved = approveSubscription(url, phedexid, [site_se])

    ## finally flip all workflows fed by the new transfers to 'staging'
    for wfid in wf_id_in_prestaging:
        tr_wf = session.query(Workflow).get(wfid)
        if tr_wf and tr_wf.status!='staging':
            if execute:
                tr_wf.status = 'staging'
            if talk:
                print "setting",tr_wf.name,"to staging"
        session.commit()
def assignor(url, specific=None, talk=True, options=None): if userLock() and not options.manual: return mlock = moduleLock() if mlock() and not options.manual: return if not componentInfo().check() and not options.manual: return UC = unifiedConfiguration() CI = campaignInfo() SI = siteInfo() SI = global_SI() ###NLI = newLockInfo() ###if not NLI.free() and not options.go: return LI = lockInfo() #if not LI.free() and not options.go and not options.manual: return n_assigned = 0 n_stalled = 0 wfos = [] fetch_from = [] if specific or options.early: fetch_from.extend(['considered', 'staging']) if specific: fetch_from.extend(['considered-tried']) if options.early: print "Option Early is on" fetch_from.extend(['staged']) if options.from_status: fetch_from = options.from_status.split(',') print "Overriding to read from", fetch_from for status in fetch_from: print "getting wf in", status wfos.extend( session.query(Workflow).filter(Workflow.status == status).all()) print len(wfos) ## in case of partial, go for fetching a list from json ? 
#if options.partial and not specific: # pass aaa_mapping = json.loads(eosRead('%s/equalizor.json' % monitor_pub_dir))['mapping'] all_stuck = set() all_stuck.update( json.loads(eosRead('%s/stuck_transfers.json' % monitor_pub_dir))) max_per_round = UC.get('max_per_round').get('assignor', None) max_cpuh_block = UC.get('max_cpuh_block') # Temporarily switch off prioritization random.shuffle(wfos) ##order by priority instead of random """ if options.early: cache = sorted(getWorkflows(url, 'assignment-approved', details=True), key = lambda r : r['RequestPriority']) cache = [r['RequestName'] for r in cache] def rank( wfn ): return cache.index( wfn ) if wfn in cache else 0 wfos = sorted(wfos, key = lambda wfo : rank( wfo.name ),reverse=True) print "10 first",[wfo.name for wfo in wfos[:10]] print "10 last",[wfo.name for wfo in wfos[-10:]] else: random.shuffle( wfos ) """ for wfo in wfos: if options.limit and (n_stalled + n_assigned) > options.limit: break if max_per_round and (n_stalled + n_assigned) > max_per_round: break if specific: if not any(map(lambda sp: sp in wfo.name, specific.split(','))): continue #if not specific in wfo.name: continue if not options.manual and 'rucio' in (wfo.name).lower(): continue print "\n\n" wfh = workflowInfo(url, wfo.name) if wfh.request['RequestStatus'] in [ 'rejected', 'aborted', 'aborted-completed', 'aborted-archived', 'rejected-archived' ] and wfh.isRelval(): wfo.status = 'forget' session.commit() n_stalled += 1 continue if options.priority and int( wfh.request['RequestPriority']) < options.priority: continue options_text = "" if options.early: options_text += ", early option is ON" wfh.sendLog('assignor', "%s to be assigned %s" % (wfo.name, options_text)) ## the site whitelist takes into account siteInfo, campaignInfo, memory and cores (lheinput, primary, parent, secondary, sites_allowed, sites_not_allowed) = wfh.getSiteWhiteList() output_tiers = list( set([o.split('/')[-1] for o in wfh.request['OutputDatasets']])) if not 
output_tiers: n_stalled += 1 wfh.sendLog('assignor', 'There is no output at all') sendLog('assignor', 'Workflow %s has no output at all' % (wfo.name), level='critical') continue is_stuck = (all_stuck & primary) if is_stuck: wfh.sendLog('assignor', "%s are stuck input" % (','.join(is_stuck))) ## check if by configuration we gave it a GO no_go = False if not wfh.go(log=True) and not options.go: no_go = True allowed_secondary = {} assign_parameters = {} check_secondary = (not wfh.isRelval()) for campaign in wfh.getCampaigns(): if campaign in CI.campaigns: assign_parameters.update(CI.campaigns[campaign]) if campaign in CI.campaigns and 'secondaries' in CI.campaigns[ campaign]: if CI.campaigns[campaign]['secondaries']: allowed_secondary.update( CI.campaigns[campaign]['secondaries']) check_secondary = True if campaign in CI.campaigns and 'banned_tier' in CI.campaigns[ campaign]: banned_tier = list( set(CI.campaigns[campaign]['banned_tier']) & set(output_tiers)) if banned_tier: no_go = True wfh.sendLog( 'assignor', 'These data tiers %s are not allowed' % (','.join(banned_tier))) sendLog('assignor', 'These data tiers %s are not allowed' % (','.join(banned_tier)), level='critical') if secondary and check_secondary: if (set(secondary) & set(allowed_secondary.keys()) != set(secondary)): msg = '%s is not an allowed secondary' % ( ', '.join(set(secondary) - set(allowed_secondary.keys()))) wfh.sendLog('assignor', msg) critical_msg = msg + '\nWorkflow URL: https://dmytro.web.cern.ch/dmytro/cmsprodmon/workflows.php?prep_id=task_{}'.format( wfh.getPrepIDs()[0]) sendLog('assignor', critical_msg, level='critical') if not options.go: no_go = True ## then get whether there is something more to be done by secondary for sec in secondary: if sec in allowed_secondary: # and 'parameters' in allowed_secondary[sec]: assign_parameters.update(allowed_secondary[sec]) if no_go: n_stalled += 1 ## make a very loud noise if >100k priority stalled continue ## check on current status for by-passed 
assignment if wfh.request['RequestStatus'] != 'assignment-approved': if not options.test: wfh.sendLog('assignor', "setting %s away and skipping" % wfo.name) ## the module picking up from away will do what is necessary of it wfo.wm_status = wfh.request['RequestStatus'] wfo.status = 'away' session.commit() continue else: print wfo.name, wfh.request['RequestStatus'] ## retrieve from the schema, dbs and reqMgr what should be the next version version = wfh.getNextVersion() if not version: if options and options.ProcessingVersion: version = options.ProcessingVersion else: wfh.sendLog('assignor', "cannot decide on version number") n_stalled += 1 wfo.status = 'trouble' session.commit() continue wfh.sendLog('assignor', "Site white list %s" % sorted(sites_allowed)) blocks = wfh.getBlocks() if blocks: wfh.sendLog( 'assignor', "Needs {} blocks in input {}".format(len(blocks), '\n'.join(blocks))) wfh.sendLog('assignor', "Allowed %s" % sorted(sites_allowed)) primary_aaa = options.primary_aaa secondary_aaa = options.secondary_aaa if 'Campaign' in wfh.request and wfh.request[ 'Campaign'] in CI.campaigns: assign_parameters.update(CI.campaigns[wfh.request['Campaign']]) if 'primary_AAA' in assign_parameters and primary: primary_aaa = primary_aaa or assign_parameters['primary_AAA'] if 'secondary_AAA' in assign_parameters: secondary_aaa = secondary_aaa or assign_parameters['secondary_AAA'] wfh.sendLog( 'assignor', "Initial values for primary_AAA=%s and secondary_AAA=%s" % (primary_aaa, secondary_aaa)) if primary_aaa: if "T2_CH_CERN_HLT" in sites_allowed: sites_allowed.remove("T2_CH_CERN_HLT") if "T2_CH_CERN_HLT" not in sites_not_allowed: sites_not_allowed.append("T2_CH_CERN_HLT") ## keep track of this, after secondary input location restriction : that's how you want to operate it initial_sites_allowed = copy.deepcopy(sites_allowed) set_lfn = '/store/mc' ## by default for prim in list(primary): set_lfn = getLFNbase(prim) ## if they are requested for processing, they should bbe all 
closed already # FIXME: remove this closeAllBlocks #closeAllBlocks(url, prim, blocks) ## should be 2 but for the time-being let's lower it to get things going _copies_wanted, cpuh = wfh.getNCopies() wfh.sendLog('assignor', "we need %s CPUh" % cpuh) if cpuh > max_cpuh_block and not options.go: #sendEmail('large workflow','that wf %s has a large number of CPUh %s, not assigning, please check the logs'%(wfo.name, cpuh))#,destination=['*****@*****.**']) sendLog( 'assignor', '%s requires a large numbr of CPUh %s , not assigning, please check with requester' % (wfo.name, cpuh), level='critical') wfh.sendLog( 'assignor', "Requiring a large number of CPUh %s, not assigning" % cpuh) continue ## should also check on number of sources, if large enough, we should be able to overflow most, efficiently ## default back to white list to original white list with any data wfh.sendLog('assignor', "Allowed sites :%s" % sorted(sites_allowed)) # TODO Alan on 1/april/2020: keep the AAA functionality if primary_aaa: ## remove the sites not reachable localy if not in having the data if not sites_allowed: wfh.sendLog('assignor', "Overiding the primary on AAA setting to Off") primary_aaa = False else: aaa_grid = set(sites_allowed) for site in list(aaa_grid): aaa_grid.update(aaa_mapping.get(site, [])) sites_allowed = list(set(initial_sites_allowed) & aaa_grid) wfh.sendLog( 'assignor', "Selected to read primary through xrootd %s" % sorted(sites_allowed)) isStoreResults = ('StoreResults' == wfh.request.setdefault( 'RequestType', None)) if isStoreResults: if 'MergedLFNBase' in wfh.request: set_lfn = wfh.request['MergedLFNBase'] else: n_stalled += 1 wfh.sendLog( 'assignor', "Cannot assign StoreResults request because MergedLFN is missing" ) sendLog( 'assignor', 'Cannot assign StoreResults request because MergedLFN is missing', level='critical') continue if not primary_aaa: if isStoreResults: ## if we are dealing with a StoreResults request, we don't need to check dataset availability and ## 
should use the SiteWhiteList set in the original request if 'SiteWhitelist' in wfh.request: sites_allowed = wfh.request['SiteWhitelist'] else: wfh.sendLog( 'assignor', "Cannot assign StoreResults request because SiteWhitelist is missing" ) sendLog( 'assignor', 'Cannot assign StoreResults request because SiteWhitelist is missing', level='critical') n_stalled += 1 continue wfh.sendLog('assignor', "Selected for any data %s" % sorted(sites_allowed)) #if not len(sites_allowed): # if not options.early: # wfh.sendLog('assignor',"cannot be assign with no matched sites") # sendLog('assignor','%s has no whitelist'% wfo.name, level='critical') # n_stalled+=1 # continue if not len(sites_allowed) and not options.SiteWhitelist: if not options.early: wfh.sendLog('assignor', "cannot be assign with no matched sites") sendLog('assignor', '%s has no whitelist' % wfo.name, level='critical') n_stalled += 1 continue t1t2_only = [ ce for ce in sites_allowed if [ce.startswith('T1') or ce.startswith('T2')] ] if t1t2_only: # try to pick from T1T2 only first sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in t1t2_only])] # then pick any otherwise else: sites_out = [ SI.pick_dSE([SI.CE_to_SE(ce) for ce in sites_allowed]) ] print "available=", SI.disk[sites_out[0]] wfh.sendLog('assignor', "Placing the output on %s" % sites_out) parameters = { 'SiteWhitelist': sites_allowed, 'SiteBlacklist': sites_not_allowed, 'NonCustodialSites': sites_out, 'AutoApproveSubscriptionSites': list(set(sites_out)), 'AcquisitionEra': wfh.acquisitionEra(), 'ProcessingString': wfh.processingString(), 'MergedLFNBase': set_lfn, 'ProcessingVersion': version, } if primary_aaa: parameters['TrustSitelists'] = True wfh.sendLog( 'assignor', "Reading primary through xrootd at %s" % sorted(sites_allowed)) if secondary_aaa: # Do not set TrustPUSitelist to True if there is no secondary if secondary: parameters['TrustPUSitelists'] = True wfh.sendLog( 'assignor', "Reading secondary through xrootd at %s" % sorted(sites_allowed)) ## 
plain assignment here team = 'production' if os.getenv('UNIFIED_TEAM'): team = os.getenv('UNIFIED_TEAM') if options and options.team: team = options.team parameters['Team'] = team if lheinput: ## throttle reading LHE article wfh.sendLog('assignor', 'Setting the number of events per job to 500k max') parameters['EventsPerJob'] = 500000 def pick_options(options, parameters): ##parse options entered in command line if any if options: for key in reqMgrClient.assignWorkflow.keys: v = getattr(options, key) if v != None: if type(v) == str and ',' in v: parameters[key] = filter(None, v.split(',')) else: parameters[key] = v def pick_campaign(assign_parameters, parameters): ## pick up campaign specific assignment parameters parameters.update(assign_parameters.get('parameters', {})) if options.force_options: pick_campaign(assign_parameters, parameters) pick_options(options, parameters) else: ## campaign parameters update last pick_options(options, parameters) pick_campaign(assign_parameters, parameters) if not options.test: parameters['execute'] = True hold_split, split_check = wfh.checkSplitting() if hold_split and not options.go: if split_check: wfh.sendLog( 'assignor', 'Holding on to the change in splitting %s' % ('\n\n'.join([str(i) for i in split_check]))) else: wfh.sendLog('assignor', 'Change of splitting is on hold') n_stalled += 1 continue if split_check == None or split_check == False: n_stalled += 1 continue elif split_check: ## operate all recommended changes reqMgrClient.setWorkflowSplitting(url, wfo.name, split_check) wfh.sendLog( 'assignor', 'Applying the change in splitting %s' % ('\n\n'.join([str(i) for i in split_check]))) split_check = True ## bypass completely and use the above # Handle run-dependent MC pstring = wfh.processingString() if 'PU_RD' in pstring: numEvents = wfh.getRequestNumEvents() eventsPerLumi = [getDatasetEventsPerLumi(prim) for prim in primary] eventsPerLumi = sum(eventsPerLumi) / float(len(eventsPerLumi)) reqJobs = 500 if 'PU_RD2' in 
pstring: reqJobs = 2000 eventsPerJob = int(numEvents / (reqJobs * 1.4)) lumisPerJob = int(eventsPerJob / eventsPerLumi) if lumisPerJob == 0: #sendEmail("issue with event splitting for run-dependent MC","%s needs to be split by event with %s per job"%(wfo.name, eventsPerJob)) sendLog('assignor', "%s needs to be split by event with %s per job" % (wfo.name, eventsPerJob), level='critical') wfh.sendLog( 'assignor', "%s needs to be split by event with %s per job" % (wfo.name, eventsPerJob)) parameters['EventsPerJob'] = eventsPerJob else: spl = wfh.getSplittings()[0] # FIXME: decide which of the lines below needs to remain... eventsPerJobEstimated = spl[ 'events_per_job'] if 'events_per_job' in spl else None eventsPerJobEstimated = spl[ 'avg_events_per_job'] if 'avg_events_per_job' in spl else None if eventsPerJobEstimated and eventsPerJobEstimated > eventsPerJob: #sendEmail("setting lumi splitting for run-dependent MC","%s was assigned with %s lumis/job"%( wfo.name, lumisPerJob)) sendLog('assignor', "%s was assigned with %s lumis/job" % (wfo.name, lumisPerJob), level='critical') wfh.sendLog( 'assignor', "%s was assigned with %s lumis/job" % (wfo.name, lumisPerJob)) parameters['LumisPerJob'] = lumisPerJob else: #sendEmail("leaving splitting untouched for PU_RD*","please check on "+wfo.name) sendLog( 'assignor', "leaving splitting untouched for %s, please check on %s" % (pstring, wfo.name), level='critical') wfh.sendLog( 'assignor', "leaving splitting untouched for PU_RD*, please check." 
) ## make sure to autoapprove all NonCustodialSites parameters['AutoApproveSubscriptionSites'] = list( set(parameters['NonCustodialSites'] + parameters.get('AutoApproveSubscriptionSites', []))) result = reqMgrClient.assignWorkflow( url, wfo.name, None, parameters) ## team is not relevant anymore here # set status if not options.test: if result: wfo.status = 'away' session.commit() n_assigned += 1 wfh.sendLog( 'assignor', "Properly assigned\n%s" % (json.dumps(parameters, indent=2))) if wfh.producePremix() and (not wfh.isRelval()): title = "Heavy workflow assigned to {}".format( parameters['SiteWhitelist']) body = "Workflow name: {}".format( wfh.request['RequestName']) body += "\nOutput dataset(s): {}".format( wfh.request['OutputDatasets']) body += "\nAssigned to: {}".format( parameters['SiteWhitelist']) sendEmail( title, body, destination=[ '*****@*****.**' ]) try: ## refetch information and lock output new_wfi = workflowInfo(url, wfo.name) (_, prim, _, sec) = new_wfi.getIO() for secure in list(prim) + list( sec) + new_wfi.request['OutputDatasets']: ## lock all outputs LI.lock(secure, reason='assigning') except Exception as e: print "fail in locking output" print str(e) sendEmail("failed locking of output", str(e)) else: wfh.sendLog( 'assignor', "Failed to assign %s.\n%s \n Please check the logs" % (wfo.name, reqMgrClient.assignWorkflow.errorMessage)) sendLog('assignor', "Failed to assign %s.\n%s \n Please check the logs" % (wfo.name, reqMgrClient.assignWorkflow.errorMessage), level='critical') print "ERROR could not assign", wfo.name else: pass print "Assignment summary:" sendLog('assignor', "Assigned %d Stalled %s" % (n_assigned, n_stalled)) if n_stalled and not options.go and not options.early: sendLog('assignor', "%s workflows cannot be assigned. Please take a look" % (n_stalled), level='critical')
def completor(url, specific):
    """Force-complete long-running workflows.

    Scans workflows in 'away' or 'assistance*' status, honours explicit
    force-complete requests (from operators and from McM), and for
    campaigns with a 'force-complete' fraction configured, force-completes
    workflows that are past their campaign timeout but sufficiently
    complete. Progress checkpoints are persisted to completions.json and
    a long-running summary to longlasting.json.

    :param url: ReqMgr base URL.
    :param specific: substring filter; only matching workflow names are handled.
    """
    use_mcm = True
    up = componentInfo(mcm=use_mcm, soft=['mcm'])
    if not up.check(): return
    # McM may be down (it is 'soft' above): fall back to running without it.
    use_mcm = up.status['mcm']
    if use_mcm:
        mcm = McMClient(dev=False)

    CI = campaignInfo()
    SI = siteInfo()
    UC = unifiedConfiguration()

    wfs = []
    wfs.extend( session.query(Workflow).filter(Workflow.status == 'away').all() )
    wfs.extend( session.query(Workflow).filter(Workflow.status.startswith('assistance')).all() )

    ## just take it in random order so that not always the same is seen
    random.shuffle( wfs )

    max_per_round = UC.get('max_per_round').get('completor',None)
    if max_per_round and not specific: wfs = wfs[:max_per_round]

    ## by workflow a list of fraction / timestamps
    completions = json.loads( open('%s/completions.json'%monitor_dir).read())

    # Per-campaign completion threshold and timeout (days), from campaign config.
    good_fractions = {}
    timeout = {}
    for c in CI.campaigns:
        if 'force-complete' in CI.campaigns[c]:
            good_fractions[c] = CI.campaigns[c]['force-complete']
        if 'force-timeout' in CI.campaigns[c]:
            timeout[c] = CI.campaigns[c]['force-timeout']

    long_lasting = {}

    # Explicit force-complete requests keyed by requesting user.
    overrides = getForceCompletes()
    if use_mcm:
        ## add all workflow that mcm wants to get force completed
        mcm_force = mcm.get('/restapi/requests/forcecomplete')
        ## assuming this will be a list of actual prepids
        overrides['mcm'] = mcm_force

    print "can force complete on"
    print json.dumps( good_fractions ,indent=2)
    print json.dumps( overrides, indent=2)
    # Cap on how many workflows may be force-completed in one round.
    max_force = UC.get("max_force_complete")

    #wfs_no_location_in_GQ = set()
    #block_locations = defaultdict(lambda : defaultdict(list))
    #wfs_no_location_in_GQ = defaultdict(list)

    set_force_complete = set()

    for wfo in wfs:
        if specific and not specific in wfo.name: continue

        print "looking at",wfo.name
        ## get all of the same
        wfi = workflowInfo(url, wfo.name)
        pids = wfi.getPrepIDs()
        skip=False
        # NOTE(review): this matches campaign names as substrings of the
        # workflow name rather than reading wfi.request['Campaign'] —
        # presumably a cheap pre-filter; the real campaign check happens below.
        if not any([c in wfo.name for c in good_fractions]): skip=True
        # Honour explicit force-complete requests (operator or McM) first.
        for user,spec in overrides.items():
            if wfi.request['RequestStatus']!='force-complete':
                if any(s in wfo.name for s in spec) or (wfo.name in spec) or any(pid in spec for pid in pids) or any(s in pids for s in spec):
                    sendEmail('force-complete requested','%s is asking for %s to be force complete'%(user,wfo.name))
                    wfi = workflowInfo(url, wfo.name)
                    forceComplete(url , wfi )
                    skip=True
                    wfi.notifyRequestor("The workflow %s was force completed by request of %s"%(wfo.name,user), do_batch=False)
                    wfi.sendLog('completor','%s is asking for %s to be force complete'%(user,wfo.name))
                    break

        if wfo.status.startswith('assistance'): skip = True

        if skip:
            continue

        priority = wfi.request['RequestPriority']

        if not 'Campaign' in wfi.request: continue

        if not wfi.request['RequestStatus'] in ['acquired','running-open','running-closed']: continue

        # Only campaigns with a configured force-complete fraction qualify.
        c = wfi.request['Campaign']
        if not c in good_fractions: continue
        good_fraction = good_fractions[c]
        ignore_fraction = 2.

        lumi_expected = None
        event_expected = None
        if not 'TotalInputEvents' in wfi.request:
            if 'RequestNumEvents' in wfi.request:
                event_expected = wfi.request['RequestNumEvents']
            else:
                print "truncated, cannot do anything"
                continue
        else:
            lumi_expected = wfi.request['TotalInputLumis']
            event_expected = wfi.request['TotalInputEvents']

        # Timestamps converted from seconds to days for delay arithmetic.
        now = time.mktime(time.gmtime()) / (60*60*24.)

        running_log = filter(lambda change : change["Status"] in ["running-open","running-closed"],wfi.request['RequestTransition'])
        if not running_log:
            print "\tHas no running log"
            # cannot figure out when the thing started running
            continue
        # Latest running-* transition marks when the workflow started running.
        then = running_log[-1]['UpdateTime'] / (60.*60.*24.)
        delay = now - then ## in days

        (w,d) = divmod(delay, 7 )
        # One leading tab per week of running time, for visual scanning.
        print "\t"*int(w)+"Running since",delay,"[days] priority=",priority

        monitor_delay = 7
        allowed_delay = 14
        if c in timeout:
            allowed_delay = timeout[c]

        monitor_delay = min(monitor_delay, allowed_delay)
        ### just skip if too early
        if delay <= monitor_delay: continue

        long_lasting[wfo.name] = { "delay" : delay }

        percent_completions = {}
        for output in wfi.request['OutputDatasets']:
            if "/DQM" in output: continue ## that does not count
            if not output in completions:
                completions[output] = { 'injected' : None, 'checkpoints' : [], 'workflow' : wfo.name}
            ## get completion fraction
            event_count,lumi_count = getDatasetEventsAndLumis(dataset=output)
            lumi_completion=0.
            event_completion=0.
            if lumi_expected:
                lumi_completion = lumi_count / float( lumi_expected )
            if event_expected:
                event_completion = event_count / float( event_expected )

            #take the less optimistic
            percent_completions[output] = min( lumi_completion, event_completion )
            completions[output]['checkpoints'].append( (now, event_completion ) )

        if all([percent_completions[out] >= good_fraction for out in percent_completions]):
            wfi.sendLog('completor', "all is above %s \n%s"%( good_fraction, json.dumps( percent_completions, indent=2 ) ))
        else:
            # Not complete enough: record details and move on to the next one.
            long_lasting[wfo.name].update({
                'completion': sum(percent_completions.values()) / len(percent_completions),
                'completions' : percent_completions
            })

            ## do something about the agents this workflow is in
            long_lasting[wfo.name]['agents'] = wfi.getAgents()
            wfi.sendLog('completor', "%s not over bound %s\n%s"%(percent_completions.values(), good_fraction, json.dumps( long_lasting[wfo.name]['agents'], indent=2) ))
            continue

        # ignore_fraction is 2.0, i.e. >200% complete; this branch guards
        # against over-complete bookkeeping where nothing needs forcing.
        if all([percent_completions[out] >= ignore_fraction for out in percent_completions]):
            print "all is done, just wait a bit"
            continue

        for output in percent_completions:
            completions[output]['injected'] = then

        #further check on delays
        cpuh = wfi.getComputingTime(unit='d')

        ran_at = wfi.request['SiteWhitelist']
        wfi.sendLog('completor',"Required %s, time spend %s"%( cpuh, delay))

        ##### WILL FORCE COMPLETE BELOW
        # only really force complete after n days
        if delay <= allowed_delay: continue
        ## find ACDCs that might be running
        if max_force>0:
            forceComplete(url, wfi )
            set_force_complete.add( wfo.name )
            print "going for force-complete of",wfo.name
            wfi.sendLog('completor','going for force completing')
            wfi.notifyRequestor("The workflow %s was force completed for running too long"% wfo.name)
            max_force -=1
        else:
            wfi.sendLog('completor',"too many completion this round, cannot force complete")

        ## do it once only for testing
        #break

    if set_force_complete:
        sendLog('completor','The followings were set force-complete \n%s'%('\n'.join(set_force_complete)))
        #sendEmail('set force-complete', 'The followings were set force-complete \n%s'%('\n'.join(set_force_complete)))

    # Persist the updated per-dataset checkpoints and the long-running summary.
    open('%s/completions.json'%monitor_dir,'w').write( json.dumps( completions , indent=2))
    text="These have been running for long"

    open('%s/longlasting.json'%monitor_dir,'w').write( json.dumps( long_lasting, indent=2 ))

    for wf,info in sorted(long_lasting.items(), key=lambda tp:tp[1]['delay'], reverse=True):
        delay = info['delay']
        text += "\n %s : %s days"% (wf, delay)
        if 'completion' in info:
            text += " %d%%"%( info['completion']*100 )

    #if wfs_no_location_in_GQ:
    #    sendEmail('workflow with no location in GQ',"there won't be able to run anytime soon\n%s"%( '\n'.join(wfs_no_location_in_GQ)))

    #sendEmail("long lasting workflow",text)
    ## you can check the log
    print text
def transferor(url, specific=None, talk=True, options=None): if userLock(): return if duplicateLock(): return use_mcm = True up = componentInfo(mcm=use_mcm, soft=['mcm']) if not up.check(): return use_mcm = up.status['mcm'] if options and options.test: execute = False else: execute = True SI = siteInfo() CI = campaignInfo() NLI = newLockInfo() mcm = McMClient(dev=False) dss = DSS() #allowed_secondary = UC.get('') print "counting all being handled..." being_handled = len( session.query(Workflow).filter(Workflow.status == 'away').all()) being_handled += len( session.query(Workflow).filter( Workflow.status.startswith('stag')).all()) being_transfered = len( session.query(Workflow).filter(Workflow.status == 'staging').all()) being_handled += len( session.query(Workflow).filter( Workflow.status.startswith('assistance-')).all()) max_to_handle = options.maxworkflows max_to_transfer = options.maxstaging allowed_to_handle = max(0, max_to_handle - being_handled) allowed_to_transfer = max(0, max_to_transfer - being_transfered) wf_buffer = 5 if allowed_to_handle <= wf_buffer: ## buffer for having several wf per transfer print "Not allowed to run more than", max_to_handle, "at a time. Currently", being_handled, "and", wf_buffer, "buffer" else: print being_handled, "already being handled", max_to_handle, "max allowed,", allowed_to_handle, "remaining", "and", wf_buffer, "buffer" if allowed_to_transfer <= wf_buffer: print "Not allowed to transfer more than", max_to_transfer, "at a time. Currently", being_transfered, "and", wf_buffer, "buffer" else: print being_transfered, "already being transfered", max_to_transfer, "max allowed,", allowed_to_transfer, "remaining", "and", wf_buffer, "buffer" print "... done" all_transfers = defaultdict(list) workflow_dependencies = defaultdict( set) ## list of wf.id per input dataset wfs_and_wfh = [] print "getting all wf to consider ..." 
cache = getWorkflows(url, 'assignment-approved', details=True) for wfo in session.query(Workflow).filter( Workflow.status.startswith('considered')).all(): print "\t", wfo.name if specific and not specific in wfo.name: continue cache_r = filter(lambda d: d['RequestName'] == wfo.name, cache) if len(cache_r): wfs_and_wfh.append((wfo, workflowInfo(url, wfo.name, spec=False, request=cache_r[0]))) else: wfs_and_wfh.append((wfo, workflowInfo(url, wfo.name, spec=False))) print "... done" transfers_per_sites = defaultdict(int) input_sizes = {} ignored_input_sizes = {} input_cput = {} input_st = {} ## list the size of those in transfer already in_transfer_priority = None min_transfer_priority = None print "getting all wf in staging ..." stucks = json.loads(open('%s/stuck_transfers.json' % monitor_dir).read()) for wfo in session.query(Workflow).filter( Workflow.status == 'staging').all(): wfh = workflowInfo(url, wfo.name, spec=False) #(lheinput,primary,parent,secondary) = wfh.getIO() #sites_allowed = getSiteWhiteList( (lheinput,primary,parent,secondary) ) (lheinput, primary, parent, secondary, sites_allowed) = wfh.getSiteWhiteList() for site in sites_allowed: ## we should get the actual transfer destination instead of the full white list transfers_per_sites[site] += 1 #input_cput[wfo.name] = wfh.getComputingTime() #input_st[wfo.name] = wfh.getSystemTime() for prim in primary: ds_s = dss.get(prim) if prim in stucks: sendLog('transferor', "%s appears stuck, so not counting it %s [GB]" % (prim, ds_s), wfi=wfh) ignored_input_sizes[prim] = ds_s else: input_sizes[prim] = ds_s sendLog('transferor', "%s needs %s [GB]" % (wfo.name, ds_s), wfi=wfh) if in_transfer_priority == None: in_transfer_priority = int(wfh.request['RequestPriority']) else: in_transfer_priority = max(in_transfer_priority, int(wfh.request['RequestPriority'])) if min_transfer_priority == None: min_transfer_priority = int(wfh.request['RequestPriority']) else: min_transfer_priority = min(min_transfer_priority, 
int(wfh.request['RequestPriority'])) if min_transfer_priority == None or in_transfer_priority == None: print "nothing is lining up for transfer" sendEmail("no request in staging", "no request in staging") return pass try: print "Ignored input sizes" ignored_values = list(ignored_input_sizes.items()) ignored_values.sort(key=lambda i: i[1]) print "\n".join(map(str, ignored_values)) print "Considered input sizes" considered_values = list(input_sizes.items()) considered_values.sort(key=lambda i: i[1]) print "\n".join(map(str, considered_values)) except Exception as e: print "trying to print the summary of input size" print str(e) print "... done" print "Max priority in transfer already", in_transfer_priority print "Min priority in transfer already", min_transfer_priority print "transfers per sites" print json.dumps(transfers_per_sites, indent=2) in_transfer_already = sum(input_sizes.values()) cput_in_transfer_already = sum(input_cput.values()) st_in_transfer_already = sum(input_st.values()) ## list the size of all inputs primary_input_per_workflow_gb = defaultdict(float) print "getting all input sizes ..." for (wfo, wfh) in wfs_and_wfh: (_, primary, _, _) = wfh.getIO() #input_cput[wfo.name] = wfh.getComputingTime() #input_st[wfo.name] = wfh.getSystemTime() for prim in primary: ## do not count it if it appears stalled ! prim_size = dss.get(prim) input_sizes[prim] = prim_size primary_input_per_workflow_gb[wfo.name] += prim_size print "... done" # shuffle first by name random.shuffle(wfs_and_wfh) # Sort smallest transfers first; allows us to transfer as many as possible workflows. 
def prio_and_size(i, j): if int(i[1].request['RequestPriority']) == int( j[1].request['RequestPriority']): return cmp(int(primary_input_per_workflow_gb.get(j[0].name, 0)), int(primary_input_per_workflow_gb.get(i[0].name, 0))) else: return cmp(int(i[1].request['RequestPriority']), int(j[1].request['RequestPriority'])) #wfs_and_wfh.sort(cmp = prio_and_size, reverse=True) #wfs_and_wfh.sort(cmp = lambda i,j : cmp(int(primary_input_per_workflow_gb.get(i[0].name, 0)), int(primary_input_per_workflow_gb.get(j[0].name, 0)) )) #sort by priority higher first wfs_and_wfh.sort(cmp=lambda i, j: cmp(int(i[1].request[ 'RequestPriority']), int(j[1].request['RequestPriority'])), reverse=True) cput_grand_total = sum(input_cput.values()) cput_to_transfer = cput_grand_total - cput_in_transfer_already st_grand_total = sum(input_st.values()) st_to_transfer = st_grand_total - st_in_transfer_already print "%15.4f [CPU h] worth already in transfer" % cput_in_transfer_already print "%15.4f [CPU h] worth is the current requested transfer load" % cput_to_transfer print "%15.4f [h] worth of absolute system time in transfer" % ( cput_in_transfer_already / SI.availableSlots()) print "%15.4f [h] worth of absolute system time is the current requested transfer load" % ( cput_to_transfer / SI.availableSlots()) print "%15.4f [h] worth of theoritical system time in transfer" % ( st_in_transfer_already) print "%15.4f [h] worth of theoritical system time is the current requested transfer load" % ( st_to_transfer) grand_total = sum(input_sizes.values()) to_transfer = grand_total - in_transfer_already grand_transfer_limit = options.maxtransfer #grand_transfer_limit = SI.total_disk()*0.25*1024## half of the free sapce in TB->GB transfer_limit = grand_transfer_limit - in_transfer_already print "%15.4f GB already being transfered" % in_transfer_already print "%15.4f GB is the current requested transfer load" % to_transfer print "%15.4f GB is the global transfer limit" % grand_transfer_limit print "%15.4f GB 
is the available limit" % transfer_limit max_staging_per_site = options.maxstagingpersite # the max priority value per dataset. max_priority = defaultdict(int) needs_transfer = 0 ## so that we can count'em passing_along = 0 transfer_sizes = {} went_over_budget = False destination_cache = {} no_goes = set() max_per_round = UC.get('max_per_round').get('transferor', None) if max_per_round and not spec: wfs_and_wfh = wfs_and_wfh[:max_per_round] for (wfo, wfh) in wfs_and_wfh: print wfo.name, "to be transfered with priority", wfh.request[ 'RequestPriority'] if wfh.request['RequestStatus'] != 'assignment-approved': if wfh.request['RequestStatus'] in [ 'aborted', 'rejected', 'rejected-archived', 'aborted-archived' ]: wfo.status = 'trouble' ## so that we look or a replacement else: wfo.status = 'away' wfh.sendLog( 'transferor', '%s in status %s, setting %s' % (wfo.name, wfh.request['RequestStatus'], wfo.status)) continue (_, primary, _, _) = wfh.getIO() this_load = sum([input_sizes[prim] for prim in primary]) no_budget = False if (this_load and (sum(transfer_sizes.values()) + this_load > transfer_limit or went_over_budget)): if went_over_budget: wfh.sendLog('transferor', "Transfer has gone over bubget.") else: wfh.sendLog('transferor', "Transfer will go over bubget.") wfh.sendLog( 'transferor', "%15.4f GB this load, %15.4f GB already this round, %15.4f GB is the available limit" % (this_load, sum(transfer_sizes.values()), transfer_limit)) #if sum(transfer_sizes.values()) > transfer_limit: went_over_budget = True if in_transfer_priority != None and min_transfer_priority != None: if int( wfh.request['RequestPriority'] ) >= in_transfer_priority and min_transfer_priority != in_transfer_priority: wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over budget" % (wfh.request['RequestPriority'], in_transfer_priority)) else: if not options.go: wfh.sendLog( 'transferor', "%s minimum priority %s < %s : stop" % (min_transfer_priority, wfh.request['RequestPriority'], 
in_transfer_priority)) no_budget = True ## throtlle by campaign go no_go = False if not wfh.go(log=True) and not options.go: no_go = True no_goes.add(wfo.name) allowed_secondary = set() for campaign in wfh.getCampaigns(): if campaign in CI.campaigns and 'secondaries' in CI.campaigns[ campaign]: allowed_secondary.update(CI.campaigns[campaign]['secondaries']) if secondary: if (secondary and allowed_secondary) and ( set(secondary) & allowed_secondary != set(secondary)): wfh.sendLog( 'assignor', '%s is not an allowed secondary' % (', '.join(set(secondary) - allowed_secondary))) no_go = True if no_go: continue ## check if the batch is announced def check_mcm(wfn): announced = False is_real = False if not wfn.startswith('pdmvserv'): is_real = True try: for b in mcm.getA('batches', query='contains=%s' % wfo.name): is_real = True if b['status'] == 'announced': announced = True break except: try: for b in mcm.getA('batches', query='contains=%s' % wfo.name): is_real = True if b['status'] == 'announced': announced = True break except: print "could not get mcm batch announcement, assuming not real" return announced, is_real if not use_mcm: announced, is_real = False, True else: if wfh.request['RequestType'] in ['ReReco']: announced, is_real = True, True else: announced, is_real = check_mcm(wfo.name) if not announced: wfh.sendLog('transferor', "does not look announced.") if not is_real: wfh.sendLog('transferor', "does not appear to be genuine.") ## prevent any duplication. if the wf is not mentioned in any batch, regardless of status continue ## check on a grace period injection_time = time.mktime( time.strptime('.'.join(map(str, wfh.request['RequestDate'])), "%Y.%m.%d.%H.%M.%S")) / (60. * 60.) now = time.mktime(time.gmtime()) / (60. * 60.) 
if float(now - injection_time) < 4.: if not options.go and not announced: wfh.sendLog( 'transferor', "It is too soon to start transfer: %3.2fH remaining" % (now - injection_time)) continue if passing_along >= allowed_to_handle: #if int(wfh.request['RequestPriority']) >= in_transfer_priority and min_transfer_priority!=in_transfer_priority: if in_transfer_priority != None and min_transfer_priority != None: if int(wfh.request['RequestPriority'] ) >= in_transfer_priority and int( wfh.request['RequestPriority'] ) != min_transfer_priority: ## higher priority, and not only this priority being transfered wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over %s" % (wfh.request['RequestPriority'], in_transfer_priority, max_to_handle)) else: wfh.sendLog( 'transferor', " Not allowed to pass more than %s at a time. Currently %s handled, and adding %s" % (max_to_handle, being_handled, passing_along)) if not options.go: ## should not allow to jump that fence break if this_load and needs_transfer >= allowed_to_transfer: if in_transfer_priority != None and min_transfer_priority != None: if int(wfh.request['RequestPriority'] ) >= in_transfer_priority and int( wfh.request['RequestPriority'] ) != min_transfer_priority: ## higher priority, and not only this priority being transfered wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over %s" % (wfh.request['RequestPriority'], in_transfer_priority, max_to_transfer)) else: wfh.sendLog( 'transferor', "Not allowed to transfer more than %s at a time. 
Currently %s transfering, and adding %s" % (max_to_transfer, being_transfered, needs_transfer)) if not options.go: no_budget = True if no_budget: continue ## the site white list considers site, campaign, memory and core information (lheinput, primary, parent, secondary, sites_allowed) = wfh.getSiteWhiteList() if options and options.tosites: sites_allowed = options.tosites.split(',') for dataset in list(primary) + list(parent) + list(secondary): ## lock everything flat NLI.lock(dataset) if not sites_allowed: wfh.sendLog('transferor', "not possible site to run at") #sendEmail("no possible sites","%s has no possible sites to run at"%( wfo.name )) sendLog('transferor', "%s has no possible sites to run at" % (wfo.name), level='critical') continue blocks = [] if 'BlockWhitelist' in wfh.request and wfh.request['BlockWhitelist']: blocks = wfh.request['BlockWhitelist'] if 'RunWhitelist' in wfh.request and wfh.request['RunWhitelist']: ## augment with run white list for dataset in primary: blocks = list( set(blocks + getDatasetBlocks( dataset, runs=wfh.request['RunWhitelist']))) if 'LumiList' in wfh.request and wfh.request['LumiList']: ## augment with the lumi white list blocks = list( set(blocks + getDatasetBlocks(dataset, lumis=wfh.request['LumiList']))) if blocks: print "Reading", len(blocks), "in block whitelist" can_go = True staging = False allowed = True primary_destinations = set() if primary: copies_needed_from_CPUh, CPUh = wfh.getNCopies() if talk: print wfo.name, 'reads', ', '.join(primary), 'in primary' ## chope the primary dataset for prim in primary: ## keep track of what needs what workflow_dependencies[prim].add(wfo.id) max_priority[prim] = max(max_priority[prim], int(wfh.request['RequestPriority'])) wfh.sendLog( 'transferor', "Would make %s from cpu requirement %s" % (copies_needed_from_CPUh, CPUh)) copies_needed = copies_needed_from_CPUh if 'Campaign' in wfh.request and wfh.request[ 'Campaign'] in CI.campaigns and 'maxcopies' in CI.campaigns[ 
wfh.request['Campaign']]: copies_needed_from_campaign = CI.campaigns[ wfh.request['Campaign']]['maxcopies'] copies_needed = min(copies_needed_from_campaign, copies_needed) wfh.sendLog( 'transferor', "Maxed to %s by campaign configuration %s" % (copies_needed, wfh.request['Campaign'])) ### new ways of making the whole thing destinations, all_block_names = getDatasetDestinations( url, prim, within_sites=[SI.CE_to_SE(site) for site in sites_allowed], only_blocks=blocks) print json.dumps(destinations, indent=2) ## get where the dataset is in full and completed prim_location = [ site for (site, info) in destinations.items() if info['completion'] == 100 and info['data_fraction'] == 1 ] ## the rest is places it is going to be prim_destination = [ site for site in destinations.keys() if not site in prim_location ] if len(prim_location) >= copies_needed: wfh.sendLog( 'transferor', "The input is all fully in place at %s sites %s" % (len(prim_location), sorted(prim_location))) continue copies_needed = max(0, copies_needed - len(prim_location)) wfh.sendLog( 'transferor', "not counting existing copies ; now need %s" % copies_needed) copies_being_made = [ sum([ info['blocks'].keys().count(block) for site, info in destinations.items() if site in prim_destination ]) for block in all_block_names ] latching_on_transfers = set() [ latching_on_transfers.update(info['blocks'].values()) for site, info in destinations.items() if site in prim_destination ] latching_on_transfers = list(latching_on_transfers) #print latching_on_transfers ## figure out where all this is going to go prim_to_distribute = [ site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location ] prim_to_distribute = [ site for site in prim_to_distribute if not SI.CE_to_SE(site) in prim_destination ] ## take out the ones that cannot receive transfers prim_to_distribute = [ site for site in prim_to_distribute if not any([ osite.startswith(site) for osite in SI.sites_veto_transfer ]) ] wfh.sendLog( 'transferor', 
"Could be going to: %s" % sorted(prim_to_distribute)) if not prim_to_distribute or any([ transfers_per_sites[site] < max_staging_per_site for site in prim_to_distribute ]): ## means there is openings let me go print "There are transfer slots available:", [ (site, transfers_per_sites[site]) for site in prim_to_distribute ] #for site in sites_allowed: # #increment accross the board, regardless of real destination: could be changed # transfers_per_sites[site] += 1 else: if int( wfh.request['RequestPriority'] ) >= in_transfer_priority and min_transfer_priority != in_transfer_priority: wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over transfer slots available" % (wfh.request['RequestPriority'], in_transfer_priority)) else: wfh.sendLog( 'transferor', "Not allowed to transfer more than %s per site at a time. Going overboard for %s" % (max_staging_per_site, sorted([ site for site in prim_to_distribute if transfers_per_sites[site] >= max_staging_per_site ]))) if not options.go: allowed = False break for latching in latching_on_transfers: tfo = session.query(Transfer).filter( Transfer.phedexid == int(latching)).first() if not tfo: tfo = session.query(Transfer).filter( Transfer.phedexid == -int(latching)).first() if not tfo: tfo = Transfer(phedexid=latching) tfo.workflows_id = [] session.add(tfo) else: tfo.phedexid = latching ## make it positive ever if not wfo.id in tfo.workflows_id: print "adding", wfo.id, "to", tfo.id, "with phedexid", latching l = copy.deepcopy(tfo.workflows_id) l.append(wfo.id) tfo.workflows_id = l if not options.test: session.commit() else: session.flush( ) ## regardless of commit later on, we need to let the next wf feeding on this transfer to see it in query can_go = False transfer_sizes[prim] = input_sizes[prim] staging = True # reduce the number of copies required by the on-going full transfer : how do we bootstrap on waiting for them ?? 
#copies_needed = max(0,copies_needed - len(prim_destination)) copies_needed = max(0, copies_needed - min(copies_being_made)) wfh.sendLog( 'transferor', "Not counting the copies being made ; then need %s" % copies_needed) if copies_needed == 0: wfh.sendLog( 'transferor', "The output is either fully in place or getting in full somewhere with %s" % latching_on_transfers) can_go = True continue elif len(prim_to_distribute) == 0: wfh.sendLog( 'transferor', "We are going to need extra copies, but no destinations seems available" ) prim_to_distribute = [ site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location ] prim_to_distribute = [ site for site in prim_to_distribute if not any([ osite.startswith(site) for osite in SI.sites_veto_transfer ]) ] if len( prim_to_distribute ) > 0: ## maybe that a parameter we can play with to limit the if not options or options.chop: ### hard include the tape disk andpoint ? #tapes = [site for site in getDatasetPresence( url, prim, vetos=['T0','T2','T3','Disk']) if site.endswith('MSS')] chops, sizes = getDatasetChops( prim, chop_threshold=options.chopsize, only_blocks=blocks) spreading = distributeToSites(chops, prim_to_distribute, n_copies=copies_needed, weights=SI.cpu_pledges, sizes=sizes) transfer_sizes[prim] = sum(sizes) if not spreading: sendLog( 'transferor', 'cannot send %s to any site, it cannot fit anywhere' % prim, level='critical') wfh.sendLog( 'transferor', "cannot send to any site. 
%s cannot seem to fit anywhere" % (prim)) staging = False can_go = False else: spreading = {} for site in prim_to_distribute: if blocks: spreading[site] = blocks else: spreading[site] = [prim] transfer_sizes[prim] = input_sizes[ prim] ## this is approximate if blocks are specified can_go = False wfh.sendLog( 'transferor', "selected CE destinations %s" % (sorted(spreading.keys()))) for (site, items) in spreading.items(): all_transfers[site].extend(items) transfers_per_sites[site] += 1 primary_destinations.add(site) if not allowed: wfh.sendLog('transferor', "Not allowed to move on with") continue if secondary: override_sec_destination = [] if 'SecondaryLocation' in CI.campaigns[wfh.request['Campaign']]: override_sec_destination = CI.campaigns[ wfh.request['Campaign']]['SecondaryLocation'] print wfo.name, 'reads', ', '.join(secondary), 'in secondary' for sec in secondary: workflow_dependencies[sec].add(wfo.id) if True: ## new style, failing on minbias if not sec in destination_cache: ## this is barbbaric, and does not show the correct picture on workflow by workflow with different whitelist destination_cache[sec], _ = getDatasetDestinations( url, sec) ## NO SITE WHITE LIST ADDED #destination_cache[sec],_ = getDatasetDestinations(url, sec, within_sites = [SI.CE_to_SE(site) for site in sites_allowed]) ## limit to the site whitelist NOW se_allowed = [SI.CE_to_SE(site) for site in sites_allowed] destinations = dict([ (k, v) for (k, v) in destination_cache[sec].items() if site in se_allowed ]) ## truncate location/destination to those making up for >90% of the dataset bad_destinations = [ destinations.pop(site) for (site, info) in destinations.items() if info['data_fraction'] < 0.9 ] sec_location = [ site for (site, info) in destinations.items() if info['completion'] >= 95 ] sec_destination = [ site for site in destinations.keys() if not site in sec_location ] else: ## old style presence = getDatasetPresence(url, sec) sec_location = [ site for site, pres in 
presence.items() if pres[1] > 90. ] ## more than 90% of the minbias at sites subscriptions = listSubscriptions(url, sec) sec_destination = [site for site in subscriptions] sec_to_distribute = [ site for site in sites_allowed if not any([osite.startswith(site) for osite in sec_location]) ] sec_to_distribute = [ site for site in sec_to_distribute if not any( [osite.startswith(site) for osite in sec_destination]) ] sec_to_distribute = [ site for site in sec_to_distribute if not any([ osite.startswith(site) for osite in SI.sites_veto_transfer ]) ] if override_sec_destination: ## intersect with where we want the PU to be not_needed_anymore = list( set(sec_to_distribute) - set(override_sec_destination)) #sendEmail("secondary superfluous","the dataset %s could be removed from %s"%( sec, not_needed_anymore )) sendLog( 'transferor', "the dataset %s could be removed from %s" % (sec, not_needed_anymore)) sec_to_distribute = list( set(sec_to_distribute) & set(override_sec_destination)) if len(sec_to_distribute) > 0: print "secondary could go to", sorted(sec_to_distribute) sec_size = dss.get(sec) for site in sec_to_distribute: site_se = SI.CE_to_SE(site) if (SI.disk[site_se] * 1024.) 
> sec_size: all_transfers[site].append(sec) can_go = False else: print "could not send the secondary input to", site_se, "because it is too big for the available disk", SI.disk[ site_se] * 1024, "GB need", sec_size if primary_destinations and site in primary_destinations: #sendEmail('secondary input too big','%s is too big (%s) for %s (%s)'%( sec, sec_size, site_se, SI.disk[site_se]*1024)) sendLog('transferor', '%s is too big (%s) for %s (%s)' % (sec, sec_size, site_se, SI.disk[site_se] * 1024), level='critical') else: print "the secondary input does not have to be send to site" ## is that possible to do something more if can_go: ## no explicit transfer required this time if staging: ## but using existing ones wfh.sendLog( 'transferor', "latches on existing transfers, and nothing else, settin staging" ) wfo.status = 'staging' needs_transfer += 1 else: wfh.sendLog( 'transferor', "should just be assigned now to %s" % sorted(sites_allowed)) wfo.status = 'staged' passing_along += 1 wfh.sendLog('transferor', "setting status to %s" % wfo.status) session.commit() continue else: ## there is an explicit transfer required if staging: ## and also using an existing one wfh.sendLog('transferor', "latches on existing transfers") if not options.test: wfo.status = 'staging' wfh.sendLog('transferor', "setting status to %s" % wfo.status) session.commit() wfh.sendLog('transferor', "needs a transfer") needs_transfer += 1 passing_along += 1 if no_goes: #sendEmail("no go for managing","No go for \n"+"\n".join( no_goes )) sendLog('transferor', "No go for \n" + "\n".join(no_goes), level='critical') print "accumulated transfers" print json.dumps(all_transfers, indent=2) fake_id = -1 wf_id_in_prestaging = set() for (site, items_to_transfer) in all_transfers.iteritems(): items_to_transfer = list(set(items_to_transfer)) ## convert to storage element site_se = SI.CE_to_SE(site) ## site that do not want input datasets if site in SI.sites_veto_transfer: print site, "does not want transfers" 
continue ## throttle the transfer size to T2s ? we'd be screwed by a noPU sample properly configured. ## massage a bit the items blocks = [it for it in items_to_transfer if '#' in it] block_datasets = list(set([it.split('#')[0] for it in blocks])) datasets = [it for it in items_to_transfer if not '#' in it] details_text = "Making a replica to %s (CE) %s (SE) for" % (site, site_se) #print "\t",len(blocks),"blocks" ## remove blocks if full dataset is send out blocks = [ block for block in blocks if not block.split('#')[0] in datasets ] #print "\t",len(blocks),"needed blocks for",list(set([block.split('#')[0] for block in blocks])) #print "\t",len(datasets),"datasets" #print "\t",datasets details_text += '\n\t%d blocks' % len(blocks) details_text += '\n\t%d needed blocks for %s' % ( len(blocks), sorted(list(set([block.split('#')[0] for block in blocks])))) details_text += '\n\t%d datasets' % len(datasets) details_text += '\n\t%s' % sorted(datasets) items_to_transfer = blocks + datasets if execute: sendLog('transferor', details_text) else: print "Would make a replica to", site, "(CE)", site_se, "(SE) for" print details_text ## operate the transfer if options and options.stop: ## ask to move-on answer = raw_input('Continue with that ?') if not answer.lower() in ['y', 'yes', 'go']: continue if execute: priority = 'normal' cds = [ ds for ds in datasets + block_datasets if ds in max_priority ] if cds and False: ## I don't think this is working. 
subscription should be updated on the fly and regularly for raising the priority if needed ## decide on an overall priority : that's a bit too large though if any([max_priority[ds] >= 90000 for ds in cds]): priority = 'high' elif all([max_priority[ds] < 80000 for ds in cds]): priority = 'low' result = makeReplicaRequest(url, site_se, items_to_transfer, 'prestaging', priority=priority) else: result = {'phedex': {'request_created': []}} fake_id -= 1 if not result: print "ERROR Could not make a replica request for", site, items_to_transfer, "pre-staging" continue for phedexid in [o['id'] for o in result['phedex']['request_created']]: new_transfer = session.query(Transfer).filter( Transfer.phedexid == int(phedexid)).first() if not new_transfer: new_transfer = session.query(Transfer).filter( Transfer.phedexid == -int(phedexid)).first() print phedexid, "transfer created" if not new_transfer: new_transfer = Transfer(phedexid=phedexid) session.add(new_transfer) else: new_transfer.phedexid = phedexid ## make it positive again new_transfer.workflows_id = set() for transfering in list( set(map(lambda it: it.split('#')[0], items_to_transfer))): new_transfer.workflows_id.update( workflow_dependencies[transfering]) new_transfer.workflows_id = list(new_transfer.workflows_id) wf_id_in_prestaging.update(new_transfer.workflows_id) session.commit() ## auto approve it if execute: approved = approveSubscription(url, phedexid, [site_se]) for wfid in wf_id_in_prestaging: tr_wf = session.query(Workflow).get(wfid) if tr_wf and tr_wf.status != 'staging': if execute: tr_wf.status = 'staging' if talk: print "setting", tr_wf.name, "to staging" session.commit()
def new_recoveror(url, specific, options=None):
    """Scan workflows in a 'recovery' status and decide a remediation path.

    For each matching workflow, chooses one of: make ACDC recoveries
    (send_recovery), clone the request (send_clone -> 'assistance-clone'),
    or flag it for manual handling (send_manual -> 'assistance-manual').

    :param url: request-manager base URL passed through to workflowInfo/singleRecovery
    :param specific: substring filter on workflow names; also pulls in
                     'assistance-manual' workflows when set
    :param options: unused in the visible body (kept for interface symmetry
                    with the other operator entry points in this file)
    """
    # One instance of this operator at a time.
    if userLock('recoveror'): return
    # Require the soft components to be up before acting.
    up = componentInfo(soft=['mcm', 'wtc'])
    if not up.check(): return

    CI = campaignInfo()
    SI = siteInfo()
    UC = unifiedConfiguration()

    # Everything whose status contains 'recovery' is a candidate.
    wfs = session.query(Workflow).filter(
        Workflow.status.contains('recovery')).all()
    if specific:
        # When targeting a specific workflow, also consider manual-assistance ones.
        wfs.extend(
            session.query(Workflow).filter(
                Workflow.status == 'assistance-manual').all())
    try:
        # Best-effort fetch of operator-requested actions; result is currently
        # unused below (presumably a hook for future action handling — verify).
        from_operator = json.loads(
            os.popen(
                'curl -s http://vocms0113.cern.ch/actions/test.json').read())
        ## now we have a list of things that we can take action on
    except:
        # Deliberate best-effort: a failed fetch must not stop the recovery pass.
        pass

    for wfo in wfs:
        if specific and not specific in wfo.name: continue
        # Without a specific target, leave manual-assistance workflows alone.
        if not specific and 'manual' in wfo.status: continue

        wfi = workflowInfo(url, wfo.name)

        # Decision flags; exactly one action is taken at the bottom.
        send_recovery = False  ## will make all acdc
        send_clone = False  ## will make a clone
        send_back = False  ## should just reject. manual ?
        send_manual = False  ## will set in manual

        # where_to_run: task -> sites; missing_to_run: task -> missing stats.
        where_to_run, missing_to_run = wfi.getRecoveryInfo()
        task_to_recover = where_to_run.keys()

        ## if the site at which the recovery could run in drain or out ?
        for task in task_to_recover:
            not_ready = set(where_to_run[task]) - set(SI.sites_ready)
            if not_ready:
                # Informational only: does not block the decision below.
                print "the following sites are not ready for the ACDC", ",".join(
                    sorted(not_ready))
                ## do we have a way of telling if a site is going to be out for a long time ?

        # check on priority: high prio, restart
        if wfi.request['RequestPriority'] >= 85000:
            send_clone = True

        # check on age of the request
        # injection_time and now are expressed in hours since epoch.
        injection_time = time.mktime(
            time.strptime(
                '.'.join(map(str, wfi.request['RequestDate'])),
                "%Y.%m.%d.%H.%M.%S")) / (60. * 60.)
        now = time.mktime(time.gmtime()) / (60. * 60.)
        # NOTE(review): the threshold compares HOURS, so this is 14 hours,
        # not 14 days as the comment below suggests — confirm intended unit.
        if float(now - injection_time) < 14.:
            ## less than 14 days, start over
            send_clone = True
        else:
            send_manual = True

        if not send_recovery:
            ## check on whether the stats is very low
            # Placeholder: send_recovery is never set True in the visible code,
            # so the ACDC branch below is currently dead.
            pass

        if send_recovery:
            ## make acdc for all tasks
            for task in task_to_recover:
                # NOTE(review): task_to_recover is a list of task names here,
                # so task_to_recover[task] (string index) would raise TypeError
                # if this branch ever ran — probably meant a dict of
                # task -> [(code, case), ...]; verify before enabling.
                actions = list(
                    set([
                        case['solution']
                        for code, case in task_to_recover[task]
                    ]))
                acdc = singleRecovery(url, task, wfi.request, actions, do=True)
        elif send_clone:
            ## this will get it cloned
            wfo.status = 'assistance-clone'
            session.commit()
        elif send_manual:
            # NOTE(review): no session.commit() here (unlike the clone branch);
            # the status change may only persist via a later commit — confirm.
            wfo.status = 'assistance-manual'
def outcleanor(url, options):
    """Clean up output datasets of finished workflows from production storage.

    Two modes:
      * options.approve: approve pending deletion requests made by the listed
        operators and return.
      * otherwise: walk workflows in the statuses given by options.fetch,
        decide per output dataset which site replicas can be deleted
        (keeping one full ops copy unless the dataset is INVALID, DQM, or
        owned by AnalysisOps), optionally toggle the workflow status to
        '<status>-out', dump the plan to JSON files, and finally issue
        PhEDEx deletion requests when confirmed.

    :param url: PhEDEx/ReqMgr base URL used by the helper calls
    :param options: expects .approve, .fetch, .number, .auto, .test
    """
    if options.approve:
        # Approval-only mode: rubber-stamp deletions filed by these operators.
        for user in ['*Vlimant']:  #,'*Cremonesi']:
            deletes = listDelete(url, user=user)
            for (site, who, tid) in deletes:
                # Never auto-approve deletions on tape endpoints.
                if 'MSS' in site: continue  ### ever
                print site, who, tid
                print "approving deletion"
                print approveSubscription(url, tid, nodes=[site], comments='Production cleaning by data ops')
        return

    sites_and_datasets = defaultdict(list)  # site -> [(dataset, size_GB, status)]
    our_copies = defaultdict(list)          # site -> datasets we keep there
    wf_cleaned = {}                         # wf name -> new status
    wfs = []
    for fetch in options.fetch.split(','):
        wfs.extend(session.query(Workflow).filter(Workflow.status == fetch).all())

    # Randomize to spread the cleaning over workflows across runs.
    random.shuffle(wfs)
    last_answer = None
    for wfo in wfs:
        if options.number and len(wf_cleaned) >= options.number:
            print "Reached", options.number, "cleaned"
            break
        print '-' * 100
        wfi = workflowInfo(url, wfo.name)
        goes = {}  # boolean per output
        for dataset in wfi.request['OutputDatasets']:
            goes[dataset] = False
            # By default keep one full copy of each output in the ops space.
            keep_one_out = True
            status = getDatasetStatus(dataset)
            print "\n\tLooking at", dataset, status, "\n"
            vetoes = None
            if status == 'INVALID':
                vetoes = ['Export', 'Buffer']  ## can take themselves out
                keep_one_out = False  # just wipe clean
            elif status == None:
                print dataset, "actually does not exist. skip"
                goes[dataset] = True
                continue
            elif status in ['PRODUCTION', 'VALID'] and wfo.status in ['forget', 'trouble']:
                print dataset, "should probably be invalidated. (", wfo.status, ") skip"
                keep_one_out = False  # just wipe clean
                continue  ## you are not sure. just skip it for the time being
            elif status == 'PRODUCTION' and wfo.status in ['clean']:
                print dataset, "should probably be set valid .skip"
                continue  ## you are not sure. just skip it for the time being

            if status == 'VALID' and dataset.startswith('/MinBias'):
                # MinBias outputs are widely used as pileup input: hands off.
                print "This is a /MinBias. skip"
                continue

            if '/DQM' in dataset:
                # DQM output needs no retained copy.
                keep_one_out = False

            total_size = getDatasetSize(dataset)
            # Replicas owned by DataOps and by the "no group" pool both count as ours.
            our_presence = getDatasetPresence(url, dataset, complete=None, group="DataOps", vetoes=vetoes)
            also_our_presence = getDatasetPresence(url, dataset, complete=None, group="", vetoes=vetoes)

            ## merge in one unique dict
            for site in also_our_presence:
                if site in our_presence:
                    # Presence tuples are (complete_flag, fraction); keep the max of each.
                    there, frac = our_presence[site]
                    other, ofrac = also_our_presence[site]
                    our_presence[site] = (max(there, other), max(frac, ofrac))
                else:
                    our_presence[site] = also_our_presence[site]

            if our_presence: print our_presence

            ## analysis ops copies need to be taken into account
            anaops_presence = getDatasetPresence(url, dataset, complete=None, group="AnalysisOps")
            own_by_anaops = anaops_presence.keys()

            ## all our copies
            to_be_cleaned = our_presence.keys()
            if not len(to_be_cleaned):
                print "nowhere to be found of ours,", len(own_by_anaops), "in analysi ops pool"
                goes[dataset] = True
                continue

            print "Where we own bits of dataset"
            print to_be_cleaned

            if len(own_by_anaops):
                ## remove site with the anaops copies
                to_be_cleaned = list(set(to_be_cleaned) - set(own_by_anaops))
                keep_one_out = False  ## in that case, just remove our copies
                print "Own by anaops (therefore not keep a copy of ours)"
                print own_by_anaops
            else:
                ## we should not be looking at anything that was not passed to DDM, otherwise we'll be cutting the grass under our feet
                using_the_same = getWorkflowByInput(url, dataset, details=True)
                conflict = False
                for other in using_the_same:
                    if other['RequestName'] == wfo.name: continue
                    if other['RequestType'] == 'Resubmission': continue
                    # Any consumer still in an active status blocks the cleanup.
                    if not other['RequestStatus'] in ['announced', 'normal-archived', 'aborted', 'rejected', 'aborted-archived', 'rejected-archived', 'closed-out', 'None', None]:
                        print other['RequestName'], 'is in status', other['RequestStatus'], 'preventing from cleaning', dataset
                        conflict = True
                        break
                if conflict:
                    continue

                ## not being used. a bit less dangerous to clean-out
                ## keep one full copy out there
                # A full copy is a site whose presence tuple reports complete.
                full_copies = [site for (site, (there, fract)) in our_presence.items() if there]
                if keep_one_out:
                    if not len(full_copies):
                        print "we do not own a full copy of", dataset, status, wfo.status, ".skip"
                        continue
                    stay_there = random.choice(full_copies)  #at a place own by ops
                    print "Where we keep a full copy", stay_there
                    to_be_cleaned.remove(stay_there)
                    our_copies[stay_there].append(dataset)
                else:
                    print "We do not want to keep a copy of ", dataset, status, wfo.status

            if len(to_be_cleaned):
                print "Where we can clean"
                print to_be_cleaned
                for site in to_be_cleaned:
                    # Account the fraction actually present at the site (percent -> GB).
                    sites_and_datasets[site].append((dataset, total_size * our_presence[site][1] / 100., status))
                goes[dataset] = True
            else:
                print "no cleaning to be done"
                goes[dataset] = True

        print wfo.name, "scrutinized"
        if all(goes.values()):
            print "\t", wfo.name, "can toggle -out"

        def ask():
            # NOTE(review): 'global' binds a module-level last_answer, not the
            # enclosing local above (no nonlocal in py2), so the elif/else
            # branches below always see the local value — verify intent.
            global last_answer
            last_answer = raw_input('go on ?')
            return last_answer

        if options.auto or ask() in ['y', '']:
            # Confirmed (or auto mode): toggle the workflow out if all outputs cleared.
            if all(goes.values()):
                wfo.status = wfo.status + '-out'
                wf_cleaned[wfo.name] = wfo.status
            continue
        elif last_answer in ['q', 'n']:
            # 'q'/'n': stop scanning but still show the summary below.
            break
        else:
            # Any other answer aborts without summary or deletions.
            return

    if options.auto:
        pass
    elif last_answer in ['q']:
        return

    print "Potential cleanups"
    for (site, items) in sites_and_datasets.items():
        cleanup = sum([size for (_, size, _) in items])
        print "\n\t potential cleanup of", "%8.4f" % cleanup, "GB at ", site
        print "\n".join([ds + " " + st for ds, _, st in items])
        datasets = [ds for ds, _, st in items]

    print "Copies and bits we are going to delete"
    print json.dumps(sites_and_datasets, indent=2)
    print "Copies we are keeping"
    print json.dumps(our_copies, indent=2)
    print "Workflows cleaned for output"
    print json.dumps(wf_cleaned, indent=2)
    # Persist the plan for audit/replay.
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    open('outcleaning_%s.json' % stamp, 'w').write(json.dumps(sites_and_datasets, indent=2))
    open('keepcopies_%s.json' % stamp, 'w').write(json.dumps(our_copies, indent=2))
    open('wfcleanout_%s.json' % stamp, 'w').write(json.dumps(wf_cleaned, indent=2))

    if (not options.test) and (options.auto or raw_input("Satisfied ? (y will trigger status change and deletion requests)") in ['y']):
        for (site, items) in sites_and_datasets.items():
            datasets = [ds for ds, _, st in items]
            print "making deletion to", site
            result = makeDeleteRequest(url, site, datasets, "Cleanup output after production. DataOps will take care of approving it.")
            print result
            ## approve it right away ?
            # Never auto-approve on tape/staging endpoints.
            if 'MSS' in site: continue
            if 'Export' in site: continue
            if 'Buffer' in site: continue
            for did in [item['id'] for item in result['phedex']['request_created']]:
                print "auto-approve disabled, but ready"
                #approveSubscription(url, did, nodes = [site], comments = 'Auto-approving production cleaning deletion')
                pass
        # Commit the '-out' status toggles made during the scan.
        session.commit()
    else:
        print "Not making the deletion and changing statuses"
def rejector(url, specific, options=None): up = componentInfo(soft=['wtc', 'jira']) #if not up.check(): return if specific and specific.startswith('/'): ## this is for a dataset print setDatasetStatus(specific, 'INVALID') return if options.filelist: wfs = [] for line in filter(None, open(options.filelist).read().split('\n')): print line wfs.extend( session.query(Workflow).filter( Workflow.name.contains(line)).all()) elif specific: wfs = session.query(Workflow).filter( Workflow.name.contains(specific)).all() if not wfs: batches = batchInfo().content() for bname in batches: if specific == bname: for pid in batches[bname]: b_wfs = getWorkflowById(url, pid) for wf in b_wfs: wfs.append( session.query(Workflow).filter( Workflow.name == wf).first()) break else: wfs = session.query(Workflow).filter( Workflow.status == 'assistance-clone').all() #wfs.extend( session.query(Workflow).filter(Workflow.status == 'assistance-reject').all()) ## be careful then on clone case by case options.clone = True print "not supposed to function yet" return print len(wfs), "to reject" if len(wfs) > 1: print "\n".join([wfo.name for wfo in wfs]) answer = raw_input('Reject these') if not answer.lower() in ['y', 'yes']: return for wfo in wfs: #wfo = session.query(Workflow).filter(Workflow.name == specific).first() if not wfo: print "cannot reject", spec return wfi = workflowInfo(url, wfo.name) comment = "" if options.comments: comment = ", reason: " + options.comments if options.keep: wfi.sendLog( 'rejector', 'invalidating the workflow by unified operator%s' % comment) else: wfi.sendLog( 'rejector', 'invalidating the workflow and outputs by unified operator%s' % comment) results = invalidate(url, wfi, only_resub=True, with_output=(not options.keep)) if all(results): print wfo.name, "rejected" if options and options.clone: wfo.status = 'trouble' session.commit() schema = wfi.getSchema() schema['Requestor'] = os.getenv('USER') schema['Group'] = 'DATAOPS' schema['OriginalRequestName'] = wfo.name if 
'ProcessingVersion' in schema: schema['ProcessingVersion'] = int( schema['ProcessingVersion'] ) + 1 ## dubious str->int conversion else: schema['ProcessingVersion'] = 2 for k in schema.keys(): if k.startswith('Team'): schema.pop(k) if k.startswith('checkbox'): schema.pop(k) ## a few tampering of the original request if options.Memory: if schema['RequestType'] == 'TaskChain': it = 1 while True: t = 'Task%d' % it it += 1 if t in schema: schema[t]['Memory'] = options.Memory else: break else: schema['Memory'] = options.Memory if options.short_task and schema['RequestType'] == 'TaskChain': translate = {} it = 1 while True: tt = 'Task%d' % it if tt in schema: tname = schema[tt]['TaskName'] ntname = 'T%d' % it translate[tname] = ntname it += 1 schema[tt]['TaskName'] = ntname if 'InputTask' in schema[tt]: itname = schema[tt]['InputTask'] schema[tt]['InputTask'] = translate[itname] else: break for k in schema.get('ProcessingString', {}).keys(): schema['ProcessingString'][ translate[k]] = schema['ProcessingString'].pop(k) for k in schema.get('AcquisitionEra', {}).keys(): schema['AcquisitionEra'][ translate[k]] = schema['AcquisitionEra'].pop(k) if options.Multicore: ## to do : set it properly in taskchains if schema['RequestType'] == 'TaskChain': tasks, set_to = options.Multicore.split( ':') if ':' in options.Multicore else ( "", options.Multicore) set_to = int(set_to) tasks = tasks.split(',') if tasks else ['Task1'] it = 1 while True: tt = 'Task%d' % it it += 1 if tt in schema: tname = schema[tt]['TaskName'] if tname in tasks or tt in tasks: mem = schema[tt]['Memory'] mcore = schema[tt].get('Multicore', 1) factor = (set_to / float(mcore)) fraction_constant = 0.4 mem_per_core_c = int( (1 - fraction_constant) * mem / float(mcore)) print "mem per core", mem_per_core_c print "base mem", mem ## adjusting the parameter in the clone schema[tt]['Memory'] += ( set_to - mcore) * mem_per_core_c schema[tt]['Multicore'] = set_to schema[tt]['TimePerEvent'] /= factor else: break else: 
schema['Multicore'] = options.Multicore if options.deterministic: if schema['RequestType'] == 'TaskChain': schema['Task1']['DeterministicPileup'] = True if options.EventsPerJob: if schema['RequestType'] == 'TaskChain': schema['Task1']['EventsPerJob'] = options.EventsPerJob else: schema['EventsPerJob'] = options.EventsPerJob if options.EventAwareLumiBased: schema['SplittingAlgo'] = 'EventAwareLumiBased' if options.TimePerEvent: schema['TimePerEvent'] = options.TimePerEvent if options.ProcessingString: schema['ProcessingString'] = options.ProcessingString if options.AcquisitionEra: schema['AcquisitionEra'] = options.AcquisitionEra if options.runs: schema['RunWhitelist'] = map(int, options.runs.split(',')) if options.PrepID: schema['PrepID'] = options.PrepID if schema['RequestType'] == 'TaskChain' and options.no_output: ntask = schema['TaskChain'] for it in range(1, ntask - 1): schema['Task%d' % it]['KeepOutput'] = False schema['TaskChain'] = ntask - 1 schema.pop('Task%d' % ntask) if options.priority: schema['RequestPriority'] = options.priority ## update to the current priority schema['RequestPriority'] = wfi.request['RequestPriority'] ## drop shit on the way to reqmgr2 schema = reqMgrClient.purgeClonedSchema(schema) print "submitting" if (options.to_stepchain and (schema['RequestType'] == 'TaskChain')): ## transform the schema into StepChain schema print "Transforming a TaskChain into a StepChain" mcore = 0 mem = 0 schema['RequestType'] = 'StepChain' schema['StepChain'] = schema.pop('TaskChain') schema['SizePerEvent'] = 0 schema['TimePerEvent'] = 0 step = 1 s_n = {} while True: if 'Task%d' % step in schema: sname = 'Step%d' % step schema[sname] = schema.pop('Task%d' % step) tmcore = schema[sname].pop('Multicore') tmem = schema[sname].pop('Memory') if mcore and tmcore != mcore: wfi.sendLog( 'rejector', 'the conversion to stepchain encoutered different value of Multicore %d != %d' % (tmcore, mcore)) sendLog( 'rejector', 'the conversion of %s to stepchain encoutered 
different value of Multicore %d != %d' % (wfo.name, tmcore, mcore), level='critical') mcore = max(mcore, tmcore) mem = max(mem, tmem) schema[sname]['StepName'] = schema[sname].pop( 'TaskName') s_n[schema[sname]['StepName']] = sname if 'InputTask' in schema[sname]: schema[sname]['InputStep'] = schema[sname].pop( 'InputTask') eff = 1. up_s = sname while True: ## climb up a step. supposedely already all converted up_s = s_n.get( schema[up_s].get('InputStep', None), None) if up_s: ## multiply with the efficiency eff *= schema[up_s].get( 'FilterEfficiency', 1.) else: ## or stop there break if not 'KeepOutput' in schema[sname]: ## this is a weird translation capability. Absence of keepoutput in step means : keep the output. while in TaskChain absence means : drop schema[sname]['KeepOutput'] = False schema['TimePerEvent'] += eff * schema[sname].pop( 'TimePerEvent') schema['SizePerEvent'] += eff * schema[sname].pop( 'SizePerEvent') step += 1 else: break schema['Multicore'] = mcore schema['Memory'] = mem print json.dumps(schema, indent=2) newWorkflow = reqMgrClient.submitWorkflow(url, schema) if not newWorkflow: msg = "Error in cloning {}".format(wfo.name) print(msg) wfi.sendLog('rejector', msg) # Get the error message time.sleep(5) data = reqMgrClient.requestManagerPost( url, "/reqmgr2/data/request", schema) wfi.sendLog('rejector', data) print json.dumps(schema, indent=2) return print newWorkflow data = reqMgrClient.setWorkflowApproved(url, newWorkflow) print data wfi.sendLog( 'rejector', 'Cloned into %s by unified operator %s' % (newWorkflow, comment)) #wfi.notifyRequestor('Cloned into %s by unified operator %s'%( newWorkflow, comment ),do_batch=False) else: wfo.status = 'trouble' if options.set_trouble else 'forget' wfi.notifyRequestor('Rejected by unified operator %s' % (comment), do_batch=False) session.commit() else: msg = "Error in rejecting {}: {}".format(wfo.name, results) print(msg) wfi.sendLog('rejector', msg)
def checkor(url, spec=None, options=None): if userLock(): return if duplicateLock() and not options.go: return fDB = closeoutInfo() UC = unifiedConfiguration() use_mcm = True up = componentInfo(mcm=use_mcm, soft=['mcm']) if not up.check(): return use_mcm = up.status['mcm'] def time_point(label="",sub_lap=False): now = time.mktime(time.gmtime()) nows = time.asctime(time.gmtime()) print "Time check (%s) point at : %s"%(label, nows) print "Since start: %s [s]"% ( now - time_point.start) if sub_lap: print "Sub Lap : %s [s]"% ( now - time_point.sub_lap ) time_point.sub_lap = now else: print "Lap : %s [s]"% ( now - time_point.lap ) time_point.lap = now time_point.sub_lap = now time_point.sub_lap = time_point.lap = time_point.start = time.mktime(time.gmtime()) runnings = session.query(Workflow).filter(Workflow.status == 'away').all() standings = session.query(Workflow).filter(Workflow.status.startswith('assistance')).all() ## intersect with what is actually in completed status in request manager now all_completed = set(getWorkflows(url, 'completed' )) wfs=[] if options.strict: ## the one which were running and now have completed print "strict option is on: checking workflows that freshly completed" wfs.extend( filter(lambda wfo: wfo.name in all_completed , runnings)) if options.update: print "update option is on: checking workflows that have not completed yet" wfs.extend( filter(lambda wfo: not wfo.name in all_completed , runnings)) if options.clear: print "clear option is on: checking workflows that are ready to toggle closed-out" wfs.extend( filter(lambda wfo: 'custodial' in wfo.status, standings)) if options.review: print "review option is on: checking the workflows that needed intervention" wfs.extend( filter(lambda wfo: not 'custodial' in wfo.status, standings)) ## what is left out are the wf which were running and ended up aborted/failed/... 
custodials = defaultdict(list) #sites : dataset list transfers = defaultdict(list) #sites : dataset list invalidations = [] #a list of files SI = siteInfo() CI = campaignInfo() mcm = McMClient(dev=False) if use_mcm else None def get_campaign(output, wfi): ## this should be a perfect matching of output->task->campaign campaign = None era = None wf_campaign = None if 'Campaign' in wfi.request: wf_campaign = wfi.request['Campaign'] try: era = output.split('/')[2].split('-')[0] except: era = None if wfi.isRelval(): campaign = wf_campaign else: campaign = era if era else wf_campaign return campaign ## retrieve bypass and onhold configuration bypasses = [] forcings = [] overrides = getForceCompletes() holdings = [] actors = UC.get('allowed_bypass') for bypassor,email in actors: bypass_file = '/afs/cern.ch/user/%s/%s/public/ops/bypass.json'%(bypassor[0],bypassor) if not os.path.isfile(bypass_file): #sendLog('checkor','no file %s',bypass_file) continue try: bypasses.extend( json.loads(open(bypass_file).read())) except: sendLog('checkor',"cannot get by-passes from %s for %s"%(bypass_file ,bypassor)) sendEmail("malformated by-pass information","%s is not json readable"%(bypass_file), destination=[email]) holding_file = '/afs/cern.ch/user/%s/%s/public/ops/onhold.json'%(bypassor[0],bypassor) if not os.path.isfile(holding_file): #sendLog('checkor',"no file %s"%holding_file) continue try: extending = json.loads(open(holding_file).read()) print bypassor,"is holding",extending holdings.extend( extending ) except: sendLog('checkor',"cannot get holdings from %s for %s"%(holding_file, bypassor)) sendEmail("malformated by-pass information","%s is not json readable"%(holding_file), destination=[email]) ## once this was force-completed, you want to bypass for rider,email in actors: rider_file = '/afs/cern.ch/user/%s/%s/public/ops/forcecomplete.json'%(rider[0],rider) if not os.path.isfile(rider_file): print "no file",rider_file #sendLog('checkor',"no file %s"%rider_file) continue try: 
bypasses.extend( json.loads(open( rider_file ).read() ) ) except: sendLog('checkor',"cannot get force complete list from %s"%rider) sendEmail("malformated force complet file","%s is not json readable"%rider_file, destination=[email]) if use_mcm: forcings = mcm.get('/restapi/requests/forcecomplete') #if forcings: # sendEmail('force completing mechanism','please check what checkor is doing with %s'%( ','.join(forcings))) pattern_fraction_pass = UC.get('pattern_fraction_pass') total_running_time = 5.*60. sleep_time = 1 if len(wfs): sleep_time = min(max(0.5, total_running_time / len(wfs)), 10) random.shuffle( wfs ) in_manual = 0 ## now you have a record of what file was invalidated globally from TT TMDB_invalid = dataCache.get('file_invalidation') #try: # TMDB_invalid = set([row[3] for row in csv.reader( os.popen('curl -s "https://docs.google.com/spreadsheets/d/11fFsDOTLTtRcI4Q3gXw0GNj4ZS8IoXMoQDC3CbOo_2o/export?format=csv"'))]) # TMDB_invalid = map(lambda e : e.split(':')[-1], TMDB_invalid) # print len(TMDB_invalid),"globally invalidated files" #except Exception as e: # print "TMDB not fetched" # print str(e) # TMDB_invalid = [] print len(wfs),"to consider, pausing for",sleep_time max_per_round = UC.get('max_per_round').get('checkor',None) if options.limit: max_per_round=options.limit if max_per_round and not spec: wfs = wfs[:max_per_round] for wfo in wfs: if spec and not (spec in wfo.name): continue time.sleep( sleep_time ) time_point("Starting with %s"% wfo.name) ## get info wfi = workflowInfo(url, wfo.name) wfi.sendLog('checkor',"checking on %s %s"%( wfo.name,wfo.status)) ## make sure the wm status is up to date. # and send things back/forward if necessary. 
wfo.wm_status = wfi.request['RequestStatus'] if wfo.wm_status == 'closed-out': ## manually closed-out wfi.sendLog('checkor',"%s is already %s, setting close"%( wfo.name , wfo.wm_status)) wfo.status = 'close' session.commit() continue elif wfo.wm_status in ['failed','aborted','aborted-archived','rejected','rejected-archived','aborted-completed']: ## went into trouble wfo.status = 'trouble' wfi.sendLog('checkor',"%s is in trouble %s"%(wfo.name, wfo.wm_status)) session.commit() continue elif wfo.wm_status in ['assigned','acquired']: ## not worth checking yet wfi.sendLog('checkor',"%s is not running yet"%wfo.name) session.commit() continue if '-onhold' in wfo.status: if wfo.name in holdings and wfo.name not in bypasses: wfi.sendLog('checkor',"%s is on hold"%wfo.name) continue if wfo.wm_status != 'completed': #and not wfo.name in bypasses: ## for sure move on with closeout check if in completed wfi.sendLog('checkor',"no need to check on %s in status %s"%(wfo.name, wfo.wm_status)) session.commit() continue if wfo.name in holdings and wfo.name not in bypasses: wfo.status = 'assistance-onhold' wfi.sendLog('checkor',"setting %s on hold"%wfo.name) session.commit() continue session.commit() #sub_assistance="" # if that string is filled, there will be need for manual assistance existing_assistance_tags = set(wfo.status.split('-')[1:]) #[0] should be assistance assistance_tags = set() is_closing = True ## get it from somewhere bypass_checks = False for bypass in bypasses: if bypass in wfo.name: wfi.sendLog('checkor',"we can bypass checks on %s because of keyword %s "%( wfo.name, bypass)) bypass_checks = True break pids = wfi.getPrepIDs() force_by_mcm = False force_by_user = False for force in forcings: if force in pids: wfi.sendLog('checkor',"we can bypass checks and force complete %s because of prepid %s "%( wfo.name, force)) bypass_checks = True force_by_mcm = True break for user in overrides: for force in overrides[user]: if force in wfo.name: wfi.sendLog('checkor',"we can 
bypass checks and force complete %s because of keyword %s of user %s"%( wfo.name, force, user)) bypass_checks = True force_by_user = True break tiers_with_no_check = copy.deepcopy(UC.get('tiers_with_no_check')) # dqm* vetoed_custodial_tier = copy.deepcopy(UC.get('tiers_with_no_custodial')) #dqm*, reco to_ddm_tier = copy.deepcopy(UC.get('tiers_to_DDM')) campaigns = {} ## this mapping of campaign per output dataset assumes era==campaing, which is not true for relval expected_outputs = copy.deepcopy( wfi.request['OutputDatasets'] ) for out in wfi.request['OutputDatasets']: c = get_campaign(out, wfi) campaigns[out] = c if c in CI.campaigns and 'custodial_override' in CI.campaigns[c]: vetoed_custodial_tier = list(set(vetoed_custodial_tier) - set(CI.campaigns[c]['custodial_override'])) ## add those that we need to check for custodial copy tiers_with_no_check = list(set(tiers_with_no_check) - set(CI.campaigns[c]['custodial_override'])) ## would remove DQM from the vetoed check check_output_text = "Initial outputs:"+",".join(sorted(wfi.request['OutputDatasets'] )) wfi.request['OutputDatasets'] = [ out for out in wfi.request['OutputDatasets'] if not any([out.split('/')[-1] == veto_tier for veto_tier in tiers_with_no_check])] check_output_text += "\nWill check on:"+",".join(sorted(wfi.request['OutputDatasets'] )) check_output_text += "\ntiers out:"+",".join( sorted(tiers_with_no_check )) check_output_text += "\ntiers no custodial:"+",".join( sorted(vetoed_custodial_tier) ) wfi.sendLog('checkor', check_output_text ) ## anything running on acdc : getting the real prepid is not worth it familly = getWorkflowById(url, wfi.request['PrepID'], details=True) acdc = [] acdc_inactive = [] forced_already=False acdc_bads = [] true_familly = [] for member in familly: if member['RequestType'] != 'Resubmission': continue if member['RequestName'] == wfo.name: continue if member['RequestDate'] < wfi.request['RequestDate']: continue if member['PrepID'] != wfi.request['PrepID'] : continue #if 
'OriginalRequestName' in member and (not 'ACDC' in member['OriginalRequestName']) and member['OriginalRequestName'] != wfo.name: continue if member['RequestStatus'] == None: continue if not set(member['OutputDatasets']).issubset( set(expected_outputs)): if not member['RequestStatus'] in ['rejected-archived','rejected','aborted','aborted-archived']: ##this is not good at all wfi.sendLog('checkor','inconsistent ACDC %s'%member['RequestName'] ) #sendLog('checkor','inconsistent ACDC %s'%member['RequestName'], level='critical') acdc_bads.append( member['RequestName'] ) is_closing = False assistance_tags.add('manual') continue true_familly.append( member['RequestName'] ) #try: # parse_one(url, member['RequestName']) #except: # print "Could not make error report for",member['RequestName'] if member['RequestStatus'] in ['running-open','running-closed','assigned','acquired']: print wfo.name,"still has an ACDC running",member['RequestName'] acdc.append( member['RequestName'] ) ## cannot be bypassed! 
is_closing = False assistance_tags.add('recovering') if (force_by_mcm or force_by_user) and not forced_already: wfi.sendLog('checkor','%s is being forced completed while recovering'%wfo.name) wfi.notifyRequestor("The workflow %s was force completed"% wfo.name, do_batch=False) forceComplete(url, wfi) forced_already=True else: acdc_inactive.append( member['RequestName'] ) assistance_tags.add('recovered') if acdc_bads: #sendEmail('inconsistent ACDC','for %s, ACDC %s is inconsistent, preventing from closing'%( wfo.name, ','.join(acdc_bads) )) sendLog('checkor','For %s, ACDC %s is inconsistent, preventing from closing or will create a mess.'%( wfo.name, ','.join(acdc_bads) ), level='critical') time_point("checked workflow familly", sub_lap=True) ## completion check percent_completions = {} if not 'TotalInputEvents' in wfi.request: event_expected,lumi_expected = 0,0 if not 'recovery' in wfo.status: #sendEmail("missing member of the request","TotalInputEvents is missing from the workload of %s"% wfo.name, destination=['*****@*****.**']) sendLog('checkor',"TotalInputEvents is missing from the workload of %s"% wfo.name, level='critical') else: event_expected,lumi_expected = wfi.request['TotalInputEvents'],wfi.request['TotalInputLumis'] if 'RequestNumEvents' in wfi.request and int(wfi.request['RequestNumEvents']): event_expected = int(wfi.request['RequestNumEvents']) elif 'Task1' in wfi.request and 'RequestNumEvents' in wfi.request['Task1']: event_expected = wfi.request['Task1']['RequestNumEvents'] for i in range(1,20): if 'Task%d'%i in wfi.request: ## this is wrong ibsolute if 'FilterEfficiency' in wfi.request['Task%d'%i]: event_expected *= float(wfi.request['Task%d'%i]['FilterEfficiency']) event_expected = int(event_expected) fractions_pass = {} events_per_lumi = {} over_100_pass = False (lhe,prim,_,_) = wfi.getIO() if lhe or prim: over_100_pass = False time_point("execpted statistics", sub_lap=True) for output in wfi.request['OutputDatasets']: event_count,lumi_count = 
getDatasetEventsAndLumis(dataset=output) events_per_lumi[output] = event_count/float(lumi_count) if lumi_count else 100 percent_completions[output] = 0. if lumi_expected: percent_completions[output] = lumi_count / float( lumi_expected ) if event_expected: wfi.sendLog('checkor', "event completion real %s expected %s"%(event_count, event_expected )) percent_completions[output] = max(percent_completions[output], float(event_count) / float( event_expected ) ) default_pass = UC.get('default_fraction_pass') fractions_pass[output] = default_pass c = campaigns[output] if c in CI.campaigns and 'fractionpass' in CI.campaigns[c]: if type(CI.campaigns[c]['fractionpass']) == dict: tier = output.split('/')[-1] priority = str(wfi.request['RequestPriority']) ## defined per tier fractions_pass[output] = CI.campaigns[c]['fractionpass'].get('all', default_pass) if tier in CI.campaigns[c]['fractionpass']: fractions_pass[output] = CI.campaigns[c]['fractionpass'][tier] if priority in CI.campaigns[c]['fractionpass']: fractions_pass[output] = CI.campaigns[c]['fractionpass'][priority] else: fractions_pass[output] = CI.campaigns[c]['fractionpass'] wfi.sendLog('checkor', "overriding fraction to %s for %s by campaign requirement"%( fractions_pass[output], output)) if options.fractionpass: fractions_pass[output] = options.fractionpass print "overriding fraction to",fractions_pass[output],"by command line for",output for key in pattern_fraction_pass: if key in output: fractions_pass[output] = pattern_fraction_pass[key] print "overriding fraction to",fractions_pass[output],"by dataset key",key if not all([percent_completions[out] >= fractions_pass[out] for out in fractions_pass]): possible_recoveries = wfi.getRecoveryDoc() if possible_recoveries == []: wfi.sendLog('checkor','%s has missing statistics \n%s \n%s, but nothing is recoverable. 
passing through to annoucement'%( wfo.name, json.dumps(percent_completions, indent=2), json.dumps(fractions_pass, indent=2) )) sendLog('checkor','%s is not completed, but has nothing to be recovered, passing along ?'%wfo.name, level='critical') #sendEmail('nothing is recoverable','%s is not completed, but has nothing to be recovered, passing along ?'%wfo.name)#,destination=['*****@*****.**']) ## do not bypass for now, until Alan understands why we are loosing ACDC docs bypass_checks = True else: wfi.sendLog('checkor','%s is not completed \n%s \n%s'%( wfo.name, json.dumps(percent_completions, indent=2), json.dumps(fractions_pass, indent=2) )) ## hook for creating automatically ACDC ? if not bypass_checks: assistance_tags.add('recovery') is_closing = False if over_100_pass and any([percent_completions[out] >100 for out in fractions_pass]): print wfo.name,"is over completed" print json.dumps(percent_completions, indent=2) if not bypass_checks: assistance_tags.add('over100') is_closing = False time_point("checked output size", sub_lap=True) ## correct lumi < 300 event per lumi #for output in wfi.request['OutputDatasets']: #events_per_lumi[output] = getDatasetEventsPerLumi( output ) lumi_upper_limit = {} for output in wfi.request['OutputDatasets']: upper_limit = 301. campaign = campaigns[output] if campaign in CI.campaigns and 'lumisize' in CI.campaigns[campaign]: upper_limit = CI.campaigns[campaign]['lumisize'] print "overriding the upper lumi size to",upper_limit,"for",campaign if options.lumisize: upper_limit = options.lumisize print "overriding the upper lumi size to",upper_limit,"by command line" lumi_upper_limit[output] = upper_limit if wfi.request['RequestType'] in ['ReDigi','ReReco']: lumi_upper_limit[output] = -1 if any([ (lumi_upper_limit[out]>0 and events_per_lumi[out] >= lumi_upper_limit[out]) for out in events_per_lumi]): print wfo.name,"has big lumisections" print json.dumps(events_per_lumi, indent=2) ## hook for rejecting the request ? 
if not bypass_checks: assistance_tags.add('biglumi') is_closing = False any_presence = {} for output in wfi.request['OutputDatasets']: any_presence[output] = getDatasetPresence(url, output, vetoes=[]) time_point("checked dataset presence", sub_lap=True) ## custodial copy custodial_locations = {} custodial_presences = {} for output in wfi.request['OutputDatasets']: custodial_presences[output] = [s for s in any_presence[output] if 'MSS' in s] custodial_locations[output] = phedexClient.getCustodialSubscriptionRequestSite(output) if not custodial_locations[output]: custodial_locations[output] = [] time_point("checked custodiality", sub_lap=True) ## presence in phedex phedex_presence ={} for output in wfi.request['OutputDatasets']: phedex_presence[output] = phedexClient.getFileCountDataset(url, output ) time_point("checked phedex count", sub_lap=True) out_worth_checking = [out for out in custodial_locations.keys() if out.split('/')[-1] not in vetoed_custodial_tier] size_worth_checking = sum([getDatasetSize(out)/1023. for out in out_worth_checking ]) ## size in TBs of all outputs size_worht_going_to_ddm = sum([getDatasetSize(out)/1023. for out in out_worth_checking if out.split('/')[-1] in to_ddm_tier ]) ## size in TBs of all outputs if not all(map( lambda sites : len(sites)!=0, [custodial_locations[out] for out in out_worth_checking])): print wfo.name,"has not all custodial location" print json.dumps(custodial_locations, indent=2) ########## ## hook for making a custodial replica ? 
custodial = None ## get from other outputs for output in out_worth_checking: if len(custodial_locations[output]): custodial = custodial_locations[output][0] if custodial and float(SI.storage[custodial]) < size_worth_checking: print "cannot use the other output custodial:",custodial,"because of limited space" custodial = None ## try to get it from campaign configuration if not custodial: for output in out_worth_checking: campaign = campaigns[output] if campaign in CI.campaigns and 'custodial' in CI.campaigns[campaign]: custodial = CI.campaigns[campaign]['custodial'] print "Setting custodial to",custodial,"from campaign configuration" group = None if campaign in CI.campaigns and 'phedex_group' in CI.campaigns[campaign]: group = CI.campaigns[campaign]['phedex_group'] print "using group",group,"for replica" if custodial and float(SI.storage[custodial]) < size_worth_checking: print "cannot use the campaign configuration custodial:",custodial,"because of limited space" custodial = None ## get from the parent pick_custodial = True use_parent_custodial = UC.get('use_parent_custodial') tape_size_limit = options.tape_size_limit if options.tape_size_limit else UC.get("tape_size_limit") _,prim,_,_ = wfi.getIO() if not custodial and prim and use_parent_custodial: parent_dataset = prim.pop() ## this is terribly dangerous to assume only parents_custodial = phedexClient.getCustodialSubscriptionRequestSite( parent_dataset ) ###parents_custodial = findCustodialLocation(url, parent_dataset) if not parents_custodial: parents_custodial = [] if len(parents_custodial): custodial = parents_custodial[0] else: print "the input dataset",parent_dataset,"does not have custodial in the first place. 
abort" #sendEmail( "dataset has no custodial location", "Please take a look at %s in the logs of checkor"%parent_dataset) ## does not work for RAWOADSIM sendLog('checkor',"Please take a look at %s for missing custodial location"% parent_dataset) ## cannot be bypassed, this is an issue to fix is_closing = False pick_custodial = False assistance_tags.add('parentcustodial') if custodial and float(SI.storage[custodial]) < size_worth_checking: print "cannot use the parent custodial:",custodial,"because of limited space" custodial = None if not custodial and pick_custodial: ## pick one at random custodial = SI.pick_SE(size=size_worth_checking) if custodial and size_worht_going_to_ddm > tape_size_limit: print wfi.sendLog('checkor',"The total output size (%s TB) is too large for the limit set (%s TB)"%( size_worth_checking, tape_size_limit)) custodial = None if not custodial: print "cannot find a custodial for",wfo.name wfi.sendLog('checkor',"cannot find a custodial for %s probably because of the total output size %d"%( wfo.name, size_worth_checking)) sendLog('checkor',"cannot find a custodial for %s probably because of the total output size %d"%( wfo.name, size_worth_checking), level='critical') if custodial and (is_closing or bypass_checks): print "picked",custodial,"for tape copy" ## remember how much you added this round already ; this stays locally SI.storage[custodial] -= size_worth_checking ## register the custodial request, if there are no other big issues for output in out_worth_checking: if not len(custodial_locations[output]): if phedex_presence[output]>=1: wfi.sendLog('checkor','Using %s as a tape destination for %s'%(custodial, output)) custodials[custodial].append( output ) if group: custodials[custodial][-1]+='@%s'%group ## let's wait and see if that's needed assistance_tags.add('custodial') else: print "no file in phedex for",output," not good to add to custodial requests" #cannot be bypassed is_closing = False time_point("determined tape location", 
sub_lap=True) ## disk copy disk_copies = {} for output in wfi.request['OutputDatasets']: disk_copies[output] = [s for s in any_presence[output] if (not 'MSS' in s) and (not 'Buffer' in s)] if not all(map( lambda sites : len(sites)!=0, disk_copies.values())): print wfo.name,"has not all output on disk" print json.dumps(disk_copies, indent=2) ## presence in dbs dbs_presence = {} dbs_invalid = {} for output in wfi.request['OutputDatasets']: dbs_presence[output] = dbs3Client.getFileCountDataset( output ) dbs_invalid[output] = dbs3Client.getFileCountDataset( output, onlyInvalid=True) time_point("dbs file count", sub_lap=True) if not all([dbs_presence[out] == (dbs_invalid[out]+phedex_presence[out]) for out in wfi.request['OutputDatasets']]) and not options.ignorefiles: mismatch_notice = wfo.name+" has a dbs,phedex mismatch\n" mismatch_notice += "in dbs\n"+json.dumps(dbs_presence, indent=2) +"\n" mismatch_notice += "invalide in dbs\n"+json.dumps(dbs_invalid, indent=2) +"\n" mismatch_notice += "in phedex\n"+json.dumps(phedex_presence, indent=2) +"\n" wfi.sendLog('checkor',mismatch_notice) if not 'recovering' in assistance_tags: assistance_tags.add('filemismatch') #print this for show and tell if no recovery on-going for out in dbs_presence: _,_,missing_phedex,missing_dbs = getDatasetFiles(url, out) if missing_phedex: wfi.sendLog('checkor',"These %d files are missing in phedex\n%s"%(len(missing_phedex), "\n".join( missing_phedex ))) were_invalidated = sorted(set(missing_phedex) & set(TMDB_invalid )) if were_invalidated: wfi.sendLog('checkor',"These %d files were invalidated globally\n%s"%(len(were_invalidated), "\n".join(were_invalidated))) sendLog('checkor',"These %d files were invalidated globally\n%s\nand are invalidated in dbs"%(len(were_invalidated), "\n".join(were_invalidated)), level='critical') dbs3Client.setFileStatus( were_invalidated, newstatus=0 ) if missing_dbs: wfi.sendLog('checkor',"These %d files are missing in dbs\n%s"%(len(missing_dbs), "\n".join( 
missing_dbs ))) were_invalidated = sorted(set(missing_dbs) & set(TMDB_invalid )) if were_invalidated: wfi.sendLog('checkor',"These %d files were invalidated globally\n%s"%(len(were_invalidated), "\n".join(were_invalidated))) #if not bypass_checks: ## I don't think we can by pass this is_closing = False time_point("checked file count", sub_lap=True) fraction_invalid = 0.20 if not all([(dbs_invalid[out] <= int(fraction_invalid*dbs_presence[out])) for out in wfi.request['OutputDatasets']]) and not options.ignoreinvalid: print wfo.name,"has a dbs invalid file level too high" print json.dumps(dbs_presence, indent=2) print json.dumps(dbs_invalid, indent=2) print json.dumps(phedex_presence, indent=2) ## need to be going and taking an eye assistance_tags.add('invalidfiles') if not bypass_checks: #sub_assistance+="-invalidfiles" is_closing = False ## put that heavy part at the end ## duplication check duplications = {} files_per_rl = {} for output in wfi.request['OutputDatasets']: duplications[output] = "skiped" files_per_rl[output] = "skiped" time_point("checked invalidation", sub_lap=True) if (is_closing or bypass_checks) and (not options.ignoreduplicates): print "starting duplicate checker for",wfo.name for output in wfi.request['OutputDatasets']: print "\tchecking",output duplications[output] = True try: duplications[output],files_per_rl[output] = dbs3Client.duplicateRunLumiFiles( output , skipInvalid=True, verbose=True) except: try: duplications[output],files_per_rl[output] = dbs3Client.duplicateRunLumiFiles( output , skipInvalid=True, verbose=True) except Exception as e: wfi.sendLog('checkor','Not possible to check on duplicate lumi count on %s'%(output)) sendLog('checkor','Not possible to check on duplicate lumi count on %s\n%s'%(output,str(e)),level='critical') is_closing=False if is_closing and any(duplications.values()) and not options.ignoreduplicates: duplicate_notice = "" duplicate_notice += "%s has duplicates\n"%wfo.name duplicate_notice += json.dumps( 
duplications,indent=2) duplicate_notice += '\n' duplicate_notice += json.dumps( files_per_rl, indent=2) wfi.sendLog('checkor',duplicate_notice) ## hook for making file invalidation ? ## it shouldn't be allowed to bypass it assistance_tags.add('duplicates') is_closing = False time_point("checked duplicates", sub_lap=True) time_point("done with %s"%wfo.name) ## for visualization later on if not wfo.name in fDB.record: #print "adding",wfo.name,"to close out record" fDB.record[wfo.name] = { 'datasets' :{}, 'name' : wfo.name, 'closeOutWorkflow' : None, } fDB.record[wfo.name]['closeOutWorkflow'] = is_closing fDB.record[wfo.name]['priority'] = wfi.request['RequestPriority'] fDB.record[wfo.name]['prepid'] = wfi.request['PrepID'] for output in wfi.request['OutputDatasets']: if not output in fDB.record[wfo.name]['datasets']: fDB.record[wfo.name]['datasets'][output] = {} rec = fDB.record[wfo.name]['datasets'][output] #rec['percentage'] = float('%.2f'%(percent_completions[output]*100)) rec['percentage'] = math.floor(percent_completions[output]*10000)/100.## round down rec['duplicate'] = duplications[output] if output in duplications else 'N/A' rec['phedexReqs'] = float('%.2f'%any_presence[output][custodial_presences[output][0]][1]) if len(custodial_presences[output])!=0 else 'N/A' rec['closeOutDataset'] = is_closing rec['transPerc'] = float('%.2f'%any_presence[output][ disk_copies[output][0]][1]) if len(disk_copies[output])!=0 else 'N/A' rec['correctLumis'] = int(events_per_lumi[output]) if (events_per_lumi[output] > lumi_upper_limit[output]) else True rec['missingSubs'] = False if len(custodial_locations[output])==0 else ','.join(list(set(custodial_locations[output]))) rec['dbsFiles'] = dbs_presence[output] rec['dbsInvFiles'] = dbs_invalid[output] rec['phedexFiles'] = phedex_presence[output] rec['acdc'] = "%d / %d"%(len(acdc),len(acdc+acdc_inactive)) rec['familly'] = true_familly now = time.gmtime() rec['timestamp'] = time.mktime(now) rec['updated'] = time.asctime(now)+' 
(GMT)' ## make the lumi summary if wfi.request['RequestType'] == 'ReReco': try: os.system('python Unified/lumi_summary.py %s 1 > /dev/null'%(wfi.request['PrepID'])) os.system('python Unified/lumi_plot.py %s > /dev/null'%(wfi.request['PrepID'])) wfi.sendLog('checkor','Lumi summary available at %s/datalumi/lumi.%s.html'%(unified_url,wfi.request['PrepID'])) except Exception as e: print str(e) ## make the error report ## and move on if is_closing: ## toggle status to closed-out in request manager wfi.sendLog('checkor',"setting %s closed-out"% wfo.name) if not options.test: if wfo.wm_status in ['closed-out','announced','normal-archived']: print wfo.name,"is already",wfo.wm_status,"not trying to closed-out and assuming it does" res = None else: res = reqMgrClient.closeOutWorkflowCascade(url, wfo.name) print "close out answer",res if not res in ["None",None]: print "try to get the current status again" wfi_bis = workflowInfo(url, wfo.name) if wfi_bis.request['RequestStatus'] == 'closed-out': print "the request did toggle to closed-out" res = None if not res in ["None",None]: print "retrying to closing out" print res res = reqMgrClient.closeOutWorkflowCascade(url, wfo.name) if res in [None,"None"]: wfo.status = 'close' session.commit() if use_mcm and force_by_mcm: ## shoot large on all prepids, on closing the wf for pid in pids: mcm.delete('/restapi/requests/forcecomplete/%s'%pid) else: print "could not close out",wfo.name,"will try again next time" else: if not 'custodial' in assistance_tags or wfi.isRelval(): ## do only the report for those for member in acdc+acdc_inactive+[wfo.name]: try: parse_one(url, member) except: print "Could not make error report for",member ## full known list #recovering # has active ACDC ##OUT #recovered #had inactive ACDC #recovery #not over the pass bar #over100 # over 100% #biglumi # has a big lumiblock #parentcustodial # the parent does not have a valid subscription yet #custodial # has had the transfer made, is waiting for a valid 
custodial subscription to appear #filemismatch # there is a dbs/phedex mismatch #duplicates #a lumi section is there twice ## manual is not added yet, and should be so by recoveror print wfo.name,"was tagged with :",list(assistance_tags) if 'recovering' in assistance_tags: ## if active ACDC, being under threshold, filemismatch do not matter assistance_tags = assistance_tags - set(['recovery','filemismatch']) if 'recovery' in assistance_tags and 'recovered' in assistance_tags: ## should not set -recovery to anything that had ACDC already assistance_tags = assistance_tags - set(['recovery','recovered']) ## straight to manual assistance_tags.add('manual') in_manual += 1 if 'recovery' in assistance_tags and 'manual' in assistance_tags: ## this is likely because something bad is happening, so leave it to manual assistance_tags = assistance_tags - set(['recovery']) assistance_tags.add('manual') in_manual += 1 ## that means there is something that needs to be done acdc, lumi invalidation, custodial, name it print wfo.name,"needs assistance with",",".join( assistance_tags ) print wfo.name,"existing conditions",",".join( existing_assistance_tags ) ######################################### ##### notification to requester ######### go_notify=False if assistance_tags and not 'manual' in existing_assistance_tags and existing_assistance_tags != assistance_tags: go_notify=True if go_notify: #if wfo.name in already_notified: # print "double notification" # sendEmail('double notification','please take a look at %s'%(wfo.name)) #else: # already_notified.append( wfo.name ) ###detailslink = 'https://cmsweb.cern.ch/reqmgr/view/details/%s' #detailslink = 'https://cmsweb.cern.ch/reqmgr2/fetch?rid=%s'%(wfo.name) ###perflink = 'https://cmsweb.cern.ch/couchdb/workloadsummary/_design/WorkloadSummary/_show/histogramByWorkflow/%s'%(wfo.name) perflink = '%s/report/%s'%(unified_url,wfo.name) splitlink = 'https://cmsweb.cern.ch/reqmgr/view/splitting/%s'%(wfo.name) ## notify templates messages= { 
'recovery': 'Samples completed with missing statistics:\n%s\n%s '%( '\n'.join(['%.2f %% complete for %s'%(percent_completions[output]*100, output) for output in wfi.request['OutputDatasets'] ] ), perflink ), 'biglumi': 'Samples completed with large luminosity blocks:\n%s\n%s '%('\n'.join(['%d > %d for %s'%(events_per_lumi[output], lumi_upper_limit[output], output) for output in wfi.request['OutputDatasets'] if (events_per_lumi[output] > lumi_upper_limit[output])]), splitlink), 'duplicates': 'Samples completed with duplicated luminosity blocks:\n%s\n'%( '\n'.join(['%s'%output for output in wfi.request['OutputDatasets'] if output in duplications and duplications[output] ] ) ), 'filemismatch': 'Samples completed with inconsistency in DBS/Phedex', #'manual' : 'Workflow completed and requires manual checks by Ops', } content = "The request PREPID (WORKFLOW) is facing issue in production.\n" motive = False for case in messages: if case in assistance_tags: content+= "\n"+messages[case]+"\n" motive = True content += "You are invited to check, while this is being taken care of by Comp-Ops.\n" content += "This is an automated message from Comp-Ops.\n" items_notified = set() if use_mcm and motive: wfi.notifyRequestor( content , mcm = mcm) ######################################### ## logic to set the status further if assistance_tags: new_status = 'assistance-'+'-'.join(sorted(assistance_tags) ) else: new_status = 'assistance' ## case where the workflow was in manual from recoveror if not 'manual' in wfo.status or new_status!='assistance-recovery': wfo.status = new_status if not options.test: wfi.sendLog('checkor','setting %s to %s'%(wfo.name, wfo.status)) session.commit() else: print "current status is",wfo.status,"not changing to anything" #open('already_notifified.json','w').write( json.dumps( already_notified , indent=2)) fDB.html() if not spec and in_manual!=0: sendEmail("fresh assistance status available","Fresh status are available at 
%s/assistance.html"%unified_url,destination=['*****@*****.**']) #it's a bit annoying pass ## custodial requests print "Custodials" print json.dumps(custodials, indent=2) for site in custodials: items_at = defaultdict(set) for i in custodials[site]: item, group = i.split('@') if '@' in i else (i,'DataOps') items_at[group].add( item ) for group,items in items_at.items(): print ','.join(items),'=>',site,'@',group if not options.test: result = makeReplicaRequest(url, site, sorted(items) ,"custodial copy at production close-out",custodial='y',priority='low', approve = (site in SI.sites_auto_approve) , group=group) print result print "File Invalidation" print invalidations
def getPileupDataset(url, workflow):
    """Return the pileup dataset configured for the given workflow name."""
    info = workflowInfo(url, workflow)
    return info.getPileupDataset()
def singleRecovery(url, task, initial, actions, do=False):
    """
    Build and (optionally) submit a Resubmission (ACDC) request for one task.

    url     : reqmgr host
    task    : full task path to recover (InitialTaskPath of the ACDC)
    initial : request dictionary of the original workflow
    actions : operator actions; 'mem*' entries override memory,
              'split*' entries rescale the job splitting of the ACDC
    do      : when False, only print the payload and return None (dry run)

    Returns the name of the approved ACDC request, or None on refusal/failure.
    """
    print "Inside single recovery!"
    # Base resubmission payload pointing back at the original request's ACDC records.
    payload = {
        "Requestor" : os.getenv('USER'),
        "Group" : 'DATAOPS',
        "RequestType" : "Resubmission",
        "ACDCServer" : initial['CouchURL'],
        "ACDCDatabase" : "acdcserver",
        "OriginalRequestName" : initial['RequestName'],
        "OpenRunningTimeout" : 0
    }
    # Carry selected parameters over from the original request, when present.
    copy_over = ['PrepID','Campaign','RequestPriority', 'TimePerEvent', 'SizePerEvent', 'Group', 'Memory', 'RequestString' ,'CMSSWVersion']
    for c in copy_over:
        if c in initial:
            payload[c] = copy.deepcopy(initial[c])
        else:
            print c,"not in the initial payload"

    #a massage ? boost the recovery over the initial wf
    # payload['RequestPriority'] *= 10
    #Max priority is 1M
    # Double the priority so the recovery overtakes the original, capped at 500k.
    payload['RequestPriority'] = min(500000, payload['RequestPriority']*2 ) ## never above 500k

    #change parameters based on actions here
    if actions:
        for action in actions:
            if action.startswith('mem') and actions[action] != "" and actions[action] != 'Same':
                payload['Memory'] = actions[action]
                print "Memory set to " + actions[action]
                ## Taskchains needs to be treated special to set the memory to all tasks
                if 'TaskChain' in initial:
                    it = 1
                    while True:
                        t = 'Task%d'%it
                        it += 1
                        if t in initial:
                            payload[t] = copy.deepcopy(initial[t])
                            payload[t]['Memory'] = actions[action]
                        else:
                            break
            if action.startswith('split'):
                # Refuse splitting changes for request types where it is unsafe
                # (MonteCarlo, or anything event-based).
                split_alert = (initial['RequestType'] in ['MonteCarlo'] )
                for key in initial:
                    if key == 'SplittingAlgo' and (initial[key] in ['EventBased']):
                        split_alert = True
                    elif key.startswith('Task') and key != 'TaskChain':
                        for key2 in initial[key]:
                            if key2 == 'TaskName':
                                print "task",task.split('/')[-1]
                                print "TaskName",initial[key][key2]
                                # NOTE(review): comparing a TaskName against 'EventBased'
                                # looks wrong — presumably the task's SplittingAlgo was
                                # meant here; this branch can likely never fire. Confirm.
                                if (initial[key][key2] == task) and (initial[key][key2] in ['EventBased']):
                                    split_alert = True
                if split_alert:
                    sendLog('actor','Cannot change splitting for %s'%initial['RequestName'],level='warning')
                    print "I should not be doing splitting for this type of request",initial['RequestName']
                    return None

    # Derive the ACDC round counter from the previous RequestString prefix
    # (e.g. 'ACDC1_foo' -> round 2, new string 'ACDC2_foo').
    acdc_round = 0
    initial_string = payload['RequestString']
    if initial_string.startswith('ACDC'):
        if initial_string[4].isdigit():
            acdc_round = int(initial_string[4])
        acdc_round += 1
    initial_string = initial_string.replace('ACDC_','').replace('ACDC%d_'%(acdc_round-1),'')
    payload['RequestString'] = 'ACDC%d_%s'%(acdc_round,initial_string)
    payload['InitialTaskPath'] = task

    # Dry-run: show what would be submitted and bail out.
    if not do:
        print json.dumps( payload, indent=2)
        return None

    print "ACDC payload"
    # print json.dumps( payload , indent=2)
    print actions

    ## submit here
    # One retry on submission failure before giving up.
    acdc = reqMgrClient.submitWorkflow(url, payload)
    if not acdc:
        print "Error in making ACDC for",initial["RequestName"]
        acdc = reqMgrClient.submitWorkflow(url, payload)
        if not acdc:
            print "Error twice in making ACDC for",initial["RequestName"]
            sendLog('actor','Failed twice in making ACDCs for %s!'%initial['RequestName'],level='critical')
            return None

    ## change splitting if requested
    if actions:
        for action in actions:
            if action.startswith('split'):
                acdcInfo = workflowInfo(url, acdc)
                splittings = acdcInfo.getSplittings()
                # NOTE(review): unlike singleClone, there is no guard against an
                # empty-string action value here; '' falls through to factor=2.
                if actions[action] != 'Same' and actions[action] != 'max':
                    # 'Nx' means divide by N, anything else defaults to halving.
                    factor = int(actions[action][0:-1]) if 'x' in actions[action] else 2
                    for split in splittings:
                        for act in ['avg_events_per_job','events_per_job','lumis_per_job']:
                            if act in split:
                                print "Changing %s (%d) by a factor %d"%( act, split[act], factor),
                                split[act] /= factor
                                print "to",split[act]
                                break
                        split['requestName'] = acdc
                        print "changing the splitting of",acdc
                        print json.dumps( split, indent=2 )
                        print reqMgrClient.setWorkflowSplitting(url, acdc, split )
                elif 'max' in actions[action]:
                    # 'max' splitting: force one unit of work per job.
                    for split in splittings:
                        for act in ['avg_events_per_job','events_per_job','lumis_per_job']:
                            if act in split:
                                print "Changing %s (%d) "%( act, split[act]),
                                split[act] = 1
                                print "to max splitting ",split[act]
                                break
                        split['requestName'] = acdc
                        print "changing the splitting of",acdc
                        print json.dumps( split, indent=2 )
                        print reqMgrClient.setWorkflowSplitting(url, acdc, split )

    # Move the freshly created ACDC to assignment-approved.
    data = reqMgrClient.setWorkflowApproved(url, acdc)
    print data
    return acdc
# Ad-hoc operator scratch script: inspect a single workflow's lumi white list.
# Most of the file is commented-out one-off experiments (transfer status checks,
# manual DB edits) kept around for reference.
from assignSession import *
import time
from utils import getWorkLoad, checkTransferStatus, workflowInfo
import pprint
import sys

url = 'cmsweb.cern.ch'
#wfi = workflowInfo( url, "pdmvserv_HIG-2019GEMUpg14DR-00116_00086_v0__150330_112405_8526")
wfi = workflowInfo( url, "pdmvserv_task_B2G-RunIIWinter15wmLHE-00001__v1_T_150402_161327_2265")
print wfi.getLumiWhiteList()
#print wfi.acquisitionEra()

#pid = sys.argv[1]
#tr = session.query(Transfer).filter(Transfer.phedexid== pid).first()
#for wfid in tr.workflows_id:
#    wf = session.query(Workflow).get(wfid)
#    print wf.id,wf.name
#    wf.status = 'staging'
#session.commit()

#checks = checkTransferStatus( 'cmsweb.cern.ch',440053 )
#print checks

#tr = session.query(Transfer).filter(Transfer.phedexid== 440100 ).first()
#session.delete( tr )
#session.commit()

#wf = session.query(Workflow).filter(Workflow.name=="pdmvserv_TOP-Summer12DR53X-00302_00379_v0__150331_100923_4420"
#        ).first()
def actor(url,options=None):
    """
    Poll the operator action page and perform the requested recovery actions
    ('acdc' or 'clone') on each listed workflow.

    url     : reqmgr host
    options : parsed CLI options; uses options.do (actually perform actions)
              and options.ass (assign the created ACDCs)
    """
    # Bail out if another actor instance holds the lock or required components are down.
    if userLock('actor'): return
    up = componentInfo(mcm=False, soft=['mcm'])
    if not up.check(): return
    # CI = campaignInfo()
    SI = siteInfo()
    UC = unifiedConfiguration()

    # Need to look at the actions page https://vocms0113.cern.ch:80/getaction (can add ?days=20) and perform any actions listed
    try:
        action_list = json.loads(os.popen('curl -s -k https://vocms0113.cern.ch:80/getaction?days=15').read())
        ## now we have a list of things that we can take action on
    except:
        print "Not able to load action list :("
        sendLog('actor','Not able to load action list', level='critical')
        return

    print action_list
    if not action_list:
        print "EMPTY!"
        return

    for wfname in action_list:
        print '-'*100
        print "Looking at",wfname,"for recovery options"
        to_clone = False
        to_acdc = False
        # NOTE(review): 'tasks' is only assigned when a 'Parameters' key is present;
        # otherwise the 'if not tasks' check below either raises NameError (first
        # iteration) or silently reuses the previous workflow's value — confirm.
        for key in action_list[wfname]:
            if key == 'Parameters':
                tasks = action_list[wfname][key]
            elif key == 'Action' and action_list[wfname][key] == 'acdc':
                print "Going to create ACDCs for ", wfname
                to_acdc = True
            elif key == 'Action' and action_list[wfname][key] == 'clone':
                print "Going to clone ", wfname
                to_clone = True

        if not to_acdc and not to_clone:
            sendLog('actor','Action submitted for something other than acdc and clone for workflow %s'%wfname,level='critical')
            print "Can only do acdcs and clones! Skipping workflow ",wfname
            continue
        if not tasks:
            sendLog('actor','Empty action submitted for workflow %s'%wfname,level='critical')
            print "Moving on. Parameters is blank for " + wfname
            continue

        wfi = workflowInfo(url, wfname)

        recover = True
        message_to_ops = ""
        message_to_user = ""

        #===========================================================
        if to_clone and options.do:
            print "Let's try kill and clone: "
            wfi.sendLog('actor','Going to clone %s'%wfname)
            results=[]
            datasets = set(wfi.request['OutputDatasets'])

            comment=""
            if 'comment' in tasks:
                comment = ", reason: "+ tasks['comment']
            wfi.sendLog('actor',"invalidating the workflow by traffic controller %s"%comment)

            #Reject all workflows in the family
            #first reject the original workflow.
            reqMgrClient.invalidateWorkflow(url, wfi.request['RequestName'], current_status=wfi.request['RequestStatus'], cascade=False)
            #Then reject any ACDCs associated with that workflow
            if 'ACDCs' in action_list[wfname]:
                children = action_list[wfname]['ACDCs']
                for child in children:
                    wfi.sendLog('actor',"rejecting %s"%child)
                    wfi_acdc = workflowInfo(url, child)
                    reqMgrClient.invalidateWorkflow(url, wfi_acdc.request['RequestName'], current_status=wfi_acdc.request['RequestStatus'], cascade=False)
                    datasets.update( wfi_acdc.request['OutputDatasets'] )
            #Invalidate all associated output datasets
            for dataset in datasets:
                results.append( setDatasetStatus(dataset, 'INVALID') )

            if all(map(lambda result : result in ['None',None,True],results)):
                wfi.sendLog('actor',"%s and children are rejected"%wfname)

            cloned = None
            try:
                cloned = singleClone(url, wfname, tasks, comment, options.do)
            except:
                sendLog('actor','Failed to create clone for %s! Check logs for more information. Action will need to be resubmitted.'%wfname,level='critical')
                wfi.sendLog('actor','Failed to create clone for %s!'%wfname)
                remove_action(wfname)
            if not cloned:
                recover = False
                wfi.sendLog('actor','Failed to create clone for %s!'%wfname)
                sendLog('actor','Failed to create clone for %s!'%wfname,level='critical')
            else:
                wfi.sendLog('actor',"Workflow %s cloned"%wfname)

        #===========================================================
        elif to_acdc:
            # Fold 'AllSteps' defaults into each individual task's action dict.
            if 'AllSteps' in tasks:
                allTasksDefaults = tasks['AllSteps']
                tasks.pop('AllSteps')
                for setting in allTasksDefaults:
                    for task in tasks:
                        if setting in tasks[task]:
                            tasks[task][setting] = allTasksDefaults[setting]
                        else:
                            # NOTE(review): tasks[task] is used as a dict elsewhere;
                            # .append here looks like it would raise — confirm.
                            tasks[task].append({setting:allTasksDefaults[setting]})

            print "Tasks is "
            print tasks

            all_tasks = wfi.getAllTasks()

            ## need a way to verify that this is the first round of ACDC, since the second round will have to be on the ACDC themselves
            try:
                WMErr = wfi.getWMErrors()
                # print WMErr
            except:
                sendLog('actor','Cannot create ACDCS for %s because WMErr cannot be reached.'%wfname,level='critical')
                continue
            if not WMErr:
                sendLog('actor','Cannot create ACDCS for %s because WMErr is blank.'%wfname,level='critical')
                print "Moving on. WMErr is blank"
                continue

            try:
                where_to_run, missing_to_run,missing_to_run_at = wfi.getRecoveryInfo()
                print "Where to run = "
                print where_to_run
            except:
                sendLog('actor','Cannot create ACDCS for %s because recovery info cannot be found.'%wfname,level='critical')
                print "Moving on. Cannot access recovery info for " + wfname
                continue
            if not where_to_run:
                sendLog('actor','Cannot create ACDCS for %s because site list cannot be found.'%wfname,level='critical')
                print "Moving on. where to run is blank"
                continue

            message_to_ops = ""
            message_to_user = ""

            # Count the tasks that actually have failed jobs (LogCollect/Cleanup excluded).
            num_tasks_to_recover = 0
            for task in WMErr:
                if 'LogCollect' in task: continue
                if 'Cleanup' in task: continue
                if not 'jobfailed' in WMErr[task]:
                    continue
                else:
                    num_tasks_to_recover += 1
                    # print "Task to recover: " + task

            if not num_tasks_to_recover:
                print "\tno error for",wfname
                # recover = False

            if 'LheInputFiles' in wfi.request and wfi.request['LheInputFiles']:
                ## we do not try to recover pLHE
                sendLog('actor','Cannot create ACDCS for %s because it is a pLHE workflow.'%wfname,level='critical')
                print "We don't try to recover pLHE. Moving on."
                recover = False
                # sendEmail('cannot submit action', '%s is a pLHE workflow. We do not try to recover pLHE'%wfname)

            # if wfi.request['RequestType'] in ['ReReco']:
            #     recover= False
            #     print 'cannot submit action. ReReco'
            #     sendEmail('cannot submit action', '%s is request type ReReco'%wfname)

            recovering = set()
            for task in tasks:
                assign_to_sites = set()
                print "Task names is " + task
                fulltaskname = '/' + wfname + '/' + task
                # print "Full task name is " + fulltaskname
                # Only Processing/Production/Merge tasks can be ACDC'ed.
                wrong_task = False
                for task_info in all_tasks:
                    if fulltaskname == task_info.pathName:
                        if task_info.taskType not in ['Processing','Production','Merge']:
                            wrong_task=True
                            wfi.sendLog('actor', "Skipping task %s because the taskType is %s. Can only ACDC Processing, Production, or Merge tasks"%( fulltaskname, task_info.taskType))
                if wrong_task:
                    continue
                print tasks[task]
                actions = tasks[task]
                for action in actions:
                    if action.startswith('sites'):
                        if type(actions[action]) != list:
                            assign_to_sites=[SI.SE_to_CE(actions[action])]
                        else:
                            assign_to_sites=list(set([SI.SE_to_CE(site) for site in actions[action]]))
                    # if action.startswith('mem') and actions[action] != "" and actions[action] != 'Same' and wfi.request['RequestType'] in ['TaskChain']:
                    #     recover = False;
                    #     print "Skipping %s for now until Allie fixes memory parameter for TaskChain ACDCs."%wfname
                    #     wfi.sendLog('actor',"Skipping %s for now until Allie fixes memory parameter for TaskChain ACDCs."%wfname)
                # No explicit site action: fall back to the sites from the ACDC document.
                if not 'sites' in actions:
                    assign_to_sites = list(set([SI.SE_to_CE(site) for site in where_to_run[task]]))
                    print "Found",sorted(assign_to_sites),"as sites where to run the ACDC at, from the acdc doc of ",wfname
                print "Going to run at",sorted(assign_to_sites)
                if recover:
                    print "Initiating recovery"
                    acdc = singleRecovery(url, fulltaskname, wfi.request, actions, do = options.do)
                    if not acdc:
                        if options.do:
                            if recovering:
                                # Some earlier tasks were already recovered: flag partial success.
                                print wfname + " has been partially ACDC'ed. Needs manual attention."
                                sendLog('actor', "%s has had %s/%s recoveries %s only"%( wfname, len(recovering), num_tasks_to_recover, list(recovering)), level='critical')
                                wfi.sendLog('actor', "%s has had %s/%s recoveries %s only"%( wfname, len(recovering), num_tasks_to_recover, list(recovering)))
                                break
                            else:
                                print wfname + " failed recovery once"
                                recover = False
                                break
                        else:
                            print "no action to take further"
                            # sendLog('recoveror', "ACDC for %s can be done automatically"% wfname, level='critical')
                            continue
                    else:
                        #ACDC was made correctly. Now we have to assign it.
                        wfi.sendLog('actor','ACDC created for task %s. Actions taken \n%s'%(fulltaskname,list(actions)))
                        team = wfi.request['Teams'][0]
                        parameters={
                            'SiteWhitelist' : sorted(assign_to_sites),
                            'AcquisitionEra' : wfi.acquisitionEra(),
                            'ProcessingString' : wfi.processingString(),
                            'MergedLFNBase' : wfi.request['MergedLFNBase'],
                            'ProcessingVersion' : wfi.request['ProcessingVersion'],
                            }
                        ## hackery for ACDC merge assignment
                        if wfi.request['RequestType'] == 'TaskChain' and 'Merge' in task.split('/')[-1]:
                            parameters['AcquisitionEra'] = None
                            parameters['ProcessingString'] = None

                        ## xrootd setttings on primary and secondary
                        if 'xrootd' in actions:
                            if actions['xrootd'] == 'enabled':
                                print "Going to assign via xrootd"
                                parameters['TrustSitelists'] = True
                            elif actions['xrootd'] == 'disabled':
                                parameters['TrustSitelists'] = False
                        elif ('TrustSitelists' in wfi.request and wfi.request['TrustSitelists']=='true'):
                            parameters['TrustSitelists'] = True
                        else:
                            parameters['TrustSitelists'] = False

                        if 'TrustPUSitelists' in wfi.request and wfi.request['TrustPUSitelists']:
                            parameters['TrustPUSitelists'] = True

                        if options.ass:
                            print "really doing the assignment of the ACDC",acdc
                            parameters['execute']=True
                            wfi.sendLog('actor',"%s was assigned for recovery"% acdc)
                        else:
                            print "no assignment done with this ACDC",acdc
                            sendLog('actor',"%s needs to be assigned"%(acdc), level='critical')
                            continue

                        # print parameters
                        result = reqMgrClient.assignWorkflow(url, acdc, team, parameters)
                        if not result:
                            print acdc,"was not assigned"
                            sendLog('actor',"%s needs to be assigned"%(acdc), level='critical')
                        else:
                            recovering.add( acdc )
            wfi.sendLog('actor',"ACDCs created for %s"%wfname)
        #===========================================================

        # The action is consumed only when everything above succeeded.
        if recover and options.do:
            remove_action(wfname)

        if message_to_user:
            print wfname,"to be notified to user(DUMMY)",message_to_user

        if message_to_ops:
            print 'message'
            #sendEmail( "notification in recoveror" , message_to_ops, destination=['*****@*****.**'])
            # sendLog('recoveror',message_to_ops,level='warning')
    return
def singleClone(url, wfname, actions, comment, do=False): wfi = workflowInfo(url, wfname) payload = wfi.getSchema() initial = wfi.request payload['Requestor'] = os.getenv('USER') payload['Group'] = 'DATAOPS' payload['OriginalRequestName'] = initial['RequestName'] payload['RequestPriority'] = initial['RequestPriority'] if 'ProcessingVersion' in initial: payload['ProcessingVersion'] = int(initial['ProcessingVersion']) +1 else: payload['ProcessingVersion'] = 2 ## drop parameters on the way to reqmgr2 paramBlacklist = ['BlockCloseMaxEvents', 'BlockCloseMaxFiles', 'BlockCloseMaxSize', 'BlockCloseMaxWaitTime', 'CouchWorkloadDBName', 'CustodialGroup', 'CustodialSubType', 'Dashboard', 'GracePeriod', 'HardTimeout', 'InitialPriority', 'inputMode', 'MaxMergeEvents', 'MaxMergeSize', 'MaxRSS', 'MaxVSize', 'MinMergeSize', 'NonCustodialGroup', 'NonCustodialSubType', 'OutputDatasets', 'ReqMgr2Only', 'RequestDate' 'RequestorDN', 'RequestName', 'RequestStatus', 'RequestTransition', 'RequestWorkflow', 'SiteWhitelist', 'SoftTimeout', 'SoftwareVersions', 'SubscriptionPriority', 'Team', 'timeStamp', 'TrustSitelists', 'TrustPUSitelists', 'TotalEstimatedJobs', 'TotalInputEvents', 'TotalInputLumis', 'TotalInputFiles','checkbox', 'DN', 'AutoApproveSubscriptionSites', 'NonCustodialSites', 'CustodialSites', 'OriginalRequestName', 'Teams', 'OutputModulesLFNBases', 'SiteBlacklist', 'AllowOpportunistic', '_id'] for p in paramBlacklist: if p in payload: payload.pop( p ) pass if actions: for action in actions: if action.startswith('mem') and actions[action] != "" and actions[action] != 'Same': if 'TaskChain' in payload: print "Setting memory for clone of task chain" it=1 while True: t = 'Task%d'%it it+=1 if t in payload: payload[t]['Memory'] = actions[action] print "Memory set for Task%d"%it else: break else: print "Setting memory for non-taskchain workflow" payload['Memory'] = actions[action] print "Memory set to " + actions[action] #This line is doesn't work for some reason # 
wfi.sendLog('actor','Memory of clone set to %d'%actions[action]) print "Clone payload" # print json.dumps( payload , indent=2) print actions #Create clone clone = reqMgrClient.submitWorkflow(url, payload) if not clone: print "Error in making clone for",initial["RequestName"] clone = reqMgrClient.submitWorkflow(url, payload) if not clone: print "Error twice in making clone for",initial["RequestName"] sendLog('actor','Failed to make a clone twice for %s!'%initial["RequestName"],level='critical') wfi.sendLog('actor','Failed to make a clone twice for %s!'%initial["RequestName"]) return None if actions: for action in actions: if action.startswith('split'): cloneinfo = workflowInfo(url, clone) splittings = cloneinfo.getSplittings() if actions[action] != 'Same' and actions[action] != 'max' and actions[action] != '': factor = int(actions[action][0:-1]) if 'x' in actions[action] else 2 for split in splittings: for act in ['avg_events_per_job','events_per_job','lumis_per_job']: if act in split: wfi.sendLog('actor','Changing %s (%d) by a factor %d'%( act, split[act], factor)) print "Changing %s (%d) by a factor %d"%( act, split[act], factor), split[act] /= factor print "to",split[act] break split['requestName'] = clone print "changing the splitting of",clone print json.dumps( split, indent=2 ) print reqMgrClient.setWorkflowSplitting(url, clone, split ) elif 'max' in actions[action]: for split in splittings: for act in ['avg_events_per_job','events_per_job','lumis_per_job']: if act in split: wfi.sendLog('actor','Max splitting set for %s (%d'%( act, split[act])) print "Changing %s (%d) "%( act, split[act]), split[act] = 1 print "to max splitting ",split[act] break split['requestName'] = clone print "changing the splitting of",clone print json.dumps( split, indent=2 ) print reqMgrClient.setWorkflowSplitting(url, clone, split ) #Approve data = reqMgrClient.setWorkflowApproved(url, clone) wfi.sendLog('actor','Cloned into %s'%clone) # wfi.sendLog('actor','Cloned into %s by unified 
operator %s'%( clone, comment )) # wfi.notifyRequestor('Cloned into %s by unified operator %s'%( clone, comment ),do_batch=False) print data return clone
def assignor(url, specific=None, talk=True, options=None):
    """Assign workflows that are ready (considered/staging/staged) in reqmgr.

    Pulls Workflow rows from the unified DB by status, builds a site white
    list from input-data presence (primary/secondary datasets, block/run
    white lists), checks data availability against the wanted number of
    copies, then calls reqMgrClient.assignWorkflow and locks the I/O of
    successfully assigned workflows.

    url      -- reqmgr base url
    specific -- comma separated substrings; only workflows whose name
                matches one of them are handled
    talk     -- verbose printing of dataset presence
    options  -- parsed command line options
                NOTE(review): options is read unconditionally (options.early,
                options.go, ...) so passing options=None will raise — confirm
                all callers provide it.
    """
    ## global guards : do nothing if someone/something else holds the lock
    if userLock(): return
    if duplicateLock(): return
    #if notRunningBefore( 'stagor' ): return
    if not componentInfo().check(): return

    UC = unifiedConfiguration()
    CI = campaignInfo()
    SI = global_SI
    #LI = lockInfo()
    NLI = newLockInfo()

    n_assigned = 0
    n_stalled = 0

    ## collect the candidate workflows, by unified status
    wfos = []
    if specific or options.early:
        wfos.extend( session.query(Workflow).filter(Workflow.status == 'considered').all())
        wfos.extend( session.query(Workflow).filter(Workflow.status == 'staging').all())
    if specific:
        wfos.extend( session.query(Workflow).filter(Workflow.status == 'considered-tried').all())
    wfos.extend( session.query(Workflow).filter(Workflow.status == 'staged').all())
    #if specific:
    #    #wfos = session.query(Workflow).filter(Workflow.name==specific).all()
    #    wfos = session.query(Workflow).filter(Workflow.name.contains(specific)).all()
    #if not wfos:
    #    if specific:
    #        wfos = session.query(Workflow).filter(Workflow.status=='considered').all()
    #        wfos.extend( session.query(Workflow).filter(Workflow.status=='staging').all())
    #    wfos.extend(session.query(Workflow).filter(Workflow.status=='staged').all())

    ## randomize so that no workflow systematically hogs the front of the queue
    random.shuffle(wfos)
    for wfo in wfos:
        if options.limit and (n_stalled + n_assigned) > options.limit:
            break

        if specific:
            if not any(map(lambda sp: sp in wfo.name, specific.split(','))):
                continue
            #if not specific in wfo.name: continue

        print "\n\n"
        wfh = workflowInfo(url, wfo.name)
        wfh.sendLog('assignor', "%s to be assigned" % wfo.name)

        ## check if by configuration we gave it a GO
        if not CI.go(wfh.request['Campaign']) and not options.go:
            wfh.sendLog('assignor', "No go for %s" % wfh.request['Campaign'])
            n_stalled += 1
            continue

        ## check on current status for by-passed assignment
        if wfh.request['RequestStatus'] != 'assignment-approved':
            if not options.test:
                wfh.sendLog('assignor', "setting %s away and skipping" % wfo.name)
                ## the module picking up from away will do what is necessary of it
                wfo.wm_status = wfh.request['RequestStatus']
                wfo.status = 'away'
                session.commit()
                continue
            else:
                print wfo.name, wfh.request['RequestStatus']

        ## retrieve from the schema, dbs and reqMgr what should be the next version
        version = wfh.getNextVersion()
        if not version:
            if options and options.ProcessingVersion:
                version = options.ProcessingVersion
            else:
                wfh.sendLog('assignor', "cannot decide on version number")
                n_stalled += 1
                wfo.status = 'trouble'
                session.commit()
                continue

        ## the site whitelist takes into account siteInfo, campaignInfo, memory and cores
        (lheinput, primary, parent, secondary, sites_allowed) = wfh.getSiteWhiteList()
        original_sites_allowed = copy.deepcopy(sites_allowed)
        wfh.sendLog('assignor', "Site white list %s" % sorted(sites_allowed))

        override_sec_location = CI.get(wfh.request['Campaign'], 'SecondaryLocation', [])

        ## restrict to the configured block white list, augmented by run white list
        blocks = []
        if 'BlockWhitelist' in wfh.request:
            blocks = wfh.request['BlockWhitelist']
        if 'RunWhitelist' in wfh.request and wfh.request['RunWhitelist']:
            ## augment with run white list
            for dataset in primary:
                blocks = list(set(blocks + getDatasetBlocks(dataset, runs=wfh.request['RunWhitelist'])))

        wfh.sendLog('assignor', "Allowed %s" % sorted(sites_allowed))

        ## restrict the white list to sites that host (>98% of) every secondary dataset
        secondary_locations = None
        for sec in list(secondary):
            if override_sec_location:
                print "We don't care where the secondary is"
                print "Cannot pass for now"
                sendEmail("tempting to pass sec location check", "but we cannot yet IMO")
                #pass
            presence = getDatasetPresence(url, sec)
            print sec
            print json.dumps(presence, indent=2)
            one_secondary_locations = [site for (site, (there, frac)) in presence.items() if frac > 98.]
            #one_secondary_locations = [site for (site,(there,frac)) in presence.items() if there]
            if secondary_locations == None:
                secondary_locations = one_secondary_locations
            else:
                secondary_locations = list(set(secondary_locations) & set(one_secondary_locations))
            ## reduce the site white list to site with secondary only
            #sites_allowed = [site for site in sites_allowed if any([osite.startswith(site) for osite in one_secondary_locations])]
            sites_allowed = [site for site in sites_allowed if SI.CE_to_SE(site) in one_secondary_locations]
            wfh.sendLog('assignor', "From secondary requirement, now Allowed%s" % sorted(sites_allowed))

        initial_sites_allowed = copy.deepcopy(sites_allowed)  ## keep track of this, after secondary input location restriction : that's how you want to operate it

        sites_all_data = copy.deepcopy(sites_allowed)
        sites_with_data = copy.deepcopy(sites_allowed)
        sites_with_any_data = copy.deepcopy(sites_allowed)
        primary_locations = None
        available_fractions = {}
        set_lfn = '/store/mc'  ## by default

        ## classify sites by how much of each primary input they hold
        for prim in list(primary):
            set_lfn = getLFNbase(prim)
            presence = getDatasetPresence(url, prim, only_blocks=blocks)
            if talk:
                print prim
                print json.dumps(presence, indent=2)
            available_fractions[prim] = getDatasetBlocksFraction(url, prim, sites=[SI.CE_to_SE(site) for site in sites_allowed], only_blocks=blocks)
            #sites_all_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,(there,frac)) in presence.items() if there]])]
            #sites_with_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,frac) in presence.items() if frac[1]>90.]])]
            sites_all_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite, (there, frac)) in presence.items() if there]]
            sites_with_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite, frac) in presence.items() if frac[1] > 90.]]
            sites_with_any_data = [site for site in sites_with_any_data if SI.CE_to_SE(site) in presence.keys()]
            wfh.sendLog('assignor', "Holding the data but not allowed %s" % sorted(list(set([se_site for se_site in presence.keys() if not SI.SE_to_CE(se_site) in sites_allowed]))))
            if primary_locations == None:
                primary_locations = presence.keys()
            else:
                primary_locations = list(set(primary_locations) & set(presence.keys()))

        sites_with_data = list(set(sites_with_data))
        sites_with_any_data = list(set(sites_with_any_data))

        opportunistic_sites = []
        down_time = False
        ## opportunistic running where any piece of data is available
        if secondary_locations or primary_locations:
            ## intersection of both any pieces of the primary and good IO
            #opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations) & set(SI.sites_with_goodIO)) - set(sites_allowed))]
            if secondary_locations and primary_locations:
                opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations)) - set([SI.CE_to_SE(site) for site in sites_allowed]))]
            elif primary_locations:
                opportunistic_sites = [SI.SE_to_CE(site) for site in list(set(primary_locations) - set([SI.CE_to_SE(site) for site in sites_allowed]))]
            else:
                opportunistic_sites = []
            wfh.sendLog('assignor', "We could be running in addition at %s" % sorted(opportunistic_sites))
            if any([osite in SI.sites_not_ready for osite in opportunistic_sites]):
                ## NOTE(review): this logs a list of booleans, not the site names — likely
                ## meant [osite for osite in opportunistic_sites if osite in SI.sites_not_ready]
                wfh.sendLog('assignor', "One of the usable site is in downtime %s" % ([osite in SI.sites_not_ready for osite in opportunistic_sites]))
                down_time = True
                ## should this be send back to considered ?

        """
        if available_fractions and not all([available>=1. for available in available_fractions.values()]):
            print "The input dataset is not located in full over sites"
            print json.dumps(available_fractions)
            if not options.test and not options.go:
                known = []
                try:
                    known = json.loads(open('cannot_assign.json').read())
                except:
                    pass
                if not wfo.name in known:
                    sendEmail( "cannot be assigned","%s is not full over sites \n %s"%(wfo.name,json.dumps(available_fractions)))
                    known.append( wfo.name )
                    open('cannot_assign.json','w').write(json.dumps( known, indent=2))
                n_stalled+=1
                continue ## skip skip skip
        """

        ## should be 2 but for the time-being let's lower it to get things going
        copies_wanted, cpuh = wfh.getNCopies()
        if 'Campaign' in wfh.request and wfh.request['Campaign'] in CI.campaigns and 'maxcopies' in CI.campaigns[wfh.request['Campaign']]:
            copies_needed_from_campaign = CI.campaigns[wfh.request['Campaign']]['maxcopies']
            copies_wanted = min(copies_needed_from_campaign, copies_wanted)
        if not options.early:
            less_copies_than_requested = UC.get("less_copies_than_requested")
            copies_wanted = max(1, copies_wanted - less_copies_than_requested)  # take one out for the efficiency
        wfh.sendLog('assignor', "needed availability fraction %s" % copies_wanted)

        ## should also check on number of sources, if large enough, we should be able to overflow most, efficiently
        if available_fractions and not all([available >= copies_wanted for available in available_fractions.values()]):
            not_even_once = not all([available >= 1. for available in available_fractions.values()])
            wfh.sendLog('assignor', "The input dataset is not available %s times, only %s" % (copies_wanted, available_fractions.values()))
            if down_time and not options.go and not options.early:
                ## a downtime site is blocking availability : push back instead of waiting
                wfo.status = 'considered'
                session.commit()
                wfh.sendLog('assignor', "sending back to considered because of site downtime, instead of waiting")
                sendEmail("cannot be assigned due to downtime", "%s is not sufficiently available, due to down time of a site in the whitelist. check the assignor logs. sending back to considered." % wfo.name)
                continue
            #pass
            print json.dumps(available_fractions)
            if (options.go and not_even_once) or not options.go:
                ## remember which workflows were already reported, to avoid repeated emails
                known = []
                try:
                    known = json.loads(open('cannot_assign.json').read())
                except:
                    pass
                if not wfo.name in known and not options.limit and not options.go and not options.early:
                    wfh.sendLog('assignor', "cannot be assigned, %s is not sufficiently available.\n %s" % (wfo.name, json.dumps(available_fractions)))
                    sendEmail("cannot be assigned", "%s is not sufficiently available.\n %s" % (wfo.name, json.dumps(available_fractions)))
                    known.append(wfo.name)
                    open('cannot_assign.json', 'w').write(json.dumps(known, indent=2))
                n_stalled += 1
                if options.early:
                    if wfo.status == 'considered':
                        wfh.sendLog('assignor', "setting considered-tried")
                        wfo.status = 'considered-tried'
                        session.commit()
                    else:
                        print "tried but status is", wfo.status
                continue

        ## default back to white list to original white list with any data
        print "Allowed", sites_allowed
        if options.primary_aaa:
            ## read the primary over AAA/xrootd : location restriction is lifted
            sites_allowed = initial_sites_allowed
            #options.useSiteListAsLocation = True
            options.TrustSitelists = True
        else:
            sites_allowed = sites_with_any_data
            wfh.sendLog('assignor', "Selected for any data %s" % sorted(sites_allowed))

        if options.restrict:
            print "Allowed", sites_allowed
            sites_allowed = sites_with_any_data
            print "Selected", sites_allowed
        else:
            if set(sites_with_data) != set(sites_allowed):
                ## the data is not everywhere we wanted to run at : enable aaa
                print "Sites with 90% data not matching site white list (block choping!)"
                print "Resorting to AAA reading for", list(set(sites_allowed) - set(sites_with_data)), "?"
                print "Whitelist site with any data", list(set(sites_allowed) - set(sites_with_any_data))
                #options.useSiteListAsLocation = True
                #print "Not commissioned yet"
                #continue
            #print "We could be running at",opportunistic_sites,"in addition"
            ##sites_allowed = list(set(sites_allowed+ opportunistic_sites))

        if not len(sites_allowed):
            wfh.sendLog('assignor', "cannot be assign with no matched sites")
            sendEmail("cannot be assigned", "%s has no whitelist" % (wfo.name))
            n_stalled += 1
            continue

        t1_only = [ce for ce in sites_allowed if ce.startswith('T1')]
        if t1_only:
            # try to pick from T1 only first
            sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in t1_only])]
        else:
            # then pick any otherwise
            sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in sites_allowed])]

        ## one last modification now that we know we can assign, and to make sure all ressource can be used by the request : set all ON sites to whitelist
        ###sites_allowed = original_sites_allowed ## not needed, afterall as secondary jobs go their own ways
        wfh.sendLog('assignor', "Placing the output on %s" % sites_out)
        parameters = {
            'SiteWhitelist': sites_allowed,
            #'CustodialSites' : sites_custodial,
            'NonCustodialSites': sites_out,
            'AutoApproveSubscriptionSites': list(set(sites_out)),
            'AcquisitionEra': wfh.acquisitionEra(),
            'ProcessingString': wfh.processingString(),
            'MergedLFNBase': set_lfn,
            'ProcessingVersion': version,
        }

        ## plain assignment here
        team = 'production'
        if os.getenv('UNIFIED_TEAM'): team = os.getenv('UNIFIED_TEAM')
        if options and options.team: team = options.team

        ## high priority team agent
        #if wfh.request['RequestPriority'] >= 100000 and (wfh.request['TimePerEvent']*int(wfh.getRequestNumEvents()))/(8*3600.) < 10000:
        #    team = 'highprio'
        #    sendEmail("sending work with highprio team","%s"% wfo.name, destination=['*****@*****.**'])

        ## SDSC redirection
        #if "T2_US_UCSD" in sites_with_data and random.random() < -0.5 and wfh.request['Campaign']=='RunIISpring15DR74' and int(wfh.getRequestNumEvents()) < 600000 and not any([out.endswith('RAW') for out in wfh.request['OutputDatasets']]):
        #    ## consider SDSC
        #    parameters['SiteWhitelist'] = ['T2_US_UCSD','T3_US_SDSC']
        #    parameters['useSiteListAsLocation'] = True
        #    team = 'allocation-based'
        #    sendEmail("sending work to SDSC","%s was assigned to SDSC/UCSD"% wfo.name, destination=['*****@*****.**'])

        ## SDSC redirection
        #if wfh.request['Campaign']==R'unIIWinter15GS' and random.random() < -1.0:
        #    parameters['SiteWhitelist'] = ['T3_US_SDSC']
        #    team = 'allocation-based'
        #    sendEmail("sending work to SDSC","%s was assigned to SDSC"% wfo.name, destination=['*****@*****.**'])

        ## disabled HLT redirection (guarded by "if False")
        if False and 'T2_CH_CERN' in parameters['SiteWhitelist']:
            ## add some check on
            ### the amount pending to HLT
            ### the size of the request
            ### the priority of the request (maybe not if we decide to overflow during runs)
            parameters['SiteWhitelist'] = ['T2_CH_CERN_HLT']
            team = 'hlt'
            ## reduce the splitting by factor of 4, regardless of type of splitting
            sendEmail("sending work to HLT", "%s was assigned to HLT" % wfo.name)

        ##parse options entered in command line if any
        if options:
            for key in reqMgrClient.assignWorkflow.keys:
                v = getattr(options, key)
                if v != None:
                    if type(v) == str and ',' in v:
                        parameters[key] = filter(None, v.split(','))
                    else:
                        parameters[key] = v

        ## pick up campaign specific assignment parameters
        parameters.update(CI.parameters(wfh.request['Campaign']))

        if not options.test:
            parameters['execute'] = True

        ## possibly override the request splitting if it is deemed too heavy
        split_check = wfh.checkWorkflowSplitting()
        if split_check != True:
            parameters.update(split_check)
            if 'EventBased' in split_check.values():
                wfh.sendLog('assignor', "Falling back to event splitting.")
                sendEmail("Fallback to EventBased", "the workflow %s is too heavy to be processed as it is. Fallback to EventBased splitting" % wfo.name)
            elif 'EventsPerJob' in split_check.values():
                wfh.sendLog('assignor', "Modifying the number of job per event")
                sendEmail("Modifying the job per events", "the workflow %s is too heavy in number of jobs explosion" % wfo.name)

        # Handle run-dependent MC
        pstring = wfh.processingString()
        if 'PU_RD' in pstring:
            numEvents = wfh.getRequestNumEvents()
            eventsPerLumi = [getDatasetEventsPerLumi(prim) for prim in primary]
            eventsPerLumi = sum(eventsPerLumi) / float(len(eventsPerLumi))
            reqJobs = 500
            if 'PU_RD2' in pstring:
                reqJobs = 2000
            ## aim for ~reqJobs jobs, with a 1.4 safety factor
            eventsPerJob = int(numEvents / (reqJobs * 1.4))
            lumisPerJob = int(eventsPerJob / eventsPerLumi)
            if lumisPerJob == 0:
                ## lumis are too big : have to split by events instead
                sendEmail("issue with event splitting for run-dependent MC", "%s needs to be split by event with %s per job" % (wfo.name, eventsPerJob))
                wfh.sendLog('assignor', "%s needs to be split by event with %s per job" % (wfo.name, eventsPerJob))
                parameters['EventsPerJob'] = eventsPerJob
            else:
                spl = wfh.getSplittings()[0]
                ## NOTE(review): the first assignment is immediately overwritten by the
                ## second — 'events_per_job' can never win; confirm this is intended
                eventsPerJobEstimated = spl['events_per_job'] if 'events_per_job' in spl else None
                eventsPerJobEstimated = spl['avg_events_per_job'] if 'avg_events_per_job' in spl else None
                if eventsPerJobEstimated and eventsPerJobEstimated > eventsPerJob:
                    sendEmail("setting lumi splitting for run-dependent MC", "%s was assigned with %s lumis/job" % (wfo.name, lumisPerJob))
                    wfh.sendLog('assignor', "%s was assigned with %s lumis/job" % (wfo.name, lumisPerJob))
                    parameters['LumisPerJob'] = lumisPerJob
                else:
                    sendEmail("leaving splitting untouched for PU_RD*", "please check on " + wfo.name)
                    wfh.sendLog('assignor', "leaving splitting untouched for PU_RD*, please check.")

        ## actual assignment call to reqmgr
        result = reqMgrClient.assignWorkflow(url, wfo.name, team, parameters)

        # set status
        if not options.test:
            if result:
                wfo.status = 'away'
                session.commit()
                n_assigned += 1
                try:
                    ## refetch information and lock output
                    new_wfi = workflowInfo(url, wfo.name)
                    (_, prim, _, sec) = new_wfi.getIO()
                    for secure in list(prim) + list(sec) + new_wfi.request['OutputDatasets']:
                        ## lock all outputs flat
                        NLI.lock(secure)
                    #for site in [SI.CE_to_SE(site) for site in sites_allowed]:
                    #    for output in new_wfi.request['OutputDatasets']:
                    #        LI.lock( output, site, 'dataset in production')
                    #    for primary in prim:
                    #        LI.lock( primary, site, 'dataset used in input')
                    #    for secondary in sec:
                    #        LI.lock( secondary, site, 'required for mixing' )
                except Exception as e:
                    ## locking failure must not undo the assignment : report and move on
                    print "fail in locking output"
                    print str(e)
                    sendEmail("failed locking of output", str(e))
            else:
                print "ERROR could not assign", wfo.name
        else:
            pass

    print "Assignment summary:"
    sendLog('assignor', "Assigned %d Stalled %s" % (n_assigned, n_stalled))
time_point("Starting addhoc") for item in addHocLocks: ds = item.split('#')[0] LI.lock(ds, reason='addhoc lock') newly_locking.add(ds) time_point("Starting reversed statuses check") for status in statuses: print time.asctime(time.gmtime()), "CEST, fetching", status time_point("checking %s" % status, sub_lap=True) wfls = getWorkflows(url, status=status, details=True) print len(wfls), "in", status for wl in wfls: wfi = workflowInfo(url, wl['RequestName'], request=wl, spec=False) (_, primaries, _, secondaries) = wfi.getIO() outputs = wfi.request['OutputDatasets'] ## unknonw to the system known = session.query(Workflow).filter( Workflow.name == wl['RequestName']).all() if not known: print wl['RequestName'], "is unknown to unified, relocking all I/O" for dataset in list(primaries) + list(secondaries) + outputs: print "\t", dataset also_locking_from_reqmgr.add(dataset) continue if status == 'assignment-approved': if all([wfo.status.startswith('considered') for wfo in known]): ## skip those only assignment-approved / considered
def getPriority(url, workflow): return workflowInfo( url,workflow).getPriority()
def assignor(url ,specific = None, talk=True, options=None): if userLock(): return if duplicateLock(): return if not componentInfo().check(): return UC = unifiedConfiguration() CI = campaignInfo() #SI = siteInfo() SI = global_SI() #NLI = newLockInfo() #if not NLI.free() and not options.go: return LI = lockInfo() if not LI.free() and not options.go: return n_assigned = 0 n_stalled = 0 wfos=[] fetch_from = [] if specific or options.early: fetch_from.extend(['considered','staging']) if specific: fetch_from.extend(['considered-tried']) fetch_from.extend(['staged']) if options.from_status: fetch_from = options.from_status.split(',') print "Overriding to read from",fetch_from for status in fetch_from: wfos.extend(session.query(Workflow).filter(Workflow.status==status).all()) ## in case of partial, go for fetching a list from json ? #if options.partial and not specific: # pass dataset_endpoints = json.loads(open('%s/dataset_endpoints.json'%monitor_dir).read()) aaa_mapping = json.loads(open('%s/equalizor.json'%monitor_pub_dir).read())['mapping'] all_stuck = set() all_stuck.update( json.loads( open('%s/stuck_transfers.json'%monitor_dir).read() )) all_stuck.update( getAllStuckDataset()) max_per_round = UC.get('max_per_round').get('assignor',None) max_cpuh_block = UC.get('max_cpuh_block') random.shuffle( wfos ) for wfo in wfos: if options.limit and (n_stalled+n_assigned)>options.limit: break if max_per_round and (n_stalled+n_assigned)>max_per_round: break if specific: if not any(map(lambda sp: sp in wfo.name,specific.split(','))): continue #if not specific in wfo.name: continue print "\n\n" wfh = workflowInfo( url, wfo.name) if options.priority and int(wfh.request['RequestPriority']) < options.priority: continue options_text="" if options.early: options_text+=", early option is ON" if options.partial: options_text+=", partial option is ON" options_text+=", good fraction is %.2f"%options.good_enough wfh.sendLog('assignor',"%s to be assigned%s"%(wfo.name, options_text)) ## the 
site whitelist takes into account siteInfo, campaignInfo, memory and cores (lheinput,primary,parent,secondary, sites_allowed) = wfh.getSiteWhiteList() output_tiers = list(set([o.split('/')[-1] for o in wfh.request['OutputDatasets']])) is_stuck = (all_stuck & primary) if is_stuck: wfh.sendLog('assignor',"%s are stuck input"%(','.join( is_stuck))) ## check if by configuration we gave it a GO no_go = False if not wfh.go(log=True) and not options.go: no_go = True allowed_secondary = {} assign_parameters = {} check_secondary = False for campaign in wfh.getCampaigns(): if campaign in CI.campaigns: assign_parameters.update( CI.campaigns[campaign] ) if campaign in CI.campaigns and 'secondaries' in CI.campaigns[campaign]: if CI.campaigns[campaign]['secondaries']: allowed_secondary.update( CI.campaigns[campaign]['secondaries'] ) check_secondary = True if campaign in CI.campaigns and 'banned_tier' in CI.campaigns[campaign]: banned_tier = list(set(CI.campaigns[campaign]['banned_tier']) & set(output_tiers)) if banned_tier: no_go=True wfh.sendLog('assignor','These data tiers %s are not allowed'%(','.join( banned_tier))) sendLog('assignor','These data tiers %s are not allowed'%(','.join( banned_tier)), level='critical') if secondary and check_secondary: if (set(secondary)&set(allowed_secondary.keys())!=set(secondary)): wfh.sendLog('assignor','%s is not an allowed secondary'%(', '.join(set(secondary)-set(allowed_secondary.keys())))) sendLog('assignor','%s is not an allowed secondary'%(', '.join(set(secondary)-set(allowed_secondary.keys()))), level='critical') if not options.go: no_go = True ## then get whether there is something more to be done by secondary for sec in secondary: if sec in allowed_secondary:# and 'parameters' in allowed_secondary[sec]: assign_parameters.update( allowed_secondary[sec] ) if no_go: n_stalled+=1 continue ## check on current status for by-passed assignment if wfh.request['RequestStatus'] !='assignment-approved': if not options.test: 
wfh.sendLog('assignor',"setting %s away and skipping"%wfo.name) ## the module picking up from away will do what is necessary of it wfo.wm_status = wfh.request['RequestStatus'] wfo.status = 'away' session.commit() continue else: print wfo.name,wfh.request['RequestStatus'] ## retrieve from the schema, dbs and reqMgr what should be the next version version=wfh.getNextVersion() if not version: if options and options.ProcessingVersion: version = options.ProcessingVersion else: wfh.sendLog('assignor',"cannot decide on version number") n_stalled+=1 wfo.status = 'trouble' session.commit() continue original_sites_allowed = copy.deepcopy( sites_allowed ) wfh.sendLog('assignor',"Site white list %s"%sorted(sites_allowed)) override_sec_location = CI.get(wfh.request['Campaign'], 'SecondaryLocation', []) blocks = [] if 'BlockWhitelist' in wfh.request: blocks = wfh.request['BlockWhitelist'] if 'RunWhitelist' in wfh.request and wfh.request['RunWhitelist']: ## augment with run white list for dataset in primary: blocks = list(set( blocks + getDatasetBlocks( dataset, runs=wfh.request['RunWhitelist'] ) )) wfh.sendLog('assignor',"Allowed %s"%sorted(sites_allowed)) secondary_locations=None primary_aaa = options.primary_aaa secondary_aaa = options.secondary_aaa do_partial = False #options.good_enough if options.partial else 0 if 'Campaign' in wfh.request and wfh.request['Campaign'] in CI.campaigns: assign_parameters.update( CI.campaigns[wfh.request['Campaign']] ) if 'primary_AAA' in assign_parameters: primary_aaa = primary_aaa or assign_parameters['primary_AAA'] if 'secondary_AAA' in assign_parameters: secondary_aaa = secondary_aaa or assign_parameters['secondary_AAA'] if 'partial_copy' in assign_parameters: ## can this only work if there is a stuck input ? maybe not ## this is a number. 
0 means no print "Could do partial disk copy assignment" if is_stuck or options.partial: do_partial = assign_parameters['partial_copy'] wfh.sendLog('assignor',"Overiding partial copy assignment to %.2f fraction"% do_partial) #sendEmail('stuck input to assignment','%s is stuck for assigning %s and going fractional'%(','.join( is_stuck), wfo.name)) do_partial = options.good_enough if options.partial else do_partial for sec in list(secondary): if override_sec_location: print "We don't care where the secondary is" print "Cannot pass for now" #sendEmail("tempting to pass sec location check","but we cannot yet IMO") #pass if secondary_aaa: #just continue without checking continue presence = getDatasetPresence( url, sec ) print sec print json.dumps(presence, indent=2) one_secondary_locations = [site for (site,(there,frac)) in presence.items() if frac>98.] #one_secondary_locations = [site for (site,(there,frac)) in presence.items() if there] if secondary_locations==None: secondary_locations = one_secondary_locations else: secondary_locations = list(set(secondary_locations) & set(one_secondary_locations)) ## reduce the site white list to site with secondary only #sites_allowed = [site for site in sites_allowed if any([osite.startswith(site) for osite in one_secondary_locations])] sites_allowed = [site for site in sites_allowed if SI.CE_to_SE(site) in one_secondary_locations] wfh.sendLog('assignor',"From secondary requirement, now Allowed%s"%sorted(sites_allowed)) initial_sites_allowed = copy.deepcopy( sites_allowed ) ## keep track of this, after secondary input location restriction : that's how you want to operate it sites_all_data = copy.deepcopy( sites_allowed ) sites_with_data = copy.deepcopy( sites_allowed ) sites_with_any_data = copy.deepcopy( sites_allowed ) primary_locations = None available_fractions = {} set_lfn = '/store/mc' ## by default endpoints = set() for prim in list(primary): if prim in dataset_endpoints: print "endpoints from 
stagor",dataset_endpoints[prim] endpoints.update( dataset_endpoints[prim] ) set_lfn = getLFNbase( prim ) presence = getDatasetPresence( url, prim , only_blocks=blocks) if talk: print prim print json.dumps(presence, indent=2) available_fractions[prim] = getDatasetBlocksFraction(url, prim, sites = [SI.CE_to_SE(site) for site in sites_allowed] , only_blocks = blocks) #sites_all_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,(there,frac)) in presence.items() if there]])] #sites_with_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,frac) in presence.items() if frac[1]>90.]])] sites_all_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite,(there,frac)) in presence.items() if there]] sites_with_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite,frac) in presence.items() if frac[1]>90.]] sites_with_any_data = [site for site in sites_with_any_data if SI.CE_to_SE(site) in presence.keys()] wfh.sendLog('assignor',"Holding the data but not allowed %s"%sorted(list(set([se_site for se_site in presence.keys() if not SI.SE_to_CE(se_site) in sites_allowed])))) if primary_locations==None: primary_locations = presence.keys() else: primary_locations = list(set(primary_locations) & set(presence.keys() )) sites_with_data = list(set(sites_with_data)) sites_with_any_data = list(set(sites_with_any_data)) opportunistic_sites=[] down_time = False ## opportunistic running where any piece of data is available if secondary_locations or primary_locations: ## intersection of both any pieces of the primary and good IO #opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations) & set(SI.sites_with_goodIO)) - set(sites_allowed))] if secondary_locations and primary_locations: opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations)) 
- set([SI.CE_to_SE(site) for site in sites_allowed]))] elif primary_locations: opportunistic_sites = [SI.SE_to_CE(site) for site in list(set(primary_locations) - set([SI.CE_to_SE(site) for site in sites_allowed]))] else: opportunistic_sites = [] wfh.sendLog('assignor',"We could be running in addition at %s"% sorted(opportunistic_sites)) if any([osite in SI.sites_not_ready for osite in opportunistic_sites]): wfh.sendLog('assignor',"One of the usable site is in downtime %s"%([osite for osite in opportunistic_sites if osite in SI.sites_not_ready])) down_time = True ## should this be send back to considered ? ## should be 2 but for the time-being let's lower it to get things going copies_wanted,cpuh = wfh.getNCopies() wfh.sendLog('assignor',"we need %s CPUh"%cpuh) if cpuh>max_cpuh_block and not options.go: #sendEmail('large workflow','that wf %s has a large number of CPUh %s, not assigning, please check the logs'%(wfo.name, cpuh))#,destination=['*****@*****.**']) sendLog('assignor','%s requires a large numbr of CPUh %s , not assigning, please check with requester'%( wfo.name, cpuh), level='critical') wfh.sendLog('assignor',"Requiring a large number of CPUh %s, not assigning"%cpuh) continue if 'Campaign' in wfh.request and wfh.request['Campaign'] in CI.campaigns and 'maxcopies' in CI.campaigns[wfh.request['Campaign']]: copies_needed_from_campaign = CI.campaigns[wfh.request['Campaign']]['maxcopies'] copies_wanted = min(copies_needed_from_campaign, copies_wanted) if not options.early: less_copies_than_requested = UC.get("less_copies_than_requested") copies_wanted = max(1,copies_wanted-less_copies_than_requested) # take one out for the efficiency else: ## find out whether there is a site in the whitelist, that is lacking jobs and reduce to 1 copy needed to get things going pass wfh.sendLog('assignor',"needed availability fraction %s"% copies_wanted) ## should also check on number of sources, if large enough, we should be able to overflow most, efficiently ## default back 
to white list to original white list with any data wfh.sendLog('assignor',"Allowed sites :%s"% sorted(sites_allowed)) if primary_aaa: ## remove the sites not reachable localy if not in having the data if not sites_all_data: wfh.sendLog('assignor',"Overiding the primary on AAA setting to Off") primary_aaa=False else: aaa_grid = set(sites_all_data) for site in list(aaa_grid): aaa_grid.update( aaa_mapping.get(site,[]) ) sites_allowed = list(set(initial_sites_allowed) & aaa_grid) wfh.sendLog('assignor',"Selected to read primary through xrootd %s"%sorted(sites_allowed)) if not primary_aaa: sites_allowed = sites_with_any_data wfh.sendLog('assignor',"Selected for any data %s"%sorted(sites_allowed)) ### check on endpoints for on-going transfers if do_partial: if endpoints: end_sites = [SI.SE_to_CE(s) for s in endpoints] sites_allowed = list(set(sites_allowed + end_sites)) if down_time and not any(osite in SI.sites_not_ready for osite in end_sites): print "Flip the status of downtime, since our destinations are good" down_time = False print "with added endpoints",sorted(end_sites) else: print "Cannot do partial assignment without knowin the endpoints" n_stalled+=1 continue #if not len(sites_allowed): # if not options.early: # wfh.sendLog('assignor',"cannot be assign with no matched sites") # sendLog('assignor','%s has no whitelist'% wfo.name, level='critical') # n_stalled+=1 # continue low_pressure = SI.sites_low_pressure(0.4) ## if any of the site allowed is low pressure : reduce to 1 copy so that it gets started allowed_and_low = sorted(set(low_pressure) & set(sites_allowed)) if allowed_and_low: wfh.sendLog('assignor',"The workflow can run at %s under low pressure currently"%( ','.join( allowed_and_low ))) copies_wanted = max(1., copies_wanted-1.) if available_fractions and not all([available>=copies_wanted for available in available_fractions.values()]): not_even_once = not all([available>=1. 
for available in available_fractions.values()]) above_good = all([available >= do_partial for available in available_fractions.values()]) wfh.sendLog('assignor',"The input dataset is not available %s times, only %s"%( copies_wanted, available_fractions.values())) if down_time and not options.go and not options.early: wfo.status = 'considered' session.commit() wfh.sendLog('assignor',"sending back to considered because of site downtime, instead of waiting") #sendEmail( "cannot be assigned due to downtime","%s is not sufficiently available, due to down time of a site in the whitelist. check the assignor logs. sending back to considered."% wfo.name) sendLog('assignor','%s is not sufficiently available, due to down time of a site in the whitelist. sending back to considered.'%( wfo.name ), level='delay') n_stalled+=1 continue #pass print json.dumps(available_fractions) if (options.go and not_even_once) or not options.go: known = [] try: known = json.loads(open('cannot_assign.json').read()) except: pass if not wfo.name in known and not options.limit and not options.go and not options.early and not (do_partial and above_good): wfh.sendLog('assignor',"cannot be assigned, %s is not sufficiently available.\n %s"%(wfo.name,json.dumps(available_fractions))) #sendEmail( "cannot be assigned","%s is not sufficiently available.\n %s"%(wfo.name,json.dumps(available_fractions))) known.append( wfo.name ) open('cannot_assign.json','w').write(json.dumps( known, indent=2)) if options.early: if wfo.status == 'considered': wfh.sendLog('assignor',"setting considered-tried") wfo.status = 'considered-tried' session.commit() else: print "tried but status is",wfo.status if do_partial and above_good: print "Will move on with partial locations" else: n_stalled+=1 continue if not len(sites_allowed): if not options.early: wfh.sendLog('assignor',"cannot be assign with no matched sites") sendLog('assignor','%s has no whitelist'% wfo.name, level='critical') n_stalled+=1 continue t1_only = [ce for ce 
in sites_allowed if ce.startswith('T1')] if t1_only: # try to pick from T1 only first sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in t1_only])] else: # then pick any otherwise sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in sites_allowed])] wfh.sendLog('assignor',"Placing the output on %s"%sites_out) parameters={ 'SiteWhitelist' : sites_allowed, 'NonCustodialSites' : sites_out, 'AutoApproveSubscriptionSites' : list(set(sites_out)), 'AcquisitionEra' : wfh.acquisitionEra(), 'ProcessingString' : wfh.processingString(), 'MergedLFNBase' : set_lfn, 'ProcessingVersion' : version, } if primary_aaa: parameters['TrustSitelists'] = True wfh.sendLog('assignor',"Reading primary through xrootd at %s"%sorted(sites_allowed)) if secondary_aaa: parameters['TrustPUSitelists'] = True wfh.sendLog('assignor',"Reading secondary through xrootd at %s"%sorted(sites_allowed)) if 'parameters' in assign_parameters: parameters.update( assign_parameters['parameters'] ) ## plain assignment here team='production' if os.getenv('UNIFIED_TEAM'): team = os.getenv('UNIFIED_TEAM') if options and options.team: team = options.team if False and 'T2_CH_CERN' in parameters['SiteWhitelist']: ## add some check on ### the amount pending to HLT ### the size of the request ### the priority of the request (maybe not if we decide to overflow during runs) parameters['SiteWhitelist'] = ['T2_CH_CERN_HLT'] team = 'hlt' ## reduce the splitting by factor of 4, regardless of type of splitting sendEmail("sending work to HLT","%s was assigned to HLT"%wfo.name) ##parse options entered in command line if any if options: for key in reqMgrClient.assignWorkflow.keys: v=getattr(options,key) if v!=None: if type(v)==str and ',' in v: parameters[key] = filter(None,v.split(',')) else: parameters[key] = v if lheinput: ## throttle reading LHE article wfh.sendLog('assignor', 'Setting the number of events per job to 500k max') parameters['EventsPerJob'] = 500000 ## pick up campaign specific assignment parameters 
#parameters.update( CI.parameters(wfh.request['Campaign']) ) parameters.update( assign_parameters.get('parameters',{}) ) if not options.test: parameters['execute'] = True split_check = wfh.checkWorkflowSplitting() if split_check!=True: parameters.update( split_check ) if 'NoGo' in split_check.values(): wfh.sendLog('assignor', "Failing splitting check") sendLog('assignor','the workflow %s is failing the splitting check. Verify in the logs'% wfo.name, level='critical') n_stalled+=1 continue if 'EventBased' in split_check.values(): wfh.sendLog('assignor', "Falling back to event splitting.") #sendEmail("Fallback to EventBased","the workflow %s is too heavy to be processed as it is. Fallback to EventBased splitting"%wfo.name) sendLog('assignor','the workflow %s is too heavy to be processed as it is. Fallback to EventBased splitting ?'%wfo.name, level='critical') ## we have a problem here, that EventBased should never be used as a backup if not options.go: n_stalled+=1 continue continue ## skip all together elif 'EventsPerJob' in split_check.values(): wfh.sendLog('assignor', "Modifying the number of events per job") #sendEmail("Modifying the job per events","the workflow %s is too heavy in number of jobs explosion"%wfo.name) sendLog('assignor',"the workflow %s is too heavy in number of jobs explosion"%wfo.name, level='critical') elif 'EventsPerLumi' in split_check.values(): wfh.sendLog('assignor', "Modifying the number of events per lumi to be able to process this") # Handle run-dependent MC pstring = wfh.processingString() if 'PU_RD' in pstring: numEvents = wfh.getRequestNumEvents() eventsPerLumi = [getDatasetEventsPerLumi(prim) for prim in primary] eventsPerLumi = sum(eventsPerLumi)/float(len(eventsPerLumi)) reqJobs = 500 if 'PU_RD2' in pstring: reqJobs = 2000 eventsPerJob = int(numEvents/(reqJobs*1.4)) lumisPerJob = int(eventsPerJob/eventsPerLumi) if lumisPerJob==0: #sendEmail("issue with event splitting for run-dependent MC","%s needs to be split by event with %s per 
job"%(wfo.name, eventsPerJob)) sendLog('assignor', "%s needs to be split by event with %s per job"%(wfo.name, eventsPerJob), level='critical') wfh.sendLog('assignor', "%s needs to be split by event with %s per job"%(wfo.name, eventsPerJob)) parameters['EventsPerJob'] = eventsPerJob else: spl = wfh.getSplittings()[0] eventsPerJobEstimated = spl['events_per_job'] if 'events_per_job' in spl else None eventsPerJobEstimated = spl['avg_events_per_job'] if 'avg_events_per_job' in spl else None if eventsPerJobEstimated and eventsPerJobEstimated > eventsPerJob: #sendEmail("setting lumi splitting for run-dependent MC","%s was assigned with %s lumis/job"%( wfo.name, lumisPerJob)) sendLog('assignor',"%s was assigned with %s lumis/job"%( wfo.name, lumisPerJob), level='critical') wfh.sendLog('assignor',"%s was assigned with %s lumis/job"%( wfo.name, lumisPerJob)) parameters['LumisPerJob'] = lumisPerJob else: #sendEmail("leaving splitting untouched for PU_RD*","please check on "+wfo.name) sendLog('assignor',"leaving splitting untouched for %s, please check on %s"%( pstring, wfo.name), level='critical') wfh.sendLog('assignor',"leaving splitting untouched for PU_RD*, please check.") result = reqMgrClient.assignWorkflow(url, wfo.name, team, parameters) # set status if not options.test: if result: wfo.status = 'away' session.commit() n_assigned+=1 wfh.sendLog('assignor',"Properly assigned\n%s"%(json.dumps( parameters, indent=2))) try: ## refetch information and lock output new_wfi = workflowInfo( url, wfo.name) (_,prim,_,sec) = new_wfi.getIO() for secure in list(prim)+list(sec)+new_wfi.request['OutputDatasets']: ## lock all outputs flat #NLI.lock( secure ) LI.lock( secure, reason = 'assigning') #for site in [SI.CE_to_SE(site) for site in sites_allowed]: # for output in new_wfi.request['OutputDatasets']: # LI.lock( output, site, 'dataset in production') # for primary in prim: # LI.lock( primary, site, 'dataset used in input') # for secondary in sec: # LI.lock( secondary, site, 
'required for mixing' ) except Exception as e: print "fail in locking output" print str(e) sendEmail("failed locking of output",str(e)) else: wfh.sendLog('assignor',"Failed to assign. Please check the logs") print "ERROR could not assign",wfo.name else: pass print "Assignment summary:" sendLog('assignor',"Assigned %d Stalled %s"%(n_assigned, n_stalled))
def getEra(url, workflow):
    ## convenience wrapper: fetch the workflow cache once and ask it for the era
    info = workflowInfo(url, workflow)
    return info.getEra()
def close(self):
    """Try to close out and announce one workflow (self.wfo).

    Flow: honour the .closor_stop kill-switch; for relvals require the whole
    campaign batch to be closable; skip workflows already announced; compare
    each output dataset's lumi count against the request's TotalInputLumis;
    set outputs VALID and decide their placement / DDM handling; then announce
    through reqMgrClient.announceWorkflowCascade.

    The outcome is recorded on the instance instead of being returned:
      - self.to_status / self.to_wm_status : statuses to be applied afterwards
      - self.closing                       : True when the announcement went through
      - self.held                          : workflows that cannot be announced
      - self.outs, self.batch_warnings, self.batch_extreme_warnings : bookkeeping

    NOTE(review): `options` (options.force, and options.no_harvest in the
    commented block) is read as a module global here, not an attribute of
    self -- confirm it is defined at module scope when this runs.
    """
    # operator kill-switch: stop closing anything if the stop file exists
    if os.path.isfile('.closor_stop'):
        print "The closing of workflows is shortened"
        return
    url = self.url
    batch_go = self.batch_go
    CI = self.CI
    UC = self.UC
    wfo = self.wfo
    jump_the_line = self.jump_the_line
    batch_goodness = self.batch_goodness
    check_parentage_to_announce = UC.get('check_parentage_to_announce')
    check_fullcopy_to_announce = UC.get('check_fullcopy_to_announce')

    ## what is the expected #lumis
    self.wfi = workflowInfo(url, wfo.name)
    wfi = self.wfi
    wfo.wm_status = wfi.request['RequestStatus']

    if wfi.isRelval():
        # relvals are announced per batch: every workflow of the campaign must
        # be out of the "still processing" states before any one is closed
        has_batch_go = False
        batch_name = wfi.getCampaign()
        if not batch_name in batch_go:
            ## do the estimation of whether this can be announced : only once per batch
            in_batches = getWorkflowByCampaign(url, batch_name, details=True)
            batch_go[batch_name] = all(
                map(
                    lambda s: not s in [
                        'completed', 'running-open', 'running-closed',
                        'acquired', 'staged', 'staging', 'assigned',
                        'assignment-approved'
                    ], [r['RequestStatus'] for r in in_batches]))
        ## already verified
        has_batch_go = batch_go[batch_name]
        if not has_batch_go:
            wfi.sendLog(
                'closor',
                'Cannot close for now because the batch <a href=https://dmytro.web.cern.ch/dmytro/cmsprodmon/workflows.php?campaign=%s>%s</a> is not all close'
                % (batch_name, batch_name))
            return

    if wfi.request['RequestStatus'] in ['announced', 'normal-archived'
                                        ] and not options.force:
        ## manually announced ??
        self.to_status = 'done'
        self.to_wm_status = wfi.request['RequestStatus']
        wfi.sendLog(
            'closor',
            '%s is announced already : %s' % (wfo.name, self.to_wm_status))
        return

    if jump_the_line:
        wfi.sendLog('closor', 'Announcing while completing')

    # fallback of 1 keeps the completion fraction below from dividing by zero
    expected_lumis = 1
    if not 'TotalInputLumis' in wfi.request:
        print wfo.name, "has not been assigned yet, or the database is corrupted"
    elif wfi.request['TotalInputLumis'] == 0:
        print wfo.name, "is corrupted with 0 expected lumis"
    else:
        expected_lumis = wfi.request['TotalInputLumis']

    ## what are the outputs
    outputs = wfi.request['OutputDatasets']
    ## check whether the number of lumis is as expected for each
    all_OK = defaultdict(lambda: False)
    stats = defaultdict(int)
    #print outputs
    if len(outputs):
        print wfo.name, wfi.request['RequestStatus']
    for out in outputs:
        event_count, lumi_count = getDatasetEventsAndLumis(dataset=out)
        self.outs.append(Output(datasetname=out))
        odb = self.outs[-1]
        odb.workflow = wfo
        odb.nlumis = lumi_count
        odb.nevents = event_count
        # NOTE(review): "workfow_id" looks like a typo for workflow_id, but
        # presumably matches the Output table column name -- confirm against
        # the schema before renaming
        odb.workfow_id = wfo.id
        if odb.expectedlumis < expected_lumis:
            odb.expectedlumis = expected_lumis
        else:
            expected_lumis = odb.expectedlumis
        odb.date = time.mktime(time.gmtime())

        fraction = lumi_count / float(expected_lumis) * 100.
        completion_line = "%60s %d/%d = %3.2f%%" % (
            out, lumi_count, expected_lumis, fraction)
        wfi.sendLog('closor', "\t%s" % completion_line)
        # relval outputs below the goodness threshold get collected for the
        # batch warning summaries
        if wfi.isRelval() and fraction < batch_goodness:
            self.batch_warnings[wfi.getCampaign()].add(completion_line)
            if fraction < 50:
                self.batch_extreme_warnings[wfi.getCampaign()].add(
                    completion_line)
        stats[out] = lumi_count
        all_OK[out] = True

    ## check for at least one full copy prior to moving on
    # NOTE(review): the full-copy verification below is disabled; every output
    # is force-marked OK regardless of check_fullcopy_to_announce
    #in_full = {}
    for out in outputs:
        all_OK[out] = True
        #in_full[out] = []
        #presence = getDatasetPresence( url, out )
        #where = [site for site,info in presence.items() if info[0]]
        #if where:
        #    all_OK[out] = True
        #    print out,"is in full at",",".join(where)
        #    in_full[out] = copy.deepcopy(where)
        #else:
        #    going_to = wfi.request['NonCustodialSites']+wfi.request['CustodialSites']
        #    wfi.sendLog('closor',"%s is not in full anywhere. send to %s"%(out, ",".join(sorted(going_to))))
        #    at_destination = dict([(k,v) for (k,v) in presence.items() if k in going_to])
        #    else_where = dict([(k,v) for (k,v) in presence.items() if not k in going_to])
        #    print json.dumps( at_destination )
        #    print json.dumps( else_where, indent=2 )
        ## do the full stuck transfer study, missing files and shit !
        #for there in going_to:
        #    late_info = findLateFiles(url, out, going_to = there )
        #    for l in late_info:
        #        l.update({"workflow":wfo.name,"dataset":out})
        #    self.all_late_files.extend( late_info )
        #    if check_fullcopy_to_announce:
        #        ## only set this false if the check is relevant
        #        all_OK[out] = False

    ## verify if we have to do harvesting
    #if not options.no_harvest and not jump_the_line:
    #    #(OK, requests) = spawn_harvesting(url, wfi, in_full)
    #    sites_for_DQMHarvest = UC.get("sites_for_DQMHarvest")
    #    (OK, requests) = spawn_harvesting(url, wfi, sites_for_DQMHarvest)
    #    print "Harvesting workflow has been created and assigned to: "
    #    print sites_for_DQMHarvest
    #    all_OK.update( OK )

    ## only that status can let me go into announced
    if all(all_OK.values()) and (
        (wfi.request['RequestStatus'] in ['closed-out'])
            or options.force or jump_the_line):
        print wfo.name, "to be announced"
        results = []
        if not results:
            for out in outputs:
                print "dealing with", out
                # skip outputs with zero observed lumis
                if out in stats and not stats[out]:
                    continue
                _, dsn, process_string, tier = out.split('/')
                if all_OK[out]:
                    print "setting valid"
                    results.append(
                        setDatasetStatus(out, 'VALID', withFiles=False))
                if all_OK[out] and wfi.isRelval():
                    ## make the specific relval rules and the replicas
                    ## figure the destination(s) out
                    destinations = set()
                    if tier in UC.get("tiers_to_rucio_relval"):
                        wfi.sendLog(
                            'closor',
                            "Data Tier: %s is blacklisted, so skipping dataset placement for: %s"
                            % (tier, out))
                        continue
                    if tier != "RECO" and tier != "ALCARECO":
                        destinations.add('T2_CH_CERN')
                    if tier == "GEN-SIM":
                        destinations.add('T1_US_FNAL_Disk')
                    if tier == "GEN-SIM-DIGI-RAW":
                        destinations.add('T1_US_FNAL_Disk')
                    if tier == "GEN-SIM-RECO":
                        destinations.add('T1_US_FNAL_Disk')
                    if "RelValTTBar" in dsn and "TkAlMinBias" in process_string and tier != "ALCARECO":
                        destinations.add('T2_CH_CERN')
                    if "MinimumBias" in dsn and "SiStripCalMinBias" in process_string and tier != "ALCARECO":
                        destinations.add('T2_CH_CERN')
                    if destinations:
                        wfi.sendLog(
                            'closor', '%s to go to %s'
                            % (out, ', '.join(sorted(destinations))))
                elif all_OK[out]:
                    # non-relval output: decide whether DDM should place it
                    campaign = None
                    try:
                        campaign = out.split('/')[2].split('-')[0]
                    except:
                        if 'Campaign' in wfi.request and wfi.request[
                                'Campaign']:
                            campaign = wfi.request['Campaign']
                    to_DDM = False
                    ## campaign override
                    if campaign and campaign in CI.campaigns and 'toDDM' in CI.campaigns[
                            campaign] and tier in CI.campaigns[campaign][
                                'toDDM']:
                        to_DDM = True
                    ## by typical enabling
                    if tier in UC.get("tiers_to_rucio_nonrelval"):
                        wfi.sendLog(
                            'closor',
                            "Data Tier: %s is blacklisted, so skipping dataset placement for: %s"
                            % (tier, out))
                        continue
                    if tier in UC.get("tiers_to_DDM"):
                        to_DDM = True
                    ## check for unitarity
                    if not tier in UC.get("tiers_no_DDM") + UC.get(
                            "tiers_to_DDM"):
                        print "tier", tier, "neither TO or NO DDM for", out
                        results.append('Not recognitized tier %s' % tier)
                        #sendEmail("failed DDM injection","could not recognize %s for injecting in DDM"% out)
                        sendLog(
                            'closor',
                            "could not recognize %s for injecting in DDM"
                            % out,
                            level='critical')
                        continue
                    n_copies = 1
                    destinations = []
                    if to_DDM and campaign and campaign in CI.campaigns and 'DDMcopies' in CI.campaigns[
                            campaign]:
                        ddm_instructions = CI.campaigns[campaign][
                            'DDMcopies']
                        if type(ddm_instructions) == int:
                            n_copies = CI.campaigns[campaign]['DDMcopies']
                        elif type(ddm_instructions) == dict:
                            ## a more fancy configuration
                            for ddmtier, indication in ddm_instructions.items(
                            ):
                                if ddmtier == tier or ddmtier in [
                                        '*', 'all'
                                ]:
                                    ## this is for us
                                    if 'N' in indication:
                                        n_copies = indication['N']
                                    if 'host' in indication:
                                        destinations = indication['host']
                    destination_spec = ""
                    if destinations:
                        destination_spec = "--destination=" + ",".join(
                            destinations)
                    group_spec = ""  ## not used yet
                else:
                    print wfo.name, "no stats for announcing", out
                    results.append('No Stats')
            # adding check for PrentageResolved flag from ReqMgr:
            if wfi.request[
                    'RequestType'] == 'StepChain' and check_parentage_to_announce:
                if wfi.request['ParentageResolved']:
                    results.append(True)
                else:
                    wfi.sendLog(
                        'closor',
                        "Delayed announcement of %s due to unresolved Parentage dependencies"
                        % wfi.request['RequestName'])
                    results.append('No ParentageResolved')
            if all(
                    map(lambda result: result in ['None', None, True],
                        results)):
                if not jump_the_line:
                    ## only announce if all previous are fine
                    res = reqMgrClient.announceWorkflowCascade(
                        url, wfo.name)
                    if not res in ['None', None]:
                        ## check the status again, it might well have toggled
                        wl_bis = workflowInfo(url, wfo.name)
                        self.to_wm_status = wl_bis.request['RequestStatus']
                        if wl_bis.request['RequestStatus'] in [
                                'announced', 'normal-archived'
                        ]:
                            res = None
                        else:
                            res = reqMgrClient.announceWorkflowCascade(
                                url, wfo.name)
                    results.append(res)
                    print results
            if all(
                    map(lambda result: result in ['None', None, True],
                        results)):
                if jump_the_line:
                    if not 'announced' in wfo.status:
                        self.to_status = wfo.status.replace(
                            'announce', 'announced')
                else:
                    self.to_status = 'done'
                self.closing = True
                wfi.sendLog('closor', "workflow outputs are announced")
            else:
                wfi.sendLog(
                    'closor', "Error with %s to be announced \n%s" %
                    (wfo.name, json.dumps(results)))
    elif wfi.request['RequestStatus'] in [
            'failed', 'aborted', 'aborted-archived', 'rejected',
            'rejected-archived', 'aborted-completed'
    ]:
        # the workflow died in ReqMgr: relvals are simply forgotten, the
        # rest is flagged for operator attention
        if wfi.isRelval():
            self.to_status = 'forget'
            self.to_wm_status = wfi.request['RequestStatus']
            wfi.sendLog(
                'closor',
                "%s is %s, but will not be set in trouble to find a replacement."
                % (wfo.name, self.to_wm_status))
        else:
            self.to_status = 'trouble'
            self.to_wm_status = wfi.request['RequestStatus']
    else:
        print wfo.name, "not good for announcing:", wfi.request[
            'RequestStatus']
        wfi.sendLog('closor', "cannot be announced")
        self.held.add(wfo.name)
def getCurrentStatus(url, workflow):
    ## convenience wrapper around the workflowInfo cache object
    info = workflowInfo(url, workflow)
    return info.getCurrentStatus()
def assignWorkflow(url, workflowname, team, parameters ): #local import so it doesn't screw with all other stuff from utils import workflowInfo defaults = copy.deepcopy( assignWorkflow.defaults ) defaults["Team"+team] = "checked" defaults["checkbox"+workflowname] = "checked" from utils import workflowInfo wf = workflowInfo(url, workflowname) # set the maxrss watchdog to what is specified in the request defaults['MaxRSS'] = wf.request['Memory']*1024+10 defaults.update( parameters ) if not set(assignWorkflow.mandatories).issubset( set(parameters.keys())): print "There are missing parameters" print list(set(assignWorkflow.mandatories) - set(parameters.keys())) return False if wf.request['RequestType'] == 'ReDigi': defaults['Dashboard'] = 'reprocessing' defaults['dashboard'] = 'reprocessing' if defaults['SiteBlacklist'] and defaults['SiteWhitelist']: defaults['SiteWhitelist'] = list(set(defaults['SiteWhitelist']) - set(defaults['SiteBlacklist'])) defaults['SiteBlacklist'] = [] if not defaults['SiteWhitelist']: print "Cannot assign with no site whitelist" return False for aux in assignWorkflow.auxiliaries: if aux in defaults: par = defaults.pop( aux ) if aux == 'EventsPerJob': wf = workflowInfo(url, workflowname) t = wf.firstTask() params = wf.getSplittings()[0] if par < params['events_per_job']: params.update({"requestName":workflowname, "splittingTask" : '/%s/%s'%(workflowname,t), "events_per_job": par, "splittingAlgo":"EventBased"}) print setWorkflowSplitting(url, params) elif aux == 'EventsPerLumi': wf = workflowInfo(url, workflowname) t = wf.firstTask() params = wf.getSplittings()[0] if params['splittingAlgo'] != 'EventBased': print "Ignoring changing events per lumi for",params['splittingAlgo'] continue (_,prim,_,_) = wf.getIO() if prim: print "Ignoring changing events per lumi for wf that take input" continue if str(par).startswith('x'): multiplier = float(str(par).replace('x','')) par = int(params['events_per_lumi'] * multiplier) else: if 'FilterEfficiency' in 
wf.request and wf.request['FilterEfficiency']: par = int(par/wf.request['FilterEfficiency']) params.update({"requestName":workflowname, "splittingTask" : '/%s/%s'%(workflowname,t), "events_per_lumi": par}) print setWorkflowSplitting(url, params) elif aux == 'SplittingAlgorithm': wf = workflowInfo(url, workflowname) ### do it for all major tasks #for (t,params) in wf.getTaskAndSplittings(): # params.update({"requestName":workflowname, # "splittingTask" : '/%s/%s'%(workflowname,t), # "splittingAlgo" : par}) # setWorkflowSplitting(url, params) t = wf.firstTask() params = wf.getSplittings()[0] params.update({"requestName":workflowname, "splittingTask" : '/%s/%s'%(workflowname,t), "splittingAlgo" : par}) #swap values if "avg_events_per_job" in params and not "events_per_job" in params: params['events_per_job' ] = params.pop('avg_events_per_job') print params print setWorkflowSplitting(url, params) elif aux == 'LumisPerJob': wf = workflowInfo(url, workflowname) t = wf.firstTask() #params = wf.getSplittings()[0] params = {"requestName":workflowname, "splittingTask" : '/%s/%s'%(workflowname,t), "lumis_per_job" : par, #"halt_job_on_file_boundaries" : True, "splittingAlgo" : "LumiBased"} print setWorkflowSplitting(url, params) else: print "No action for ",aux if not 'execute' in defaults or not defaults['execute']: print json.dumps( defaults ,indent=2) return False else: defaults.pop('execute') print json.dumps( defaults ,indent=2) if defaults['useSiteListAsLocation'] =='False' or defaults['useSiteListAsLocation'] == False: defaults.pop('useSiteListAsLocation') jsonEncodedParams = {} for paramKey in defaults.keys(): jsonEncodedParams[paramKey] = json.dumps(defaults[paramKey]) encodedParams = urllib.urlencode(jsonEncodedParams, False) #encodedParams = urllib.urlencode(parameters, True) headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"} conn = httplib.HTTPSConnection(url, cert_file = os.getenv('X509_USER_PROXY'), key_file = 
os.getenv('X509_USER_PROXY')) conn.request("POST", "/reqmgr/assign/handleAssignmentPage", encodedParams, headers) response = conn.getresponse() if response.status != 200: ## try again conn.request("POST", "/reqmgr/assign/handleAssignmentPage", encodedParams, headers) response = conn.getresponse() if response.status != 200: ## and again !! conn.request("POST", "/reqmgr/assign/handleAssignmentPage", encodedParams, headers) response = conn.getresponse() if response.status != 200: print 'could not assign request with following parameters:' for item in defaults.keys(): print item + ": " + str(defaults[item]) print 'Response from http call:' print 'Status:',response.status,'Reason:',response.reason print 'Explanation:' data = response.read() return False print 'Assigned workflow:',workflowname,'to site:',defaults['SiteWhitelist'],'and team',team conn.close() return True
def getProcString(url, workflow):
    ## convenience wrapper: build the workflowInfo cache and read the processing string
    info = workflowInfo(url, workflow)
    return info.getProcString()
def singleRecovery(url, task, initial, actions, do=False): payload = { "Requestor": os.getenv('USER'), "Group": 'DATAOPS', "RequestType": "Resubmission", "ACDCServer": initial['ConfigCacheUrl'], "ACDCDatabase": "acdcserver", "OriginalRequestName": initial['RequestName'], "OpenRunningTimeout": 0 } copy_over = [ 'PrepID', 'Campaign', 'RequestPriority', 'TimePerEvent', 'SizePerEvent', 'Group', 'Memory', 'RequestString', 'CMSSWVersion' ] for c in copy_over: if c in initial: payload[c] = copy.deepcopy(initial[c]) else: print c, "not in the initial payload" #a massage ? boost the recovery over the initial wf payload['RequestPriority'] *= 2 payload['RequestPriority'] = min(500000, payload['RequestPriority']) if actions: for action in actions: #if action.startswith('split'): # factor = int(action.split('-')[-1]) if '-' in action else 2 # print "Changing time per event (%s) by a factor %d"%( payload['TimePerEvent'], factor) # ## mention it's taking 2 times longer to have a 2 times finer splitting # payload['TimePerEvent'] = factor*payload['TimePerEvent'] if action.startswith('mem'): arg = action.split('-', 1)[-1] increase = set_to = None tasks, set_to = arg.split(':') if ':' in arg else (None, arg) tasks = tasks.split(',') if tasks else [] if set_to.startswith('+'): increase = int(set_to[1:]) else: set_to = int(set_to) ## increase the memory requirement by 1G if 'TaskChain' in initial: mem_dict = {} it = 1 while True: t = 'Task%d' % it it += 1 if t in initial: tname = payload.setdefault(t, initial[t])['TaskName'] mem = mem_dict.setdefault(tname, payload[t]['Memory']) if tasks and not tname in tasks: print tname, "not concerned" continue if set_to: mem_dict[tname] = set_to else: mem_dict[tname] += increase else: break payload['Memory'] = mem_dict else: payload['Memory'] = set_to #increase = int(action.split('-')[-1]) if '-' in action else 1000 ## increase the memory requirement by 1G #payload['Memory'] += increase if action.startswith('split') and ( initial['RequestType'] in 
['MonteCarlo'] or (initial['RequestType'] in ['TaskChain'] and not 'InputDataset' in initial['Task1'])): print "I should not be doing splitting for this type of request", initial[ 'RequestName'] return None if action.startswith('core'): arg = action.split('-', 1)[-1] tasks, set_to = arg.split(':') if ':' in arg else (None, arg) tasks = tasks.split(',') if tasks else [] set_to = int(set_to) if 'TaskChain' in initial: core_dict = {} mem_dict = payload['Memory'] if type( payload['Memory']) == dict else {} it = 1 while True: t = 'Task%d' % it it += 1 if t in initial: tname = payload.setdefault(t, initial[t])['TaskName'] mcore = core_dict.setdefault( tname, payload[t]['Multicore']) mem = mem_dict.setdefault(tname, payload[t]['Memory']) if tasks and not tname in tasks: print tname, "not concerned" continue factor = (set_to / float(mcore)) fraction_constant = 0.4 mem_per_core_c = int( (1 - fraction_constant) * mem / float(mcore)) ##scale the memory mem_dict[tname] += (set_to - mcore) * mem_per_core_c ## scale time/event time_dict[ tname] = payload[t]['TimePerEvent'] / factor ## set the number of cores core_dict[tname] = set_to else: break payload['Multicore'] = core_dict ##payload['TimePerEvent'] = time_dict ## cannot be used yet else: payload['Multicore'] = increase acdc_round = 0 initial_string = payload['RequestString'] if initial_string.startswith('ACDC'): if initial_string[4].isdigit(): acdc_round = int(initial_string[4]) acdc_round += 1 #print acdc_round #print "This is not allowed yet" #return None initial_string = initial_string.replace('ACDC_', '').replace( 'ACDC%d_' % (acdc_round - 1), '') payload['RequestString'] = 'ACDC%d_%s' % (acdc_round, initial_string) payload['InitialTaskPath'] = task if not do: print json.dumps(payload, indent=2) return None print "ACDC payload" print json.dumps(payload, indent=2) print actions ## submit acdc = reqMgrClient.submitWorkflow(url, payload) if not acdc: print "Error in making ACDC for", initial["RequestName"] acdc = 
reqMgrClient.submitWorkflow(url, payload) if not acdc: print "Error twice in making ACDC for", initial["RequestName"] return None ## perform modifications if actions: for action in actions: if action.startswith('split'): factor = int(action.split('-')[-1]) if '-' in action else 2 acdcInfo = workflowInfo(url, acdc) splittings = acdcInfo.getSplittings() for split in splittings: for act in [ 'avg_events_per_job', 'events_per_job', 'lumis_per_job' ]: if act in split: print "Changing %s (%d) by a factor %d" % ( act, split[act], factor), split[act] /= factor print "to", split[act] break split['requestName'] = acdc print "changing the splitting of", acdc print json.dumps(split, indent=2) print reqMgrClient.setWorkflowSplitting(url, acdc, split) data = reqMgrClient.setWorkflowApproved(url, acdc) print data return acdc
def getRequestNumEvents(url, workflow):
    ## convenience wrapper: fetch the workflow cache, then read the event count
    info = workflowInfo(url, workflow)
    return info.getRequestNumEvents()
def recoveror(url, specific, options=None): if userLock('recoveror'): return up = componentInfo(soft=['mcm', 'wtc']) if not up.check(): return CI = campaignInfo() SI = siteInfo() UC = unifiedConfiguration() use_recoveror = UC.get('use_recoveror') if not use_recoveror and not options.go: print "We are told not to run recoveror" return def make_int_keys(d): for code in d: d[int(code)] = d.pop(code) error_codes_to_recover = UC.get('error_codes_to_recover') error_codes_to_block = UC.get('error_codes_to_block') error_codes_to_notify = UC.get('error_codes_to_notify') make_int_keys(error_codes_to_recover) make_int_keys(error_codes_to_block) make_int_keys(error_codes_to_notify) #wfs = session.query(Workflow).filter(Workflow.status == 'assistance-recovery').all() wfs = session.query(Workflow).filter( Workflow.status.contains('recovery')).all() if specific: wfs.extend( session.query(Workflow).filter( Workflow.status == 'assistance-manual').all()) for wfo in wfs: if specific and not specific in wfo.name: continue if not specific and 'manual' in wfo.status: continue wfi = workflowInfo(url, wfo.name) ## need a way to verify that this is the first round of ACDC, since the second round will have to be on the ACDC themselves all_errors = {} try: ## this is clearly very truncated and should be changed completely wfi.getSummary() all_errors = wfi.summary['errors'] except: pass print '-' * 100 print "Looking at", wfo.name, "for recovery options" recover = True if not 'MergedLFNBase' in wfi.request: print "f****d up" sendEmail('missing lfn', '%s wl cache is screwed up' % wfo.name) recover = False if not len(all_errors): print "\tno error for", wfo.name recover = False task_to_recover = defaultdict(list) message_to_ops = "" message_to_user = "" if 'LheInputFilese' in wfi.request and wfi.request['LheInputFiles']: ## we do not try to recover pLHE recover = False if wfi.request['RequestType'] in ['MonteCarlo', 'ReReco']: recover = False if 'Campaign' in wfi.request: c = 
wfi.request['Campaign'] if c in CI.campaigns and 'recover' in CI.campaigns[c]: recover = CI.campaigns[c]['recover'] for task, errors in all_errors.items(): print "\tTask", task ## collect all error codes and #jobs regardless of step at which it occured all_codes = [] for name, codes in errors.items(): if type(codes) == int: continue all_codes.extend([ (int(code), info['jobs'], name, list(set([e['type'] for e in info['errors']])), list(set([e['details'] for e in info['errors']]))) for code, info in codes.items() ]) all_codes.sort(key=lambda i: i[1], reverse=True) sum_failed = sum([l[1] for l in all_codes]) for errorCode, njobs, name, types, details in all_codes: rate = 100 * njobs / float(sum_failed) #print ("\t\t %10d (%6s%%) failures with error code %10d (%"+str(max_legend)+"s) at stage %s")%(njobs, "%4.2f"%rate, errorCode, legend, name) print( "\t\t %10d (%6s%%) failures with error code %10d (%30s) at stage %s" ) % (njobs, "%4.2f" % rate, errorCode, ','.join(types), name) added_in_recover = False #if options.go: # force the recovery of any task with error ? 
if errorCode in error_codes_to_recover: ## the error code is registered for case in error_codes_to_recover[errorCode]: match = case['details'] matched = (match == None) if not matched: matched = False for detail in details: if match in detail: print "[recover] Could find keyword", match, "in" print 50 * "#" print detail print 50 * "#" matched = True break if matched and rate > case['rate']: print "\t\t => we should be able to recover that", case[ 'legend'] task_to_recover[task].append((code, case)) added_in_recover = True message_to_user = "" else: print "\t\t recoverable but not frequent enough, needs", case[ 'rate'] if errorCode in error_codes_to_block: for case in error_codes_to_block[errorCode]: match = case['details'] matched = (match == None) if not matched: matched = False for detail in details: if match in detail: print "[block] Could find keyword", match, "in" print 50 * "#" print detail print 50 * "#" matched = True break if matched and rate > case['rate']: print "\t\t => that error means no ACDC on that workflow", case[ 'legend'] if not options.go: message_to_ops += "%s has an error %s blocking an ACDC.\n%s\n " % ( wfo.name, errorCode, '#' * 50) recover = False added_in_recover = False if errorCode in error_codes_to_notify and not added_in_recover: print "\t\t => we should notify people on this" message_to_user += "%s has an error %s in processing.\n%s\n" % ( wfo.name, errorCode, '#' * 50) if message_to_user: print wfo.name, "to be notified to user(DUMMY)", message_to_user if message_to_ops: #sendEmail( "notification in recoveror" , message_to_ops, destination=['*****@*****.**']) sendLog('recoveror', message_to_ops, level='warning') if len(task_to_recover) != len(all_errors): print "Should not be doing partial ACDC. 
skipping" #sendEmail('recoveror','do not want to make partial acdc on %s'%wfo.name) sendLog('recoveror', 'do not want to make partial acdc on %s' % wfo.name, level='warning') recover = False if task_to_recover and recover: print "Initiating recovery" print ', '.join(task_to_recover.keys()), "to be recovered" recovering = set() for task in task_to_recover: print "Will be making a recovery workflow for", task ## from here you can fetch known solutions, to known error codes actions = list( set([ case['solution'] for code, case in task_to_recover[task] ])) acdc = singleRecovery(url, task, wfi.request, actions, do=options.do) if not acdc: if options.do: if recovering: print wfo.name, "has been partially ACDCed. Needs manual attention" #sendEmail( "failed ACDC partial recovery","%s has had %s/%s recoveries %s only"%( wfo.name, len(recovering), len(task_to_recover), list(recovering)), destination=['*****@*****.**']) sendLog('recoveror', "%s has had %s/%s recoveries %s only" % (wfo.name, len(recovering), len(task_to_recover), list(recovering)), level='critical') continue else: print wfo.name, "failed recovery once" #break continue else: print "no action to take further" sendLog('recoveror', "ACDC for %s can be done automatically" % wfo.name, level='critical') continue ## and assign it ? team = wfi.request['Team'] #assign_to_sites = set(SI.sites_ready) ## that needs to be massaged to prevent assigning to something out. 
assign_to_sites = set(SI.all_sites) parameters = { #'SiteWhitelist' : wfi.request['SiteWhitelist'], 'SiteWhitelist': sorted(assign_to_sites), 'AcquisitionEra': wfi.acquisitionEra(), 'ProcessingString': wfi.processingString(), 'MergedLFNBase': wfi.request['MergedLFNBase'], 'ProcessingVersion': wfi.request['ProcessingVersion'], } ## hackery for ACDC merge assignment if wfi.request[ 'RequestType'] == 'TaskChain' and 'Merge' in task.split( '/')[-1]: parameters['AcquisitionEra'] = None parameters['ProcessingString'] = None ## xrootd setttings on primary and secondary if 'TrustSitelists' in wfi.request and wfi.request[ 'TrustSitelists']: parameters['TrustSitelists'] = True if 'TrustPUSitelists' in wfi.request and wfi.request[ 'TrustPUSitelists']: parameters['TrustPUSitelists'] = True if options.ass: print "really doing the assignment of the ACDC", acdc parameters['execute'] = True wfi.sendLog('recoveror', "%s was assigned for recovery" % acdc) else: print "no assignment done with this ACDC", acdc sendLog('recoveror', "%s needs to be assigned" % (acdc), level='critical') result = reqMgrClient.assignWorkflow(url, acdc, team, parameters) if not result: print acdc, "was not asigned" sendLog('recoveror', "%s needs to be assigned" % (acdc), level='critical') else: recovering.add(acdc) current = None if recovering: #if all went well, set the status to -recovering current = wfo.status if options.ass: current = current.replace('recovery', 'recovering') else: current = 'assistance-manual' print 'created ACDC: ' + ', '.join(recovering) else: ## was set to be recovered, and no acdc was made current = 'assistance-manual' if current: print wfo.name, "setting the status to", current wfo.status = current session.commit() else: ## this workflow should be handled manually at that point print wfo.name, "needs manual intervention" wfo.status = 'assistance-manual' session.commit()
def rejector(url, specific, options=None): #use_mcm = True #up = componentInfo(mcm=use_mcm, soft=['mcm']) up = componentInfo() if not up.check(): return #use_mcm = up.status['mcm'] #mcm = McMClient(dev=False) if use_mcm else None if specific and specific.startswith('/'): ## this is for a dataset print setDatasetStatus(specific, 'INVALID') return if options.filelist: wfs = [] for line in filter(None, open(options.filelist).read().split('\n')): print line wfs.extend( session.query(Workflow).filter(Workflow.name.contains(line)).all()) elif specific: wfs = session.query(Workflow).filter(Workflow.name.contains(specific)).all() else: wfs = session.query(Workflow).filter(Workflow.status == 'assistance-clone').all() #wfs.extend( session.query(Workflow).filter(Workflow.status == 'assistance-reject').all()) ## be careful then on clone case by case options.clone = True print "not supposed to function yet" return print len(wfs),"to reject" if len(wfs)>1: print "\n".join( [wfo.name for wfo in wfs] ) answer = raw_input('Reject these') if not answer.lower() in ['y','yes']: return for wfo in wfs: #wfo = session.query(Workflow).filter(Workflow.name == specific).first() if not wfo: print "cannot reject",spec return results=[] wfi = workflowInfo(url, wfo.name) datasets = set(wfi.request['OutputDatasets']) reqMgrClient.invalidateWorkflow(url, wfo.name, current_status=wfi.request['RequestStatus']) comment="" if options.comments: comment = ", reason: "+options.comments wfi.sendLog('rejector','invalidating the workflow by unified operator%s'%comment) ## need to find the whole familly and reject the whole gang familly = getWorkflowById( url, wfi.request['PrepID'] , details=True) for fwl in familly: if fwl['RequestDate'] < wfi.request['RequestDate']: continue if fwl['RequestType']!='Resubmission': continue ## does not work on second order acd if 'OriginalRequestName' in fwl and fwl['OriginalRequestName'] != wfi.request['RequestName']: continue print "rejecting",fwl['RequestName'] 
reqMgrClient.invalidateWorkflow(url, fwl['RequestName'], current_status=fwl['RequestStatus'], cascade=False) datasets.update( fwl['OutputDatasets'] ) for dataset in datasets: if options.keep: print "keeping",dataset,"in its current status" else: results.append( setDatasetStatus(dataset, 'INVALID') ) pass if all(map(lambda result : result in ['None',None,True],results)): print wfo.name,"and",datasets,"are rejected" if options and options.clone: wfo.status = 'trouble' session.commit() schema = wfi.getSchema() schema['Requestor'] = os.getenv('USER') schema['Group'] = 'DATAOPS' schema['OriginalRequestName'] = wfo.name if 'ProcessingVersion' in schema: schema['ProcessingVersion'] = int(schema['ProcessingVersion'])+1 ## dubious str->int conversion else: schema['ProcessingVersion']=2 for k in schema.keys(): if k.startswith('Team'): schema.pop(k) if k.startswith('checkbox'): schema.pop(k) ## a few tampering of the original request if options.Memory: if schema['RequestType'] == 'TaskChain': it=1 while True: t = 'Task%d'%it it+=1 if t in schema: schema[t]['Memory'] = options.Memory else: break else: schema['Memory'] = options.Memory if options.Multicore: ## to do : set it properly in taslchains schema['Multicore'] = options.Multicore if options.deterministic: if schema['RequestType'] == 'TaskChain': schema['Task1']['DeterministicPileup'] = True if options.EventsPerJob: if schema['RequestType'] == 'TaskChain': schema['Task1']['EventsPerJob'] = options.EventsPerJob else: schema['EventsPerJob'] = options.EventsPerJob if options.EventAwareLumiBased: schema['SplittingAlgo'] = 'EventAwareLumiBased' if options.TimePerEvent: schema['TimePerEvent'] = options.TimePerEvent if options.ProcessingString: schema['ProcessingString'] = options.ProcessingString if options.AcquisitionEra: schema['AcquisitionEra'] = options.AcquisitionEra if options.runs: schema['RunWhitelist'] = map(int,options.runs.split(',')) if options.PrepID: schema['PrepID'] =options.PrepID if schema['RequestType'] == 
'TaskChain' and options.no_output: ntask = schema['TaskChain'] for it in range(1,ntask-1): schema['Task%d'%it]['KeepOutput'] = False schema['TaskChain'] = ntask-1 schema.pop('Task%d'%ntask) ## update to the current priority schema['RequestPriority'] = wfi.request['RequestPriority'] ## drop shit on the way to reqmgr2 paramBlacklist = ['BlockCloseMaxEvents', 'BlockCloseMaxFiles', 'BlockCloseMaxSize', 'BlockCloseMaxWaitTime', 'CouchWorkloadDBName', 'CustodialGroup', 'CustodialSubType', 'Dashboard', 'GracePeriod', 'HardTimeout', 'InitialPriority', 'inputMode', 'MaxMergeEvents', 'MaxMergeSize', 'MaxRSS', 'MaxVSize', 'MinMergeSize', 'NonCustodialGroup', 'NonCustodialSubType', 'OutputDatasets', 'ReqMgr2Only', 'RequestDate' 'RequestorDN', 'RequestName', 'RequestStatus', 'RequestTransition', 'RequestWorkflow', 'SiteWhitelist', 'SoftTimeout', 'SoftwareVersions', 'SubscriptionPriority', 'Team', 'timeStamp', 'TrustSitelists', 'TrustPUSitelists', 'TotalEstimatedJobs', 'TotalInputEvents', 'TotalInputLumis', 'TotalInputFiles'] for p in paramBlacklist: if p in schema: schema.pop( p ) #pass print "submitting" if (options.to_stepchain and (schema['RequestType']=='TaskChain')): ## transform the schema into StepChain schema print "Transforming a TaskChain into a StepChain" schema['RequestType'] = 'StepChain' schema['StepChain'] = schema.pop('TaskChain') step=1 while True: if 'Task%d'%step in schema: schema['Step%d'%step] = schema.pop('Task%d'%step) schema['Step%d'%step]['StepName'] = schema['Step%d'%step].pop('TaskName') if 'InputTask' in schema['Step%d'%step]: schema['Step%d'%step]['InputStep'] = schema['Step%d'%step].pop('InputTask') if not 'KeepOutput' in schema['Step%d'%step]: ## this is a weird translation capability. Absence of keepoutput in step means : keep the output. 
while in TaskChain absence means : drop schema['Step%d'%step]['KeepOutput'] = False step+=1 else: break print json.dumps( schema, indent=2 ) newWorkflow = reqMgrClient.submitWorkflow(url, schema) if not newWorkflow: print "error in cloning",wfo.name print json.dumps( schema, indent=2 ) return print newWorkflow data = reqMgrClient.setWorkflowApproved(url, newWorkflow) print data wfi.sendLog('rejector','Cloned into %s by unified operator %s'%( newWorkflow, comment )) wfi.notifyRequestor('Cloned into %s by unified operator %s'%( newWorkflow, comment ),do_batch=False) else: wfo.status = 'forget' wfi.notifyRequestor('Rejected by unified operator %s'%( comment ),do_batch=False) session.commit() else: print "error in rejecting",wfo.name,results
def stagor(url,specific =None, options=None):
    """Monitor input-data staging for workflows in status 'staging'.

    Checks PhEDEx transfer subscriptions recorded in the local Transfer
    table, approves pending ones, and promotes workflows to 'staged' once
    their input datasets are sufficiently available.  With options.fast,
    only a dataset-availability threshold check is done and the function
    returns early.  Side effects: updates Workflow.status in the local DB,
    rewrites lost_blocks_datasets.json, sends operator emails.
    """
    if not componentInfo().check(): return
    SI = siteInfo()
    CI = campaignInfo()
    UC = unifiedConfiguration()

    # per-workflow-id / per-dataset transfer completion bookkeeping
    done_by_wf_id = {}
    done_by_input = {}
    completion_by_input = {}
    good_enough = 100.0  # percent completion required to count a transfer as done

    ## refresh the list of datasets with known lost blocks: drop recovered ones
    lost = json.loads(open('lost_blocks_datasets.json').read())
    still_lost = []
    for dataset in lost:
        l = findLostBlocks(url ,dataset)
        if not l:
            print dataset,"is not really lost"
        else:
            still_lost.append( dataset )
    open('lost_blocks_datasets.json','w').write( json.dumps( still_lost, indent=2) )

    if options.fast:
        ## fast path: only check block-level availability of the primary input
        ## against a fraction threshold, then return
        print "doing the fast check of staged with threshold:",options.goodavailability
        for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
            if specific and not specific in wfo.name: continue
            wfi = workflowInfo(url, wfo.name)
            sites_allowed = getSiteWhiteList( wfi.getIO() )
            # campaign configuration can override / restrict the site list
            if 'SiteWhitelist' in CI.parameters(wfi.request['Campaign']):
                sites_allowed = CI.parameters(wfi.request['Campaign'])['SiteWhitelist']
            if 'SiteBlacklist' in CI.parameters(wfi.request['Campaign']):
                sites_allowed = list(set(sites_allowed) - set(CI.parameters(wfi.request['Campaign'])['SiteBlacklist']))
            _,primaries,_,secondaries = wfi.getIO()
            se_allowed = [SI.CE_to_SE(site) for site in sites_allowed]
            all_check = True
            for dataset in list(primaries):#+list(secondaries) ?
                #print se_allowed
                available = getDatasetBlocksFraction( url , dataset , sites=se_allowed )
                all_check &= (available >= options.goodavailability)
                if not all_check: break
            if all_check:
                print "\t\t",wfo.name,"can go staged"
                wfo.status = 'staged'
                session.commit()
            else:
                print "\t",wfo.name,"can wait a bit more"
        return

    ## slow path: register every input dataset of every staging workflow
    for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
        wfi = workflowInfo(url, wfo.name)
        _,primaries,_,secondaries = wfi.getIO()
        for dataset in list(primaries)+list(secondaries):
            done_by_input[dataset] = {}
            completion_by_input[dataset] = {}
            print wfo.name,"needs",dataset

    ## walk all recorded transfers and collect their completion status
    for transfer in session.query(Transfer).all():
        if specific and str(transfer.phedexid)!=str(specific): continue
        # only consider transfers that serve at least one staging workflow
        skip=True
        for wfid in transfer.workflows_id:
            tr_wf = session.query(Workflow).get(wfid)
            if tr_wf:
                if tr_wf.status == 'staging':
                    print "\t",transfer.phedexid,"is staging for",tr_wf.name
                    skip=False
        if skip: continue
        if transfer.phedexid<0: continue  # negative ids are placeholders, not real subscriptions

        ## check the status of transfers
        checks = checkTransferApproval(url, transfer.phedexid)
        approved = all(checks.values())
        if not approved:
            print transfer.phedexid,"is not yet approved"
            approveSubscription(url, transfer.phedexid)
            continue

        ## check on transfer completion
        checks = checkTransferStatus(url, transfer.phedexid, nocollapse=True)
        if not specific:
            # record per-dataset completion only on full (non-specific) passes
            for dsname in checks:
                if not dsname in done_by_input: done_by_input[dsname]={}
                if not dsname in completion_by_input: completion_by_input[dsname] = {}
                done_by_input[dsname][transfer.phedexid]=all(map(lambda i:i>=good_enough, checks[dsname].values()))
                completion_by_input[dsname][transfer.phedexid]=checks[dsname].values()
        if checks:
            print "Checks for",transfer.phedexid,[node.values() for node in checks.values()]
            done = all(map(lambda i:i>=good_enough,list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
        else:
            ## it is empty, is that a sign that all is done and away ?
            print "ERROR with the scubscriptions API of ",transfer.phedexid
            print "Most likely something else is overiding the transfer request. Need to work on finding the replacement automatically, if the replacement exists"
            done = False

        ## the thing above is NOT giving the right number
        #done = False

        for wfid in transfer.workflows_id:
            tr_wf = session.query(Workflow).get(wfid)
            if tr_wf:# and tr_wf.status == 'staging':
                if not tr_wf.id in done_by_wf_id: done_by_wf_id[tr_wf.id]={}
                done_by_wf_id[tr_wf.id][transfer.phedexid]=done
        if done:
            ## transfer.status = 'done'
            print transfer.phedexid,"is done"
        else:
            print transfer.phedexid,"not finished"
            pprint.pprint( checks )

    #print done_by_input
    print "\n----\n"
    ## per-dataset decision: promote, send back, or report as incomplete
    for dsname in done_by_input:
        fractions = None
        if dsname in completion_by_input:
            # NOTE(review): this chains over ALL datasets' checks, not just
            # `dsname`'s (completion_by_input.values() vs
            # completion_by_input[dsname].values()) — verify intent.
            # Also: `fractions` is a generator; it is exhausted by
            # len(list(fractions)) below, so the subsequent set(fractions)
            # is always empty — left as-is.
            fractions = itertools.chain.from_iterable([check.values() for check in completion_by_input.values()])

        ## the workflows in the waiting room for the dataset
        using_its = getWorkflowByInput(url, dsname)
        #print using_its
        using_wfos = []
        for using_it in using_its:
            wf = session.query(Workflow).filter(Workflow.name == using_it).first()
            if wf:
                using_wfos.append( wf )

        if not len(done_by_input[dsname]):
            # no transfer at all was recorded for this dataset
            print "For dataset",dsname,"there are no transfer report. That's an issue."
            for wf in using_wfos:
                if wf.status == 'staging':
                    if UC.get("stagor_sends_back"):
                        print "sending",wf.name,"back to considered"
                        wf.status = 'considered'
                        session.commit()
                        sendEmail( "send back to considered","%s was send back and might be trouble"% wf.name)
                    else:
                        print "would send",wf.name,"back to considered"
                        sendEmail( "subscription lagging behind","susbscriptions to get %s running are not appearing in phedex. I would have send it back to considered but that's not good."% wf.name)
            continue

        #need_sites = int(len(done_by_input[dsname].values())*0.7)+1
        need_sites = len(done_by_input[dsname].values())
        #if need_sites > 10: need_sites = int(need_sites/2.)
        got = done_by_input[dsname].values().count(True)
        if all([wf.status != 'staging' for wf in using_wfos]):
            ## not a single ds-using wf is in staging => moved on already
            ## just forget about it
            print "presence of",dsname,"does not matter anymore"
            print "\t",done_by_input[dsname]
            print "\t",[wf.status for wf in using_wfos]
            print "\tneeds",need_sites
            continue #??

        ## should the need_sites reduces with time ?
        # with dataset choping, reducing that number might work as a block black-list.

        if len(done_by_input[dsname].values()) and all(done_by_input[dsname].values()):
            print dsname,"is everywhere we wanted"
            ## the input dataset is fully transfered, should consider setting the corresponding wf to staged
            for wf in using_wfos:
                if wf.status == 'staging':
                    print wf.name,"is with us. setting staged and move on"
                    wf.status = 'staged'
                    session.commit()
        elif fractions and len(list(fractions))>1 and set(fractions)==1:
            # NOTE(review): set(fractions)==1 compares a set against an int and is
            # always False (likely meant len(set(fractions))==1); combined with the
            # exhausted generator above, this branch can never trigger — left as-is.
            print dsname,"is everywhere at the same fraction"
            print "We do not want this in the end. we want the data we asked for"
            continue
            ## the input dataset is fully transfered, should consider setting the corresponding wf to staged
            for wf in using_wfos:
                if wf.status == 'staging':
                    print wf.name,"is with us everywhere the same. setting staged and move on"
                    wf.status = 'staged'
                    session.commit()
        elif got >= need_sites:
            print dsname,"is almost everywhere we wanted"
            #print "We do not want this in the end. we want the data we asked for"
            #continue
            ## the input dataset is fully transfered, should consider setting the corresponding wf to staged
            for wf in using_wfos:
                if wf.status == 'staging':
                    print wf.name,"is almost with us. setting staged and move on"
                    wf.status = 'staged'
                    session.commit()
        else:
            print "incomplete",dsname
            ## check whether blocks were lost, and remember those datasets
            lost = findLostBlocks(url, dsname)
            try:
                known_lost = json.loads(open('lost_blocks_datasets.json').read())
            except:
                print "enable to get the known_lost from local json file"
                known_lost = []
            if lost and not dsname in known_lost:
                lost_names = [item['name'] for item in lost]
                ## make a deeper investigation of the block location to see whether it's really no-where no-where
                print "We have lost",len(lost),"blocks",lost_names
                #print json.dumps( lost , indent=2 )
                sendEmail('we have lost a few blocks', str(len(lost))+" in total.\nDetails \n:"+json.dumps( lost , indent=2 ))
                known_lost.append(dsname)
                rr= open('lost_blocks_datasets.json','w')
                rr.write( json.dumps( known_lost, indent=2))
                rr.close()
                ## should the status be change to held-staging and pending on a ticket
            print "\t",done_by_input[dsname]
            print "\tneeds",need_sites
            print "\tgot",got

    for wfid in done_by_wf_id:
        #print done_by_wf_id[wfid].values()
        ## ask that all related transfer get into a valid state
        if all(done_by_wf_id[wfid].values()):
            pass
def injector(url, options, specific):
    """Pick up new requests from ReqMgr and insert them into the local DB.

    Fetches workflows in options.wmstatus for the configured users, skips
    duplicates (same PrepID family already tracked), validates input
    datasets, optionally schedules TaskChain->StepChain conversion, and
    resolves replacements for workflows in status 'trouble'.
    Side effects: adds/updates rows in the local Workflow/TransferImp
    tables, shells out to rejector.py for conversions, sends logs/emails.
    """
    mlock = moduleLock()
    if mlock(): return  # another injector instance is running
    use_mcm = True
    up = componentInfo(soft=['mcm','wtc','jira'] )
    if not up.check(): return
    use_mcm = up.status['mcm']

    UC = unifiedConfiguration()

    transform_keywords = UC.get('convert_to_stepchain')

    ## collect candidate requests from ReqMgr for all configured users/types
    workflows = getWorkflows(url, status=options.wmstatus, user=options.user)
    for user in UC.get("user_rereco"):
        workflows.extend( getWorkflows(url, status=options.wmstatus, user=user, rtype="ReReco"))
    for user in (options.user_relval.split(',') if options.user_relval else UC.get("user_relval")) :
        workflows.extend( getWorkflows(url, status=options.wmstatus, user=user, rtype="TaskChain"))
    for user in (options.user_storeresults.split(',') if options.user_storeresults else UC.get("user_storeresults")) :
        workflows.extend( getWorkflows(url, status=options.wmstatus, user=user, rtype="StoreResults"))

    print len(workflows),"in line"
    cannot_inject = set()
    to_convert = set()
    status_cache = defaultdict(str)  # dataset -> DBS status, to avoid repeated lookups

    ## browse for assignment-approved requests, browsed for ours, insert the diff
    for wf in workflows:
        if specific and not specific in wf: continue
        exists = session.query(Workflow).filter(Workflow.name == wf ).first()
        if not exists:
            wfi = workflowInfo(url, wf)
            ## check first that there isn't related here with something valid
            can_add = True
            ## first try at finding a match
            familly = session.query(Workflow).filter(Workflow.name.contains(wfi.request['PrepID'])).all()
            if not familly:
                # fall back to matching by the full PrepID set
                pids = wfi.getPrepIDs()
                req_familly = []
                for pid in pids:
                    req_familly.extend( getWorkflowById( url, pid, details=True) )
                familly = []
                print len(req_familly),"members"
                for req_member in req_familly:
                    #print "member",req_member['RequestName']
                    owfi = workflowInfo(url, req_member['RequestName'], request=req_member)
                    other_pids = owfi.getPrepIDs()
                    if set(pids) == set(other_pids):
                        ## this is a real match
                        familly.extend( session.query(Workflow).filter(Workflow.name == req_member['RequestName']).all() )

            for lwfo in familly:
                if lwfo:
                    ## we have it already
                    if not lwfo.status in ['forget','trouble','forget-unlock','forget-out-unlock']:
                        wfi.sendLog('injector',"Should not put %s because of %s %s"%( wf, lwfo.name,lwfo.status ))
                        sendLog('injector',"Should not put %s because of %s %s"%( wf, lwfo.name,lwfo.status ), level='critical')
                        print "Should not put",wf,"because of",lwfo.name,lwfo.status
                        cannot_inject.add( wf )
                        can_add = False

            ## add a check on validity of input datasets
            _,prim,par,sec = wfi.getIO()
            for d in list(prim)+list(par)+list(sec):
                if not d in status_cache:
                    status_cache[d] = getDatasetStatus(d)
                if status_cache[d] != 'VALID':
                    wfi.sendLog('injector',"One of the input is not VALID. %s : %s"%( d, status_cache[d]))
                    sendLog('injector',"One of the input of %s is not VALID. %s : %s"%( wf, d, status_cache[d]), level='critical')
                    can_add = False
                #else:
                #    ##make sure that all blocks get closed
                #    closeAllBlocks(url, d)

                ## check for any file in phedex, to verify existence
                _,ph_files,_,_ = getDatasetFiles(url, d)
                if not ph_files and not ( 'StoreResults' == wfi.request.setdefault('RequestType',None) ):
                    wfi.sendLog('injector',"One of the input has no file in phedex: %s" % d )
                    sendLog('injector',"One of the input has no file in phedex: %s"% d, level='critical')
                    can_add = False

            ### ban some workflow that you don't like anymore
            #outputs = wfi.request['OutputDatasets']

            if not can_add: continue

            ## temporary hack to transform specific taskchain into stepchains
            good_for_stepchain = wfi.isGoodToConvertToStepChain( keywords = transform_keywords)
            #good_for_stepchain = wfi.isGoodToConvertToStepChain( keywords = None)

            ## match keywords and technical constraints
            if (not options.no_convert) and good_for_stepchain and not wfi.isRelval():
                to_convert.add( wf )
                wfi.sendLog('injector','Transforming %s TaskChain into StepChain'%wf)
                sendEmail('convertion to stepchain','Transforming %s TaskChain into StepChain'%wf)

            wfi.sendLog('injector',"considering %s"%wf)

            new_wf = Workflow( name = wf , status = options.setstatus, wm_status = options.wmstatus)
            session.add( new_wf )
            session.commit()
            time.sleep(0.5)  # throttle DB insertions
        else:
            #print "already have",wf
            pass

    if cannot_inject:
        #sendEmail('workflow duplicates','These workflow cannot be added in because of duplicates \n\n %s'%( '\n'.join(cannot_inject)))
        sendLog('injector','These workflow cannot be added in because of duplicates \n\n %s'%( '\n'.join(cannot_inject)), level='critical')

    ## run the TaskChain->StepChain conversions through the rejector CLI
    for wf in to_convert:
        os.system('./Unified/rejector.py --clone --to_step --comments \"Transform to StepChain\" %s'% wf)

    ## passing a round of invalidation of what needs to be invalidated
    if use_mcm and (options.invalidate or True):
        invalidator(url)

    no_replacement = set()

    #print "getting all transfers"
    #all_transfers=session.query(Transfer).all()
    #print "go!"

    ## pick up replacements
    for wf in session.query(Workflow).filter(Workflow.status == 'trouble').all():
        print wf.name
        if specific and not specific in wf.name: continue
        print wf.name
        wfi = workflowInfo(url, wf.name )
        wl = wfi.request #getWorkLoad(url, wf.name)
        familly = getWorkflowById( url, wl['PrepID'] )
        true_familly = []
        for member in familly:
            if member == wf.name: continue
            fwl = getWorkLoad(url , member)
            if options.replace:
                # operator pinned a specific replacement
                if member != options.replace: continue
            else:
                # only newer, non-resubmission, live requests qualify
                if fwl['RequestDate'] < wl['RequestDate']: continue
                if fwl['RequestType']=='Resubmission': continue
                if fwl['RequestStatus'] in ['None',None,'new']: continue
                if fwl['RequestStatus'] in ['rejected','rejected-archived','aborted','aborted-archived']: continue
            true_familly.append( fwl )

        if len(true_familly)==0:
            #sendLog('injector','%s had no replacement'%wf.name, level='critical')
            if wfi.isRelval():
                #wfi.sendLog('injector','the workflow was found in trouble with no replacement. As a relval, there is no clean way to handle this.')
                wfi.sendLog('injector','the workflow was found in trouble with no replacement. As a relval, there is no clean way to handle this. Setting forget')
                wf.status = 'forget'
                session.commit()
            else:
                wfi.sendLog('injector','the workflow was found in trouble with no replacement')
                no_replacement.add( wf.name )
            continue
        else:
            wfi.sendLog('injector','the workflow was found in trouble and has a replacement')

        print wf.name,"has",len(familly),"familly members"
        print wf.name,"has",len(true_familly),"true familly members"

        ##we cannot have more than one of them !!! pick the last one
        if len(true_familly)>1:
            #sendEmail('multiple wf','please take a look at injector for %s'%wf.name)
            sendLog('injector','Multiple wf in line, will take the last one for %s \n%s'%( wf.name, ', '.join(fwl['RequestName'] for fwl in true_familly)), level='critical')

        for fwl in true_familly[-1:]:
            member = fwl['RequestName']
            new_wf = session.query(Workflow).filter(Workflow.name == member).first()
            if not new_wf:
                sendLog('injector',"putting %s as replacement of %s"%( member, wf.name))
                status = 'away'
                if fwl['RequestStatus'] in ['assignment-approved']:
                    status = 'considered'
                new_wf = Workflow( name = member, status = status, wm_status = fwl['RequestStatus'])
                wf.status = 'forget'
                session.add( new_wf )
            else:
                if new_wf.status == 'forget': continue
                sendLog('injector',"getting %s as replacement of %s"%( new_wf.name, wf.name ))
                wf.status = 'forget'

            ## re-parent the transfers of the old workflow to the replacement
            for tr in session.query(TransferImp).filter( TransferImp.workflow_id == wf.id).all():
                ## get all transfer working for the old workflow
                existing = session.query(TransferImp).filter( TransferImp.phedexid == tr.phedexid).filter( TransferImp.workflow_id == new_wf.id).all()
                tr.active = False ## disable the old one
                if not existing:
                    ## create the transfer object for the new dependency
                    tri = TransferImp( phedexid = tr.phedexid, workflow = new_wf)
                    session.add( tri )
            session.commit()

        ## don't do that automatically
        #wf.status = 'forget'
        session.commit()
    if no_replacement:
        #sendEmail('workflow with no replacement','%s \n are dangling there'%( '\n'.join(no_replacement)))
        sendLog('injector','workflow with no replacement\n%s \n are dangling there'% ( '\n'.join(no_replacement)), level='critical')
def main(): #Create option parser usage = "usage: %prog (-w workflow|-f filelist) (-t TASK|--all) [--tesbed]" parser = OptionParser(usage=usage) parser.add_option("-f","--file", dest="file", default=None, help="Text file with a list of workflows") parser.add_option("-w","--workflow", default=None, help="Coma separated list of wf to handle") parser.add_option("-t","--task", default=None, help="Coma separated task to be recovered") parser.add_option("-p","--path", default=None, help="Coma separated list of paths to recover") parser.add_option("-a","--all", help="Make acdc for all tasks to be recovered",default=False, action='store_true') parser.add_option("-m","--memory", dest="memory", default=None, type=int, help="Memory to override the original request memory") parser.add_option("-c","--mcore", dest="mcore", default=None, help="Multicore to override the original request multicore") parser.add_option("--testbed", default=False, action="store_true") (options, args) = parser.parse_args() global url url = testbed_url if options.testbed else prod_url if options.all : options.task = 'all' if not options.task: parser.error("Provide the -t Task Name or --all") sys.exit(1) if not ((options.workflow) or (options.path) or (options.file)): parser.error("Provide the -w Workflow Name or the -p path or the -f workflow filelist") sys.exit(1) wfs = None wf_and_task = defaultdict(set) if options.file: wfs = [l.strip() for l in open(options.file) if l.strip()] elif options.workflow: wfs = options.workflow.split(',') elif options.path: ## self contained paths = options.path.split(',') for p in paths: _,wf,t = p.split('/',2) wf_and_task[wf].add('/%s/%s'%(wf,t)) else: parser.error("Either provide a -f filelist or a -w workflow or -p path") sys.exit(1) if not wf_and_task: if options.task == 'all': for wfname in wfs: wf_and_task[wfname] = None else: for wfname in wfs: wf_and_task[wfname].update( [('/%s/%s'%(wfname,task)).replace('//','/') for task in options.task.split(',')] ) if not 
wf_and_task: parser.error("Provide the -w Workflow Name and the -t Task Name or --all") sys.exit(1) for wfname,tasks in wf_and_task.items(): wfi = workflowInfo(url, wfname) if tasks == None: where,how_much,how_much_where = wfi.getRecoveryInfo() tasks = sorted(how_much.keys()) else: tasks = sorted(tasks) created = {} print "Workflow:",wfname print "Tasks:",tasks for task in tasks: r = makeACDC(url=url, wfi=wfi, task=task, memory = options.memory, mcore = options.mcore) if not r: print "Error in creating ACDC for",task,"on",wfname break created[task] = r if len(created)!=len(tasks): print "Error in creating all required ACDCs" sys.exit(1) print "Created:" for task in created: print created[task],"for",task
wfs_no_location_in_GQ = defaultdict(list) si = siteInfo() #bad_blocks = defaultdict( set ) unprocessable = set() not_runable_acdc=set() agents_down = defaultdict(set) failed_workflow = set() files_locations = {} stuck_all_done = set() heavy_duty = {} for wf in wfs: if spec and not spec in wf['RequestName']: continue wfi = workflowInfo(url, wf['RequestName'], request=wf) sitewhitelist = wfi.request['SiteWhitelist'] wqs = wfi.getWorkQueue() stats = wfi.getWMStats() if not 'AgentJobInfo' in stats: stats['AgentJobInfo'] = {} ## skip wf that unified does not know about, leaves acdc wfo = session.query(Workflow).filter(Workflow.name == wf['RequestName']).first() if not (wfo or wf['RequestType']=='Resubmission'): print "not knonw or not acdc : %s"%(wf['RequestName']) continue ## test the heavyness if 'TotalInputLumis' in wf and 'TotalEstimatedJobs' in wf and wf['TotalEstimatedJobs']: heavy = (wf['TotalInputLumis'] / float(wf['TotalEstimatedJobs']))
def assignor(url, specific=None, talk=True, options=None):
    """Assign ready workflows to a team with a computed site white list.

    Picks workflows (by name if `specific`, else in status considered/
    staging/staged), checks the campaign GO flag and request status,
    derives the site white list from primary/secondary input data
    presence, then calls reqMgrClient.assignWorkflow.  Side effects:
    updates Workflow.status to 'away' on success (unless options.test).
    """
    if userLock('assignor'): return  # another assignor is running
    CI = campaignInfo()
    SI = siteInfo()

    ## select the workflows to consider
    wfos = []
    if specific:
        wfos = session.query(Workflow).filter(Workflow.name == specific).all()
    if not wfos:
        if specific:
            wfos = session.query(Workflow).filter( Workflow.status == 'considered').all()
            wfos.extend( session.query(Workflow).filter( Workflow.status == 'staging').all())
        wfos.extend( session.query(Workflow).filter(Workflow.status == 'staged').all())

    for wfo in wfos:
        if specific:
            # `specific` may be a comma-separated list of name fragments
            if not any(map(lambda sp: sp in wfo.name, specific.split(','))): continue
            #if not specific in wfo.name: continue
        print wfo.name, "to be assigned"
        wfh = workflowInfo(url, wfo.name)

        ## check if by configuration we gave it a GO
        if not CI.go(wfh.request['Campaign']) and not options.go:
            print "No go for", wfh.request['Campaign']
            continue

        ## check on current status for by-passed assignment
        if wfh.request['RequestStatus'] != 'assignment-approved':
            print wfo.name, wfh.request['RequestStatus'], "skipping"
            if not options.test:
                continue

        ## retrieve from the schema, dbs and reqMgr what should be the next version
        version = wfh.getNextVersion()
        if not version:
            if options and options.ProcessingVersion:
                version = options.ProcessingVersion
            else:
                print "cannot decide on version number"
                continue

        (lheinput, primary, parent, secondary) = wfh.getIO()
        sites_allowed = getSiteWhiteList( (lheinput, primary, parent, secondary))
        print "Allowed", sites_allowed
        # pick one destination SE for the non-custodial output
        sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in sites_allowed])]
        sites_custodial = []
        if len(sites_custodial) == 0:
            print "No custodial, it's fine, it's covered in close-out"
        if len(sites_custodial) > 1:
            # defensive: currently unreachable since sites_custodial is always []
            print "more than one custodial for", wfo.name
            sys.exit(36)

        ## restrict the white list to sites hosting the secondary (pileup) input
        secondary_locations = None
        for sec in list(secondary):
            presence = getDatasetPresence(url, sec)
            print sec
            print json.dumps(presence, indent=2)
            #one_secondary_locations = [site for (site,(there,frac)) in presence.items() if frac>90.]
            one_secondary_locations = [ site for (site, (there, frac)) in presence.items() if there ]
            if secondary_locations == None:
                secondary_locations = one_secondary_locations
            else:
                secondary_locations = list( set(secondary_locations) & set(one_secondary_locations))
            ## reduce the site white list to site with secondary only
            sites_allowed = [ site for site in sites_allowed if any([ osite.startswith(site) for osite in one_secondary_locations ]) ]

        ## classify sites by how much of the primary input they hold
        sites_all_data = copy.deepcopy(sites_allowed)
        sites_with_data = copy.deepcopy(sites_allowed)
        sites_with_any_data = copy.deepcopy(sites_allowed)
        primary_locations = None
        available_fractions = {}
        for prim in list(primary):
            presence = getDatasetPresence(url, prim)
            if talk:
                print prim
                print json.dumps(presence, indent=2)
            available_fractions[prim] = getDatasetBlocksFraction( url, prim, sites=[SI.CE_to_SE(site) for site in sites_allowed])
            # sites holding the dataset in full
            sites_all_data = [ site for site in sites_with_data if any([ osite.startswith(site) for osite in [ psite for (psite, (there, frac)) in presence.items() if there ] ]) ]
            # sites holding >90% of the dataset
            sites_with_data = [ site for site in sites_with_data if any([ osite.startswith(site) for osite in [ psite for (psite, frac) in presence.items() if frac[1] > 90. ] ]) ]
            # sites holding any piece of the dataset
            sites_with_any_data = [ site for site in sites_with_any_data if any([osite.startswith(site) for osite in presence.keys()]) ]
            if primary_locations == None:
                primary_locations = presence.keys()
            else:
                primary_locations = list( set(primary_locations) & set(presence.keys()))
        sites_with_data = list(set(sites_with_data))
        sites_with_any_data = list(set(sites_with_any_data))

        opportunistic_sites = []
        ## opportunistic running where any piece of data is available
        if secondary_locations and primary_locations:
            ## intersection of both any pieces of the primary and good IO
            #opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations) & set(SI.sites_with_goodIO)) - set(sites_allowed))]
            opportunistic_sites = [ SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations)) - set(sites_allowed)) ]
            print "We could be running at", opportunistic_sites, "in addition"

        if available_fractions and not all( [available >= 1. for available in available_fractions.values()]):
            print "The input dataset is not located in full at any site"
            print json.dumps(available_fractions)
            if not options.test and not options.go: continue ## skip skip skip

        copies_wanted = 2.  # required number of copies of the input across sites
        if available_fractions and not all([ available >= copies_wanted for available in available_fractions.values() ]):
            print "The input dataset is not available", copies_wanted, "times, only", available_fractions.values( )
            if not options.go:
                continue

        ## default back to white list to original white list with any data
        print "Allowed", sites_allowed
        sites_allowed = sites_with_any_data
        print "Selected for any data", sites_allowed

        if options.restrict:
            print "Allowed", sites_allowed
            sites_allowed = sites_with_any_data
            print "Selected", sites_allowed
        else:
            if set(sites_with_data) != set(sites_allowed):
                ## the data is not everywhere we wanted to run at : enable aaa
                print "Sites with 90% data not matching site white list (block choping!)"
                print "Resorting to AAA reading for", list( set(sites_allowed) - set(sites_with_data)), "?"
                print "Whitelist site with any data", list( set(sites_allowed) - set(sites_with_any_data))
                #options.useSiteListAsLocation = True
                #print "Not commissioned yet"
                #continue

        #print "We could be running at",opportunistic_sites,"in addition"
        ##sites_allowed = list(set(sites_allowed+ opportunistic_sites))

        if not len(sites_allowed):
            print wfo.name, "cannot be assign with no matched sites"
            continue

        parameters = {
            #'SiteWhitelist' : wfi.request['SiteWhitelist'],
            'SiteWhitelist': sites_allowed,
            'CustodialSites': sites_custodial,
            'NonCustodialSites': sites_out,
            'AutoApproveSubscriptionSites': list(set(sites_out)),
            'AcquisitionEra': wfh.acquisitionEra(),
            'ProcessingString': wfh.processingString(),
            'MergedLFNBase': '/store/mc', ## to be figured out (hard-coded MC base LFN)
            'ProcessingVersion': version,
        }

        ##parse options entered in command line if any
        if options:
            for key in reqMgrClient.assignWorkflow.keys:
                v = getattr(options, key)
                if v != None:
                    if ',' in v:
                        parameters[key] = filter(None, v.split(','))
                    else:
                        parameters[key] = v

        ## pick up campaign specific assignment parameters
        parameters.update(CI.parameters(wfh.request['Campaign']))

        if not options.test:
            parameters['execute'] = True

        if not wfh.checkWorkflowSplitting():
            ## needs to go to event based ? fail for now
            print "Falling back to event splitting ?"
            #parameters['SplittingAlgorithm'] = 'EventBased'
            continue

        ## plain assignment here
        team = 'production'
        if options and options.team:
            team = options.team
        result = reqMgrClient.assignWorkflow(url, wfo.name, team, parameters)

        # set status
        if not options.test:
            if result:
                wfo.status = 'away'
                session.commit()
            else:
                print "ERROR could not assign", wfo.name
        else:
            pass
def singleRecovery(url, task , initial, actions, do=False): payload = { "Requestor" : os.getenv('USER'), "Group" : 'DATAOPS', "RequestType" : "Resubmission", "ACDCServer" : "https://cmsweb.cern.ch/couchdb", "ACDCDatabase" : "acdcserver", "OriginalRequestName" : initial['RequestName'] } copy_over = ['PrepID','RequestPriority', 'TimePerEvent', 'SizePerEvent', 'Group', 'Memory', 'RequestString' ] for c in copy_over: payload[c] = copy.deepcopy(initial[c]) if actions: for action in actions: #if action.startswith('split'): # factor = int(action.split('-')[-1]) if '-' in action else 2 # print "Changing time per event (%s) by a factor %d"%( payload['TimePerEvent'], factor) # ## mention it's taking 2 times longer to have a 2 times finer splitting # payload['TimePerEvent'] = factor*payload['TimePerEvent'] if action.startswith('mem'): increase = int(action.split('-')[-1]) if '-' in action else 1000 ## increase the memory requirement by 1G payload['Memory'] += increase if payload['RequestString'].startswith('ACDC'): print "This is not allowed yet" return None payload['RequestString'] = 'ACDC_'+payload['RequestString'] payload['InitialTaskPath'] = task if not do: print json.dumps( payload, indent=2) return None ## submit response = reqMgrClient.submitWorkflow(url, payload) m = re.search("details\/(.*)\'",response) if not m: print "Error in making ACDC for",initial["RequestName"] print response response = reqMgrClient.submitWorkflow(url, payload) m = re.search("details\/(.*)\'",response) if not m: print "Error twice in making ACDC for",initial["RequestName"] print response return None acdc = m.group(1) ## perform modifications if actions: for action in actions: if action.startswith('split'): factor = int(action.split('-')[-1]) if '-' in action else 2 acdcInfo = workflowInfo(url, acdc) splittings = acdcInfo.getSplittings() for split in splittings: for act in ['avg_events_per_job','events_per_job','lumis_per_job']: if act in split: print "Changing %s (%d) by a factor %d"%( act, 
split[act], factor), split[act] /= factor print "to",split[act] break split['requestName'] = acdc print "changing the splitting of",acdc print json.dumps( split, indent=2 ) print reqMgrClient.setWorkflowSplitting(url, split ) data = reqMgrClient.setWorkflowApproved(url, acdc) print data return acdc
def stagor(url, specific=None, options=None):
    """Monitor input-data transfers for workflows in 'staging' and promote them.

    Walks all workflows in status 'staging', checks the completion of their
    attached transfer requests, and either sets them 'staged' (inputs available),
    'away' (already running/assigned), 'considered' (endpoint in downtime), or
    'trouble' (too much data loss on the input). Writes several JSON status
    files for monitoring along the way.
    """
    if not componentInfo().check(): return
    SI = siteInfo()
    CI = campaignInfo()
    UC = unifiedConfiguration()
    TS = transferStatuses()
    cached_transfer_statuses = TS.content()
    transfer_statuses = {}

    # per-workflow-id / per-dataset transfer completion bookkeeping
    done_by_wf_id = {}
    done_by_input = {}
    completion_by_input = {}
    good_enough = 100.0  # percent completion considered "done"

    # previously recorded data-loss information
    lost_blocks = json.loads(eosRead('%s/lost_blocks_datasets.json' % monitor_dir))
    lost_files = json.loads(eosRead('%s/lost_files_datasets.json' % monitor_dir))
    known_lost_blocks = {}
    known_lost_files = {}
    for dataset in set(lost_blocks.keys() + lost_files.keys()):
        # re-verify that the recorded losses are still real
        b, f = findLostBlocksFiles(url, dataset)
        if dataset in lost_blocks and not b:
            print dataset, "has no really lost blocks"
        else:
            known_lost_blocks[dataset] = [i['name'] for i in b]
        if dataset in lost_files and not f:
            print dataset, "has no really lost files"
        else:
            known_lost_files[dataset] = [i['name'] for i in f]

    def time_point(label="", sub_lap=False):
        # progress/timing tracer; state kept on the function object itself
        now = time.mktime(time.gmtime())
        nows = time.asctime(time.gmtime())
        print "Time check (%s) point at : %s" % (label, nows)
        print "Since start: %s [s]" % (now - time_point.start)
        if sub_lap:
            print "Sub Lap : %s [s]" % (now - time_point.sub_lap)
            time_point.sub_lap = now
        else:
            print "Lap : %s [s]" % (now - time_point.lap)
            time_point.lap = now
            time_point.sub_lap = now

    time_point.sub_lap = time_point.lap = time_point.start = time.mktime(time.gmtime())

    time_point("Check cached transfer")

    ## collect all datasets that are needed for wf in staging, correcting the status of those that are not really in staging
    wfois = []
    needs = defaultdict(list)
    needs_by_priority = defaultdict(list)
    for wfo in session.query(Workflow).filter(Workflow.status == 'staging').all():
        wfi = workflowInfo(url, wfo.name)
        if wfi.request['RequestStatus'] in ['running-open', 'running-closed', 'completed', 'assigned', 'acquired']:
            # already past staging upstream: move it along
            wfi.sendLog('stagor', "is in status %s" % wfi.request['RequestStatus'])
            wfo.status = 'away'
            session.commit()
            continue
        if not wfi.request['RequestStatus'] in ['assignment-approved']:
            ## should be setting 'away' too
            ## that usually happens for relvals
            if wfi.request['RequestStatus'] in ['rejected', 'aborted', 'aborted-completed', 'aborted-archived', 'rejected-archived'] and wfi.isRelval():
                wfo.status = 'forget'
                session.commit()
                continue
            else:
                print wfo.name, "is", wfi.request['RequestStatus']
                #sendEmail("wrong status in staging. debug","%s is in %s, should set away."%(wfo.name,wfi.request['RequestStatus']))
                sendLog("stagor", "%s is in %s, set away" % (wfo.name, wfi.request['RequestStatus']), level='critical')
                wfo.status = 'away'
                session.commit()
                continue
        wfois.append((wfo, wfi))
        _, primaries, _, secondaries = wfi.getIO()
        for dataset in list(primaries) + list(secondaries):
            needs[wfo.name].append(dataset)
            done_by_input[dataset] = {}
            completion_by_input[dataset] = {}
            needs_by_priority[wfi.request['RequestPriority']].append(dataset)
            wfi.sendLog('stagor', '%s needs %s' % (wfo.name, dataset))

    time_point("Check staging workflows")

    open('%s/dataset_requirements.json' % monitor_dir, 'w').write(json.dumps(needs, indent=2))
    for prio in needs_by_priority:
        needs_by_priority[prio] = list(set(needs_by_priority[prio]))
    open('%s/dataset_priorities.json' % monitor_dir, 'w').write(json.dumps(needs_by_priority, indent=2))

    dataset_endpoints = defaultdict(set)
    endpoint_in_downtime = defaultdict(set)
    #endpoint_completed = defaultdict(set)
    endpoint_incompleted = defaultdict(set)
    #endpoint = defaultdict(set)
    send_back_to_considered = set()

    ## first check if anything is inactive
    all_actives = set([transfer.phedexid for transfer in session.query(TransferImp).filter(TransferImp.active).all()])
    for active_phedexid in all_actives:
        skip = True
        transfers_phedexid = session.query(TransferImp).filter(TransferImp.phedexid == active_phedexid).all()
        for imp in transfers_phedexid:
            if imp.workflow.status == 'staging':
                # at least one workflow still staging keeps the transfer active
                skip = False
                sendLog('stagor', "\t%s is staging for %s" % (imp.phedexid, imp.workflow.name))
        if skip:
            sendLog('stagor', "setting %s inactive" % active_phedexid)
            for imp in transfers_phedexid:
                imp.active = False
        session.commit()

    all_actives = sorted(set([transfer.phedexid for transfer in session.query(TransferImp).filter(TransferImp.active).all()]))
    for phedexid in all_actives:
        if specific: continue

        ## check on transfer completion
        # NOTE(review): not_cached is initialized False but never set True anywhere
        # below, so the "Transfer status was not cached" branch is unreachable —
        # looks like the cache-miss path was meant to set it; confirm intent.
        not_cached = False
        if phedexid in cached_transfer_statuses:
            ### use a cache for transfer that already looked done
            sendLog('stagor', "read %s from cache" % phedexid)
            checks = cached_transfer_statuses[phedexid]
        else:
            ## I actually would like to avoid that all I can
            sendLog('stagor', 'Performing spurious transfer check on %s' % phedexid, level='critical')
            checks = checkTransferStatus(url, phedexid, nocollapse=True)
            try:
                print json.dumps(checks, indent=2)
            except:
                print checks
            if not checks:
                ## this is going to bias quite heavily the rest of the code. we should abort here
                #sendLog('stagor','Ending stagor because of skewed input from checkTransferStatus', level='critical')
                #return False
                sendLog('stagor', 'Stagor has got a skewed input from checkTransferStatus', level='critical')
                checks = {}
                pass
            else:
                TS.add(phedexid, checks)

        time_point("Check transfer status %s" % phedexid, sub_lap=True)

        if not specific:
            for dsname in checks:
                if not dsname in done_by_input: done_by_input[dsname] = {}
                if not dsname in completion_by_input: completion_by_input[dsname] = {}
                # a dataset is "done" for this transfer when every node is >= good_enough
                done_by_input[dsname][phedexid] = all(map(lambda i: i >= good_enough, checks[dsname].values()))
                completion_by_input[dsname][phedexid] = checks[dsname].values()
        if checks:
            sendLog('stagor', "Checks for %s are %s" % (phedexid, [node.values() for node in checks.values()]))
            done = all(map(lambda i: i >= good_enough, list(itertools.chain.from_iterable([node.values() for node in checks.values()]))))
        else:
            ## it is empty, is that a sign that all is done and away ?
            if not_cached:
                print "Transfer status was not cached"
            else:
                print "ERROR with the scubscriptions API of ", phedexid
                print "Most likely something else is overiding the transfer request. Need to work on finding the replacement automatically, if the replacement exists"
            done = False

        transfers_phedexid = session.query(TransferImp).filter(TransferImp.phedexid == phedexid).all()
        for imp in transfers_phedexid:
            tr_wf = imp.workflow
            if tr_wf:  # and tr_wf.status == 'staging':
                if not tr_wf.id in done_by_wf_id:
                    done_by_wf_id[tr_wf.id] = {}
                done_by_wf_id[tr_wf.id][phedexid] = done
            if done:
                # completed transfers are retired from the active set
                imp.active = False
                session.commit()

        for ds in checks:
            for s, v in checks[ds].items():
                dataset_endpoints[ds].add(s)

        if done:
            sendLog('stagor', "%s is done" % phedexid)
            TS.add(phedexid, checks)
        else:
            sendLog('stagor', "%s is not finished %s" % (phedexid, pprint.pformat(checks)))
            ##pprint.pprint( checks )
            ## check if the destination is in down-time
            for ds in checks:
                sites_incomplete = [SI.SE_to_CE(s) for s, v in checks[ds].items() if v < good_enough]
                sites_incomplete_down = [s for s in sites_incomplete if not s in SI.sites_ready]
                ## no space means no transfer should go there : NO, it does not work in the long run
                #sites_incomplete_down = [SI.SE_to_CE(s) for s,v in checks[ds].items() if (v<good_enough and (SI.disk[s]==0 or (not SI.SE_to_CE(s) in SI.sites_ready)))]
                if sites_incomplete_down:
                    sendLog('stagor', "%s are in downtime, while waiting for %s to get there" % (",".join(sites_incomplete_down), ds))
                    endpoint_in_downtime[ds].update(sites_incomplete_down)
                if sites_incomplete:
                    endpoint_incompleted[ds].update(sites_incomplete)

    time_point("Check on-going transfers")

    print "End points"
    for k in dataset_endpoints:
        dataset_endpoints[k] = list(dataset_endpoints[k])
    print json.dumps(dataset_endpoints, indent=2)
    print "End point in down time"
    for k in endpoint_in_downtime:
        endpoint_in_downtime[k] = list(endpoint_in_downtime[k])
    print json.dumps(endpoint_in_downtime, indent=2)
    print "End point incomplete in down time"
    for k in endpoint_incompleted:
        endpoint_incompleted[k] = list(endpoint_incompleted[k])
    print json.dumps(endpoint_incompleted, indent=2)

    #open('%s/transfer_statuses.json'%monitor_dir,'w').write( json.dumps( transfer_statuses, indent=2))
    eosFile('%s/transfer_statuses.json' % monitor_dir, 'w').write(json.dumps(TS.content(), indent=2)).close()
    eosFile('%s/dataset_endpoints.json' % monitor_dir, 'w').write(json.dumps(dataset_endpoints, indent=2)).close()

    already_stuck = json.loads(eosRead('%s/stuck_transfers.json' % monitor_pub_dir)).keys()
    already_stuck.extend(getAllStuckDataset())

    missing_in_action = defaultdict(list)

    print "-" * 10, "Checking on workflows in staging", "-" * 10
    #forget_about = ['/MinBias_TuneCUETP8M1_13TeV-pythia8/RunIISummer15GS-MCRUN2_71_V1-v2/GEN-SIM']
    #for what in forget_about:
    #    if not done_by_input[what]:
    #        done_by_input[what] = {'fake':True}

    ## come back to workflows and check if they can go
    # caches keyed by dataset then by the sorted CSV of allowed SEs
    available_cache = defaultdict(lambda: defaultdict(float))
    presence_cache = defaultdict(dict)

    time_point("Preparing for more")

    for wfo, wfi in wfois:
        print "#" * 30
        time_point("Forward checking %s" % wfo.name, sub_lap=True)
        ## the site white list takes site, campaign, memory and core information
        (_, primaries, _, secondaries, sites_allowed) = wfi.getSiteWhiteList(verbose=False)
        se_allowed = [SI.CE_to_SE(site) for site in sites_allowed]
        se_allowed.sort()
        se_allowed_key = ','.join(se_allowed)
        readys = {}
        for need in list(primaries) + list(secondaries):
            if not need in done_by_input:
                wfi.sendLog('stagor', "missing transfer report for %s" % need)
                readys[need] = False
                ## should warn someone about this !!!
                ## it cannot happen, by construction
                sendEmail('missing transfer report', '%s does not have a transfer report' % (need))
                continue
            if not done_by_input[need] and need in list(secondaries):
                # secondaries (pileup) may be pre-placed without a transfer record
                wfi.sendLog('stagor', "assuming it is OK for secondary %s to have no attached transfers" % need)
                readys[need] = True
                done_by_input[need] = {"fake": True}
                continue
            if len(done_by_input[need]) and all(done_by_input[need].values()):
                wfi.sendLog('stagor', "%s is ready" % need)
                print json.dumps(done_by_input[need], indent=2)
                readys[need] = True
            else:
                wfi.sendLog('stagor', "%s is not ready \n%s" % (need, json.dumps(done_by_input[need], indent=2)))
                readys[need] = False

        if readys and all(readys.values()):
            if wfo.status == 'staging':
                wfi.sendLog('stagor', "all needs are fullfilled, setting staged")
                wfo.status = 'staged'
                session.commit()
            else:
                wfi.sendLog('stagor', "all needs are fullfilled, already")
                print json.dumps(readys, indent=2)
        else:
            wfi.sendLog('stagor', "missing requirements")
            copies_needed, _ = wfi.getNCopies()
            jump_ahead = False
            re_transfer = False
            ## there is missing input let's do something more elaborated
            for need in list(primaries):  #+list(secondaries):
                if endpoint_in_downtime[need] and endpoint_in_downtime[need] == endpoint_incompleted[need]:
                    #print need,"is going to an end point in downtime"
                    wfi.sendLog('stagor', "%s has only incomplete endpoint in downtime\n%s" % (need, endpoint_in_downtime[need]))
                    re_transfer = True
                if not se_allowed_key in available_cache[need]:
                    available_cache[need][se_allowed_key] = getDatasetBlocksFraction(url, need, sites=se_allowed)
                if available_cache[need][se_allowed_key] >= copies_needed:
                    wfi.sendLog('stagor', "assuming it is OK to move on like this already for %s" % need)
                    jump_ahead = True
                else:
                    wfi.sendLog('stagor', "Available %s times" % available_cache[need][se_allowed_key])
                    missing_and_downtime = list(set(endpoint_in_downtime[need]) & set(endpoint_incompleted[need]))
                    if missing_and_downtime:
                        wfi.sendLog('stagor', "%s is incomplete at %s which is in downtime, trying to move along" % (need, ','.join(missing_and_downtime)))
                        jump_ahead = True
                    else:
                        wfi.sendLog('stagor', "continue waiting for transfers for optimum production performance.")

            ## compute a time since staging to filter jump starting ?
            # check whether the inputs is already in the stuck list ...
            for need in list(primaries) + list(secondaries):
                if need in already_stuck:
                    wfi.sendLog('stagor', "%s is stuck, so try to jump ahead" % need)
                    jump_ahead = True

            if jump_ahead or re_transfer:
                # jump-start: accept one fewer copy than requested (never below 1)
                details_text = "checking on availability for %s to jump ahead" % wfo.name
                details_text += '\n%s wants %s copies' % (wfo.name, copies_needed)
                copies_needed = max(1, copies_needed - 1)
                details_text += '\nlowering by one unit to %s' % copies_needed
                wfi.sendLog('stagor', details_text)
                all_check = True

                prim_where = set()
                for need in list(primaries):
                    if not se_allowed_key in presence_cache[need]:
                        presence_cache[need][se_allowed_key] = getDatasetPresence(url, need, within_sites=se_allowed)
                    presence = presence_cache[need][se_allowed_key]
                    prim_where.update(presence.keys())
                    available = available_cache[need][se_allowed_key]
                    this_check = (available >= copies_needed)
                    wfi.sendLog('stagor', "%s is available %s times (%s), at %s" % (need, available, this_check, se_allowed_key))
                    all_check &= this_check
                    if not all_check: break

                for need in list(secondaries):
                    ## I do not want to check on the secon
                    ## this below does not function because the primary could be all available, and the secondary not complete at a certain site that does not matter at that point
                    this_check = all(done_by_input[need].values())
                    wfi.sendLog('stagor', "%s is this much transfered %s" % (need, json.dumps(done_by_input[need], indent=2)))
                    all_check &= this_check
                    #if not se_allowed_key in presence_cache[need]:
                    #    presence_cache[need][se_allowed_key] = getDatasetPresence( url, need , within_sites=se_allowed)
                    ## restrict to where the primary is
                    #presence = dict([(k,v) for (k,v) in presence_cache[need][se_allowed_key].items() if k in prim_where])
                    #this_check = all([there for (there,frac) in presence.values()])
                    #print need,"is present at all sites:",this_check
                    #all_check&= this_check

                if all_check and not re_transfer:
                    wfi.sendLog('stagor', "needs are sufficiently fullfilled, setting staged")
                    wfo.status = 'staged'
                    session.commit()
                else:
                    print wfo.name, "has to wait a bit more"
                    wfi.sendLog('stagor', "needs to wait a bit more")
            else:
                wfi.sendLog('stagor', "not checking availability")

            if re_transfer:
                wfi.sendLog('stagor', "Sending back to considered because of endpoint in downtime")
                if wfo.status == 'staging':
                    wfo.status = 'considered'
                    session.commit()
                    send_back_to_considered.add(wfo.name)

    time_point("Checked affected workflows")

    if send_back_to_considered:
        #sendEmail("transfer to endpoint in downtime","sending back to considered the following workflows \n%s"%('\n'.join( send_back_to_considered)))
        sendLog('stagor', "sending back to considered the following workflows \n%s" % ('\n'.join(send_back_to_considered)), level='critical')

    print "-" * 10, "Checking on non-available datasets", "-" * 10
    ## now check on those that are not fully available
    for dsname in available_cache.keys():
        ## squash the se_allowed_key key
        available_cache[dsname] = min(available_cache[dsname].values())

    really_stuck_dataset = set()

    for dsname, available in available_cache.items():
        using_its = getWorkflowByInput(url, dsname)
        #print using_its
        using_wfos = []
        for using_it in using_its:
            wf = session.query(Workflow).filter(Workflow.name == using_it).first()
            if wf:
                using_wfos.append(wf)

        if not len(done_by_input[dsname]):
            print "For dataset", dsname, "there are no transfer report. That's an issue."
            for wf in using_wfos:
                if wf.status == 'staging':
                    if UC.get("stagor_sends_back"):
                        print "sending", wf.name, "back to considered"
                        wf.status = 'considered'
                        session.commit()
                        #sendEmail( "send back to considered","%s was send back and might be trouble"% wf.name)
                        sendLog('stagor', "%s was send back and might be trouble" % wf.name, level='critical')
                    else:
                        print "would send", wf.name, "back to considered"
                        #sendEmail( "subscription lagging behind","susbscriptions to get %s running are not appearing in phedex. I would have send it back to considered but that's not good."% wf.name)
                        sendLog('stagor', "susbscriptions to get %s running are not appearing in phedex. I would have send it back to considered but that's not good." % wf.name, level='critical')
            continue

        ## not compatible with checking on secondary availability
        #if all([wf.status != 'staging' for wf in using_wfos]):
        #    ## means despite all checks that input is not needed
        #    continue

        if available < 1.:
            print "incomplete", dsname
            ## there is a problem in the method below that it does not account for files stuck in T1*Buffer only
            # NOTE(review): this rebinds lost_blocks/lost_files, shadowing the
            # JSON dicts loaded at the top of the function — confirm intended.
            lost_blocks, lost_files = findLostBlocksFiles(url, dsname) if (not dsname.endswith('/RAW')) else ([], [])
            lost_block_names = [item['name'] for item in lost_blocks]
            lost_file_names = [item['name'] for item in lost_files]

            if lost_blocks:
                #print json.dumps( lost , indent=2 )
                ## estimate for how much !
                fraction_loss, _, n_missing = getDatasetBlockFraction(dsname, lost_block_names)
                print "We have lost", len(lost_block_names), "blocks", lost_block_names, "for %f%%" % (100. * fraction_loss)
                if fraction_loss > 0.05:
                    ## 95% completion mark
                    #sendEmail('we have lost too many blocks','%s is missing %d blocks, for %d events, %f %% loss'%(dsname, len(lost_block_names), n_missing, fraction_loss))
                    sendLog('stagor', '%s is missing %d blocks, for %d events, %3.2f %% loss' % (dsname, len(lost_block_names), n_missing, 100 * fraction_loss), level='critical')
                    ## the workflow should be rejected !
                    for wf in using_wfos:
                        if wf.status == 'staging':
                            print wf.name, "is doomed. setting to trouble"
                            wf.status = 'trouble'
                            session.commit()
                            sendLog('stagor', '%s has too much loss on the input dataset %s. Missing %d blocks, for %d events, %3.2f %% loss' % (wf.name, dsname, len(lost_block_names), n_missing, 100 * fraction_loss), level='critical')
                else:
                    ## probably enough to make a ggus and remove
                    if not dsname in known_lost_blocks:
                        #sendEmail('we have lost a few blocks', '%s is missing %d blocks, for %d events, %f %% loss\n\n%s'%(dsname, len(lost_block_names), n_missing, fraction_loss, '\n'.join( lost_block_names ) ))
                        sendLog('stagor', '%s is missing %d blocks, for %d events, %f %% loss\n\n%s' % (dsname, len(lost_block_names), n_missing, fraction_loss, '\n'.join(lost_block_names)), level='critical')
                        known_lost_blocks[dsname] = [i['name'] for i in lost_blocks]
                really_stuck_dataset.add(dsname)

            if lost_files:
                fraction_loss, _, n_missing = getDatasetFileFraction(dsname, lost_file_names)
                print "We have lost", len(lost_file_names), "files", lost_file_names, "for %f%%" % fraction_loss
                if fraction_loss > 0.05:
                    #sendEmail('we have lost too many files','%s is missing %d files, for %d events, %f %% loss'%(dsname, len(lost_file_names),n_missing, fraction_loss))
                    sendLog('stagor', '%s is missing %d files, for %d events, %f %% loss' % (dsname, len(lost_file_names), n_missing, fraction_loss), level='critical')
                    for wf in using_wfos:
                        if wf.status == 'staging':
                            print wf.name, "is doomed. setting to trouble"
                            wf.status = 'trouble'
                            session.commit()
                else:
                    ## probably enough to make a ggus and remove
                    if not dsname in known_lost_files:
                        #sendEmail('we have lost a few files','%s is missing %d files, for %d events, %f %% loss\n\n%s'%(dsname, len(lost_file_names),n_missing, fraction_loss, '\n'.join(lost_file_names)))
                        sendLog('stagor', '%s is missing %d files, for %d events, %f %% loss\n\n%s' % (dsname, len(lost_file_names), n_missing, fraction_loss, '\n'.join(lost_file_names)), level='critical')
                        known_lost_files[dsname] = [i['name'] for i in lost_files]

            ## should the status be change to held-staging and pending on a ticket
            missings = [pid for (pid, d) in done_by_input[dsname].items() if d == False]
            print "\t", done_by_input[dsname]
            print "\tneeds", len(done_by_input[dsname])
            print "\tgot", done_by_input[dsname].values().count(True)
            print "\tmissing", missings
            missing_in_action[dsname].extend(missings)

    rr = eosFile('%s/lost_blocks_datasets.json' % monitor_dir, 'w')
    rr.write(json.dumps(known_lost_blocks, indent=2))
    rr.close()

    rr = eosFile('%s/lost_files_datasets.json' % monitor_dir, 'w')
    rr.write(json.dumps(known_lost_files, indent=2))
    rr.close()

    eosFile('%s/incomplete_transfers.json' % monitor_dir, 'w').write(json.dumps(missing_in_action, indent=2)).close()
    print "Stuck transfers and datasets"
    print json.dumps(missing_in_action, indent=2)

    TD = transferDataset()
    datasets_by_phid = defaultdict(set)
    for dataset in missing_in_action:
        for phid in missing_in_action[dataset]:
            #print dataset,"stuck through",phid
            datasets_by_phid[phid].add(dataset)

    for k in datasets_by_phid:
        #datasets_by_phid[k] = list(datasets_by_phid[k])
        TD.add(k, list(datasets_by_phid[k]))

    #eosFile('%s/datasets_by_phid.json'%base_eos_dir,'w').write( json.dumps(datasets_by_phid, indent=2 )).close()

    eosFile('%s/really_stuck_dataset.json' % base_eos_dir, 'w').write(json.dumps(list(really_stuck_dataset), indent=2)).close()
    print '\n' * 2, "Datasets really stuck"
    print '\n'.join(really_stuck_dataset)

    #############
    ## not going further for what matters
    #############
    return
def injector(url, options, specific):
    """Discover new requests in ReqMgr and insert them into the Unified database.

    Also handles replacement bookkeeping: workflows in 'trouble' are matched to
    newer requests with the same PrepID and, when found, the replacement is
    injected and the troubled one set to 'forget' (transfers are re-attached).
    """
    mlock = moduleLock()
    if mlock(): return  # another injector instance is running

    use_mcm = True
    up = componentInfo(soft=['mcm', 'wtc'])
    if not up.check(): return
    use_mcm = up.status['mcm']

    UC = unifiedConfiguration()

    transform_keywords = UC.get('convert_to_stepchain')

    # gather candidate workflows: default user plus ReReco/RelVal/StoreResults users
    workflows = getWorkflows(url, status=options.wmstatus, user=options.user)
    for user in UC.get("user_rereco"):
        workflows.extend(getWorkflows(url, status=options.wmstatus, user=user, rtype="ReReco"))
    for user in (options.user_relval.split(',') if options.user_relval else UC.get("user_relval")):
        workflows.extend(getWorkflows(url, status=options.wmstatus, user=user, rtype="TaskChain"))
    for user in (options.user_storeresults.split(',') if options.user_storeresults else UC.get("user_storeresults")):
        workflows.extend(getWorkflows(url, status=options.wmstatus, user=user, rtype="StoreResults"))

    print len(workflows), "in line"
    cannot_inject = set()
    to_convert = set()
    status_cache = defaultdict(str)  # dataset -> DBS status, memoized per run

    ## browse for assignment-approved requests, browsed for ours, insert the diff
    for wf in workflows:
        if specific and not specific in wf: continue
        exists = session.query(Workflow).filter(Workflow.name == wf).first()
        if not exists:
            wfi = workflowInfo(url, wf)
            ## check first that there isn't related here with something valid
            can_add = True
            ## first try at finding a match
            familly = session.query(Workflow).filter(Workflow.name.contains(wfi.request['PrepID'])).all()
            if not familly:
                # fallback: resolve the family through all PrepIDs known to ReqMgr
                pids = wfi.getPrepIDs()
                req_familly = []
                for pid in pids:
                    req_familly.extend(getWorkflowById(url, pid, details=True))
                familly = []
                print len(req_familly), "members"
                for req_member in req_familly:
                    #print "member",req_member['RequestName']
                    owfi = workflowInfo(url, req_member['RequestName'], request=req_member)
                    other_pids = owfi.getPrepIDs()
                    if set(pids) == set(other_pids):
                        ## this is a real match
                        familly.extend(session.query(Workflow).filter(Workflow.name == req_member['RequestName']).all())

            for lwfo in familly:
                if lwfo:
                    ## we have it already
                    if not lwfo.status in ['forget', 'trouble', 'forget-unlock', 'forget-out-unlock']:
                        wfi.sendLog('injector', "Should not put %s because of %s %s" % (wf, lwfo.name, lwfo.status))
                        sendLog('injector', "Should not put %s because of %s %s" % (wf, lwfo.name, lwfo.status), level='critical')
                        print "Should not put", wf, "because of", lwfo.name, lwfo.status
                        cannot_inject.add(wf)
                        can_add = False

            ## add a check on validity of input datasets
            _, prim, par, sec = wfi.getIO()
            for d in list(prim) + list(par) + list(sec):
                if not d in status_cache:
                    status_cache[d] = getDatasetStatus(d)
                if status_cache[d] != 'VALID':
                    wfi.sendLog('injector', "One of the input is not VALID. %s : %s" % (d, status_cache[d]))
                    sendLog('injector', "One of the input of %s is not VALID. %s : %s" % (wf, d, status_cache[d]), level='critical')
                    can_add = False
                ## check for any file in phedex, to verify existence
                _, ph_files, _, _ = getDatasetFiles(url, d)
                if not ph_files and not ('StoreResults' == wfi.request.setdefault('RequestType', None)):
                    wfi.sendLog('injector', "One of the input has no file in phedex: %s" % d)
                    sendLog('injector', "One of the input has no file in phedex: %s" % d, level='critical')
                    can_add = False

            ### ban some workflow that you don't like anymore
            #outputs = wfi.request['OutputDatasets']

            if not can_add: continue

            ## temporary hack to transform specific taskchain into stepchains
            #good_for_stepchain = wfi.isGoodToConvertToStepChain( keywords = transform_keywords)
            good_for_stepchain = wfi.isGoodToConvertToStepChain(keywords=None)

            ## match keywords and technical constraints
            #if (not options.no_convert) and good_for_stepchain and not wfi.isRelval():
            #    to_convert.add( wf )
            #    wfi.sendLog('injector','Transforming %s TaskChain into StepChain'%wf)
            #    #sendEmail('convertion to stepchain','Transforming %s TaskChain into StepChain'%wf)

            wfi.sendLog('injector', "considering %s" % wf)

            new_wf = Workflow(name=wf, status=options.setstatus, wm_status=options.wmstatus)
            session.add(new_wf)
            session.commit()
            time.sleep(0.5)
        else:
            #print "already have",wf
            pass

    if cannot_inject:
        #sendEmail('workflow duplicates','These workflow cannot be added in because of duplicates \n\n %s'%( '\n'.join(cannot_inject)))
        sendLog('injector', 'These workflow cannot be added in because of duplicates \n\n %s' % ('\n'.join(cannot_inject)), level='warning')

    for wf in to_convert:
        os.system('./Unified/rejector.py --clone --to_step --comments \"Transform to StepChain\" %s' % wf)

    ## passing a round of invalidation of what needs to be invalidated
    # NOTE(review): `options.invalidate or True` is always True — the option is
    # effectively ignored; confirm whether this short-circuit is intentional.
    if use_mcm and (options.invalidate or True):
        invalidator(url)

    no_replacement = set()

    #print "getting all transfers"
    #all_transfers=session.query(Transfer).all()
    #print "go!"

    ## pick up replacements
    for wf in session.query(Workflow).filter(Workflow.status == 'trouble').all():
        print wf.name
        if specific and not specific in wf.name: continue
        print wf.name
        wfi = workflowInfo(url, wf.name)
        wl = wfi.request  #getWorkLoad(url, wf.name)
        familly = getWorkflowById(url, wl['PrepID'])
        true_familly = []
        for member in familly:
            if member == wf.name: continue
            fwl = getWorkLoad(url, member)
            if options.replace:
                # explicit replacement requested on the command line
                if member != options.replace: continue
            else:
                # keep only plausible successors: newer, not a resubmission, not dead
                if fwl['RequestDate'] < wl['RequestDate']: continue
                if fwl['RequestType'] == 'Resubmission': continue
                if fwl['RequestStatus'] in ['None', None, 'new']: continue
                if fwl['RequestStatus'] in ['rejected', 'rejected-archived', 'aborted', 'aborted-archived']: continue
            true_familly.append(fwl)

        if len(true_familly) == 0:
            #sendLog('injector','%s had no replacement'%wf.name, level='critical')
            if wfi.isRelval():
                #wfi.sendLog('injector','the workflow was found in trouble with no replacement. As a relval, there is no clean way to handle this.')
                wfi.sendLog('injector', 'the workflow was found in trouble with no replacement. As a relval, there is no clean way to handle this. Setting forget')
                wf.status = 'forget'
                session.commit()
            else:
                wfi.sendLog('injector', 'the workflow was found in trouble with no replacement')
                no_replacement.add(wf.name)
            continue
        else:
            wfi.sendLog('injector', 'the workflow was found in trouble and has a replacement')

        print wf.name, "has", len(familly), "familly members"
        print wf.name, "has", len(true_familly), "true familly members"

        ##we cannot have more than one of them !!! pick the last one
        if len(true_familly) > 1:
            #sendEmail('multiple wf','please take a look at injector for %s'%wf.name)
            sendLog('injector', 'Multiple wf in line, will take the last one for %s \n%s' % (wf.name, ', '.join(fwl['RequestName'] for fwl in true_familly)), level='critical')

        for fwl in true_familly[-1:]:
            member = fwl['RequestName']
            new_wf = session.query(Workflow).filter(Workflow.name == member).first()
            if not new_wf:
                sendLog('injector', "putting %s as replacement of %s" % (member, wf.name))
                status = 'away'
                if fwl['RequestStatus'] in ['assignment-approved']:
                    status = 'considered'
                new_wf = Workflow(name=member, status=status, wm_status=fwl['RequestStatus'])
                wf.status = 'forget'
                session.add(new_wf)
            else:
                if new_wf.status == 'forget': continue
                sendLog('injector', "getting %s as replacement of %s" % (new_wf.name, wf.name))
                wf.status = 'forget'

            for tr in session.query(TransferImp).filter(TransferImp.workflow_id == wf.id).all():
                ## get all transfer working for the old workflow
                existing = session.query(TransferImp).filter(TransferImp.phedexid == tr.phedexid).filter(TransferImp.workflow_id == new_wf.id).all()
                tr.active = False  ## disable the old one
                if not existing:
                    ## create the transfer object for the new dependency
                    tri = TransferImp(phedexid=tr.phedexid, workflow=new_wf)
                    session.add(tri)
                session.commit()

        ## don't do that automatically
        #wf.status = 'forget'
        session.commit()
    if no_replacement:
        #sendEmail('workflow with no replacement','%s \n are dangling there'%( '\n'.join(no_replacement)))
        sendLog('injector', 'workflow with no replacement\n%s \n are dangling there' % ('\n'.join(no_replacement)), level='critical')
for status in reversed(statuses): wfls = getWorkflows(url , status = status,details=True) print len(wfls),"in",status for wl in wfls: ## unknonw to the system known = session.query(Workflow).filter(Workflow.name==wl['RequestName']).all() if not known: #print wl['RequestName'],"is unknown, this is bad news" ## no it is not continue if status == 'assignment-approved': if all([wfo.status == 'considered' for wfo in known]): ## skip those only assignment-approved / considered continue wfi = workflowInfo( url, wl['RequestName'], request = wl ,spec=False) (_,primaries,_,secondaries) = wfi.getIO() outputs = wfi.request['OutputDatasets'] for dataset in list(primaries)+list(secondaries)+outputs: if 'FAKE' in dataset: continue if 'None' in dataset: continue newly_locking.add(dataset) print len(newly_locking),"locks so far" waiting_for_custodial={} stuck_custodial={} lagging_custodial={} missing_approval_custodial={} transfer_timeout = UC.get("transfer_timeout") secondary_timeout = defaultdict(int)
def singleRecovery(url, task, initial, actions, do=False):
    """Build and (optionally) submit a Resubmission (ACDC) request for one task.

    url     : ReqMgr instance to talk to.
    task    : full task path to recover (becomes InitialTaskPath).
    initial : request dict of the original workflow being recovered.
    actions : dict of operator actions (memory / multicore / splitting tweaks);
              may be None or empty.
    do      : when False, only print the would-be payload and return None.

    Returns the new ACDC request name on success, None on refusal/failure.
    """
    print "Inside single recovery!"
    # Skeleton of the Resubmission request; everything else is copied from `initial`.
    payload = {
        "Requestor": os.getenv('USER'),
        "Group": 'DATAOPS',
        "RequestType": "Resubmission",
        "ACDCServer": initial['ConfigCacheUrl'],
        "ACDCDatabase": "acdcserver",
        "OriginalRequestName": initial['RequestName'],
        "OpenRunningTimeout": 0
    }
    copy_over = [
        'PrepID', 'Campaign', 'RequestPriority', 'TimePerEvent',
        'SizePerEvent', 'Group', 'Memory', 'RequestString', 'CMSSWVersion'
    ]
    for c in copy_over:
        if c in initial:
            payload[c] = copy.deepcopy(initial[c])
        else:
            print c, "not in the initial payload"

    #a massage ? boost the recovery over the initial wf
    #    payload['RequestPriority'] *= 10
    #Max priority is 1M
    payload['RequestPriority'] = min(500000, payload['RequestPriority'] * 2)  ## never above 500k

    #change parameters based on actions here
    if actions:
        for action in actions:
            # ---- memory override (skipped if a multicore action is also present,
            #      since the multicore branch below rescales memory itself) ----
            if action.startswith('mem') and actions[action] != "" and actions[action] != 'Same':
                #if multicore parameter is also used, need to scale memory by the new number of cores
                if 'multicore' in actions and actions['multicore'] != "":
                    continue
                ## Taskchains needs to be treated special to set the memory to all tasks
                set_to = int(actions[action])
                if 'TaskChain' in initial:
                    # per-task memory dict keyed by TaskName
                    mem_dict = {}
                    it = 1
                    while True:
                        t = 'Task%d' % it
                        it += 1
                        if t in initial:
                            tname = payload.setdefault(t, initial[t])['TaskName']
                            # NOTE(review): `mem` is set but unused in this branch
                            mem = mem_dict.setdefault(tname, payload[t]['Memory'])
                            mem_dict[tname] = set_to
                        else:
                            break
                    payload['Memory'] = mem_dict
                    print "Memory set to: ", json.dumps(mem_dict, indent=2)
                else:
                    payload['Memory'] = set_to
                    print "Memory set to: ", set_to

            # ---- multicore override: rescale memory by the new core count ----
            if action.startswith('multicore') and actions[action] != "":
                set_to = int(actions[action])
                ## Taskchains needs to be treated special to set the multicore and memory values to all tasks
                if 'TaskChain' in initial:
                    mem_dict = payload['Memory'] if type(payload['Memory']) == dict else {}
                    core_dict = {}
                    it = 1
                    while True:
                        t = 'Task%d' % it
                        it += 1
                        if t in initial:
                            tname = payload.setdefault(t, initial[t])['TaskName']
                            mem = mem_dict.setdefault(tname, payload[t]['Memory'])
                            #Need to scale the memory by the new number of cores
                            initial_cores = payload[t].setdefault('Multicore', 1)
                            if 'memory' in actions and actions['memory'] != "" and actions['memory'] != 'Same':
                                mem = actions['memory']
                            # 40% of the memory is treated as fixed overhead; the rest
                            # scales linearly with the number of cores
                            fraction_constant = 0.4
                            mem_per_core_c = int((1 - fraction_constant) * mem / float(initial_cores))
                            mem_dict[tname] = int(mem + (set_to - initial_cores) * mem_per_core_c)
                            core_dict[tname] = set_to
                            print "For ", t
                            print "Multicore set to ", set_to
                            print "Memory set to ", mem_dict[tname]
                        else:
                            break
                    payload['Memory'] = mem_dict
                    payload['Multicore'] = core_dict
                else:
                    #Need to scale the memory by the new number of cores
                    initial_cores = initial.setdefault('Multicore', 1)
                    mem = payload['Memory']
                    if 'memory' in actions and actions['memory'] != "" and actions['memory'] != 'Same':
                        mem = actions['memory']
                    fraction_constant = 0.4
                    mem_per_core_c = int((1 - fraction_constant) * mem / float(initial_cores))
                    payload['Multicore'] = set_to
                    payload['Memory'] = int(mem + (set_to - initial_cores) * mem_per_core_c)
                    print "Multicore set to ", set_to
                    print "Memory set to ", payload['Memory']

            # ---- splitting change: refuse outright for event-based workloads ----
            if action.startswith('split'):
                split_alert = (initial['RequestType'] in ['MonteCarlo'])
                for key in initial:
                    if key == 'SplittingAlgo' and (initial[key] in ['EventBased']):
                        split_alert = True
                    elif key.startswith('Task') and key != 'TaskChain':
                        for key2 in initial[key]:
                            if key2 == 'TaskName':
                                this_taskname = initial[key][key2]
                                recover_task = task.split('/')[-1]
                                print "For recovery of task", recover_task
                                print "Looking at task", this_taskname
                                if (recover_task == this_taskname) and (initial[key]['SplittingAlgo'] in ['EventBased']):
                                    ## the task to be recovered is actually of the wrong type to allow change of splitting
                                    sendLog(
                                        'actor',
                                        'To recover on %s, changing the splitting on %s is not really allowed and this will be ignored instead of failing acdc.'
                                        % (task, initial[key]['SplittingAlgo']),
                                        level='critical')
                                    ## do not send an alert and stop the acdc
                                    #split_alert = True
                if split_alert:
                    sendLog('actor',
                            'Cannot change splitting for %s' % initial['RequestName'],
                            level='critical')
                    print "I should not be doing splitting for this type of request", initial['RequestName']
                    return None

    # Derive the new RequestString: ACDC<round>_<original string>, bumping the
    # round number when the original was itself an ACDC.
    acdc_round = 0
    initial_string = payload['RequestString']
    if initial_string.startswith('ACDC'):
        if initial_string[4].isdigit():
            acdc_round = int(initial_string[4])
        acdc_round += 1
        initial_string = initial_string.replace('ACDC_', '').replace('ACDC%d_' % (acdc_round - 1), '')
    payload['RequestString'] = 'ACDC%d_%s' % (acdc_round, initial_string)
    payload['InitialTaskPath'] = task

    if not do:
        # dry-run: show what would be submitted
        print json.dumps(payload, indent=2)
        return None

    print "ACDC payload"
    #    print json.dumps( payload , indent=2)
    print actions

    ## submit here — retried once on failure before giving up
    acdc = reqMgrClient.submitWorkflow(url, payload)
    if not acdc:
        print "Error in making ACDC for", initial["RequestName"]
        acdc = reqMgrClient.submitWorkflow(url, payload)
        if not acdc:
            print "Error twice in making ACDC for", initial["RequestName"]
            sendLog('actor',
                    'Failed twice in making ACDCs for %s!' % initial['RequestName'],
                    level='critical')
            return None

    ## change splitting if requested
    if actions:
        for action in actions:
            if action.startswith('split'):
                acdcInfo = workflowInfo(url, acdc)
                splittings = acdcInfo.getSplittingsNew(strip=True)
                if actions[action] != 'Same' and actions[action] != 'max':
                    # "Nx" means divide the per-job quantum by N; bare value defaults to 2
                    factor = int(actions[action][0:-1]) if 'x' in actions[action] else 2
                    for split in splittings:
                        split_par = split['splitParams']
                        if split['splitAlgo'] in ['EventBased']:
                            sendLog(
                                'actor',
                                "Changing the splitting on %s for %s is not permitted. Not changing."
                                % (split['splitAlgo'], initial["RequestName"]),
                                level='critical')
                            continue
                        # shrink the first applicable per-job parameter
                        for act in ['avg_events_per_job', 'events_per_job', 'lumis_per_job']:
                            if act in split_par:
                                print "Changing %s (%d) by a factor %d" % (act, split_par[act], factor),
                                split_par[act] /= factor
                                print "to", split_par[act]
                                break
                        #split['requestName'] = acdc
                        #print "changing the splitting of",acdc
                        #print json.dumps( split, indent=2 )
                        #print reqMgrClient.setWorkflowSplitting(url, acdc, split )
                elif 'max' in actions[action]:
                    # maximum splitting: one unit per job
                    for split in splittings:
                        split_par = split['splitParams']
                        for act in ['avg_events_per_job', 'events_per_job', 'lumis_per_job']:
                            if act in split_par:
                                print "Changing %s (%d) " % (act, split_par[act]),
                                split_par[act] = 1
                                print "to max splitting ", split_par[act]
                                break
                        #split['requestName'] = acdc
                        #print "changing the splitting of",acdc
                        #print json.dumps( split, indent=2 )
                        #print reqMgrClient.setWorkflowSplitting(url, acdc, split )
                print "changing the splitting of", acdc
                print json.dumps(splittings, indent=2)
                done = reqMgrClient.setWorkflowSplitting(url, acdc, splittings)
                ## check on done == True

    data = reqMgrClient.setWorkflowApproved(url, acdc)
    print data
    return acdc
def spawn_harvesting(url, wfi, sites_for_DQMHarvest):
    """Create, approve and assign DQMHarvest requests for a workflow's /DQM outputs.

    Only acts when the workflow has harvesting disabled (EnableHarvesting false)
    but carries a DQMConfigCacheID — i.e. harvesting must be spawned separately.

    url                  : ReqMgr instance.
    wfi                  : workflowInfo of the parent workflow.
    sites_for_DQMHarvest : site whitelist forced onto the harvesting assignment.

    Returns (all_OK, requests): per-dataset success flags and the list of
    harvesting request names created.
    """
    SI = global_SI()
    all_OK = {}
    requests = []
    outputs = wfi.request['OutputDatasets']
    if ('EnableHarvesting' in wfi.request and not wfi.request['EnableHarvesting']) and (
            'DQMConfigCacheID' in wfi.request and wfi.request['DQMConfigCacheID']):
        if not 'MergedLFNBase' in wfi.request:
            # broken workload cache: cannot build the harvesting schema
            print "f****d up"
            sendEmail('screwed up wl cache',
                      '%s wl cache is bad' % (wfi.request['RequestName']))
            all_OK['fake'] = False
            return all_OK, requests

        # refresh the workflow info from ReqMgr before building schemas
        wfi = workflowInfo(url, wfi.request['RequestName'])
        dqms = [out for out in outputs if '/DQM' in out]
        #if not all([in_full[dqm_input] for dqm_input in dqms]):
        #    wfi.sendLog('closor',"will not be able to assign the harvesting: holding up")
        #    for dqm_input in dqms:
        #        all_OK[dqm_input] = False
        ## raise the subscription to high priority
        for dqm_input in dqms:
            ## handle it properly
            harvesting_schema = {
                'Requestor': os.getenv('USER'),
                'RequestType': 'DQMHarvest',
                'Group': 'DATAOPS'
            }
            # fields inherited verbatim from the parent request
            # NOTE(review): 'CMSSWVersion' appears twice in this list — harmless
            # (second copy overwrites with the same value) but likely a paste slip.
            copy_over = [
                'AcquisitionEra',
                'ProcessingString',
                'DQMUploadUrl',
                'CMSSWVersion',
                'CouchDBName',
                'CouchWorkloadDBName',
                'ConfigCacheUrl',
                'DbsUrl',
                'inputMode',
                'DQMConfigCacheID',
                'OpenRunningTimeout',
                'ScramArch',
                'CMSSWVersion',
                'Campaign',
                'Memory',  #dummy
                'SizePerEvent',  #dummy
                'GlobalTag',  #dummy
            ]
            for item in copy_over:
                if item in wfi.request:
                    harvesting_schema[item] = copy.deepcopy(wfi.request[item])
                else:
                    print item, "is not in initial schema"

            harvesting_schema['InputDataset'] = dqm_input
            harvesting_schema['TimePerEvent'] = 1
            harvesting_schema['PrepID'] = 'Harvest-' + wfi.request['PrepID']
            # keep the derived RequestString within ReqMgr length limits
            if len(wfi.request['RequestString']) > 60:
                wfi.request['RequestString'] = wfi.request['RequestString'][:60]
                print "truncating request string", wfi.request['RequestString']
            harvesting_schema['RequestString'] = 'HARVEST-' + wfi.request['RequestString']
            harvesting_schema['DQMHarvestUnit'] = 'byRun'
            # boost priority over the parent, capped below 1M
            harvesting_schema['RequestPriority'] = min(
                wfi.request['RequestPriority'] * 10, 999999)

            # submit, retrying once on failure
            harvest_request = reqMgrClient.submitWorkflow(url, harvesting_schema)
            if not harvest_request:
                print "Error in making harvesting for", wfi.request['RequestName']
                print "schema"
                print json.dumps(harvesting_schema, indent=2)
                harvest_request = reqMgrClient.submitWorkflow(url, harvesting_schema)
                if not harvest_request:
                    print "Error twice in harvesting for", wfi.request['RequestName']
                    print "schema"
                    print json.dumps(harvesting_schema, indent=2)

            if harvest_request:
                requests.append(harvest_request)
                ## should we protect for setting approved ? no, it's notified below, assignment will fail, likely
                data = reqMgrClient.setWorkflowApproved(url, harvest_request)
                print "created", harvest_request, "for harvesting of", dqm_input
                wfi.sendLog(
                    'closor',
                    "created %s for harvesting of %s" % (harvest_request, dqm_input))
                ## assign it directly
                team = wfi.request['Team']
                parameters = {
                    'SiteWhitelist': [
                        SI.SE_to_CE(se) for se in wfi.request['NonCustodialSites']
                    ],
                    'AcquisitionEra': wfi.acquisitionEra(),
                    'ProcessingString': wfi.processingString(),
                    'MergedLFNBase': wfi.request['MergedLFNBase'],
                    'ProcessingVersion': wfi.request['ProcessingVersion'],
                    'execute': True
                }
                #if in_full[dqm_input]:
                #    print "using full copy at",in_full[dqm_input]
                #    parameters['SiteWhitelist'] = [SI.SE_to_CE(se) for se in in_full[dqm_input]]
                #else:
                #    print "cannot do anything if not having a full copy somewhere"
                #    all_OK[dqm_input]=False
                #    continue
                # forced override: harvest only at the dedicated DQM harvest sites
                parameters['SiteWhitelist'] = sites_for_DQMHarvest

                result = reqMgrClient.assignWorkflow(url, harvest_request, team, parameters)
                if not result:
                    #sendEmail('harvesting request created','%s was created at announcement of %s in %s, failed to assign'%(harvest_request, dqm_input, wfi.request['RequestName']), destination=[wfi.request['Requestor']+'@cern.ch'])
                    wfi.sendLog(
                        'closor',
                        '%s was created at announcement of %s in %s, failed to assign'
                        % (harvest_request, dqm_input, wfi.request['RequestName']))
                    sendLog(
                        'closor',
                        '%s was created at announcement of %s in %s, failed to assign' %
                        (harvest_request, dqm_input, wfi.request['RequestName']),
                        level='critical')
                else:
                    #sendEmail('harvesting request assigned','%s was created at announcement of %s in %s, and assigned'%(harvest_request, dqm_input, wfi.request['RequestName']), destination=[wfi.request['Requestor']+'@cern.ch'])
                    wfi.sendLog(
                        'closor',
                        '%s was created at announcement of %s in %s, and assigned' %
                        (harvest_request, dqm_input, wfi.request['RequestName']))
            else:
                #print "could not make the harvesting for",wfo.name,"not announcing"
                wfi.sendLog('closor', "could not make the harvesting request")
                sendLog('closor',
                        "could not make the harvesting request for %s" %
                        wfi.request['RequestName'],
                        level='critical')
                all_OK[dqm_input] = False
    return (all_OK, requests)
wfs.extend(getWorkflows(url, 'running-open', details=True)) wfs.extend(getWorkflows(url, 'running-closed', details=True)) jobs_for = defaultdict(lambda: defaultdict(int)) wf_for = defaultdict(lambda: defaultdict(set)) agent_for = defaultdict(lambda: defaultdict(set)) s_block_locations = {} block_locations = defaultdict(lambda: defaultdict(list)) wfs_no_location_in_GQ = defaultdict(list) si = siteInfo() #bad_blocks = defaultdict( set ) unprocessable = set() for wf in wfs: if spec and not spec in wf['RequestName']: continue wfi = workflowInfo(url, wf['RequestName'], request=wf) sitewhitelist = wfi.request['SiteWhitelist'] wqs = wfi.getWorkQueue() #wqes = [w[w['type']] for w in wqs] print wf['RequestName'], len(wqs), "elements" for wq in wqs: wqe = wq[wq['type']] if not wqe['Status'] in ['Available', 'Acquired']: #print wqe['Status'] continue try: pid = filter(lambda w: w.count('-') == 2, wqe['RequestName'].split('_'))[0] camp = pid.split('-')[1] except:
def injector(url, options, specific):
    """Pull new workflows from ReqMgr into the local Unified database, and
    replace workflows in 'trouble' with their resubmitted successors.

    url      : ReqMgr instance.
    options  : parsed CLI options (wmstatus, user, setstatus, invalidate,
               replace, ...).
    specific : substring filter on workflow names; falsy means all.
    """
    use_mcm = True
    up = componentInfo(mcm=use_mcm, soft=['mcm'])
    if not up.check(): return
    use_mcm = up.status['mcm']

    workflows = getWorkflows(url, status=options.wmstatus, user=options.user)
    workflows.extend(
        getWorkflows(url, status=options.wmstatus, user='******', rtype="ReReco")
    )  ## regardless of users, pick up all ReReco on the table

    print len(workflows), "in line"
    cannot_inject = set()
    ## browse for assignment-approved requests, browsed for ours, insert the diff
    for wf in workflows:
        if specific and not specific in wf: continue
        exists = session.query(Workflow).filter(Workflow.name == wf).first()
        if not exists:
            wfi = workflowInfo(url, wf)
            #wl = getWorkLoad(url, wf)
            ## check first that there isn't related here with something valid
            can_add = True
            ## first try at finding a match
            #            print wfi.request
            familly = session.query(Workflow).filter(
                Workflow.name.contains(wfi.request['PrepID'])).all()
            if not familly:
                #req_familly = getWorkflowById( url, wl['PrepID'])
                #familly = [session.query(Workflow).filter(Workflow.name == member).first() for member in req_familly]
                # fall back to a PrepID-based search in ReqMgr, keeping only
                # requests whose full PrepID set matches this workflow's
                pids = wfi.getPrepIDs()
                req_familly = []
                for pid in pids:
                    req_familly.extend(getWorkflowById(url, pid, details=True))
                familly = []
                print len(req_familly), "members"
                for req_member in req_familly:
                    #print "member",req_member['RequestName']
                    owfi = workflowInfo(url,
                                        req_member['RequestName'],
                                        request=req_member)
                    other_pids = owfi.getPrepIDs()
                    if set(pids) == set(other_pids):
                        ## this is a real match
                        familly.extend(
                            session.query(Workflow).filter(
                                Workflow.name == req_member['RequestName']).all())

            for lwfo in familly:
                if lwfo:
                    ## we have it already
                    if not lwfo.status in [
                            'forget', 'trouble', 'forget-unlock',
                            'forget-out-unlock'
                    ]:
                        sendLog(
                            'injector', "Should not put %s because of %s %s" %
                            (wf, lwfo.name, lwfo.status))
                        print "Should not put", wf, "because of", lwfo.name, lwfo.status
                        cannot_inject.add(wf)
                        can_add = False
            if not can_add: continue

            wfi.sendLog('injector', "considering %s" % wf)
            new_wf = Workflow(name=wf,
                              status=options.setstatus,
                              wm_status=options.wmstatus)
            session.add(new_wf)
            session.commit()
            # small pause between inserts
            time.sleep(0.5)
        else:
            #print "already have",wf
            pass

    if cannot_inject:
        #sendEmail('workflow duplicates','These workflow cannot be added in because of duplicates \n\n %s'%( '\n'.join(cannot_inject)))
        sendLog(
            'injector',
            'These workflow cannot be added in because of duplicates \n\n %s' %
            ('\n'.join(cannot_inject)),
            level='warning')

    ## passing a round of invalidation of what needs to be invalidated
    # NOTE(review): `options.invalidate or True` is always True, so invalidation
    # runs whenever mcm is up — confirm whether the flag was meant to gate this.
    if use_mcm and (options.invalidate or True):
        invalidator(url)

    no_replacement = set()

    ## pick up replacements for workflows sitting in 'trouble'
    for wf in session.query(Workflow).filter(Workflow.status == 'trouble').all():
        print wf.name
        if specific and not specific in wf.name: continue
        print wf.name
        wfi = workflowInfo(url, wf.name)
        wl = wfi.request  #getWorkLoad(url, wf.name)
        familly = getWorkflowById(url, wl['PrepID'])
        true_familly = []
        for member in familly:
            if member == wf.name: continue
            fwl = getWorkLoad(url, member)
            if options.replace:
                # operator pinned an explicit replacement
                if member != options.replace: continue
            else:
                # candidate must be newer, not a Resubmission, and in a live status
                if fwl['RequestDate'] < wl['RequestDate']: continue
                if fwl['RequestType'] == 'Resubmission': continue
                if fwl['RequestStatus'] in ['None', None, 'new']: continue
                if fwl['RequestStatus'] in [
                        'rejected', 'rejected-archived', 'aborted',
                        'aborted-archived'
                ]:
                    continue
            true_familly.append(fwl)

        if len(true_familly) == 0:
            #sendLog('injector','%s had no replacement'%wf.name, level='critical')
            wfi.sendLog('injector',
                        'the workflow was found in trouble with no replacement')
            no_replacement.add(wf.name)
            continue
        else:
            wfi.sendLog(
                'injector',
                'the workflow was found in trouble and has a replacement')

        print wf.name, "has", len(familly), "familly members"
        print wf.name, "has", len(true_familly), "true familly members"

        ##we cannot have more than one of them !!! pick the last one
        if len(true_familly) > 1:
            #sendEmail('multiple wf','please take a look at injector for %s'%wf.name)
            sendLog('injector',
                    'Multiple wf in line, will take the last one for %s \n%s' %
                    (wf.name, ', '.join(fwl['RequestName']
                                        for fwl in true_familly)),
                    level='critical')

        for fwl in true_familly[-1:]:
            member = fwl['RequestName']
            new_wf = session.query(Workflow).filter(
                Workflow.name == member).first()
            if not new_wf:
                sendLog('injector',
                        "putting %s as replacement of %s" % (member, wf.name))
                status = 'away'
                if fwl['RequestStatus'] in ['assignment-approved']:
                    status = 'considered'
                new_wf = Workflow(name=member,
                                  status=status,
                                  wm_status=fwl['RequestStatus'])
                wf.status = 'forget'
                session.add(new_wf)
            else:
                if new_wf.status == 'forget': continue
                sendLog(
                    'injector',
                    "getting %s as replacement of %s" % (new_wf.name, wf.name))
                wf.status = 'forget'

            # re-attach any transfer that served the old workflow to the new one
            for tr in session.query(Transfer).all():
                if wf.id in tr.workflows_id:
                    sw = copy.deepcopy(tr.workflows_id)
                    sw.remove(wf.id)
                    sw.append(new_wf.id)
                    tr.workflows_id = sw
                    print tr.phedexid, "got", new_wf.name
                    if new_wf.status != 'away':
                        print "\t setting it considered"
                        new_wf.status = 'considered'
                    if tr.phedexid < 0:
                        ## set it back to positive
                        tr.phedexid = -tr.phedexid
                    session.commit()

        ## don't do that automatically
        #wf.status = 'forget'
        session.commit()

    if no_replacement:
        #sendEmail('workflow with no replacement','%s \n are dangling there'%( '\n'.join(no_replacement)))
        sendLog('injector',
                'workflow with no replacement, %s \n are dangling there' %
                ('\n'.join(no_replacement)),
                level='critical')
def outcleanor(url, options):
    """Interactively plan (and optionally trigger) deletion of our output-dataset
    copies, keeping one full copy unless the dataset is invalid/DQM/owned by
    AnalysisOps. With options.approve, only approve pending deletion requests.

    NOTE(review): indentation of this function was reconstructed from a
    whitespace-mangled source; the nesting of the keep-one-copy section was
    inferred — confirm against upstream before relying on edge-case behavior.
    """
    if options.approve:
        for user in ['*Vlimant']:  #,'*Cremonesi']:
            deletes = listDelete(url, user=user)
            for (site, who, tid) in deletes:
                if 'MSS' in site: continue  ### ever
                print site, who, tid
                print "approving deletion"
                print approveSubscription(
                    url,
                    tid,
                    nodes=[site],
                    comments='Production cleaning by data ops')
        return

    sites_and_datasets = defaultdict(list)
    our_copies = defaultdict(list)
    wf_cleaned = {}
    wfs = []
    for fetch in options.fetch.split(','):
        wfs.extend(
            session.query(Workflow).filter(Workflow.status == fetch).all())

    random.shuffle(wfs)
    last_answer = None
    for wfo in wfs:
        if options.number and len(wf_cleaned) >= options.number:
            print "Reached", options.number, "cleaned"
            break
        print '-' * 100
        wfi = workflowInfo(url, wfo.name)
        goes = {}  # boolean per output
        for dataset in wfi.request['OutputDatasets']:
            goes[dataset] = False
            keep_one_out = True
            status = getDatasetStatus(dataset)
            print "\n\tLooking at", dataset, status, "\n"
            vetoes = None
            if status == 'INVALID':
                vetoes = ['Export', 'Buffer']  ## can take themselves out
                keep_one_out = False  # just wipe clean
            elif status == None:
                print dataset, "actually does not exist. skip"
                goes[dataset] = True
                continue
            elif status in ['PRODUCTION', 'VALID'
                            ] and wfo.status in ['forget', 'trouble']:
                print dataset, "should probably be invalidated. (", wfo.status, ") skip"
                keep_one_out = False  # just wipe clean
                continue  ## you are not sure. just skip it for the time being
            elif status == 'PRODUCTION' and wfo.status in ['clean']:
                print dataset, "should probably be set valid .skip"
                continue  ## you are not sure. just skip it for the time being

            if status == 'VALID' and dataset.startswith('/MinBias'):
                print "This is a /MinBias. skip"
                continue

            if '/DQM' in dataset:
                keep_one_out = False

            total_size = getDatasetSize(dataset)
            # presence under the DataOps group and under no group;
            # merged below keeping the most complete/fullest answer per site
            our_presence = getDatasetPresence(url,
                                              dataset,
                                              complete=None,
                                              group="DataOps",
                                              vetoes=vetoes)
            also_our_presence = getDatasetPresence(url,
                                                   dataset,
                                                   complete=None,
                                                   group="",
                                                   vetoes=vetoes)
            ## merge in one unique dict
            for site in also_our_presence:
                if site in our_presence:
                    there, frac = our_presence[site]
                    other, ofrac = also_our_presence[site]
                    our_presence[site] = (max(there, other), max(frac, ofrac))
                else:
                    our_presence[site] = also_our_presence[site]

            if our_presence: print our_presence

            ## analysis ops copies need to be taken into account
            anaops_presence = getDatasetPresence(url,
                                                 dataset,
                                                 complete=None,
                                                 group="AnalysisOps")
            own_by_anaops = anaops_presence.keys()

            ## all our copies
            to_be_cleaned = our_presence.keys()
            if not len(to_be_cleaned):
                print "nowhere to be found of ours,", len(
                    own_by_anaops), "in analysi ops pool"
                goes[dataset] = True
                continue

            print "Where we own bits of dataset"
            print to_be_cleaned

            if len(own_by_anaops):
                ## remove site with the anaops copies
                to_be_cleaned = list(set(to_be_cleaned) - set(own_by_anaops))
                keep_one_out = False  ## in that case, just remove our copies
                print "Own by anaops (therefore not keep a copy of ours)"
                print own_by_anaops
            else:
                ## we should not be looking at anything that was not passed to DDM, otherwise we'll be cutting the grass under our feet
                using_the_same = getWorkflowByInput(url, dataset, details=True)
                conflict = False
                for other in using_the_same:
                    if other['RequestName'] == wfo.name: continue
                    if other['RequestType'] == 'Resubmission': continue
                    if not other['RequestStatus'] in [
                            'announced', 'normal-archived', 'aborted',
                            'rejected', 'aborted-archived', 'rejected-archived',
                            'closed-out', 'None', None
                    ]:
                        print other['RequestName'], 'is in status', other[
                            'RequestStatus'], 'preventing from cleaning', dataset
                        conflict = True
                        break
                if conflict:
                    continue

            ## not being used. a bit less dangerous to clean-out
            ## keep one full copy out there
            full_copies = [
                site for (site, (there, fract)) in our_presence.items() if there
            ]
            if keep_one_out:
                if not len(full_copies):
                    print "we do not own a full copy of", dataset, status, wfo.status, ".skip"
                    continue
                stay_there = random.choice(full_copies)  #at a place own by ops
                print "Where we keep a full copy", stay_there
                to_be_cleaned.remove(stay_there)
                our_copies[stay_there].append(dataset)
            else:
                print "We do not want to keep a copy of ", dataset, status, wfo.status

            if len(to_be_cleaned):
                print "Where we can clean"
                print to_be_cleaned
                for site in to_be_cleaned:
                    # size scaled by the fraction present at that site
                    sites_and_datasets[site].append(
                        (dataset, total_size * our_presence[site][1] / 100.,
                         status))
                goes[dataset] = True
            else:
                print "no cleaning to be done"
                goes[dataset] = True

        print wfo.name, "scrutinized"
        if all(goes.values()):
            print "\t", wfo.name, "can toggle -out"

        def ask():
            # NOTE(review): declares `last_answer` global while the outer binding
            # is a local of outcleanor — kept as-is (token-identical).
            global last_answer
            last_answer = raw_input('go on ?')
            return last_answer

        if options.auto or ask() in ['y', '']:
            if all(goes.values()):
                wfo.status = wfo.status + '-out'
                wf_cleaned[wfo.name] = wfo.status
            continue
        elif last_answer in ['q', 'n']:
            break
        else:
            return

    if options.auto:
        pass
    elif last_answer in ['q']:
        return

    print "Potential cleanups"
    for (site, items) in sites_and_datasets.items():
        cleanup = sum([size for (_, size, _) in items])
        print "\n\t potential cleanup of", "%8.4f" % cleanup, "GB at ", site
        print "\n".join([ds + " " + st for ds, _, st in items])
        datasets = [ds for ds, _, st in items]

    print "Copies and bits we are going to delete"
    print json.dumps(sites_and_datasets, indent=2)
    print "Copies we are keeping"
    print json.dumps(our_copies, indent=2)
    print "Workflows cleaned for output"
    print json.dumps(wf_cleaned, indent=2)

    # snapshot the plan to timestamped json files
    stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    open('outcleaning_%s.json' % stamp,
         'w').write(json.dumps(sites_and_datasets, indent=2))
    open('keepcopies_%s.json' % stamp,
         'w').write(json.dumps(our_copies, indent=2))
    open('wfcleanout_%s.json' % stamp,
         'w').write(json.dumps(wf_cleaned, indent=2))

    if (not options.test) and (options.auto or raw_input(
            "Satisfied ? (y will trigger status change and deletion requests)")
                               in ['y']):
        for (site, items) in sites_and_datasets.items():
            datasets = [ds for ds, _, st in items]
            print "making deletion to", site
            result = makeDeleteRequest(
                url, site, datasets,
                "Cleanup output after production. DataOps will take care of approving it."
            )
            print result
            ## approve it right away ?
            if 'MSS' in site: continue
            if 'Export' in site: continue
            if 'Buffer' in site: continue
            for did in [
                    item['id'] for item in result['phedex']['request_created']
            ]:
                print "auto-approve disabled, but ready"
                #approveSubscription(url, did, nodes = [site], comments = 'Auto-approving production cleaning deletion')
                pass
        session.commit()
    else:
        print "Not making the deletion and changing statuses"
def recoveror(url,specific,options=None): if userLock('recoveror'): return up = componentInfo() CI = campaignInfo() UC = unifiedConfiguration() def make_int_keys( d ): for code in d: d[int(code)] = d.pop(code) error_codes_to_recover = UC.get('error_codes_to_recover') error_codes_to_block = UC.get('error_codes_to_block') error_codes_to_notify = UC.get('error_codes_to_notify') make_int_keys( error_codes_to_recover ) make_int_keys( error_codes_to_block ) make_int_keys( error_codes_to_notify ) wfs = session.query(Workflow).filter(Workflow.status == 'assistance-recovery').all() if specific: wfs.extend( session.query(Workflow).filter(Workflow.status == 'assistance-manual').all() ) for wfo in wfs: if specific and not specific in wfo.name:continue if not specific and 'manual' in wfo.status: continue wfi = workflowInfo(url, wfo.name, deprecated=True) ## need deprecated info for mergedlfnbase ## need a way to verify that this is the first round of ACDC, since the second round will have to be on the ACDC themselves all_errors = None try: wfi.getSummary() all_errors = wfi.summary['errors'] except: pass print '-'*100 print "Looking at",wfo.name,"for recovery options" if not len(all_errors): print "\tno error for",wfo.name task_to_recover = defaultdict(list) message_to_ops = "" message_to_user = "" recover=True if 'LheInputFilese' in wfi.request and wfi.request['LheInputFiles']: ## we do not try to recover pLHE recover = False if 'Campaign' in wfi.request: c = wfi.request['Campaign'] if c in CI.campaigns and 'recover' in CI.campaigns[c]: recover=CI.campaigns[c]['recover'] for task,errors in all_errors.items(): print "\tTask",task ## collect all error codes and #jobs regardless of step at which it occured all_codes = [] for name, codes in errors.items(): if type(codes)==int: continue all_codes.extend( [(int(code),info['jobs'],name,list(set([e['type'] for e in info['errors']])),list(set([e['details'] for e in info['errors']])) ) for code,info in codes.items()] ) 
all_codes.sort(key=lambda i:i[1], reverse=True) sum_failed = sum([l[1] for l in all_codes]) for errorCode,njobs,name,types,details in all_codes: rate = 100*njobs/float(sum_failed) #print ("\t\t %10d (%6s%%) failures with error code %10d (%"+str(max_legend)+"s) at stage %s")%(njobs, "%4.2f"%rate, errorCode, legend, name) print ("\t\t %10d (%6s%%) failures with error code %10d (%30s) at stage %s")%(njobs, "%4.2f"%rate, errorCode, ','.join(types), name) added_in_recover=False #if options.go: # force the recovery of any task with error ? if errorCode in error_codes_to_recover: ## the error code is registered for case in error_codes_to_recover[errorCode]: match = case['details'] matched= (match==None) if not matched: matched=False for detail in details: if match in detail: print "[recover] Could find keyword",match,"in" print 50*"#" print detail print 50*"#" matched = True break if matched and rate > case['rate']: print "\t\t => we should be able to recover that", case['legend'] task_to_recover[task].append( (code,case) ) added_in_recover=True message_to_user = "" else: print "\t\t recoverable but not frequent enough, needs",case['rate'] if errorCode in error_codes_to_block: for case in error_codes_to_block[errorCode]: match = case['details'] matched= (match==None) if not matched: matched=False for detail in details: if match in detail: print "[block] Could find keyword",match,"in" print 50*"#" print detail print 50*"#" matched = True break if matched and rate > case['rate']: print "\t\t => that error means no ACDC on that workflow", case['legend'] if not options.go: message_to_ops += "%s has an error %s blocking an ACDC.\n%s\n "%( wfo.name, errorCode, '#'*50 ) recover = False added_in_recover=False if errorCode in error_codes_to_notify and not added_in_recover: print "\t\t => we should notify people on this" message_to_user += "%s has an error %s in processing.\n%s\n" %( wfo.name, errorCode, '#'*50 ) if message_to_user: print wfo.name,"to be notified to 
user(DUMMY)",message_to_user if message_to_ops: sendEmail( "notification in recoveror" , message_to_ops, destination=['*****@*****.**','*****@*****.**']) if task_to_recover and recover: print "Initiating recovery" print ', '.join(task_to_recover.keys()),"to be recovered" recovering=set() for task in task_to_recover: print "Will be making a recovery workflow for",task ## from here you can fetch known solutions, to known error codes actions = list(set([case['solution'] for code,case in task_to_recover[task] ])) acdc = singleRecovery(url, task, wfi.request , actions, do = options.do) if not acdc: if options.do: if recovering: print wfo.name,"has been partially ACDCed. Needs manual attention" sendEmail( "failed ACDC partial recovery","%s has had %s/%s recoveries %s only"%( wfo.name, len(recovering), len(task_to_recover), list(recovering)), destination=['*****@*****.**','*****@*****.**']) continue else: print wfo.name,"failed recovery once" break else: print "no action to take further" sendEmail("an ACDC that can be done automatically","please check https://cmst2.web.cern.ch/cmst2/unified/logs/recoveror/last.log for details", destination=['*****@*****.**','*****@*****.**']) continue ## and assign it ? 
team = wfi.request['Teams'][0] parameters={ 'SiteWhitelist' : wfi.request['SiteWhitelist'], 'AcquisitionEra' : wfi.acquisitionEra(), 'ProcessingString' : wfi.processingString(), 'MergedLFNBase' : wfi.deprecated_request['MergedLFNBase'], 'ProcessingVersion' : wfi.request['ProcessingVersion'], } if options.ass: print "really doing the assignment of the ACDC",acdc parameters['execute']=True sendEmail("an ACDC was done and WAS assigned", "%s was assigned, please check https://cmst2.web.cern.ch/cmst2/unified/logs/recoveror/last.log for details"%( acdc ), destination=['*****@*****.**','*****@*****.**']) else: print "no assignment done with this ACDC",acdc sendEmail("an ACDC was done and need to be assigned", "%s needs to be assigned, please check https://cmst2.web.cern.ch/cmst2/unified/logs/recoveror/last.log for details"%( acdc ), destination=['*****@*****.**','*****@*****.**']) result = reqMgrClient.assignWorkflow(url, acdc, team, parameters) recovering.add( acdc ) if recovering: #if all went well, set the status to -recovering current = wfo.status if options.ass: current = current.replace('recovery','recovering') else: current = 'assistance-manual' print wfo.name,"setting the status to",current print ', '.join( recovering ) wfo.status = current session.commit() else: ## this workflow should be handled manually at that point print wfo.name,"needs manual intervention" wfo.status = 'assistance-manual' session.commit()
def transferor(url, specific=None, talk=True, options=None): if userLock(): return mlock = moduleLock() if mlock(): return use_mcm = True up = componentInfo(soft=['mcm', 'wtc', 'jira']) if not up.check(): return use_mcm = up.status['mcm'] if options and options.test: execute = False else: execute = True SI = siteInfo() CI = campaignInfo() #NLI = newLockInfo() #if not NLI.free(): return LI = lockInfo() if not LI.free(): return mcm = McMClient(dev=False) dss = DSS() #allowed_secondary = UC.get('') print "counting all being handled..." being_handled = len( session.query(Workflow).filter(Workflow.status == 'away').all()) being_handled += len( session.query(Workflow).filter( Workflow.status.startswith('stag')).all()) being_transfered = len( session.query(Workflow).filter(Workflow.status == 'staging').all()) #being_handled += len(session.query(Workflow).filter(Workflow.status.startswith('assistance-')).all()) being_handled += len( session.query(Workflow).filter( Workflow.status.startswith('assistance-')).filter( ~Workflow.status.contains('custodial')).all()) max_to_handle = options.maxworkflows max_to_transfer = options.maxstaging allowed_to_handle = max(0, max_to_handle - being_handled) allowed_to_transfer = max(0, max_to_transfer - being_transfered) wf_buffer = 5 if allowed_to_handle <= wf_buffer: ## buffer for having several wf per transfer print "Not allowed to run more than", max_to_handle, "at a time. Currently", being_handled, "and", wf_buffer, "buffer" else: print being_handled, "already being handled", max_to_handle, "max allowed,", allowed_to_handle, "remaining", "and", wf_buffer, "buffer" if allowed_to_transfer <= wf_buffer: print "Not allowed to transfer more than", max_to_transfer, "at a time. Currently", being_transfered, "and", wf_buffer, "buffer" else: print being_transfered, "already being transfered", max_to_transfer, "max allowed,", allowed_to_transfer, "remaining", "and", wf_buffer, "buffer" print "... 
done" all_transfers = defaultdict(list) workflow_dependencies = defaultdict( set) ## list of wf.id per input dataset wfs_and_wfh = [] max_per_round = UC.get('max_per_round').get('transferor', None) print "getting all wf to consider ..." cache = getWorkflows(url, 'assignment-approved', details=True) all_to_include = session.query(Workflow).filter( Workflow.status.startswith('considered')).all() if len(cache) > 2000: max_to_include = max_per_round random.shuffle(cache) ## randomize first by wf name cache = sorted(cache, key=lambda r: r['RequestPriority'], reverse=True) ## order by prio highest = [r['RequestName'] for r in cache[:max_to_include]] all_to_include = [wfo for wfo in all_to_include if wfo.name in highest] print "limiting what to consider to", max_to_include, "because there is too much stuff going on. Got", len( all_to_include) for wfo in all_to_include: print "\t", wfo.name if specific and not specific in wfo.name: continue cache_r = filter(lambda d: d['RequestName'] == wfo.name, cache) if len(cache_r): wfs_and_wfh.append((wfo, workflowInfo(url, wfo.name, spec=False, request=cache_r[0]))) else: wfs_and_wfh.append((wfo, workflowInfo(url, wfo.name, spec=False))) print "... done" transfers_per_sites = defaultdict(int) input_sizes = defaultdict(float) ignored_input_sizes = defaultdict(float) input_cput = {} input_st = {} ## list the size of those in transfer already in_transfer_priority = None min_transfer_priority = None print "getting all wf in staging ..." 
#stucks = json.loads(open('%s/stuck_transfers.json'%monitor_pub_dir).read()) stucks = json.loads(eosRead('%s/stuck_transfers.json' % monitor_pub_dir)) for wfo in session.query(Workflow).filter( Workflow.status == 'staging').all(): wfh = workflowInfo(url, wfo.name, spec=False) #(lheinput,primary,parent,secondary) = wfh.getIO() #sites_allowed = getSiteWhiteList( (lheinput,primary,parent,secondary) ) print wfo.name, "staging" (lheinput, primary, parent, secondary, sites_allowed) = wfh.getSiteWhiteList() for site in sites_allowed: ## we should get the actual transfer destination instead of the full white list transfers_per_sites[site] += 1 #input_cput[wfo.name] = wfh.getComputingTime() #input_st[wfo.name] = wfh.getSystemTime() blocks = wfh.getBlocks() for prim in primary: ds_s = dss.get(prim, blocks=blocks) if prim in stucks: wfh.sendLog( 'transferor', "%s appears stuck, so not counting it %s [GB]" % (prim, ds_s)) ignored_input_sizes[prim] = max(ds_s, ignored_input_sizes[prim]) else: input_sizes[prim] = max(ds_s, input_sizes[prim]) wfh.sendLog('transferor', "%s needs %s [GB]" % (wfo.name, ds_s)) if in_transfer_priority == None: in_transfer_priority = int(wfh.request['RequestPriority']) else: in_transfer_priority = max(in_transfer_priority, int(wfh.request['RequestPriority'])) if min_transfer_priority == None: min_transfer_priority = int(wfh.request['RequestPriority']) else: min_transfer_priority = min(min_transfer_priority, int(wfh.request['RequestPriority'])) try: print "Ignored input sizes" ignored_values = list(ignored_input_sizes.items()) ignored_values.sort(key=lambda i: i[1]) print "\n".join(map(str, ignored_values)) print "Considered input sizes" considered_values = list(input_sizes.items()) considered_values.sort(key=lambda i: i[1]) print "\n".join(map(str, considered_values)) except Exception as e: print "trying to print the summary of input size" print str(e) print "... 
done" print "Max priority in transfer already", in_transfer_priority print "Min priority in transfer already", min_transfer_priority print "transfers per sites" print json.dumps(transfers_per_sites, indent=2) in_transfer_already = sum(input_sizes.values()) cput_in_transfer_already = sum(input_cput.values()) st_in_transfer_already = sum(input_st.values()) ## list the size of all inputs primary_input_per_workflow_gb = defaultdict(float) print "getting all input sizes ..." input_blocks = {} for (wfo, wfh) in wfs_and_wfh: (_, primary, _, _) = wfh.getIO() blocks = wfh.getBlocks() input_blocks[wfo.name] = blocks for prim in primary: ## do not count it if it appears stalled ! prim_size = dss.get(prim, blocks=blocks) input_sizes[prim] = max(prim_size, input_sizes[prim]) primary_input_per_workflow_gb[wfo.name] += prim_size print "... done" # shuffle first by name random.shuffle(wfs_and_wfh) # Sort smallest transfers first; allows us to transfer as many as possible workflows. def prio_and_size(i, j): if int(i[1].request['RequestPriority']) == int( j[1].request['RequestPriority']): return cmp(int(primary_input_per_workflow_gb.get(j[0].name, 0)), int(primary_input_per_workflow_gb.get(i[0].name, 0))) else: return cmp(int(i[1].request['RequestPriority']), int(j[1].request['RequestPriority'])) #wfs_and_wfh.sort(cmp = prio_and_size, reverse=True) #wfs_and_wfh.sort(cmp = lambda i,j : cmp(int(primary_input_per_workflow_gb.get(i[0].name, 0)), int(primary_input_per_workflow_gb.get(j[0].name, 0)) )) #sort by priority higher first wfs_and_wfh.sort(cmp=lambda i, j: cmp(int(i[1].request[ 'RequestPriority']), int(j[1].request['RequestPriority'])), reverse=True) if min_transfer_priority == None or in_transfer_priority == None: print "nothing is lining up for transfer" sendLog( "transferor", "No request in staging, using first request to set priority limit") if len(wfs_and_wfh): min_transfer_priority = wfs_and_wfh[0][1].request[ 'RequestPriority'] in_transfer_priority = 
wfs_and_wfh[0][1].request['RequestPriority'] else: return cput_grand_total = sum(input_cput.values()) cput_to_transfer = cput_grand_total - cput_in_transfer_already st_grand_total = sum(input_st.values()) st_to_transfer = st_grand_total - st_in_transfer_already print "%15.4f [CPU h] worth already in transfer" % cput_in_transfer_already print "%15.4f [CPU h] worth is the current requested transfer load" % cput_to_transfer print "%15.4f [h] worth of absolute system time in transfer" % ( cput_in_transfer_already / SI.availableSlots()) print "%15.4f [h] worth of absolute system time is the current requested transfer load" % ( cput_to_transfer / SI.availableSlots()) print "%15.4f [h] worth of theoritical system time in transfer" % ( st_in_transfer_already) print "%15.4f [h] worth of theoritical system time is the current requested transfer load" % ( st_to_transfer) grand_total = sum(input_sizes.values()) to_transfer = grand_total - in_transfer_already grand_transfer_limit = options.maxtransfer #grand_transfer_limit = SI.total_disk()*0.25*1024## half of the free sapce in TB->GB transfer_limit = grand_transfer_limit - in_transfer_already print "%15.4f GB already being transfered" % in_transfer_already print "%15.4f GB is the current requested transfer load" % to_transfer print "%15.4f GB is the global transfer limit" % grand_transfer_limit print "%15.4f GB is the available limit" % transfer_limit max_staging_per_site = options.maxstagingpersite # the max priority value per dataset. 
max_priority = defaultdict(int) needs_transfer = 0 ## so that we can count'em passing_along = 0 transfer_sizes = defaultdict(float) went_over_budget = False destination_cache = {} no_goes = set() if max_per_round and not spec: wfs_and_wfh = wfs_and_wfh[:max_per_round] for (wfo, wfh) in wfs_and_wfh: print wfo.name, "to be transfered with priority", wfh.request[ 'RequestPriority'] if wfh.request['RequestStatus'] != 'assignment-approved': if wfh.request['RequestStatus'] in [ 'aborted', 'rejected', 'rejected-archived', 'aborted-archived' ]: if wfh.isRelval(): wfo.status = 'forget' else: wfo.status = 'trouble' ## so that we look or a replacement else: wfo.status = 'away' wfh.sendLog( 'transferor', '%s in status %s, setting %s' % (wfo.name, wfh.request['RequestStatus'], wfo.status)) continue (lheinput, primary, parent, secondary, sites_allowed) = wfh.getSiteWhiteList() blocks = input_blocks.get(wfo.name, wfh.getBlocks()) if blocks: print "Reading only", len(blocks), "blocks in input" this_load = sum([dss.get(prim, blocks=blocks) for prim in primary]) no_budget = False if (this_load and (sum(transfer_sizes.values()) + this_load > transfer_limit or went_over_budget)): if went_over_budget: wfh.sendLog('transferor', "Transfer has gone over bubget.") else: wfh.sendLog('transferor', "Transfer will go over bubget.") wfh.sendLog( 'transferor', "%15.4f GB this load, %15.4f GB already this round, %15.4f GB is the available limit" % (this_load, sum(transfer_sizes.values()), transfer_limit)) #if sum(transfer_sizes.values()) > transfer_limit: went_over_budget = True if in_transfer_priority != None and min_transfer_priority != None: if int( wfh.request['RequestPriority'] ) >= in_transfer_priority and min_transfer_priority != in_transfer_priority: wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over budget" % (wfh.request['RequestPriority'], in_transfer_priority)) else: if not options.go: wfh.sendLog( 'transferor', "%s minimum priority %s < %s : stop" % 
(min_transfer_priority, wfh.request['RequestPriority'], in_transfer_priority)) no_budget = True ## throtlle by campaign go no_go = False if not wfh.go(log=True) and not options.go: no_go = True no_goes.add(wfo.name) allowed_secondary = {} overide_parameters = {} check_secondary = (not wfh.isRelval()) output_tiers = list( set([o.split('/')[-1] for o in wfh.request['OutputDatasets']])) for campaign in wfh.getCampaigns(): if campaign in CI.campaigns: overide_parameters.update(CI.campaigns[campaign]) if campaign in CI.campaigns and 'secondaries' in CI.campaigns[ campaign]: if CI.campaigns[campaign]['secondaries']: allowed_secondary.update( CI.campaigns[campaign]['secondaries']) check_secondary = True if campaign in CI.campaigns and 'banned_tier' in CI.campaigns[ campaign]: banned_tier = list( set(CI.campaigns[campaign]['banned_tier']) & set(output_tiers)) if banned_tier: no_go = True wfh.sendLog( 'transferor', 'These data tiers %s are not allowed' % (','.join(banned_tier))) sendLog('transferor', 'These data tiers %s are not allowed in %s' % (','.join(banned_tier), wfo.name), level='critical') if secondary and check_secondary: if (set(secondary) & set(allowed_secondary.keys()) != set(secondary)): msg = '%s is not an allowed secondary' % ( ', '.join(set(secondary) - set(allowed_secondary.keys()))) wfh.sendLog('transferor', msg) critical_msg = msg + '\nWorkflow URL: https://dmytro.web.cern.ch/dmytro/cmsprodmon/workflows.php?prep_id=task_{}'.format( wfh.getPrepIDs()[0]) sendLog('transferor', critical_msg, level='critical') if not options.go: no_go = True for sec in secondary: if sec in allowed_secondary: overide_parameters.update(allowed_secondary[sec]) if 'SiteWhitelist' in overide_parameters: sites_allowed = list( set(sites_allowed) & set(overide_parameters['SiteWhitelist'])) wfh.sendLog( 'transferor', 'Intersecting with the overriding whitelist parameters, allowed sites become {}' .format(sites_allowed)) if no_go: continue if passing_along >= allowed_to_handle: #if 
int(wfh.request['RequestPriority']) >= in_transfer_priority and min_transfer_priority!=in_transfer_priority: if in_transfer_priority != None and min_transfer_priority != None: if int(wfh.request['RequestPriority'] ) >= in_transfer_priority and int( wfh.request['RequestPriority'] ) != min_transfer_priority: ## higher priority, and not only this priority being transfered wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over %s" % (wfh.request['RequestPriority'], in_transfer_priority, max_to_handle)) else: wfh.sendLog( 'transferor', " Not allowed to pass more than %s at a time. Currently %s handled, and adding %s" % (max_to_handle, being_handled, passing_along)) if not options.go: ## should not allow to jump that fence break if this_load and needs_transfer >= allowed_to_transfer: if in_transfer_priority != None and min_transfer_priority != None: if int(wfh.request['RequestPriority'] ) >= in_transfer_priority and int( wfh.request['RequestPriority'] ) != min_transfer_priority: ## higher priority, and not only this priority being transfered wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over %s" % (wfh.request['RequestPriority'], in_transfer_priority, max_to_transfer)) else: wfh.sendLog( 'transferor', "Not allowed to transfer more than %s at a time. 
Currently %s transfering, and adding %s" % (max_to_transfer, being_transfered, needs_transfer)) if not options.go: no_budget = True if no_budget: continue # break ## try this for a while to make things faster ## the site white list considers site, campaign, memory and core information if options and options.tosites: sites_allowed = options.tosites.split(',') for dataset in list(primary) + list(parent) + list(secondary): LI.lock(dataset, reason='staging') if not sites_allowed: wfh.sendLog('transferor', "not possible site to run at") #sendEmail("no possible sites","%s has no possible sites to run at"%( wfo.name )) sendLog('transferor', "%s has no possible sites to run at" % (wfo.name), level='critical') continue can_go = True staging = False allowed = True primary_destinations = set() if primary: copies_needed_from_CPUh, CPUh = wfh.getNCopies() if talk: print wfo.name, 'reads', ', '.join(primary), 'in primary' ## chope the primary dataset for prim in primary: ## keep track of what needs what workflow_dependencies[prim].add(wfo.id) max_priority[prim] = max(max_priority[prim], int(wfh.request['RequestPriority'])) wfh.sendLog( 'transferor', "Would make %s from cpu requirement %s" % (copies_needed_from_CPUh, CPUh)) copies_needed = copies_needed_from_CPUh if 'Campaign' in wfh.request and wfh.request[ 'Campaign'] in CI.campaigns and 'maxcopies' in CI.campaigns[ wfh.request['Campaign']]: copies_needed_from_campaign = CI.campaigns[ wfh.request['Campaign']]['maxcopies'] copies_needed = min(copies_needed_from_campaign, copies_needed) wfh.sendLog( 'transferor', "Maxed to %s by campaign configuration %s" % (copies_needed, wfh.request['Campaign'])) if blocks: print "limiting to blocks", "\n".join(sorted(blocks)) ### new ways of making the whole thing destinations, all_block_names = getDatasetDestinations( url, prim, within_sites=[SI.CE_to_SE(site) for site in sites_allowed], only_blocks=blocks) print json.dumps(destinations, indent=2) ## get where the dataset is in full and 
completed prim_location = [ site for (site, info) in destinations.items() if info['completion'] == 100 and info['data_fraction'] == 1 ] ## the rest is places it is going to be #prim_destination = [site for site in destinations.keys() if not site in prim_location] prim_destination = [ site for (site, info) in destinations.items() if info['data_fraction'] == 1 and info['completion'] != 100 ] ## veto the site with no current disk space, for things that are not relval prim_destination = [ site for site in prim_destination if (SI.disk[site] or wfh.isRelval()) ] if len(prim_location) >= copies_needed: wfh.sendLog( 'transferor', "The input is all fully in place at %s sites %s" % (len(prim_location), sorted(prim_location))) continue copies_needed = max(0, copies_needed - len(prim_location)) wfh.sendLog( 'transferor', "Counting existing copies ; now need %s" % copies_needed) copies_being_made = [ sum([ info['blocks'].keys().count(block) for site, info in destinations.items() if site in prim_destination ]) for block in all_block_names ] latching_on_transfers = set() [ latching_on_transfers.update(info['blocks'].values()) for site, info in destinations.items() if site in prim_destination ] latching_on_transfers = list(latching_on_transfers) #print latching_on_transfers ## figure out where all this is going to go prim_to_distribute = [ site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location ] prim_to_distribute = [ site for site in prim_to_distribute if not SI.CE_to_SE(site) in prim_destination ] ## take out the ones that cannot receive transfers potential_destinations = len(prim_to_distribute) #prim_to_distribute = [site for site in prim_to_distribute if not SI.CE_to_SE(site) in SI.sites_veto_transfer] prim_to_distribute = [ site for site in prim_to_distribute if (SI.disk[SI.CE_to_SE(site)] or wfh.isRelval()) ] ## do we want to restrict transfers if the amount of site in vetoe are too large ? 
wfh.sendLog( 'transferor', "Could be going to: %s" % sorted(prim_to_distribute)) if not prim_to_distribute or any([ transfers_per_sites[site] < max_staging_per_site for site in prim_to_distribute ]): ## means there is openings let me go print "There are transfer slots available:", [ (site, transfers_per_sites[site]) for site in prim_to_distribute ] else: if int( wfh.request['RequestPriority'] ) >= in_transfer_priority and min_transfer_priority != in_transfer_priority: wfh.sendLog( 'transferor', "Higher priority sample %s >= %s go-on over transfer slots available" % (wfh.request['RequestPriority'], in_transfer_priority)) else: wfh.sendLog( 'transferor', "Not allowed to transfer more than %s per site at a time. Going overboard for %s" % (max_staging_per_site, sorted([ site for site in prim_to_distribute if transfers_per_sites[site] >= max_staging_per_site ]))) if not options.go: allowed = False break for latching in latching_on_transfers: existings = session.query(TransferImp).filter( TransferImp.phedexid == int(latching)).filter( TransferImp.workflow_id == wfo.id).all() if not existings: tri = TransferImp(phedexid=int(latching), workflow=wfo) print "adding", wfo.id, "with phedexid", latching session.add(tri) else: for existing in existings: existing.active = True session.flush() can_go = False transfer_sizes[prim] = max(this_load, transfer_sizes[prim]) staging = True # reduce the number of copies required by the on-going full transfer : how do we bootstrap on waiting for them ?? 
#copies_needed = max(0,copies_needed - len(prim_destination)) copies_needed = max(0, copies_needed - min(copies_being_made)) wfh.sendLog( 'transferor', "Counting the copies being made ; then need %s" % copies_needed) if copies_needed == 0: wfh.sendLog( 'transferor', "The input is either fully in place or getting in full somewhere with %s" % latching_on_transfers) can_go = True continue elif len(prim_to_distribute) == 0: wfh.sendLog( 'transferor', "We are going to need extra copies of %s, but no destinations seems available" % (prim)) sendLog( 'transferor', "We are going to need extra copies of %s, but no destinations seems available" % (prim), level='critical') print json.dumps(prim_to_distribute, indent=2) print json.dumps(prim_location, indent=2) print json.dumps(prim_destination, indent=2) prim_to_distribute = [ site for site in sites_allowed if not SI.CE_to_SE(site) in prim_location ] #prim_to_distribute = [site for site in prim_to_distribute if not SI.CE_to_SE(site) in SI.sites_veto_transfer ] prim_to_distribute = [ site for site in prim_to_distribute if (SI.disk[SI.CE_to_SE(site)] or wfh.isRelval()) ] print "changed to" print json.dumps(prim_to_distribute, indent=2) if len( prim_to_distribute ) > 0: ## maybe that a parameter we can play with to limit the if not options or options.chop: ### hard include the tape disk andpoint ? 
#tapes = [site for site in getDatasetPresence( url, prim, vetos=['T0','T2','T3','Disk']) if site.endswith('MSS')] chops, sizes = getDatasetChops( prim, chop_threshold=options.chopsize, only_blocks=blocks) spreading = distributeToSites(chops, prim_to_distribute, n_copies=copies_needed, weights=SI.cpu_pledges, sizes=sizes) ## prune the blocks/destination that are already in the making, so that subscription don't overlap for site in spreading: for block in list(spreading[site]): if site in destinations and block in destinations[ site]['blocks'].keys(): ## prune it spreading[site].remove(block) transfer_sizes[prim] = sum(sizes) if not spreading: sendLog( 'transferor', 'cannot send %s to any site, it cannot fit anywhere' % prim, level='critical') wfh.sendLog( 'transferor', "cannot send to any site. %s cannot seem to fit anywhere" % (prim)) staging = False can_go = False else: spreading = {} for site in prim_to_distribute: if blocks: spreading[site] = blocks else: spreading[site] = [prim] transfer_sizes[prim] = max(this_load, transfer_sizes[prim]) can_go = False wfh.sendLog( 'transferor', "selected CE destinations %s" % (sorted(spreading.keys()))) for (site, items) in spreading.items(): all_transfers[site].extend(items) transfers_per_sites[site] += 1 primary_destinations.add(site) else: can_go = False allowed = False if not allowed: wfh.sendLog('transferor', "Not allowed to move on with") continue if secondary: override_sec_destination = [] if 'SecondaryLocation' in CI.campaigns[wfh.request['Campaign']]: override_sec_destination = CI.campaigns[ wfh.request['Campaign']]['SecondaryLocation'] if 'SecondaryLocation' in overide_parameters: override_sec_destination = overide_parameters[ 'SecondaryLocation'] print wfo.name, 'reads', ', '.join(secondary), 'in secondary' for sec in secondary: workflow_dependencies[sec].add(wfo.id) if True: ## new style, failing on minbias if not sec in destination_cache: ## this is barbbaric, and does not show the correct picture on workflow by 
workflow with different whitelist destination_cache[sec], _ = getDatasetDestinations( url, sec) ## NO SITE WHITE LIST ADDED #destination_cache[sec],_ = getDatasetDestinations(url, sec, within_sites = [SI.CE_to_SE(site) for site in sites_allowed]) ## limit to the site whitelist NOW se_allowed = set( [SI.CE_to_SE(site) for site in sites_allowed]) destinations = dict([ (k, v) for (k, v) in destination_cache[sec].items() if k in se_allowed ]) ## truncate location/destination to those making up for >90% of the dataset bad_destinations = [ destinations.pop(site) for (site, info) in destinations.items() if info['data_fraction'] < 0.9 ] print sec, json.dumps(destinations, indent=2) sec_location = [ site for (site, info) in destinations.items() if info['completion'] >= 95 ] sec_destination = [ site for site in destinations.keys() if not site in sec_location ] ## this is in SE else: ## old style presence = getDatasetPresence(url, sec) sec_location = [ site for site, pres in presence.items() if pres[1] > 90. 
] ## more than 90% of the minbias at sites subscriptions = listSubscriptions(url, sec) sec_destination = [site for site in subscriptions] ## how to make unified understand that it has to wait for the secondary if the sec_destination and #sec_to_distribute = [site for site in sites_allowed if not any([osite.startswith(site) for osite in sec_location])] sec_to_distribute = [ site for site in sites_allowed if not SI.CE_to_SE(site) in sec_location ] #sec_to_distribute = [site for site in sec_to_distribute if not any([osite.startswith(site) for osite in sec_destination])] sec_to_distribute = [ site for site in sec_to_distribute if not SI.CE_to_SE(site) in sec_destination ] presitespace_sec_to_distribute = copy.deepcopy( sec_to_distribute) #sec_to_distribute = [site for site in sec_to_distribute if not any([osite.startswith(site) for osite in SI.sites_veto_transfer])] #sec_to_distribute = [site for site in sec_to_distribute if not SI.CE_to_SE(site) in SI.sites_veto_transfer] sec_to_distribute = [ site for site in sec_to_distribute if (SI.disk[SI.CE_to_SE(site)] or wfh.isRelval()) ] ## at this point you have a problem if len(sec_to_distribute) == 0 and len( presitespace_sec_to_distribute): sendLog( 'transferor', '%s is getting no possible destinations because of lack of space. To be decided what to do in general' % (sec), level='critical') if override_sec_destination: ## intersect with where we want the PU to be not_needed_anymore = list( set(sec_to_distribute) - set(override_sec_destination)) #sendEmail("secondary superfluous","the dataset %s could be removed from %s"%( sec, not_needed_anymore )) sendLog( 'transferor', "the dataset %s could be removed from %s" % (sec, not_needed_anymore)) sec_to_distribute = list( set(sec_to_distribute) & set(override_sec_destination)) if len(sec_to_distribute) > 0: print "secondary could go to", sorted(sec_to_distribute) sec_size = dss.get(sec) for site in sec_to_distribute: site_se = SI.CE_to_SE(site) if (SI.disk[site_se] * 1024.) 
> sec_size or wfh.isRelval(): wfh.sendLog('transferor', 'Sending %s to %s' % (sec, site)) all_transfers[site].append(sec) can_go = False else: print "could not send the secondary input to", site_se, "because it is too big for the available disk", SI.disk[ site_se] * 1024, "GB need", sec_size if primary_destinations and site in primary_destinations: #sendEmail('secondary input too big','%s is too big (%s) for %s (%s)'%( sec, sec_size, site_se, SI.disk[site_se]*1024)) sendLog( 'transferor', '%s is too big (%s) for %s (%s). %s will not be able to run there.' % (sec, sec_size, site_se, SI.disk[site_se] * 1024, wfo.name), level='critical') wfh.sendLog( 'transferor', '%s is too big (%s) for %s (%s). will not be able to run there.' % (sec, sec_size, site_se, SI.disk[site_se] * 1024)) else: ## this is bas overall print "the secondary input does not have to be send to site" ## is that possible to do something more if can_go: ## no explicit transfer required this time if staging: ## but using existing ones wfh.sendLog( 'transferor', "latches on existing transfers, and nothing else, settin staging" ) wfo.status = 'staging' needs_transfer += 1 else: wfh.sendLog( 'transferor', "should just be assigned now to %s" % sorted(sites_allowed)) wfo.status = 'staged' passing_along += 1 wfh.sendLog('transferor', "setting %s status to %s" % (wfo.name, wfo.status)) #session.commit() continue else: ## there is an explicit transfer required if staging: ## and also using an existing one wfh.sendLog('transferor', "latches on existing transfers") if not options.test: wfo.status = 'staging' wfh.sendLog( 'transferor', "setting %s status to %s" % (wfo.name, wfo.status)) #session.commit() wfh.sendLog('transferor', "needs a transfer") needs_transfer += 1 passing_along += 1 if no_goes: #sendEmail("no go for managing","No go for \n"+"\n".join( no_goes )) sendLog('transferor', "No go for \n" + "\n".join(sorted(no_goes)), level='critical') print "accumulated transfers" print json.dumps(all_transfers, 
indent=2) fake_id = -1 wf_id_in_prestaging = set() for (site, items_to_transfer) in all_transfers.iteritems(): items_to_transfer = list(set(items_to_transfer)) ## convert to storage element site_se = SI.CE_to_SE(site) ## site that do not want input datasets #if site in SI.sites_veto_transfer: # print site,"does not want transfers" # continue ## throttle the transfer size to T2s ? we'd be screwed by a noPU sample properly configured. ## massage a bit the items blocks = [it for it in items_to_transfer if '#' in it] block_datasets = list(set([it.split('#')[0] for it in blocks])) datasets = [it for it in items_to_transfer if not '#' in it] details_text = "Making a replica to %s (CE) %s (SE) for" % (site, site_se) #print "\t",len(blocks),"blocks" ## remove blocks if full dataset is send out blocks = [ block for block in blocks if not block.split('#')[0] in datasets ] #print "\t",len(blocks),"needed blocks for",list(set([block.split('#')[0] for block in blocks])) #print "\t",len(datasets),"datasets" #print "\t",datasets details_text += '\n\t%d blocks' % len(blocks) details_text += '\n\t%d needed blocks for %s' % ( len(blocks), sorted(list(set([block.split('#')[0] for block in blocks])))) details_text += '\n\t%d datasets' % len(datasets) details_text += '\n\t%s' % sorted(datasets) items_to_transfer = blocks + datasets if execute: sendLog('transferor', details_text) else: print "Would make a replica to", site, "(CE)", site_se, "(SE) for" print details_text ## operate the transfer if options and options.stop: ## ask to move-on answer = raw_input('Continue with that ?') if not answer.lower() in ['y', 'yes', 'go']: continue transfered_items = defaultdict(set) if execute: priority = 'normal' cds = [ ds for ds in set(datasets + block_datasets) if ds in max_priority ] ## bucketize the transfers by priority of workflows prioritized_items = defaultdict(set) for item in items_to_transfer: d = item.split('#')[0] p = max_priority.get(d, 80000) q = 'normal' if p > 100000: q = 
'reserved' elif p < 70000: q = 'low' prioritized_items[q].add(item) for priority, items in prioritized_items.items(): result = makeReplicaRequest(url, site_se, list(items), 'prestaging', priority=priority, approve=True) if result: these_transfers = [ o['id'] for o in result['phedex']['request_created'] ] #phedexids.extend( these_transfers ) for ph in these_transfers: transfered_items[ph].update(items) else: sendLog( 'transferor', 'Could not make a replica request for items %s to site %s' % (items, site_se), level='critical') #result = makeReplicaRequest(url, site_se, items_to_transfer, 'prestaging', priority=priority, approve=True) #phedexids = [o['id'] for o in result['phedex']['request_created']]: #else: # #result= {'phedex':{'request_created' : []}} # phedexids = [] # fake_id-=1 if not transfered_items: sendLog( 'transferor', 'Could not make a replica request for items %s to site %s' % (items_to_transfer, site), level='critical') continue for phedexid, items in transfered_items.items(): print phedexid, "transfer created" for transfering in list( set(map(lambda it: it.split('#')[0], items))): for wfid in workflow_dependencies[transfering]: new_transfer = session.query(TransferImp).filter( TransferImp.phedexid == int(phedexid)).filter( TransferImp.workflow_id == wfid).first() if not new_transfer: new_transfer = TransferImp( phedexid=phedexid, workflow=session.query(Workflow).get(wfid)) session.add(new_transfer) else: new_transfer.active = True wf_id_in_prestaging.add(wfid) #session.commit() for wfid in wf_id_in_prestaging: tr_wf = session.query(Workflow).get(wfid) if tr_wf and tr_wf.status != 'staging': if execute: tr_wf.status = 'staging' if talk: print "setting", tr_wf.name, "to staging" #session.commit() ## one big session commit at the end that everything went fine session.commit()
def assignor(url ,specific = None, talk=True, options=None):
    """Assign 'considered'/'staging'/'staged' workflows to sites through ReqMgr.

    url      : ReqMgr base url handed to all helper clients.
    specific : comma-separated name fragments; when given, only matching
               workflows are processed.
    talk     : when True, dump the primary-dataset presence maps to stdout.
    options  : parsed command-line options; this code reads .go, .test,
               .ProcessingVersion, .primary_aaa, .restrict, .team and every
               key listed in reqMgrClient.assignWorkflow.keys.

    Side effects: updates Workflow rows (status / wm_status) in the local DB
    session, sends notification e-mails, maintains cannot_assign.json, locks
    output datasets via newLockInfo, and submits the assignment through
    reqMgrClient.assignWorkflow.
    """
    # bail out if another operator/instance holds the lock or components are down
    if userLock(): return
    if duplicateLock(): return
    #if notRunningBefore( 'stagor' ): return
    if not componentInfo().check(): return
    UC = unifiedConfiguration()
    CI = campaignInfo()
    SI = global_SI
    #LI = lockInfo()
    NLI = newLockInfo()
    n_assigned = 0
    n_stalled = 0

    ## select the workflows to work on: an exact name first, otherwise
    ## everything in considered/staging/staged
    wfos=[]
    if specific:
        wfos = session.query(Workflow).filter(Workflow.name==specific).all()
    if not wfos:
        if specific:
            wfos = session.query(Workflow).filter(Workflow.status=='considered').all()
            wfos.extend( session.query(Workflow).filter(Workflow.status=='staging').all())
        wfos.extend(session.query(Workflow).filter(Workflow.status=='staged').all())

    for wfo in wfos:
        if specific:
            # 'specific' is a comma-separated list of name fragments
            if not any(map(lambda sp: sp in wfo.name,specific.split(','))): continue
            #if not specific in wfo.name: continue
        print "\n\n",wfo.name,"\n\tto be assigned"
        wfh = workflowInfo( url, wfo.name)

        ## check if by configuration we gave it a GO
        if not CI.go( wfh.request['Campaign'] ) and not options.go:
            print "No go for",wfh.request['Campaign']
            n_stalled+=1
            continue

        ## check on current status for by-passed assignment
        if wfh.request['RequestStatus'] !='assignment-approved':
            if not options.test:
                print wfo.name,wfh.request['RequestStatus'],"setting away and skipping"
                ## the module picking up from away will do what is necessary of it
                wfo.wm_status = wfh.request['RequestStatus']
                wfo.status = 'away'
                session.commit()
                continue
            else:
                print wfo.name,wfh.request['RequestStatus']

        ## retrieve from the schema, dbs and reqMgr what should be the next version
        version=wfh.getNextVersion()
        if not version:
            if options and options.ProcessingVersion:
                version = options.ProcessingVersion
            else:
                print "cannot decide on version number"
                n_stalled+=1
                continue

        #(lheinput,primary,parent,secondary) = wfh.getIO()
        #sites_allowed = getSiteWhiteList( (lheinput,primary,parent,secondary) )
        (lheinput,primary,parent,secondary, sites_allowed) = wfh.getSiteWhiteList()
        print "Site white list",sorted(sites_allowed)
        # campaign may ask to skip the secondary (pile-up) location restriction
        override_sec_location = CI.get(wfh.request['Campaign'], 'SecondaryLocation', [])

        # campaign-level site whitelist: intersect with the request whitelist
        c_sites_allowed = CI.get(wfh.request['Campaign'], 'SiteWhitelist' , [])
        if c_sites_allowed:
            print "Would like to use the new whitelist, but will not until things went through a bit"
            sendEmail("using a restricted site white list","for %s"%(c_sites_allowed))
            sites_allowed = list(set(sites_allowed) & set(c_sites_allowed))

        # campaign-level site blacklist: subtract from the whitelist
        c_black_list = CI.get(wfh.request['Campaign'], 'SiteBlacklist', [])
        if c_black_list:
            print "Reducing the whitelist due to black list in campaign configuration"
            print "Removing",c_black_list
            sites_allowed = list(set(sites_allowed) - set(c_black_list))

        blocks = []
        if 'BlockWhitelist' in wfh.request:
            blocks = wfh.request['BlockWhitelist']

        # keep only sites able to serve the requested memory at this core count
        ncores = wfh.request.get('Multicore',1)
        memory_allowed = SI.sitesByMemory( wfh.request['Memory'] , maxCore=ncores)
        if memory_allowed!=None:
            print "sites allowing", wfh.request['Memory'],"MB and",ncores,"core are",memory_allowed
            sites_allowed = list(set(sites_allowed) & set(memory_allowed))

        print "Allowed",sorted(sites_allowed)

        ## restrict the whitelist to sites hosting (>98%) every secondary dataset
        secondary_locations=None
        for sec in list(secondary):
            if override_sec_location:
                print "We don't care where the secondary is"
                print "Cannot pass for now"
                sendEmail("tempting to pass sec location check","but we cannot yet IMO")
                #pass
            presence = getDatasetPresence( url, sec )
            print sec
            print json.dumps(presence, indent=2)
            one_secondary_locations = [site for (site,(there,frac)) in presence.items() if frac>98.]
            #one_secondary_locations = [site for (site,(there,frac)) in presence.items() if there]
            if secondary_locations==None:
                secondary_locations = one_secondary_locations
            else:
                secondary_locations = list(set(secondary_locations) & set(one_secondary_locations))
            ## reduce the site white list to site with secondary only
            #sites_allowed = [site for site in sites_allowed if any([osite.startswith(site) for osite in one_secondary_locations])]
            sites_allowed = [site for site in sites_allowed if SI.CE_to_SE(site) in one_secondary_locations]

        print "From secondary requirement, now Allowed",sorted(sites_allowed)

        initial_sites_allowed = copy.deepcopy( sites_allowed ) ## keep track of this, after secondary input location restriction : that's how you want to operate it
        sites_all_data = copy.deepcopy( sites_allowed )
        sites_with_data = copy.deepcopy( sites_allowed )
        sites_with_any_data = copy.deepcopy( sites_allowed )
        primary_locations = None
        available_fractions = {}
        set_lfn = '/store/mc' ## by default

        ## for each primary input: where is the data, and how complete is it
        for prim in list(primary):
            set_lfn = getLFNbase( prim )
            presence = getDatasetPresence( url, prim , only_blocks=blocks)
            if talk:
                print prim
                print json.dumps(presence, indent=2)
            available_fractions[prim] = getDatasetBlocksFraction(url, prim, sites = [SI.CE_to_SE(site) for site in sites_allowed] , only_blocks = blocks)
            #sites_all_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,(there,frac)) in presence.items() if there]])]
            #sites_with_data = [site for site in sites_with_data if any([osite.startswith(site) for osite in [psite for (psite,frac) in presence.items() if frac[1]>90.]])]
            # sites_all_data: full copy ; sites_with_data: >90% ; sites_with_any_data: any piece
            sites_all_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite,(there,frac)) in presence.items() if there]]
            sites_with_data = [site for site in sites_with_data if SI.CE_to_SE(site) in [psite for (psite,frac) in presence.items() if frac[1]>90.]]
            sites_with_any_data = [site for site in sites_with_any_data if SI.CE_to_SE(site) in presence.keys()]
            print "Holding the data but not allowed",list(set([se_site for se_site in presence.keys() if not SI.SE_to_CE(se_site) in sites_allowed]))
            if primary_locations==None:
                primary_locations = presence.keys()
            else:
                primary_locations = list(set(primary_locations) & set(presence.keys() ))

        sites_with_data = list(set(sites_with_data))
        sites_with_any_data = list(set(sites_with_any_data))

        opportunistic_sites=[]
        down_time = False
        ## opportunistic running where any piece of data is available
        if secondary_locations or primary_locations:
            ## intersection of both any pieces of the primary and good IO
            #opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations) & set(SI.sites_with_goodIO)) - set(sites_allowed))]
            if secondary_locations and primary_locations:
                opportunistic_sites = [SI.SE_to_CE(site) for site in list((set(secondary_locations) & set(primary_locations)) - set([SI.CE_to_SE(site) for site in sites_allowed]))]
            elif primary_locations:
                opportunistic_sites = [SI.SE_to_CE(site) for site in list(set(primary_locations) - set([SI.CE_to_SE(site) for site in sites_allowed]))]
            else:
                opportunistic_sites = []
            print "We could be running at",sorted(opportunistic_sites),"in addition"

        if any([osite in SI.sites_not_ready for osite in opportunistic_sites]):
            print "One of the destination site is in downtime"
            down_time = True
            ## should this be send back to considered ?

        """
        if available_fractions and not all([available>=1. for available in available_fractions.values()]):
            print "The input dataset is not located in full over sites"
            print json.dumps(available_fractions)
            if not options.test and not options.go:
                known = []
                try:
                    known = json.loads(open('cannot_assign.json').read())
                except:
                    pass
                if not wfo.name in known:
                    sendEmail( "cannot be assigned","%s is not full over sites \n %s"%(wfo.name,json.dumps(available_fractions)))
                    known.append( wfo.name )
                    open('cannot_assign.json','w').write(json.dumps( known, indent=2))
                n_stalled+=1
                continue ## skip skip skip
        """

        ## should be 2 but for the time-being let's lower it to get things going
        copies_wanted,cpuh = wfh.getNCopies()

        less_copies_than_requested = UC.get("less_copies_than_requested")
        copies_wanted = max(1,copies_wanted-less_copies_than_requested) # take one out for the efficiency

        ## is the input replicated enough times to assign now ?
        if available_fractions and not all([available>=copies_wanted for available in available_fractions.values()]):
            not_even_once = not all([available>=1. for available in available_fractions.values()])
            print "The input dataset is not available",copies_wanted,"times, only",available_fractions.values()
            if down_time and not options.go:
                wfo.status = 'considered'
                session.commit()
                print "sending back to considered because of site downtime, instead of waiting"
                sendEmail( "cannot be assigned due to downtime","%s is not sufficiently available, due to down time of a site in the whitelist. check the assignor logs. sending back to considered."% wfo.name)
                continue
                #pass
            print json.dumps(available_fractions)
            if (options.go and not_even_once) or not options.go:
                # remember which workflows were already reported, to avoid e-mail spam
                known = []
                try:
                    known = json.loads(open('cannot_assign.json').read())
                except:
                    # NOTE(review): bare except — a corrupt/missing file silently
                    # restarts with an empty list (and re-sends the e-mails)
                    pass
                if not wfo.name in known:
                    sendEmail( "cannot be assigned","%s is not sufficiently available. Probably phedex information lagging behind. \n %s"%(wfo.name,json.dumps(available_fractions)))
                    known.append( wfo.name )
                    open('cannot_assign.json','w').write(json.dumps( known, indent=2))
                n_stalled+=1
                continue

        ## default back to white list to original white list with any data
        print "Allowed",sites_allowed
        if options.primary_aaa:
            # read the primary over AAA: run on the pre-data-restriction whitelist
            sites_allowed = initial_sites_allowed
            options.useSiteListAsLocation = True
        else:
            sites_allowed = sites_with_any_data
            print "Selected for any data",sites_allowed

        if options.restrict:
            print "Allowed",sites_allowed
            sites_allowed = sites_with_any_data
            print "Selected",sites_allowed
        else:
            if set(sites_with_data) != set(sites_allowed):
                ## the data is not everywhere we wanted to run at : enable aaa
                print "Sites with 90% data not matching site white list (block choping!)"
                print "Resorting to AAA reading for",list(set(sites_allowed) - set(sites_with_data)),"?"
                print "Whitelist site with any data",list(set(sites_allowed) - set(sites_with_any_data))
                #options.useSiteListAsLocation = True
                #print "Not commissioned yet"
                #continue
            #print "We could be running at",opportunistic_sites,"in addition"
            ##sites_allowed = list(set(sites_allowed+ opportunistic_sites))

        if not len(sites_allowed):
            print wfo.name,"cannot be assign with no matched sites"
            sendEmail( "cannot be assigned","%s has no whitelist"%(wfo.name))
            n_stalled+=1
            continue

        ## pick the output destination: prefer a T1 when one is in the whitelist
        t1_only = [ce for ce in sites_allowed if ce.startswith('T1')]
        if t1_only:
            # try to pick from T1 only first
            sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in t1_only])]
        else:
            # then pick any otherwise
            sites_out = [SI.pick_dSE([SI.CE_to_SE(ce) for ce in sites_allowed])]
        print "Placing the output on", sites_out

        parameters={
            'SiteWhitelist' : sites_allowed,
            #'CustodialSites' : sites_custodial,
            'NonCustodialSites' : sites_out,
            'AutoApproveSubscriptionSites' : list(set(sites_out)),
            'AcquisitionEra' : wfh.acquisitionEra(),
            'ProcessingString' : wfh.processingString(),
            'MergedLFNBase' : set_lfn,
            'ProcessingVersion' : version,
            }

        ## plain assignment here
        team='production'
        if options and options.team:
            team = options.team
        #if wfh.request['RequestPriority'] >= 100000 and (wfh.request['TimePerEvent']*int(wfh.getRequestNumEvents()))/(8*3600.) < 10000:
        #    team = 'highprio'
        #    sendEmail("sending work with highprio team","%s"% wfo.name, destination=['*****@*****.**'])

        # allocation-based routing to SDSC — effectively disabled:
        # random.random() is in [0,1), so "< -0.5" / "< -1.0" are never true
        if "T2_US_UCSD" in sites_with_data and random.random() < -0.5 and wfh.request['Campaign']=='RunIISpring15DR74' and int(wfh.getRequestNumEvents()) < 600000 and not any([out.endswith('RAW') for out in wfh.request['OutputDatasets']]):
            ## consider SDSC
            parameters['SiteWhitelist'] = ['T2_US_UCSD','T3_US_SDSC']
            parameters['useSiteListAsLocation'] = True
            team = 'allocation-based'
            sendEmail("sending work to SDSC","%s was assigned to SDSC/UCSD"% wfo.name, destination=['*****@*****.**'])
        if wfh.request['Campaign']=='RunIIWinter15GS' and random.random() < -1.0:
            parameters['SiteWhitelist'] = ['T3_US_SDSC']
            team = 'allocation-based'
            sendEmail("sending work to SDSC","%s was assigned to SDSC"% wfo.name, destination=['*****@*****.**'])

        ##parse options entered in command line if any
        if options:
            for key in reqMgrClient.assignWorkflow.keys:
                v=getattr(options,key)
                if v!=None:
                    if type(v)==str and ',' in v:
                        parameters[key] = filter(None,v.split(','))
                    else:
                        parameters[key] = v

        ## pick up campaign specific assignment parameters
        parameters.update( CI.parameters(wfh.request['Campaign']) )

        # only really submit the assignment outside of test mode
        if not options.test:
            parameters['execute'] = True

        ## override splitting when the workflow is too heavy as-is
        split_check = wfh.checkWorkflowSplitting()
        if split_check!=True:
            parameters.update( split_check )
            if 'EventBased' in split_check.values():
                print "Falling back to event splitting."
                sendEmail("Fallback to EventBased","the workflow %s is too heavy to be processed as it is. Fallback to EventBased splitting"%wfo.name)
            elif 'EventsPerJob' in split_check.values():
                print "Modifying the number of job per event"
                sendEmail("Modifying the job per events","the workflow %s is too heavy in number of jobs explosion"%wfo.name)

        # Handle run-dependent MC: aim at ~500 (PU_RD) / ~2000 (PU_RD2) jobs
        pstring = wfh.processingString()
        if 'PU_RD' in pstring:
            numEvents = wfh.getRequestNumEvents()
            eventsPerLumi = [getDatasetEventsPerLumi(prim) for prim in primary]
            eventsPerLumi = sum(eventsPerLumi)/float(len(eventsPerLumi))
            reqJobs = 500
            if 'PU_RD2' in pstring:
                reqJobs = 2000
            eventsPerJob = int(numEvents/(reqJobs*1.4))
            lumisPerJob = int(eventsPerJob/eventsPerLumi)
            if lumisPerJob==0:
                # less than one lumi per job: must fall back to event splitting
                print "There is no go for assigning that request without event splitting"
                sendEmail("issue with event splitting for run-dependent MC","%s needs to be split by event with %s per job"%(wfo.name, eventsPerJob))
                print "need to go down to",eventsPerJob,"events per job"
                parameters['EventsPerJob'] = eventsPerJob
            else:
                spl = wfh.getSplittings()[0]
                # NOTE(review): the second assignment unconditionally overwrites
                # the first, so an 'events_per_job' estimate is discarded whenever
                # 'avg_events_per_job' is absent — looks unintended, confirm.
                eventsPerJobEstimated = spl['events_per_job'] if 'events_per_job' in spl else None
                eventsPerJobEstimated = spl['avg_events_per_job'] if 'avg_events_per_job' in spl else None
                if eventsPerJobEstimated and eventsPerJobEstimated > eventsPerJob:
                    print "need to go down to",lumisPerJob,"in assignment"
                    sendEmail("setting lumi splitting for run-dependent MC","%s was assigned with %s lumis/job"%( wfo.name, lumisPerJob))
                    parameters['LumisPerJob'] = lumisPerJob
                else:
                    print "the regular splitting should work for",pstring
                    sendEmail("leaving splitting untouched for PU_RD*","please check on "+wfo.name)

        result = reqMgrClient.assignWorkflow(url, wfo.name, team, parameters)

        # set status
        if not options.test:
            if result:
                wfo.status = 'away'
                session.commit()
                n_assigned+=1
                try:
                    ## refetch information and lock output
                    new_wfi = workflowInfo( url, wfo.name)
                    (_,prim,_,sec) = new_wfi.getIO()
                    for secure in list(prim)+list(sec)+new_wfi.request['OutputDatasets']:
                        ## lock all outputs flat
                        NLI.lock( secure )
                    #for site in [SI.CE_to_SE(site) for site in sites_allowed]:
                    #    for output in new_wfi.request['OutputDatasets']:
                    #        LI.lock( output, site, 'dataset in production')
                    #    for primary in prim:
                    #        LI.lock( primary, site, 'dataset used in input')
                    #    for secondary in sec:
                    #        LI.lock( secondary, site, 'required for mixing' )
                except Exception as e:
                    # a locking failure must not undo the assignment; report and move on
                    print "fail in locking output"
                    print str(e)
                    sendEmail("failed locking of output",str(e))
            else:
                print "ERROR could not assign",wfo.name
        else:
            pass

    print "Assignment summary:"
    print "Assigned",n_assigned
    print "Stalled",n_stalled
def invalidator(url, invalid_status='INVALID'):
    """Process McM invalidation objects: reject workflows, invalidate datasets.

    Pulls every 'announced' invalidation from McM. For a 'request' object, the
    workflow is rejected in ReqMgr (together with any newer 'Resubmission'
    relatives, i.e. ACDCs sharing its PrepID) and the local Workflow row is set
    to 'forget'. For a 'dataset' object, the dataset status is set to
    `invalid_status` in DBS. Each handled invalidation is acknowledged back to
    McM, and notification texts are accumulated and sent per batch prepid and
    per request prepid at the end.
    """
    use_mcm = True
    up = componentInfo(mcm=use_mcm)
    if not up.check(): return
    mcm = McMClient(dev=False)
    invalids = mcm.getA('invalidations',query='status=announced')
    print len(invalids),"Object to be invalidated"
    # accumulated notification text, keyed by batch prepid / request prepid
    text_to_batch = defaultdict(str)
    text_to_request = defaultdict(str)
    for invalid in invalids:
        acknowledge= False
        pid = invalid['prepid']
        batch_lookup = invalid['prepid']
        text = ""
        if invalid['type'] == 'request':
            wfn = invalid['object']
            print "need to invalidate the workflow",wfn
            wfo = session.query(Workflow).filter(Workflow.name == wfn).first()
            if wfo:
                ## set forget of that thing (although checkor will recover from it)
                print "setting the status of",wfo.status,"to forget"
                wfo.status = 'forget'
                session.commit()
            else:
                ## do not go on like this, do not acknowledge it
                print wfn,"is set to be rejected, but we do not know about it yet"
                #continue
            wfi = workflowInfo(url, wfn)
            success = "not rejected"
            ## to do, we should find a way to reject the workflow and any related acdc
            success = reqMgrClient.invalidateWorkflow(url, wfn, current_status = wfi.request['RequestStatus'])
            ## need to find the whole familly and reject the whole gang
            familly = getWorkflowById( url, wfi.request['PrepID'] , details=True)
            for fwl in familly:
                ## take out all acdc: only relatives submitted after this request
                if fwl['RequestDate'] < wfi.request['RequestDate']:continue
                if fwl['RequestType']!='Resubmission': continue
                print "rejecting",fwl['RequestName']
                success = reqMgrClient.invalidateWorkflow(url, fwl['RequestName'], current_status=fwl['RequestStatus'])
                print success
            wfi.sendLog('invalidator',"rejection is performed from McM invalidations request")
            acknowledge= True
            text = "The workflow %s (%s) was rejected due to invalidation in McM" % ( wfn, pid )
            batch_lookup = wfn ##so that the batch id is taken as the one containing the workflow name
        elif invalid['type'] == 'dataset':
            dataset = invalid['object']
            # skip malformed / placeholder dataset names
            if '?' in dataset: continue
            if 'None' in dataset: continue
            # NOTE(review): the 'None-' check below is unreachable — any name
            # containing 'None-' already matched 'None' above; 'FAKE-' is live.
            if 'None-' in dataset: continue
            if 'FAKE-' in dataset: continue
            print "setting",dataset,"to",invalid_status
            success = setDatasetStatus(dataset , invalid_status )
            if success:
                acknowledge= True
                text = "The dataset %s (%s) was set INVALID due to invalidation in McM" % ( dataset, pid )
            else:
                print "invalidation of",dataset,"did not go so well"
        else:
            print "\t\t",invalid['type']," type not recognized"
        if acknowledge:
            ## acknowledge the invalidation in mcm, provided we can reach the api
            print "acknowledgment to mcm"
            mcm.get('/restapi/invalidations/acknowledge/%s'%( invalid['_id'] ))
            # prepare the text for batches
            batches = []
            batches.extend(mcm.getA('batches',query='contains=%s'%batch_lookup))
            batches = filter(lambda b : b['status'] in ['announced','done','reset'], batches)
            if len(batches):
                # notify only the most recent matching batch
                bid = batches[-1]['prepid']
                print "batch nofication to",bid
                text_to_batch[bid] += text+"\n\n"
            # prepare the text for requests
            text_to_request[pid] += text+"\n\n"
    # flush the accumulated notifications to McM
    for bid,text in text_to_batch.items():
        if not text: continue
        text += '\n This is an automated message'
        mcm.put('/restapi/batches/notify',{ "notes" : text, "prepid" : bid})
        pass
    for pid,text in text_to_request.items():
        if not text: continue
        text += '\n This is an automated message'
        mcm.put('/restapi/requests/notify',{ "message" : text, "prepids" : [pid]})
def singleClone(url, wfname, actions, comment, do=False): wfi = workflowInfo(url, wfname) payload = wfi.getSchema() initial = wfi.request payload['Requestor'] = os.getenv('USER') payload['Group'] = 'DATAOPS' payload['OriginalRequestName'] = initial['RequestName'] payload['RequestPriority'] = initial['RequestPriority'] if 'ProcessingVersion' in initial: payload['ProcessingVersion'] = int(initial['ProcessingVersion']) + 1 else: payload['ProcessingVersion'] = 2 payload = reqMgrClient.purgeClonedSchema(payload) if actions: for action in actions: if action.startswith('mem') and actions[action] != "" and actions[ action] != 'Same': if 'TaskChain' in payload: print "Setting memory for clone of task chain" mem_dict = {} it = 1 while True: t = 'Task%d' % it it += 1 if t in payload: tname = payload[t]['TaskName'] mem_dict[tname] = int(actions[action]) print "Memory set for Task%d" % it else: break payload['Memory'] = mem_dict else: print "Setting memory for non-taskchain workflow" payload['Memory'] = int(actions[action]) print "Memory set to " + actions[action] print "Clone payload" # print json.dumps( payload , indent=2) print actions #Create clone clone = reqMgrClient.submitWorkflow(url, payload) if not clone: print "Error in making clone for", initial["RequestName"] clone = reqMgrClient.submitWorkflow(url, payload) if not clone: print "Error twice in making clone for", initial["RequestName"] sendLog('actor', 'Failed to make a clone twice for %s!' % initial["RequestName"], level='critical') wfi.sendLog( 'actor', 'Failed to make a clone twice for %s!' 
% initial["RequestName"]) return None if actions: for action in actions: if action.startswith('split'): cloneinfo = workflowInfo(url, clone) splittings = cloneinfo.getSplittingsNew(strip=True) if actions[action] != 'Same' and actions[ action] != 'max' and actions[action] != '': factor = int( actions[action][0:-1]) if 'x' in actions[action] else 2 for split in splittings: split_par = split['splitParams'] for act in [ 'avg_events_per_job', 'events_per_job', 'lumis_per_job' ]: if act in split_par: wfi.sendLog( 'actor', 'Changing %s (%d) by a factor %d' % (act, split_par[act], factor)) split_par[act] /= factor print "to", split_par[act] break #split['requestName'] = clone #print "changing the splitting of",clone #print json.dumps( split, indent=2 ) #print reqMgrClient.setWorkflowSplitting(url, clone, split ) elif 'max' in actions[action]: for split in splittings: split_par = split['splitParams'] for act in [ 'avg_events_per_job', 'events_per_job', 'lumis_per_job' ]: if act in split_par: wfi.sendLog( 'actor', 'Max splitting set for %s (%d' % (act, split_par[act])) print "Changing %s (%d) " % (act, split_par[act]), split_par[act] = 1 print "to max splitting ", split_par[act] break #split['requestName'] = clone #print "changing the splitting of",clone #print json.dumps( split, indent=2 ) #print reqMgrClient.setWorkflowSplitting(url, clone, split ) print "changing the splitting of", clone print json.dumps(splittings, indent=2) print reqMgrClient.setWorkflowSplitting(url, clone, splittings) #Approve data = reqMgrClient.setWorkflowApproved(url, clone) #wfi.sendLog('actor','Cloned into %s'%clone) # wfi.sendLog('actor','Cloned into %s by unified operator %s'%( clone, comment )) # wfi.notifyRequestor('Cloned into %s by unified operator %s'%( clone, comment ),do_batch=False) print data return clone
def assignWorkflow(url, workflowname, team, parameters): #local import so it doesn't screw with all other stuff from utils import workflowInfo defaults = copy.deepcopy(assignWorkflow.defaults) defaults["Team" + team] = "checked" defaults["checkbox" + workflowname] = "checked" from utils import workflowInfo wf = workflowInfo(url, workflowname) # set the maxrss watchdog to what is specified in the request defaults['MaxRSS'] = wf.request['Memory'] * 1024 defaults.update(parameters) #if ('Multicore' in wf.request and wf.request['Multicore']>1): # defaults['MaxRSS'] = int((wf.request['Memory']*1024+10) * 1.5 * wf.request['Multicore']) # defaults['MaxVSize'] = int(10*defaults['MaxRSS']) pop_useless = ['AcquisitionEra', 'ProcessingString'] for what in pop_useless: if defaults[what] == None: defaults.pop(what) if not set(assignWorkflow.mandatories).issubset(set(parameters.keys())): print "There are missing parameters" print list(set(assignWorkflow.mandatories) - set(parameters.keys())) return False if wf.request['RequestType'] in ['ReDigi', 'ReReco']: defaults['Dashboard'] = 'reprocessing' elif 'SubRequestType' in wf.request and wf.request['SubRequestType'] in [ 'ReDigi' ]: defaults['Dashboard'] = 'reprocessing' if defaults['SiteBlacklist'] and defaults['SiteWhitelist']: defaults['SiteWhitelist'] = list( set(defaults['SiteWhitelist']) - set(defaults['SiteBlacklist'])) defaults['SiteBlacklist'] = [] if not defaults['SiteWhitelist']: print "Cannot assign with no site whitelist" return False for aux in assignWorkflow.auxiliaries: if aux in defaults: par = defaults.pop(aux) if aux == 'EventsPerJob': wf = workflowInfo(url, workflowname) t = wf.firstTask() par = int(float(par)) params = wf.getSplittings()[0] if par < params['events_per_job']: params.update({ "requestName": workflowname, "splittingTask": '/%s/%s' % (workflowname, t), "events_per_job": par, "splittingAlgo": "EventBased" }) print setWorkflowSplitting(url, workflowname, params) elif aux == 'EventsPerLumi': wf = 
workflowInfo(url, workflowname) t = wf.firstTask() params = wf.getSplittings()[0] if params['splittingAlgo'] != 'EventBased': print "Ignoring changing events per lumi for", params[ 'splittingAlgo'] continue (_, prim, _, _) = wf.getIO() if prim: print "Ignoring changing events per lumi for wf that take input" continue if str(par).startswith('x'): multiplier = float(str(par).replace('x', '')) par = int(params['events_per_lumi'] * multiplier) else: if 'FilterEfficiency' in wf.request and wf.request[ 'FilterEfficiency']: par = int(float(par) / wf.request['FilterEfficiency']) else: par = int(float(str(par))) params.update({ "requestName": workflowname, "splittingTask": '/%s/%s' % (workflowname, t), "events_per_lumi": par }) print setWorkflowSplitting(url, workflowname, params) elif aux == 'SplittingAlgorithm': wf = workflowInfo(url, workflowname) ### do it for all major tasks #for (t,params) in wf.getTaskAndSplittings(): # params.update({"requestName":workflowname, # "splittingTask" : '/%s/%s'%(workflowname,t), # "splittingAlgo" : par}) # setWorkflowSplitting(url, workflowname, params) t = wf.firstTask() params = wf.getSplittings()[0] params.update({ "requestName": workflowname, "splittingTask": '/%s/%s' % (workflowname, t), "splittingAlgo": par }) #swap values if "avg_events_per_job" in params and not "events_per_job" in params: params['events_per_job'] = params.pop('avg_events_per_job') print params print setWorkflowSplitting(url, workflowname, params) elif aux == 'LumisPerJob': wf = workflowInfo(url, workflowname) t = wf.firstTask() #params = wf.getSplittings()[0] params = { "requestName": workflowname, "splittingTask": '/%s/%s' % (workflowname, t), "lumis_per_job": int(par), "halt_job_on_file_boundaries": True, "splittingAlgo": "LumiBased" } print setWorkflowSplitting(url, workflowname, params) else: print "No action for ", aux if not 'execute' in defaults or not defaults['execute']: print json.dumps(defaults, indent=2) return False else: defaults.pop('execute') 
print json.dumps(defaults, indent=2) res = setWorkflowAssignment(url, workflowname, defaults) if res: print 'Assigned workflow:', workflowname, 'to site:', defaults[ 'SiteWhitelist'], 'and team', team return True else: print "error in assigning", workflowname return False