def GET(self, *args):
    """
    Reset all requests in a batch (or list of) and set the status to reset

    args[0] is a comma-separated list of batch prepids. Returns a JSON list
    with one {'prepid': ..., 'results': True} entry per batch.
    """
    res = []
    bdb = database('batches')
    rdb = database('requests')
    for bid in args[0].split(','):
        mcm_b = bdb.get(bid)
        for r in mcm_b['requests']:
            # skip batch entries with no request document attached
            if 'pdmv_prep_id' not in r['content']:
                continue
            rid = r['content']['pdmv_prep_id']
            if not rdb.document_exists(rid):
                continue
            mcm_r = request(rdb.get(rid))
            try:
                mcm_r.reset()
                rdb.update(mcm_r.json())
            except Exception:
                # best effort: a request that cannot be reset does not
                # prevent resetting the rest of the batch
                continue
        mcm_b['status'] = 'reset'
        bdb.update(mcm_b)
        res.append({'prepid': bid, 'results': True})
    return dumps(res)
def get(self, batch_ids):
    """
    Reset all requests in a batch (or list of) and set the status to reset
    """
    batch_db = database('batches')
    request_db = database('requests')
    results = []
    # batch_ids is a comma-separated list of batch prepids
    for batch_id in batch_ids.split(','):
        batch_json = batch_db.get(batch_id)
        for entry in batch_json['requests']:
            content = entry['content']
            if 'pdmv_prep_id' not in content:
                continue
            request_id = content['pdmv_prep_id']
            if not request_db.document_exists(request_id):
                continue
            mcm_request = request(request_db.get(request_id))
            try:
                mcm_request.reset()
                request_db.update(mcm_request.json())
            except Exception:
                continue
        updated_batch = batch(batch_json)
        updated_batch.set_attribute('status', 'reset')
        updated_batch.update_history({'action': 'set status', 'step': 'reset'})
        batch_db.update(updated_batch.json())
        results.append({'prepid': batch_id, 'results': True})
    return results
def GET(self, *args):
    """
    Does a soft reset to all relevant request in the chain

    args[0] is the chained request prepid. Walks the chain from the current
    step back to the first request, soft-resetting each one and rewinding
    the chain's step pointer.
    """
    if not args:
        return dumps({"results" : False, "message" : "no argument provided"})
    arg0 = args[0]
    crdb = database('chained_requests')
    rdb = database('requests')
    mcm_cr = chained_request(crdb.get(arg0))
    ## from the current one to the first one REVERSED
    for rid in reversed(mcm_cr.get_attribute('chain')[:mcm_cr.get_attribute('step') + 1]):
        mcm_r = request(rdb.get(rid))
        try:
            mcm_r.reset(hard=False)
        except Exception as e:
            return dumps({'prepid' : arg0, 'results' : False, 'message' : str(e)})
        mcm_r.reload()
        # re-read the chained request to avoid clobbering concurrent edits,
        # then point the step just before the request that was reset
        mcm_cr = chained_request(crdb.get(arg0))
        mcm_cr.set_attribute('step', max(0, mcm_cr.get_attribute('chain').index(rid) - 1))
        mcm_cr.reload()
    return dumps({'prepid' : arg0, 'results': True})
def get(self, mccm_id):
    """
    Return whether all requests in MccM are approve-approved

    Expands the ticket's request list (single prepids and
    [start, end] ranges) through RequestLister and checks every
    request's 'approval' field.
    """
    mccm_db = database('mccms')
    if not mccm_db.document_exists(mccm_id):
        return {"results": False}
    mccm_doc = mccm_db.get(prepid=mccm_id)
    req_db = database('requests')
    # build the free-text list RequestLister parses:
    # one prepid (or 'start -> end' range) per line
    query_lines = []
    for root_request in mccm_doc.get('requests', []):
        if isinstance(root_request, (str, unicode)):
            query_lines.append('%s\n' % (root_request))
        elif isinstance(root_request, list):
            # List always contains two elements - start and end of a range
            query_lines.append('%s -> %s\n' % (root_request[0], root_request[1]))
    query = ''.join(query_lines)
    req_lister = RequestLister()
    req_lister.logger = self.logger
    requests = req_lister.get_list_of_ids(req_db, {'contents' : query})
    # 'approve' or beyond ('submit') counts as approve-approved
    allowed_approvals = set(['approve', 'submit'])
    for request_prepid in requests:
        req = req_db.get(request_prepid)
        approval = req.get('approval')
        if approval not in allowed_approvals:
            return {'results': False}
    return {'results': True}
def reset_all(self, message, what = 'Chained validation run test', notify_one=None):
    """Rewind requests of the chained request after a failed validation.

    message    -- text forwarded to the notification / failure record
    what       -- label of the test that failed
    notify_one -- if set, only the request with this prepid is notified
                  by test_failure; others get rewound silently
    """
    crdb = database('chained_requests')
    rdb = database('requests')
    mcm_cr = chained_request(crdb.get(self.crid))
    # from scratch: rewind the whole chain; otherwise only from the current step on
    if self.scratch:
        chain = mcm_cr.get_attribute('chain')
    else:
        chain = mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]
    for rid in chain:
        mcm_r = request(rdb.get(rid ))
        s_label = 'chainvalid-%s' % rid
        semaphore_events.decrement(s_label)
        if not semaphore_events.is_set(s_label):
            ## someone else is still validating that chain, so no reset !
            mcm_r.notify('%s failed for request %s' % (what, mcm_r.get_attribute('prepid')), message)
            continue
        ## do not reset anything that does not look ok already
        # this might leave things half-way inconsistent in terms of status
        if mcm_r.get_attribute('status') != 'new':
            mcm_r.notify('%s failed for request %s' % (what, mcm_r.get_attribute('prepid')), message)
            continue
        notify = True
        if notify_one and notify_one != rid:
            notify = False
        mcm_r.test_failure( message, what = what, rewind=True, with_notification=notify)
def __retrieve_chains(self, prepid, campid):
    """Refresh the 'chains' attribute with chained campaigns rooted at campid.

    Chains already known keep their flag; newly discovered ones get
    {'flag': False}; chains no longer in the database are dropped.
    Returns False on database access problems, True otherwise.
    """
    # initialize db connections
    try:
        chaindb = database('chained_campaigns')
        # NOTE(review): cdb is never used below — presumably opened only to
        # verify access to the campaigns db; confirm before removing
        cdb = database('campaigns')
    except database.DatabaseAccessError as ex:
        return False
    # get all chains
    # '>' & '>=' operators in queries for string keys return the same
    #candidate_chains = chaindb.query('prepid>=chain_'+campid)
    candidate_chains = chaindb.query('prepid>=chain_'+campid+'_',page_num=-1)
    candidate_chains.extend(chaindb.query('prepid==chain_'+campid,page_num=-1))
    # map only prepids
    ccids = map(lambda x: x['_id'], candidate_chains)
    chains = self.get_attribute('chains')
    new_chains = {}
    # cross examine (avoid deleted, keep new ones)
    for ccid in ccids:
        if ccid in chains:
            new_chains[ccid] = chains[ccid]
        else:
            new_chains[ccid] = {'flag' : False }
    # make persistent
    self.set_attribute('chains', new_chains)
    return True
def get(self):
    """
    Get all the generated chains from a ticket

    Honors 'page'/'limit' query arguments (page < 0 means everything) and
    fetches the chained request documents in slices of 20 prepids per
    lucene query.
    """
    kwargs = self.parser.parse_args()
    page = kwargs['page']
    limit = kwargs['limit']
    # negative page means "return everything": start at 0 with a huge limit
    if page < 0:
        page = 0
        limit = 999999
    ticket_prepid = kwargs['ticket']
    chained_requests_db = database('chained_requests')
    mccms_db = database('mccms')
    mccm_query = mccms_db.construct_lucene_query({'prepid': ticket_prepid})
    result = mccms_db.full_text_search("search", mccm_query, page=-1)
    if len(result) == 0:
        self.logger.warning("Mccm prepid %s doesn't exit in db" % ticket_prepid)
        return {}
    self.logger.info("Getting generated chains from ticket %s" % ticket_prepid)
    generated_chains = list(result[0]['generated_chains'].iterkeys())
    generated_chains.sort()
    # window [start, end) of the sorted chain prepids for this page
    start = page * limit
    if start > len(generated_chains):
        return []
    end = start + limit
    end = end if end <= len(generated_chains) else len(generated_chains)
    chained_request_list = []
    # fetch the window in slices of 20 prepids per query
    while start < end:
        fetch_till = start + 20
        fetch_till = end if fetch_till > end else fetch_till
        chained_request_query = chained_requests_db.construct_lucene_query(
            {'prepid': generated_chains[start:fetch_till]}, boolean_operator="OR")
        chained_request_list += chained_requests_db.full_text_search("search", chained_request_query)
        start += 20
    return chained_request_list
def overwrite(self, json_input):
    """
    Update the document with the input, regardless of revision clash.
    This has to be used with much care

    Reloads the freshest copy of the document from the database, overlays
    json_input on top of it, and saves it back under the document lock.
    Returns True on success, False otherwise.
    """
    try:
        # 'batch' pluralizes to 'batches'; every other class name adds 's'
        if self.__class__.__name__ =="batch":
            db = database(self.__class__.__name__ + "es")
        else:
            db = database(self.__class__.__name__ + "s")
    except (database.DatabaseNotFoundException, database.DatabaseAccessError) as ex:
        self.logger.error("Problem with database creation:\n{0}".format(ex))
        return False
    with locker.lock(self.get_attribute('_id')):
        if not db.document_exists(self.get_attribute('_id')):
            return False
        ## reload the doc with db
        self.__init__(db.get(self.get_attribute('_id')))
        ## add what was provided on top
        self._json_base__json.update( json_input )
        ## save back
        saved = db.update(self.json())
        if not saved:
            return False
        return True
def find_chains(self):
    """Find all chained campaigns of this action's campaign and store them.

    Raises PrepIdNotDefinedException / PrepIdDoesNotExistException on an
    invalid prepid, ValueError when the request has no campaign; returns
    False when a database is unreachable.
    """
    # validate request
    if not self.get_attribute('prepid'):
        raise self.PrepIdNotDefinedException()
    # initialize db connections
    try:
        reqdb = database('requests')
        campaigndb = database('campaigns')
    except database.DatabaseAccessError as ex:
        return False
    # validate prepid
    if not reqdb.document_exists(self.get_attribute('prepid')):
        raise self.PrepIdDoesNotExistException(
            self.get_attribute('prepid'))
    # get campaign id
    req = request(json_input=reqdb.get(self.get_attribute('prepid')))
    # check if campaign exists
    campid = req.get_attribute('member_of_campaign')
    if not campid:
        self.logger.error('action %s has not a campaign defined' % (self.get_attribute('prepid')))
        raise ValueError('Error: Campaign was not set for', self.get_attribute('prepid'))
    if not campaigndb.document_exists(campid):
        raise self.PrepIdDoesNotExistException(campid)
    # get all chains
    return self.__retrieve_chains(self.get_attribute('prepid'), campid)
def find_chains(self):
    """Locate every chained campaign this action's campaign belongs to."""
    prepid = self.get_attribute('prepid')
    if not prepid:
        raise self.PrepIdNotDefinedException()
    try:
        reqdb = database('requests')
        campaigndb = database('campaigns')
    except database.DatabaseAccessError as ex:
        return False
    if not reqdb.document_exists(prepid):
        raise self.PrepIdDoesNotExistException(prepid)
    # fetch the request to learn which campaign it is a member of
    req = request(json_input=reqdb.get(prepid))
    campid = req.get_attribute('member_of_campaign')
    if not campid:
        self.logger.error('action %s has not a campaign defined' % (prepid))
        raise ValueError('Error: Campaign was not set for', prepid)
    if not campaigndb.document_exists(campid):
        raise self.PrepIdDoesNotExistException(campid)
    # delegate the actual chain lookup
    return self.__retrieve_chains(prepid, campid)
def __retrieve_chains(self, prepid, campid):
    """Rebuild the 'chains' mapping from chained campaigns matching campid."""
    try:
        chained_campaign_db = database('chained_campaigns')
        campaign_db = database('campaigns')
    except database.DatabaseAccessError as ex:
        return False
    # '>' & '>=' operators in queries for string keys return the same
    matches = chained_campaign_db.query('prepid>=chain_' + campid + '_', page_num=-1)
    matches.extend(chained_campaign_db.query('prepid==chain_' + campid, page_num=-1))
    known_chains = self.get_attribute('chains')
    refreshed = {}
    # keep entries that still exist, initialize brand-new ones un-flagged
    for doc in matches:
        chain_id = doc['_id']
        if chain_id in known_chains:
            refreshed[chain_id] = known_chains[chain_id]
        else:
            refreshed[chain_id] = {'flag': False}
    self.set_attribute('chains', refreshed)
    return True
def get(self, batch_ids):
    """
    Reset all requests in a batch (or list of) and set the status to reset

    batch_ids is a comma-separated list of batch prepids. Returns a list
    with one {'prepid': ..., 'results': True} entry per batch.
    """
    res = []
    bdb = database('batches')
    rdb = database('requests')
    for bid in batch_ids.split(','):
        mcm_b = bdb.get(bid)
        for r in mcm_b['requests']:
            # skip batch entries with no request document attached
            if 'pdmv_prep_id' not in r['content']:
                continue
            rid = r['content']['pdmv_prep_id']
            if not rdb.document_exists(rid):
                continue
            mcm_r = request(rdb.get(rid))
            try:
                mcm_r.reset()
                rdb.update(mcm_r.json())
            except Exception:
                # best effort: keep resetting the remaining requests
                continue
        batch_to_update = batch(mcm_b)
        batch_to_update.set_attribute('status', 'reset')
        batch_to_update.update_history({
            'action': 'set status',
            'step': 'reset'
        })
        bdb.update(batch_to_update.json())
        res.append({'prepid': bid, 'results': True})
    return res
def get(self, chained_request_id):
    """
    Does a soft reset to all relevant request in the chain
    """
    chained_db = database('chained_requests')
    request_db = database('requests')
    mcm_chained = chained_request(chained_db.get(chained_request_id))
    current_step = mcm_chained.get_attribute('step')
    # walk backwards: from the current request down to the first one
    for request_id in reversed(mcm_chained.get_attribute('chain')[:current_step + 1]):
        mcm_request = request(request_db.get(request_id))
        try:
            mcm_request.reset(hard=False)
        except Exception as e:
            return {'prepid': chained_request_id,
                    'results': False,
                    'message': str(e)}
        mcm_request.reload()
        # re-read the chained request and rewind its step pointer to just
        # before the request that was reset
        mcm_chained = chained_request(chained_db.get(chained_request_id))
        new_step = max(0, mcm_chained.get_attribute('chain').index(request_id) - 1)
        mcm_chained.set_attribute('step', new_step)
        mcm_chained.reload()
    return {'prepid': chained_request_id, 'results': True}
def add_campaign(self, campaign_id, flow_name=None):
    """Append (campaign_id, flow_name) to this chained campaign's 'campaigns'.

    flow_name is None when campaign_id is the root of the chain. Raises
    CampaignDoesNotExistException / FlowDoesNotExistException when the
    referenced documents are missing. Returns False on import or database
    access problems, True on success.
    """
    self.logger.log('Adding a new campaign %s to chained campaign %s' % (campaign_id, self.get_attribute('_id')))
    try:
        from couchdb_layer.mcm_database import database
    except ImportError as ex:
        self.logger.error('Could not import database connector class. Reason: %s' % (ex), level='critical')
        return False
    try:
        camp_db = database('campaigns')
        flow_db = database('flows')
    except database.DatabaseAccessError as ex:
        return False
    if not camp_db.document_exists(campaign_id):
        raise self.CampaignDoesNotExistException(campaign_id)
    # check to see if flow_name is none (campaign_id = root)
    if flow_name is not None:
        if not flow_db.document_exists(flow_name):
            raise self.FlowDoesNotExistException(flow_name)
    camps = self.get_attribute('campaigns')
    # 'not camps' already covers None as well as an empty list
    if not camps:
        camps = []
    camps.append([campaign_id, flow_name])
    self.set_attribute('campaigns', camps)
    return True
def get(self, mccm_id):
    """
    Cancel the MccM ticket provided in argument. Does not delete it but
    put the status as cancelled.

    Refused when the ticket is already 'done' or when the current user
    does not belong to the ticket's PWG.
    """
    db = database('mccms')
    udb = database('users')
    mcm_mccm = mccm(db.get(mccm_id))
    curr_user = user(udb.get(mcm_mccm.current_user))
    self.logger.info("Canceling an mccm: %s" % (mccm_id))
    # a ticket that was already operated on cannot be cancelled anymore
    if mcm_mccm.get_attribute('status') == 'done':
        self.logger.info("You cannot cancel 'done' mccm ticket")
        return {"results": False, "message": "Cannot cancel done tickets"}
    # only users belonging to the ticket's PWG may cancel it
    if mcm_mccm.get_attribute("pwg") not in curr_user.get_pwgs():
        self.logger.info("User's PWGs: %s doesnt include ticket's PWG: %s" % (
            curr_user.get_pwgs(), mcm_mccm.get_attribute("pwg")))
        return {"results": False, "message": "You cannot cancel ticket with different PWG than yours"}
    mcm_mccm.set_attribute('status', 'cancelled')
    mcm_mccm.update_history({'action': 'cancelled'})
    saved = db.update(mcm_mccm.json())
    if saved:
        return {"results": True}
    else:
        return {"results": False, "message": "Could not save the ticket to be cancelled."}
def toggle_last_request(self):
    """Push the chain's current request to 'submit' approval when required.

    Only acts when the flow of the current step (or the chained request
    itself) is at 'submit' approval. Retries the approval up to 10 times,
    re-reading the request from the database each time to dodge revision
    clashes. Raises ChainedRequestCannotFlowException when a save fails.
    """
    ## let it toggle the last request to a given approval only if the chained request allows it
    if self.get_attribute('approval') == 'none':
        return
    ccdb = database('chained_campaigns')
    mcm_cc = ccdb.get(self.get_attribute('member_of_campaign'))
    (next_campaign_id, flow_name) = mcm_cc['campaigns'][self.get_attribute('step')]
    fdb = database('flows')
    mcm_f = flow(fdb.get(flow_name))
    # check whether we have to do something even more subtle with the request
    if mcm_f.get_attribute('approval') == 'submit' or self.get_attribute('approval') == 'submit':
        rdb = database('requests')
        next_request = request(rdb.get(self.get_attribute('chain')[self.get_attribute('step')]))
        current_r_approval = next_request.get_attribute('approval')
        time_out = 0
        #self.logger.error('Trying to move %s from %s to submit'% (next_request.get_attribute('prepid'), current_r_approval))
        while current_r_approval != 'submit' and time_out <= 10:
            time_out += 1
            # get it back from db to avoid _rev issues
            next_request = request(rdb.get(next_request.get_attribute('prepid')))
            with locker.lock('{0}-wait-for-approval'.format( next_request.get_attribute('prepid') )):
                next_request.approve()
                request_saved = rdb.save(next_request.json())
                if not request_saved:
                    raise self.ChainedRequestCannotFlowException(self.get_attribute('_id'),
                        'Could not save the new request %s while trying to move to submit approval' % (
                            next_request.get_attribute('prepid')))
            current_r_approval = next_request.get_attribute('approval')
            pass
    return True
def GET(self, *args):
    """
    Reset all requests in a batch (or list of) and set the status to reset
    """
    batch_db = database('batches')
    request_db = database('requests')
    outcome = []
    # args[0] holds a comma-separated list of batch prepids
    for batch_id in args[0].split(','):
        batch_json = batch_db.get(batch_id)
        for entry in batch_json['requests']:
            content = entry['content']
            if 'pdmv_prep_id' not in content:
                continue
            prep_id = content['pdmv_prep_id']
            if not request_db.document_exists(prep_id):
                continue
            mcm_request = request(request_db.get(prep_id))
            try:
                mcm_request.reset()
                request_db.update(mcm_request.json())
            except Exception:
                continue
        batch_json['status'] = 'reset'
        batch_db.update(batch_json)
        outcome.append({'prepid': batch_id, 'results': True})
    return dumps(outcome)
def import_request(self, data):
    """Create a chained request from a JSON string payload.

    data must contain 'pwg' and 'member_of_campaign'; when 'prepid' is
    supplied the document is taken as provided, otherwise a fresh prepid is
    allocated and the user-supplied fields are copied onto the new
    document. Returns the save-result dict, or {"results": False} on
    invalid input. Raises ValueError when no prepid could be determined.
    """
    db = database(self.db_name)
    json_input = loads(data)
    if 'pwg' not in json_input or 'member_of_campaign' not in json_input:
        self.logger.error('No pwg or member of campaign attribute for new chained request')
        return {"results": False}
    if 'prepid' in json_input:
        req = chained_request(json_input)
        cr_id = req.get_attribute('prepid')
    else:
        cr_id = ChainedRequestPrepId().next_id(json_input['pwg'], json_input['member_of_campaign'])
        if not cr_id:
            return {"results": False}
        req = chained_request(db.get(cr_id))
    # carry over every user-supplied field except the immutable ones
    for key in json_input:
        if key not in ['prepid', '_id', '_rev', 'history']:
            req.set_attribute(key, json_input[key])
    if not req.get_attribute('prepid'):
        self.logger.error('prepid returned was None')
        raise ValueError('Prepid returned was None')
    # chain_type defaults to the chained campaign's setting (TaskChain)
    if 'chain_type' in json_input:
        chain_type = json_input['chain_type']
    else:
        ccdb = database('chained_campaigns')
        chain_type = ccdb.get(json_input['member_of_campaign']).get('chain_type', 'TaskChain')
    req.set_attribute('chain_type', chain_type)
    self.logger.info('Created new chained_request %s' % cr_id)
    # update history with the submission details
    req.update_history({'action': 'created'})
    return self.save_request(db, req)
def next_prepid(self, pwg, camp):
    """Allocate the next sequential prepid '<pwg>-<camp>-<NNNNN>'.

    Reserves the id by creating and saving a skeleton request in the
    campaign. Returns the new prepid, or None when pwg/camp is missing.
    """
    if not pwg or not camp:
        return None
    # serialize prepid allocation per (pwg, campaign) pair
    with locker.lock("{0}-{1}".format(pwg, camp)):
        db = database(self.db_name)
        # 'serial_number' view yields the highest serial used so far
        query_results = db.raw_query('serial_number', {
            'group': True,
            'key': [camp, pwg]
        })
        sn = 1
        if query_results:
            sn = query_results[0]['value'] + 1
        pid = '%s-%s-%05d' % (pwg, camp, sn)
        if sn == 1:
            self.logger.info('Beginning new prepid family: %s-%s' % (pwg, camp))
        db_camp = database('campaigns', cache_enabled=True)
        req_camp = campaign(db_camp.get(camp))
        # build a skeleton request in this campaign so the id is taken
        new_request = request(
            req_camp.add_request({
                '_id': pid,
                'prepid': pid,
                'pwg': pwg,
                'member_of_campaign': camp
            }))
        new_request.update_history({'action': 'created'})
        db.save(new_request.json())
        self.logger.info('New prepid : %s ' % pid)
        return pid
def next_id(self, pwg, campaign):
    """Allocate the next chained-request prepid '<pwg>-<campaign>-<NNNNN>'.

    Uses a per-(pwg, campaign) lock plus an in-memory serial-number cache,
    falling back to the 'serial_number' db view; reserves the id by saving
    a skeleton chained_request. Returns the new prepid, or None on bad
    input or unknown campaign.
    """
    ccamp_db = database(self.ccamp_db_name)
    creq_db = database(self.creq_db_name)
    if not pwg:
        self.logger.error('Physics working group provided is None.')
        return None
    if not campaign:
        self.logger.error('Campaign id provided is None.')
        return None
    with locker.lock("{0}-{1}".format(pwg, campaign)):
        if not ccamp_db.document_exists(campaign):
            self.logger.error('Campaign id {0} does not exist.'.format(campaign))
            return None
        # prefer the cached serial number; fall back to the db view
        if (campaign, pwg) in self.serial_number_cache:
            sn = self.serial_number_cache[(campaign, pwg)] + 1
        else:
            sn=1
            serial_number_lookup = creq_db.raw_query('serial_number', {'group':True, 'key':[campaign, pwg]})
            if serial_number_lookup:
                sn = serial_number_lookup[0]['value']+1
        ## construct the new id
        new_prepid = pwg + '-' + campaign + '-' + str(sn).zfill(5)
        if sn==1:
            self.logger.log('Beginning new prepid family: %s' % (new_prepid))
        # reserve the id by saving a skeleton chained_request document
        new_request = chained_request({'_id':new_prepid, 'prepid':new_prepid, 'pwg':pwg, 'member_of_campaign':campaign})
        new_request.update_history({'action':'created'})
        creq_db.save(new_request.json())
        self.serial_number_cache[(campaign, pwg)] = sn
        self.logger.log('New chain id: %s' % new_prepid, level='debug')
        return new_prepid
def internal_run(self):
    """Run the chained-request validation job and dispatch the outcome.

    Writes the chain's setup script into an installer work area, executes
    it through batch_control, then either picks up the performance report
    for every request of the chain or rewinds everything via reset_all on
    any failure. The work area is always cleaned up at the end.
    """
    from tools.installer import installer
    from tools.batch_control import batch_control
    location = installer( self.crid, care_on_existing=False, clean_on_exit=True)
    try:
        crdb = database('chained_requests')
        rdb = database('requests')
        mcm_cr = chained_request(crdb.get(self.crid))
        mcm_rs = []
        for rid in mcm_cr.get_attribute('chain'):
            mcm_rs.append( request( rdb.get( rid ) ))
        test_script = location.location() + 'validation_run_test.sh'
        with open(test_script, 'w') as there:
            there.write(mcm_cr.get_setup(directory=location.location(), run=True, validation=True))
        batch_test = batch_control( self.crid, test_script )
        try:
            success = batch_test.test()
        except:
            # the test runner itself blew up: rewind the whole chain
            self.reset_all( traceback.format_exc() )
            return
        if not success:
            self.reset_all( '\t .out \n%s\n\t .err \n%s\n ' % ( batch_test.log_out, batch_test.log_err) )
            return
        # collect the performance reports; remember the first failing request
        last_fail=mcm_rs[0]
        trace=""
        for mcm_r in mcm_rs:
            ### if not mcm_r.is_root: continue ##disable for dr request
            (success,trace) = mcm_r.pickup_all_performance(location.location())
            if not success:
                last_fail = mcm_r
                break
        self.logger.error('I came all the way to here and %s (request %s)' % ( success, self.crid ))
        if success:
            for mcm_r in mcm_rs:
                if mcm_r.is_root:
                    # only act if the request did not change while the test ran
                    mcm_current = request( rdb.get(mcm_r.get_attribute('prepid')))
                    if mcm_current.json()['_rev'] == mcm_r.json()['_rev']:
                        mcm_r.set_status(with_notification=True)
                        if not mcm_r.reload():
                            self.reset_all( 'The request %s could not be saved after the runtest procedure' % (mcm_r.get_attribute('prepid')))
                            return
                    else:
                        self.reset_all( 'The request %s has changed during the run test procedure'%(mcm_r.get_attribute('prepid')),
                                        notify_one = mcm_r.get_attribute('prepid'))
                        return
        else:
            # performance pick-up failed: rewind and notify the failing request
            self.reset_all( trace , notify_one = last_fail.get_attribute('prepid') )
            return
    except:
        mess = 'We have been taken out of run_safe of runtest_genvalid for %s because \n %s \n During an un-excepted exception. Please contact support.' % ( self.crid, traceback.format_exc())
        self.logger.error(mess)
    finally:
        # always release the work area, whatever happened above
        location.close()
def create_flow(self, jsdata):
    """Create a new flow from a JSON string and save it to the database.

    Validates the prepid and the next/allowed campaigns, deduplicates
    allowed_campaigns, records history, and propagates the flow to the
    derived campaign objects. Returns a {'results': ...} dict.
    """
    cdb = database('campaigns')
    db = database(self.db_name)
    data = threaded_loads(jsdata)
    if '_rev' in data:
        return {"results": 'Cannot create a flow with _rev'}
    try:
        f = flow(json_input=data)
    except flow.IllegalAttributeName as ex:
        return {"results": False}
    except ValueError as ex:
        self.logger.error('Could not initialize flow object. Reason: %s' % ex)
        return {"results": False}
    if not f.get_attribute('prepid'):
        self.logger.error('prepid is not defined.')
        return {
            "results": False,
            'message': 'Error: PrepId was not defined.'
        }
    f.set_attribute('_id', f.get_attribute('prepid'))
    # uniquing the allowed campaigns if passed duplicates by mistake
    # (bug fix: the length was previously compared to the list itself,
    # which in Python 2 is always unequal, so this ran unconditionally)
    allowed_campaigns = f.get_attribute('allowed_campaigns')
    if len(set(allowed_campaigns)) != len(allowed_campaigns):
        f.set_attribute('allowed_campaigns', list(set(allowed_campaigns)))
    self.logger.log('Creating new flow %s ...' % (f.get_attribute('_id')))
    nc = f.get_attribute('next_campaign')
    result = self.are_campaigns_correct(
        nc, f.get_attribute('allowed_campaigns'), cdb)
    if result is not True:
        return result
    ## adjust the requests parameters based on what was provided as next campaign
    self.set_default_request_parameters(nc, cdb, f)
    # update history
    f.update_history({'action': 'created'})
    # save the flow to db
    if not db.save(f.json()):
        self.logger.error(
            'Could not save newly created flow %s to database.' % (f.get_attribute('_id')))
        return {"results": False}
    # return right away instead of trying and failing on missing next or allowed
    if not nc or not len(f.get_attribute('allowed_campaigns')):
        return {"results": True}
    # update all relevant campaigns with the "Next" parameter
    return self.update_derived_objects(flow().json(), f.json())
def get_database(self):
    """Return the couch database matching this class' name, or None on failure."""
    # pluralize the class name: 'batch' -> 'batches', anything else adds 's'
    class_name = self.__class__.__name__
    suffix = "es" if class_name == "batch" else "s"
    try:
        return database(class_name + suffix)
    except (database.DatabaseNotFoundException, database.DatabaseAccessError) as ex:
        self.logger.error("Problem with database creation:\n{0}".format(ex))
        return None
def GET(self, *args):
    """
    Perform test for chained requests

    args[0] is the chained request prepid. Refuses when any request from
    the current step on is already approved/submitted/done; otherwise moves
    each 'none'-approval request to its next approval and queues the
    validation run.
    """
    if not args:
        return dumps({"results" : False, "message" : "no argument provided"})
    from tools.handlers import RunChainValid, validation_pool
    ## now in the core of the api
    runtest = RunChainValid(crid=args[0], lock=locker.lock(args[0]))
    crdb = database('chained_requests')
    rdb = database('requests')
    mcm_cr = chained_request(crdb.get(args[0]))
    # first pass: refuse to test a chain with requests already too advanced
    for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:
        mcm_r = request(rdb.get(rid))
        if mcm_r.get_attribute('status') in ['approved', 'submitted', 'done']:
            return dumps({"results" : False,
                          "prepid" : args[0],
                          "message" : "request %s is in status %s" % (rid, mcm_r.get_attribute('status'))})
    # second pass: move each request along and notify
    for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:
        mcm_r = request(rdb.get(rid))
        # root requests go to validation; non-root requests only to approve
        # (renamed from 'next', which shadowed the builtin)
        next_approval = 'validation'
        if not mcm_r.is_root:
            next_approval = 'approve'
        try:
            if mcm_r.get_attribute('approval') == 'none':
                ## no need to try and move it along if already further than that
                getattr(mcm_r, 'ok_to_move_to_approval_%s' % next_approval)(for_chain=True)
                mcm_r.update_history({'action' : 'approve', 'step' : next_approval})
                mcm_r.set_attribute('approval', next_approval)
                mcm_r.reload()
            else:
                ## fail this for the moment. there is no way to handle a
                ## chain of requests which are partially not new
                pass
            text = 'Within chain %s \n' % mcm_cr.get_attribute('prepid')
            text += mcm_r.textified()
            mcm_r.notify('Approval %s in chain %s for request %s' % (next_approval,
                                                                     mcm_cr.get_attribute('prepid'),
                                                                     mcm_r.get_attribute('prepid')),
                         text,
                         accumulate=True)
        except Exception as e:
            runtest.reset_all(str(e), notify_one=rid)
            return dumps({"results" : False, "message" : str(e), "prepid" : args[0]})
    validation_pool.add_task(runtest.internal_run)
    return dumps({"results" : True, "message" : "run test started", "prepid" : args[0]})
def announce_with_text(self, bid, message):
    """Announce batch bid with the given message.

    When the batch has workflows, they are first approved through
    RequestApprover and the batch is only announced on approval success.
    On a successful announcement, single-request NanoAOD workflows get a
    priority boost before the batch is saved back.
    """
    bdb = database('batches')
    if not semaphore_events.is_set(bid):
        return {"results": False,
                "message": "Batch {0} has on-going submissions.".format(bid),
                "prepid": bid}
    b = batch(bdb.get(bid))
    # comma-separated list of every workflow name in the batch
    workflows = ''
    for dictionary in b.get_attribute('requests'):
        workflows += dictionary['name'] + ','
    workflows = workflows[:-1]
    r = ''
    result = {}
    if workflows != '':
        approver = RequestApprover(bid, workflows)
        result = approver.internal_run()
        if (result['results']):
            r = b.announce(message)
    else:
        r = b.announce(message)
    if r:
        # group request prepids by their workflow name
        map_wf_to_prepid = {}
        for dictionary in b.get_attribute('requests'):
            wf = dictionary.get('name')
            prepid = dictionary.get('content', {}).get('pdmv_prep_id')
            if not wf or not prepid:
                continue
            if wf not in map_wf_to_prepid:
                map_wf_to_prepid[wf] = []
            map_wf_to_prepid[wf].append(prepid)
        rdb = database('requests')
        priority_coeff = settings.get_value('nanoaod_priority_increase_coefficient')
        # workflows with exactly one NanoAOD request get a priority bump
        for wf, requests in map_wf_to_prepid.iteritems():
            if len(requests) == 1 and 'nanoaod' in requests[0].lower():
                for r_prepid in requests:
                    req = request(rdb.get(r_prepid))
                    current_priority = req.get_attribute('priority')
                    new_priority = int(current_priority + priority_coeff * 1000)
                    req.change_priority(new_priority)
        return {
            "results": bdb.update(b.json()),
            "message": r,
            "prepid": bid
        }
    else:
        return {
            "results": False,
            "prepid": bid,
            "message": result['message'] if 'message' in result and not r else r
        }
def get_database(self):
    """Return the couch database object matching this class' name.

    'batch' pluralizes to 'batches'; any other class name just appends
    's'. Returns None (after logging) when the database cannot be opened.
    """
    try:
        if self.__class__.__name__ == "batch":
            return database(self.__class__.__name__ + "es")
        else:
            return database(self.__class__.__name__ + "s")
    except (database.DatabaseNotFoundException, database.DatabaseAccessError) as ex:
        self.logger.error(
            "Problem with database creation:\n{0}".format(ex))
        return None
def GET(self, *args):
    """
    Look for batches that are new and with 1 requests or /N and announce them,
    or /batchid or /batchid/N

    Also sets announced batches to done when every request in them is done.
    Both behaviors are gated by the 'batch_announce' / 'batch_set_done'
    settings.
    """
    # parse optional arguments: a threshold N, a batch id, or batchid + N
    self.N_to_go=1
    bid=None
    if len(args):
        if args[0].isdigit():
            self.N_to_go=int(args[0])
        else:
            bid = args[0]
        if len(args)==2:
            self.N_to_go=int(args[1])
    bdb = database('batches')
    res=[]
    if settings().get_value('batch_announce'):
        new_batches = bdb.queries(['status==new'])
        for new_batch in new_batches:
            if bid and new_batch['prepid']!=bid:
                continue
            if len(new_batch['requests'])>=self.N_to_go:
                ## it is good to be announced !
                res.append( self.announce_with_text( new_batch['_id'], 'Automatic announcement.') )
    else:
        self.logger.log('Not announcing any batch')
    if settings().get_value('batch_set_done'):
        ## check on on-going batches
        rdb = database('requests')
        announced_batches = bdb.queries(['status==announced'])
        for announced_batch in announced_batches:
            if bid and announced_batch['prepid']!=bid:
                continue
            this_bid = announced_batch['prepid']
            all_done=False
            for r in announced_batch['requests']:
                # NOTE(review): wma_name is unused — kept as in the original
                wma_name = r['name']
                rid = r['content']['pdmv_prep_id']
                mcm_r = rdb.get( rid )
                all_done = ( mcm_r['status'] == 'done' )
                if not all_done:
                    ## no need to go further
                    break
            if all_done:
                ## set the status and save
                mcm_b = batch(announced_batch)
                mcm_b.set_status()
                bdb.update( mcm_b.json() )
                res.append({"results": True, "prepid" : this_bid, "message" : "Set to done"})
            else:
                res.append({"results": False, "prepid" : this_bid, "message" : "Not completed"})
    else:
        self.logger.log('Not setting any batch to done')
    # anyways return something
    return dumps(res)
def GET(self, *args):
    """
    Send a reminder to the production managers for existing opened mccm documents

    Optional args[0] is the highest ticket 'block' to include (default 0).
    """
    mdb = database('mccms')
    mccms = mdb.queries(['status==new'])
    udb = database('users')
    block_threshold = 0
    if len(args):
        block_threshold = int(args[0])
    # only tickets at or below the threshold, ordered by block
    mccms = filter(lambda m: m['block'] <= block_threshold, mccms)
    mccms = sorted(mccms, key=lambda m: m['block'])
    if len(mccms) == 0:
        return dumps({
            "results": True,
            "message": "nothing to remind of at level %s, %s" % (block_threshold, mccms)
        })
    l_type = locator()
    com = communicator()
    subject = 'Gentle reminder on %s tickets to be operated by you' % (
        len(mccms))
    message = '''\
Dear Production Managers, please find below the details of %s opened MccM tickets that need to be operated. ''' % (len(mccms))
    for mccm in mccms:
        message += 'Ticket : %s (block %s)\n' % (mccm['prepid'], mccm['block'])
        message += ' %smccms?prepid=%s \n\n' % (l_type.baseurl(), mccm['prepid'])
    message += '\n'
    # notify the service account plus every production manager
    to_who = [settings().get_value('service_account')]
    to_who.extend(
        map(lambda u: u['email'],
            udb.query(query="role==production_manager", page_num=-1)))
    com.sendMail(to_who, subject, message)
    return dumps({
        "results": True,
        "message": map(lambda m: m['prepid'], mccms)
    })
def streaming_function():
    """Generator: remind generator contacts about their 'new' MccM tickets.

    Yields progress text (streamed to the client) while grouping tickets
    by the contacts that authored them or belong to the ticket's PWG,
    then emails each contact a summary of their tickets.
    """
    mccms_db = database('mccms')
    users_db = database('users')
    # index generator contacts by pwg and collect their email addresses
    generator_contacts_query = users_db.construct_lucene_query({'role': 'generator_contact'})
    generator_contacts = users_db.full_text_search("search", generator_contacts_query, page=-1)
    generator_contacts_by_pwg = {}
    generator_contacts_emails = set()
    for contact in generator_contacts:
        for pwg in contact.get('pwg', []):
            if pwg not in generator_contacts_by_pwg:
                generator_contacts_by_pwg[pwg] = []
            generator_contacts_by_pwg[pwg].append(contact.get('email'))
            # NOTE(review): placed inside the pwg loop as in the original
            # layout — a contact without pwgs is never registered; confirm
            generator_contacts_emails.add(contact.get('email'))
    __query = mccms_db.construct_lucene_query({'status': 'new'})
    mccms_tickets = mccms_db.full_text_search('search', __query, page=-1)
    authors_tickets_dict = dict()
    yield '<pre>'
    for ticket in mccms_tickets:
        yield 'Processing ticket %s\n' % (ticket['prepid'])
        mccm_ticket = mccm(json_input=ticket)
        pwg = mccm_ticket.get_attribute('pwg')
        authors = mccm_ticket.get_actors(what='author_email')
        yield '%s worked on %s\n' % (authors, ticket['prepid'])
        # notify ticket authors plus the pwg's contacts — contacts only
        authors = filter(lambda e: e in generator_contacts_emails,
                         list(set(authors + generator_contacts_by_pwg.get(pwg, []))))
        yield '%s will be notified about %s\n' % (authors, ticket['prepid'])
        for author_email in authors:
            if author_email in generator_contacts_emails:
                if author_email not in authors_tickets_dict:
                    authors_tickets_dict[author_email] = set()
                authors_tickets_dict[author_email].add(ticket['prepid'])
    subject_template = 'Gentle reminder on %s ticket%s to be operated by you'
    message_template = ('Dear GEN Contact,\nPlease find below the details of %s MccM ticket%s in status "new". '
                        + 'Please present them in next MccM googledoc or cancel tickets if these are not needed anymore.\n\n')
    base_url = locator().baseurl()
    mail_communicator = communicator()
    service_account = settings.get_value('service_account')
    # one email per contact, listing every ticket they should act on
    for author_email, ticket_prepids in authors_tickets_dict.iteritems():
        num_tickets = len(ticket_prepids)
        subject = subject_template % (num_tickets, '' if num_tickets == 1 else 's')
        message = message_template % (num_tickets, '' if num_tickets == 1 else 's')
        for ticket_prepid in ticket_prepids:
            message += 'Ticket: %s\n%smccms?prepid=%s\n\n' % (ticket_prepid, base_url, ticket_prepid)
            yield '.'
        yield '\n'
        message += 'You received this email because you are listed as generator contact of physics group(s) of these tickets.\n'
        self.logger.info('Email:%s\nSubject: %s\nMessage:%s' % (author_email, subject, message))
        mail_communicator.sendMail([author_email, service_account], subject, message)
        yield 'Email sent to %s\n' % (author_email)
def generate_request(self, root_request_id):
    """Build a new chained_request for this chained campaign rooted at root_request_id."""
    self.logger.info(
        'Building a new chained_request for chained_campaign %s. Root request: %s' % (
            self.get_attribute('_id'), root_request_id))
    try:
        request_db = database('requests')
        chained_request_db = database('chained_requests')
    except database.DatabaseAccessError:
        return {}
    if not request_db.document_exists(root_request_id):
        return {}
    # the pwg is the first token of the root request prepid
    pwg = root_request_id.split('-')[0]
    # allocate a fresh chained request id and load its skeleton document
    chain_id = ChainedRequestPrepId().next_id(pwg, self.get_attribute('prepid'))
    new_chain = chained_request(chained_request_db.get(chain_id))
    new_chain.set_attribute('pwg', pwg)
    new_chain.set_attribute('member_of_campaign', self.get_attribute('prepid'))
    new_chain.set_attribute('action_parameters', self.get_attribute('action_parameters'))
    # By default flag should be true
    new_chain.get_attribute('action_parameters')['flag'] = True
    if not new_chain.get_attribute('prepid'):
        raise ValueError('Prepid returned was None')
    # carry the defaults of the root request into the chain
    root_doc = request_db.get(root_request_id)
    new_chain.set_attribute("dataset_name", root_doc["dataset_name"])
    new_chain.set_attribute("pwg", root_doc["pwg"])
    new_chain.set_attribute('chain', [root_request_id])
    new_chain.update_history({'action': 'created'})
    self.update_history({
        'action': 'add request',
        'step': new_chain.get_attribute('_id')
    })
    return new_chain.json()
def reset_all(self, message, what='Chained validation run test', notify_one=None):
    """Flag every request of this chained request as a failed test and rewind it.

    When notify_one is given, only that request id triggers a notification;
    otherwise every request in the chain is notified.
    """
    chained_db = database('chained_requests')
    request_db = database('requests')
    chain_doc = chained_request(chained_db.get(self.crid))
    for request_id in chain_doc.get_attribute('chain'):
        request_obj = request(request_db.get(request_id))
        # notify everyone unless a single target was requested and this isn't it
        should_notify = not (notify_one and notify_one != request_id)
        request_obj.test_failure(message, what=what, rewind=True, with_notification=should_notify)
def delete_request(self, crid):
    """Detach all requests from chained request `crid`, disable its action, then delete it.

    Refuses to proceed when removing the chain would orphan a non-new request,
    or when the action's flag for this chain is still enabled.
    Returns a {"results": ...} dict describing success or the failure reason.
    """
    crdb = database('chained_requests')
    rdb = database('requests')
    adb = database('actions')
    mcm_cr = chained_request(crdb.get(crid))
    mcm_a = None
    ## get all objects
    mcm_r_s = []
    for (i, rid) in enumerate(mcm_cr.get_attribute('chain')):
        mcm_r = request(rdb.get(rid))
        # this is not a valid check as it is allowed to remove a chain around already running requests
        # if mcm_r.get_attribute('status') != 'new':
        #     return {"results":False,"message" : "the request %s part of the chain %s for action %s is not in new status"%( mcm_r.get_attribute('prepid'),
        #                                                                                                                    crid,
        #                                                                                                                    mcm_a.get_attribute('prepid'))}
        in_chains = mcm_r.get_attribute('member_of_chain')
        in_chains.remove(crid)
        mcm_r.set_attribute('member_of_chain', in_chains)
        if i == 0:
            # the root is the action id
            mcm_a = action(adb.get(rid))
            if len(in_chains) == 0 and mcm_r.get_attribute('status') != 'new':
                return {"results": False,
                        "message": "the request %s, not in status new, at the root of the chain will not be chained anymore" % rid}
        else:
            if len(in_chains) == 0:
                return {"results": False,
                        "message": "the request %s, not at the root of the chain will not be chained anymore" % rid}
        mcm_r.update_history({'action': 'leave', 'step': crid})
        # changes are accumulated and only persisted after all checks pass
        mcm_r_s.append(mcm_r)
    ## check if possible to get rid of it !
    # action for the chain is disabled
    chains = mcm_a.get_chains(mcm_cr.get_attribute('member_of_campaign'))
    if chains[crid]['flag']:
        return {"results": False,
                "message": "the action %s for %s is not disabled" % (mcm_a.get_attribute('prepid'), crid)}
    # take it out
    mcm_a.remove_chain(mcm_cr.get_attribute('member_of_campaign'), mcm_cr.get_attribute('prepid'))
    if not adb.update(mcm_a.json()):
        return {"results": False, "message": "Could not save action " + mcm_a.get_attribute('prepid')}
    ## then save all changes
    for mcm_r in mcm_r_s:
        if not rdb.update(mcm_r.json()):
            return {"results": False, "message": "Could not save request " + mcm_r.get_attribute('prepid')}
        else:
            mcm_r.notify("Request {0} left chain".format(mcm_r.get_attribute('prepid')),
                         "Request {0} has successfuly left chain {1}".format(mcm_r.get_attribute('prepid'), crid))
    return {"results": crdb.delete(crid)}
def set_action(self, aid, cc, block, staged=None, threshold=None, reserve=False, special=False):
    """Enable chained campaign `cc` on action `aid` and generate its chained requests.

    `cc` may be an alias or a prepid. Returns the result of generate_request,
    or a {"results": False, ...} dict when the chained campaign or the chain
    entry cannot be found.
    """
    adb = database('actions')
    ccdb = database('chained_campaigns')
    mcm_a = action(adb.get(aid))
    # the chained campaign may be referenced by alias or by prepid
    matches = ccdb.queries(['alias==%s' % cc])
    if not matches:
        matches = ccdb.queries(['prepid==%s' % cc])
        if not matches:
            return {
                "results": False,
                "message": "%s not a chained campaigns" % (cc)
            }
    mcm_cc = chained_campaign(matches[0])
    cc_name = mcm_cc.get_attribute('prepid')
    chains = mcm_a.get_attribute('chains')
    if cc_name not in chains:
        # trigger chain detection on the action and look again
        mcm_a.find_chains()
        chains = mcm_a.get_attribute('chains')
        if cc_name not in chains:
            return {
                "results": False,
                "message": "Not able to find %s for %s" % (cc_name, aid)
            }
    chain_entry = chains[cc_name]
    chain_entry.update({"flag": True, "block_number": block})
    if staged:
        chain_entry['staged'] = staged
    if threshold:
        chain_entry['threshold'] = threshold
    # write the chains mapping back
    mcm_a.set_attribute('chains', chains)
    # save it since it is retrieved from scratch later
    adb.save(mcm_a.json())
    # and generate the chained requests
    return self.generate_request(aid, reserve=reserve, special=special)
def create_flow(self, jsdata):
    """Create a new flow from a posted JSON string.

    Validates the payload (no _rev, has a prepid), de-duplicates the allowed
    campaigns, checks campaign consistency, saves the flow and updates the
    derived campaign objects. Returns a {"results": ...} dict.
    """
    cdb = database('campaigns')
    db = database(self.db_name)
    data = threaded_loads(jsdata)
    if '_rev' in data:
        return {"results": 'Cannot create a flow with _rev'}
    try:
        f = flow(json_input=data)
    except flow.IllegalAttributeName:
        return {"results": False}
    except ValueError as ex:
        self.logger.error('Could not initialize flow object. Reason: %s' % ex)
        return {"results": False}
    if not f.get_attribute('prepid'):
        self.logger.error('prepid is not defined.')
        return {"results": False, 'message': 'Error: PrepId was not defined.'}
    f.set_attribute('_id', f.get_attribute('prepid'))
    # de-duplicate allowed campaigns if duplicates were passed by mistake.
    # FIX: the original compared a length against the list itself
    # (always truthy), so the list was unconditionally rebuilt/reordered.
    allowed_campaigns = f.get_attribute('allowed_campaigns')
    if len(set(allowed_campaigns)) != len(allowed_campaigns):
        f.set_attribute('allowed_campaigns', list(set(allowed_campaigns)))
    self.logger.log('Creating new flow %s ...' % (f.get_attribute('_id')))
    nc = f.get_attribute('next_campaign')
    result = self.are_campaigns_correct(nc, f.get_attribute('allowed_campaigns'), cdb)
    if result is not True:
        return result
    ## adjust the requests parameters based on what was provided as next campaign
    self.set_default_request_parameters(nc, cdb, f)
    # update history
    f.update_history({'action': 'created'})
    # save the flow to db
    if not db.save(f.json()):
        self.logger.error('Could not save newly created flow %s to database.' % (f.get_attribute('_id')))
        return {"results": False}
    # return right away instead of trying and failing on missing next or allowed
    if not nc or not len(f.get_attribute('allowed_campaigns')):
        return {"results": True}
    # update all relevant campaigns with the "Next" parameter
    # (an empty flow is passed as the "old" version since this is a creation)
    return self.update_derived_objects(flow().json(), f.json())
def streaming_function():
    """Email each generator contact a reminder about their MccM tickets in status 'new'.

    Generator: yields progress markers ('.', status lines) so the HTTP
    response can be streamed while tickets are collected and emails sent.
    """
    mccms_db = database('mccms')
    users_db = database('users')
    __query = mccms_db.construct_lucene_query({'status': 'new'})
    mccms_tickets = mccms_db.full_text_search('search', __query, page=-1)
    non_gen_contact_authors = set()  # authors already checked and found not to be generator contacts
    authors_tickets_dict = dict()    # author email -> list of their ticket prepids
    emails_prepids = dict()          # author email -> user document prepid (for notification)
    for ticket in mccms_tickets:
        yield '\nProcessing ticket %s' % (ticket['prepid'])
        mccm_ticket = mccm(json_input=ticket)
        authors = mccm_ticket.get_actors(what='author_email')
        for author_email in authors:
            if author_email in authors_tickets_dict:
                authors_tickets_dict[author_email].append(ticket['prepid'])
            elif author_email not in non_gen_contact_authors:
                # first time we see this author: look up their role once
                __role_query = users_db.construct_lucene_query({'email': author_email})
                result = users_db.full_text_search('search', __role_query, page=-1, include_fields='role,prepid')
                time.sleep(0.5)  # we don't want to crash DB with a lot of single queries
                if result and result[0]['role'] == 'generator_contact':
                    authors_tickets_dict[author_email] = [ticket['prepid']]
                    emails_prepids[author_email] = result[0]['prepid']
                else:
                    non_gen_contact_authors.add(author_email)
        yield '.'
    subject_part1 = 'Gentle reminder on %s '
    subject_part2 = ' to be operated by you'
    message_part1 = 'Dear GEN Contact, \nPlease find below the details of %s MccM '
    message_part2 = ' in status "new". Please present them in next MccM googledoc or cancel tickets if these are not needed anymore.\n\n'
    base_url = locator().baseurl()
    mail_communicator = communicator()
    for author_email, ticket_prepids in authors_tickets_dict.iteritems():
        num_tickets = len(ticket_prepids)
        full_message = (message_part1 % (num_tickets)) + ('ticket' if num_tickets == 1 else 'tickets') + message_part2
        for ticket_prepid in ticket_prepids:
            full_message += 'Ticket: %s \n' % (ticket_prepid)
            full_message += '%smccms?prepid=%s \n\n' % (base_url, ticket_prepid)
            yield '.'
        full_message += '\n'
        subject = (subject_part1 % (num_tickets)) + ('ticket' if num_tickets == 1 else 'tickets') + subject_part2
        # record an in-app notification in addition to the email
        notification(
            subject,
            full_message,
            [emails_prepids[author_email]],
            group=notification.REMINDERS,
            action_objects=ticket_prepids,
            object_type='mccms')
        mail_communicator.sendMail([author_email], subject, full_message)
        yield '\nEmail sent to %s\n' % (author_email)
def rewind_one(self, crid):
    """Move chained request `crid` one step back, resetting the request at the current step.

    Verifies that every request after the current step is in status 'new'
    before resetting. Returns a {"results": ..., "prepid": crid} dict.
    """
    crdb = database('chained_requests')
    rdb = database('requests')
    if not crdb.document_exists(crid):
        return {"results": False, "message": "does not exist", "prepid": crid}
    mcm_cr = chained_request(crdb.get(crid))
    current_step = mcm_cr.get_attribute('step')
    if current_step == 0:
        ## or should it be possible to cancel the initial requests of a chained request
        return {"results": False, "message": "already at the root", "prepid": crid}
    ## supposedly all the other requests were already reset!
    # (renamed loop variable: the original shadowed the builtin `next`)
    for later_id in mcm_cr.get_attribute('chain')[current_step + 1:]:
        ## what if that next one is not in the db
        if not rdb.document_exists(later_id):
            self.logger.error('%s is part of %s but does not exist' % (later_id, crid))
            continue
        mcm_r = request(rdb.get(later_id))
        if mcm_r.get_attribute('status') != 'new':
            # this cannot be right!
            self.logger.error('%s is after the current request and is not new: %s' % (
                later_id, mcm_r.get_attribute('status')))
            return {"results": False, "message": "%s is not new" % (later_id), "prepid": crid}
    ## get the one to be reset
    current_id = mcm_cr.get_attribute('chain')[current_step]
    mcm_r = request(rdb.get(current_id))
    mcm_r.reset()
    saved = rdb.update(mcm_r.json())
    if not saved:
        # FIX: the original built this error dict without returning it,
        # so a failed save fell through as if it had succeeded
        return {"results": False, "message": "could not save the last request of the chain", "prepid": crid}
    ## the current chained request has very likely been updated:
    ## reload it as you have not changed anything to it yet
    mcm_cr = chained_request(crdb.get(crid))
    mcm_cr.set_attribute('step', current_step - 1)
    # set status, last status
    mcm_cr.set_last_status()
    mcm_cr.set_attribute('status', 'processing')
    saved = crdb.update(mcm_cr.json())
    if saved:
        return {"results": True, "prepid": crid}
    return {"results": False,
            "message": "could not save chained requests. the DB is going to be inconsistent !",
            "prepid": crid}
def generate_request(self, root_request_id):
    """Create a new chained_request rooted at `root_request_id` for this chained campaign.

    Returns the new chained_request's JSON document, or {} when the databases
    are unreachable or the root request does not exist.
    Raises ValueError when no prepid could be generated.
    """
    self.logger.log('Building a new chained_request for chained_campaign %s. Root request: %s' % (
        self.get_attribute('_id'), root_request_id))
    try:
        rdb = database('requests')
        crdb = database('chained_requests')
    except database.DatabaseAccessError:
        return {}
    # check to see if root request id exists
    if not rdb.document_exists(root_request_id):
        return {}
    # init new creq
    # parse request id: the PWG is the first token of the prepid
    # (FIX: dropped the unused `camp = tok[1]` local)
    pwg = root_request_id.split('-')[0]
    # generate new chain id
    cid = ChainedRequestPrepId().next_id(pwg, self.get_attribute('prepid'))
    creq = chained_request(crdb.get(cid))
    # set values
    creq.set_attribute('pwg', pwg)
    creq.set_attribute('member_of_campaign', self.get_attribute('prepid'))
    if not creq.get_attribute('prepid'):
        raise ValueError('Prepid returned was None')
    # set the default values that will be carried over to the next step in the chain
    req = rdb.get(root_request_id)
    creq.set_attribute("dataset_name", req["dataset_name"])
    creq.set_attribute("pwg", req["pwg"])
    # add root request to chain
    creq.set_attribute('chain', [root_request_id])
    # update history on both objects
    creq.update_history({'action': 'created'})
    self.update_history({'action': 'add request', 'step': creq.get_attribute('_id')})
    # the caller is responsible for persisting the returned document
    return creq.json()
def GET(self, *args):
    """
    Cancel the MccM ticket provided in argument. Does not delete it but put the status as cancelled.
    """
    if not args:
        return dumps({
            "results": False,
            "message": "No id given to cancel."
        })
    # ROBUSTNESS FIX: guard against unknown prepids instead of crashing in db.get
    db = database('mccms')
    if not db.document_exists(args[0]):
        return dumps({
            "results": False,
            "message": "%s does not exist" % args[0]
        })
    mcm_mccm = mccm(db.get(args[0]))
    # done tickets are final and may not be cancelled
    if mcm_mccm.get_attribute('status') == 'done':
        return dumps({
            "results": False,
            "message": "Cannot cancel done tickets"
        })
    mcm_mccm.set_attribute('status', 'cancelled')
    mcm_mccm.update_history({'action': 'cancelled'})
    saved = db.update(mcm_mccm.json())
    if saved:
        return dumps({"results": True})
    return dumps({
        "results": False,
        "message": "Could not save the ticket to be cancelled."
    })
def import_request(self, data):
    """Import an action document by hand and inspect its priority.

    Returns a {"results": ...} dict; results is True only when the action was
    saved and its priority was set properly.
    """
    adb = database(self.db_name)
    try:
        mcm_a = action(json_input=threaded_loads(data))
    except request.IllegalAttributeName:
        # NOTE(review): catches request.IllegalAttributeName while building an
        # action -- presumably both share the same base exception class; confirm.
        # CONSISTENCY FIX: return a plain dict like every other branch
        # (this branch alone returned dumps(...))
        return {"results": False}
    self.logger.log('Building new action %s by hand...' % (mcm_a.get_attribute('_id')))
    priority_set = mcm_a.inspect_priority()
    saved = adb.update(mcm_a.json())
    if not saved:
        return {"results": False, "prepid": mcm_a.get_attribute('prepid')}
    if priority_set:
        return {
            "results": True,
            "prepid": mcm_a.get_attribute('prepid')
        }
    return {
        "results": False,
        "prepid": mcm_a.get_attribute('prepid'),
        "message": "Priorities not set properly"
    }
def get(self, batch_ids):
    """
    Set batch status to hold (from new) or to new (from hold)
    """
    bdb = database('batches')
    outcome = []
    for batch_id in batch_ids.split(','):
        mcm_b = batch(bdb.get(batch_id))
        # the toggle is only defined between 'new' and 'hold'
        toggled = {'new': 'hold', 'hold': 'new'}.get(mcm_b.get_attribute('status'))
        if toggled is None:
            outcome.append({
                'prepid': batch_id,
                'results': False,
                'message': 'Only status hold or new allowed'
            })
            continue
        mcm_b.set_attribute('status', toggled)
        mcm_b.update_history({'action': 'set status', 'step': toggled})
        bdb.update(mcm_b.json())
        outcome.append({'prepid': batch_id, 'results': True})
    return outcome
def change_role(self, username, action):
    """Promote or demote `username` by one step in the role ladder.

    action: '1' promotes to the next role, '-1' demotes to the previous one.
    Returns a {"results": ...} dict carrying the DB update outcome or an
    explanatory string.
    """
    db = database(self.db_name)
    doc = user(db.get(username))
    current_role = doc.get_attribute("role")
    if action == '-1':
        if current_role != self.all_roles[0]:
            doc.set_attribute(
                "role", self.all_roles[self.all_roles.index(current_role) - 1])
            # keep the authenticator's view of the role in sync
            authenticator.set_user_role(username, doc.get_attribute("role"))
            doc.update_history({
                'action': 'decrease',
                'step': doc.get_attribute("role")
            })
            return {"results": db.update(doc.json())}
        return {"results": username + " already is user"}  # else return that hes already a user
    if action == '1':
        if len(self.all_roles) != self.all_roles.index(
                current_role) + 1:  # if current role is not the top one
            doc.set_attribute(
                "role", self.all_roles[self.all_roles.index(current_role) + 1])
            authenticator.set_user_role(username, doc.get_attribute("role"))
            doc.update_history({
                'action': 'increase',
                'step': doc.get_attribute("role")
            })
            return {"results": db.update(doc.json())}
        return {"results": username + " already has top role"}
    # FIX: this line was corrupted in the source ("..."******" role", a syntax
    # error); reconstructed as plain string concatenation
    return {"results": "Failed to update user: " + username + " role"}
def read_logs(self, pid, nlines):
    """Return the last `nlines` injection-log lines mentioning prepid `pid`, as HTML.

    nlines <= 0 returns all matching lines. Returns a {"results": ...} error
    dict for unknown prepids or unreadable log files; raises ValueError when
    the start-of-injection marker cannot be found.
    """
    db = database(self.db_name)
    if not db.document_exists(pid):
        self.logger.error('Given prepid "%s" does not exist in the database.' % pid)
        return {"results": 'Error:Given prepid "%s" does not exist in the database.' % pid}
    try:
        data = open(self.logfile).read()
    except IOError as ex:
        self.logger.error('Could not access logs: "%s". Reason: %s' % (self.logfile, ex))
        return {"results": "Error: Could not access logs."}
    # important = data[data.rindex('## Logger instance retrieved'):]
    # needs this otherwise, simultaneous submission would be truncated to the last to write ## Logger instance retrieved
    important = data[data.rindex('[%s] ## Logger instance retrieved' % (pid)):]
    if not important:
        raise ValueError('Malformed logs. Could not detect start of injection.')
    # keep only the lines that mention this prepid
    lines = filter(lambda l: pid in l, important.rsplit('\n'))
    if (nlines > 0):
        lines = lines[-nlines:]
    res = ''
    for line in lines:
        # translate the log's <breakline> placeholder into a real HTML break
        res += '%s<br>' % (line.replace('<breakline>', '<br>'))
    return res
def announce(self, data):
    """Announce a batch, or each batch of a list, with the notes from `data`.

    `data['prepid']` may be a single prepid or a list of prepids; `data['notes']`
    is the announcement text. Raises ValueError when either key is missing.
    """
    if 'prepid' not in data or 'notes' not in data:
        raise ValueError('no prepid nor notes in batch announcement api')
    bdb = database('batches')
    bid = data['prepid']
    res = []
    if bid.__class__ == list:
        ## if it's multiple announce iterate on the list of prepids
        for el in bid:
            if not bdb.document_exists(el):
                res.append({
                    "results": False,
                    "message": "%s is not a valid batch name" % el
                })
            # NOTE(review): announce is attempted even when the batch name was
            # just reported invalid -- confirm a `continue` is not missing above
            res.append(self.announce_with_text(el, data['notes']))
    else:
        if not bdb.document_exists(bid):
            res = {
                "results": False,
                "message": "%s is not a valid batch name" % bid
            }
        # NOTE(review): this unconditionally overwrites the error dict above --
        # confirm announcing unknown batch names is really intended
        res = self.announce_with_text(bid, data['notes'])
    return res
def GET(self, *args):
    """
    Give out the list of releases that are being used through McM campaigns in optional status /status
    """
    status = args[0] if len(args) else None
    cdb = database('campaigns')
    # restrict to the requested campaign status when one was given
    selector = ['status==%s' % status] if status else []
    campaigns = cdb.queries(selector)
    releases_set = set()
    releases = defaultdict(list)
    for camp in campaigns:
        release = camp['cmssw_release']
        if release and release not in releases[camp['status']]:
            releases[camp['status']].append(release)
            releases_set.add(release)
    ## extend to submitted requests, or chained requests that will get in the system ?
    return dumps({"results": releases, "set": list(releases_set)})
def get_setting(self, data):
    """Return the value of setting `data`, or empty results when it does not exist."""
    db = database('settings')
    if db.document_exists(data):
        return {"results": settings().get(data)}
    self.logger.error('Setting for {0} does not exist'.format(data))
    return {"results": {}}
def read_logs(self, pid, nlines):
    """Return the last `nlines` injection-log lines mentioning prepid `pid`, as HTML.

    nlines <= 0 returns all matching lines. Returns a {"results": ...} error
    dict for unknown prepids or unreadable log files; raises ValueError when
    the start-of-injection marker cannot be found.
    """
    db = database(self.db_name)
    if not db.document_exists(pid):
        self.logger.error(
            'Given prepid "%s" does not exist in the database.' % pid)
        return {
            "results": 'Error:Given prepid "%s" does not exist in the database.' % pid
        }
    try:
        data = open(self.logfile).read()
    except IOError as ex:
        self.logger.error('Could not access logs: "%s". Reason: %s' % (self.logfile, ex))
        return {"results": "Error: Could not access logs."}
    # important = data[data.rindex('## Logger instance retrieved'):]
    # needs this otherwise, simultaneous submission would be truncated to the last to write ## Logger instance retrieved
    important = data[data.rindex('[%s] ## Logger instance retrieved' % (pid)):]
    if not important:
        raise ValueError(
            'Malformed logs. Could not detect start of injection.')
    # keep only the lines that mention this prepid
    lines = filter(lambda l: pid in l, important.rsplit('\n'))
    if (nlines > 0):
        lines = lines[-nlines:]
    res = ''
    for line in lines:
        # translate the log's <breakline> placeholder into a real HTML break
        res += '%s<br>' % (line.replace('<breakline>', '<br>'))
    return res
def update(self, body):
    """Update a setting document after revision and editability checks.

    Rejects payloads without _rev, unknown ids, stale revisions, and changes
    to non-editable fields. Returns a {"results": ...} dict.
    """
    data = threaded_loads(body)
    db = database('settings')
    if '_rev' not in data:
        self.logger.error('Could not locate the CouchDB revision number in object: %s' % data)
        return {"results": False, 'message': 'could not locate revision number in the object'}
    if not db.document_exists(data['_id']):
        # NOTE(review): message says 'mccm' but this is the settings database --
        # looks like a copy/paste leftover; confirm before changing the message
        return {"results": False, 'message': 'mccm %s does not exist' % (data['_id'])}
    else:
        # optimistic-locking check against the stored revision
        if db.get(data['_id'])['_rev'] != data['_rev']:
            return {"results": False, 'message': 'revision clash'}
    new_version = setting(json_input=data)
    if not new_version.get_attribute('prepid') and not new_version.get_attribute('_id'):
        self.logger.error('Prepid returned was None')
        raise ValueError('Prepid returned was None')
    ## operate a check on whether it can be changed
    previous_version = setting(db.get(new_version.get_attribute('prepid')))
    editable = previous_version.get_editable()
    for (key, right) in editable.items():
        # does not need to inspect the ones that can be edited
        if right:
            continue
        if previous_version.get_attribute(key) != new_version.get_attribute(key):
            self.logger.error('Illegal change of parameter, %s: %s vs %s : %s' % (
                key, previous_version.get_attribute(key), new_version.get_attribute(key), right))
            return {"results": False, 'message': 'Illegal change of parameter %s' % key}
    self.logger.log('Updating setting %s...' % (new_version.get_attribute('prepid')))
    return {"results": settings().set(new_version.get_attribute('prepid'), new_version.json())}
def PUT(self, *args):
    """
    Parse the posted text document for request id and request ranges for display of actions
    """
    adb = database('actions')
    # collect the ids from the posted body, then resolve them to documents
    identifiers = self.get_list_of_ids(adb)
    return dumps(self.get_objects(identifiers, adb))
def add_allowed_campaign(self, cid):
    """Append campaign `cid` to this flow's allowed campaigns.

    Returns True on success, False when the database layer is unavailable.
    Raises CampaignDoesNotExistException for unknown campaigns and
    DuplicateCampaignEntry when the campaign is already allowed.
    """
    self.logger.info('Adding new allowed campaign to flow %s' % (self.get_attribute('_id')))
    try:
        from couchdb_layer.mcm_database import database
    except ImportError as ex:
        self.logger.error('Could not import database connector class. Reason: %s' % (ex))
        return False
    # initialize database connector
    try:
        cdb = database('campaigns')
    except database.DatabaseAccessError:
        return False
    # the campaign must exist before it can be allowed
    if not cdb.document_exists(cid):
        raise self.CampaignDoesNotExistException(cid)
    allowed = self.get_attribute('allowed_campaigns')
    if cid in allowed:
        raise self.DuplicateCampaignEntry(cid)
    # append and make persistent
    allowed.append(cid)
    self.set_attribute('allowed_campaigns', allowed)
    self.update_history({'action': 'add allowed campaign', 'step': cid})
    return True
def create_campaign(self, data):
    """Create a new campaign from a posted JSON string and its dedicated chained campaign.

    Returns {"results": True} on success, {"results": False} on any
    validation or persistence failure.
    """
    db = database('campaigns')
    try:
        camp_mcm = campaign(json_input=threaded_loads(data))
    except campaign.IllegalAttributeName:
        return {"results": False}
    prepid = camp_mcm.get_attribute('prepid')
    if not prepid:
        self.logger.error('Invalid prepid: Prepid returned None')
        return {"results": False}
    # underscores are reserved (used to denote chain steps)
    if '_' in prepid:
        self.logger.error('Invalid campaign name %s' % (prepid))
        return {"results": False}
    camp_mcm.set_attribute('_id', prepid)
    camp_mcm.update_history({'action': 'created'})
    ## this is to create, not to update: refuse to clobber an existing campaign
    if db.document_exists(prepid):
        return {"results": False}
    if not db.save(camp_mcm.json()):
        self.logger.error('Could not save object to database')
        return {"results": False}
    # create dedicated chained campaign
    self.create_chained_campaign(camp_mcm.get_attribute('_id'), db)
    return {"results": True}
def get_doc(self, mccm_id):
    """Fetch the MccM ticket document `mccm_id`; empty results when it does not exist."""
    db = database('mccms')
    if not db.document_exists(mccm_id):
        return {"results": {}}
    return {"results": db.get(prepid=mccm_id)}
def create_chained_campaign(self, cid, db):
    """Create the trivial chained campaign 'chain_<cid>' for a non-root campaign `cid`."""
    # only non-root campaigns (root < 1) get a dedicated trivial chain
    if db.get(cid)['root'] >= 1:
        return
    cdb = database('chained_campaigns')
    trivial_chain = chained_campaign({'prepid': 'chain_' + cid, '_id': 'chain_' + cid})
    trivial_chain.add_campaign(cid)  # flow_name = None
    cdb.save(trivial_chain.json())
def flow2(self, data):
    """Flow the chained request identified by data['prepid'] to its next step.

    Honors optional input_dataset, block black/white lists, 'force' (skip the
    statistics check) and 'reserve' (reserve up to a limit instead of flowing).
    Returns the flow/reserve result, or {"results": <error>} on init failure.
    """
    db = database('chained_requests')
    chain_id = data['prepid']
    try:
        creq = chained_request(json_input=db.get(chain_id))
    except Exception as ex:
        self.logger.error('Could not initialize chained_request object. Reason: %s' % (ex))
        return {"results": str(ex)}
    self.logger.info('Attempting to flow to next step for chained_request %s' % (creq.get_attribute('_id')))
    # if the chained_request can flow, do it
    inputds = ''
    inblack = []
    inwhite = []
    if 'input_dataset' in data:
        inputds = data['input_dataset']
    if 'block_black_list' in data:
        inblack = data['block_black_list']
    if 'block_white_list' in data:
        inwhite = data['block_white_list']
    # FIX: check_stats was only bound when 'force' was present, raising a
    # NameError on the flow_trial call below for plain flow requests;
    # default to checking the statistics
    check_stats = True
    if 'force' in data:
        check_stats = data['force'] != 'force'
    if 'reserve' in data and data["reserve"]:
        reserve = data["reserve"]
        return creq.reserve(limit=reserve)
    return creq.flow_trial(inputds, inblack, inwhite, check_stats)
def multiple_inspect(self, ccids):
    """Inspect every 'processing'/'last_status==done' chained request of the given campaigns.

    `ccids` is a comma-separated list of chained campaign prepids. Returns a
    list of inspection results, a single result when there is only one, or []
    when nothing matched.
    """
    crdb = database('chained_requests')
    collected = []
    for campaign_id in ccids.split(','):
        matching = crdb.queries([
            "member_of_campaign==%s" % campaign_id,
            "last_status==done",
            "status==processing"
        ])
        self.logger.log('crlist %s in chained_camp %s ' % (matching, campaign_id))
        for cr in matching:
            mcm_cr = chained_request(cr)
            if mcm_cr:
                collected.append(mcm_cr.inspect())
            else:
                collected.append({
                    "prepid": cr,
                    "results": False,
                    'message': '%s does not exist' % cr
                })
    # unwrap single results for backwards-compatible output shape
    if len(collected) > 1:
        return collected
    if collected:
        return collected[0]
    return []
def import_request(self, data):
    """Import a chained request from a JSON string, generating a prepid when missing.

    Returns the result of save_request, or {"results": False} when mandatory
    attributes are missing or no prepid could be generated; raises ValueError
    when the resulting object still has no prepid.
    """
    db = database(self.db_name)
    json_input = loads(data)
    if 'pwg' not in json_input or 'member_of_campaign' not in json_input:
        self.logger.error(
            'Now pwg or member of campaign attribute for new chained request'
        )
        return {"results": False}
    if 'prepid' in json_input:
        # caller supplied a prepid: build the object straight from the payload
        req = chained_request(json_input)
        cr_id = req.get_attribute('prepid')
    else:
        cr_id = ChainedRequestPrepId().next_id(
            json_input['pwg'], json_input['member_of_campaign'])
        if not cr_id:
            return {"results": False}
        req = chained_request(db.get(cr_id))
        # copy the posted fields onto the freshly created skeleton document
        for key in json_input:
            if key not in ['prepid', '_id', '_rev', 'history']:
                req.set_attribute(key, json_input[key])
    if not req.get_attribute('prepid'):
        self.logger.error('prepid returned was None')
        raise ValueError('Prepid returned was None')
    self.logger.info('Created new chained_request %s' % cr_id)
    # update history with the submission details
    req.update_history({'action': 'created'})
    return self.save_request(db, req)
def set_processing_status(self, pid=None, status=None):
    """Update this chained request's status from the status of the request at the current step.

    When pid/status are not provided they are read from the request document
    at the current chain step. Returns True when the chained request's status
    was changed, False otherwise.
    """
    if not pid or not status:
        rdb = database('requests')
        step_r = rdb.get(
            self.get_attribute('chain')[self.get_attribute('step')])
        pid = step_r['prepid']
        status = step_r['status']
    if pid == self.get_attribute('chain')[self.get_attribute('step')]:
        # expected final step index -- presumably derived from the number of
        # underscores in the chained prepid (one per flow); TODO confirm
        expected_end = max(0, self.get_attribute('prepid').count('_') - 1)
        current_status = self.get_attribute('status')
        ## the current request is the one the status has just changed
        self.logger.log(
            'processing status %s given %s and at %s and stops at %s ' %
            (current_status, status, self.get_attribute('step'), expected_end))
        if self.get_attribute(
                'step'
        ) == expected_end and status == 'done' and current_status == 'processing':
            ## you're supposed to be in processing status
            self.set_status()
            return True
        ## only when updating with a submitted request status do we change to processing
        if status in ['submitted'] and current_status == 'new':
            self.set_status()
            return True
        return False
    else:
        # the reported request is not the one at the current step: nothing to do
        return False
def update_total_events(self):
    """
    calculate total_evts for request list
    """
    requests_db = database('requests')
    prepids = self.get_request_list(self.get_attribute("requests"))
    fetched = []
    # fetch in batches of 20 prepids to keep each lucene query small
    for start in range(0, len(prepids), 20):
        query = requests_db.construct_lucene_query(
            {'prepid': prepids[start:start + 20]}, boolean_operator='OR')
        fetched += requests_db.full_text_search("search", query, page=-1)
    # map prepid -> total_events (0 when the field is absent)
    totals_by_prepid = {}
    for fetched_request in fetched:
        totals_by_prepid[fetched_request['prepid']] = fetched_request.get('total_events', 0)
    total = sum(totals_by_prepid.get(prepid, 0) for prepid in prepids)
    self.set_attribute('total_events', total)
def flow2(self, data):
    """Flow the chained request named in the posted JSON string to its next step.

    Honors optional input_filename, block black/white lists, 'force' (skip the
    statistics check) and 'reserve'. Returns the flow/reserve result, or
    {"results": <error>} on parse/init failure.
    """
    try:
        vdata = loads(data)
    except ValueError as ex:
        self.logger.error('Could not start flowing to next step. Reason: %s' % (ex))
        return {"results": str(ex)}
    db = database('chained_requests')
    try:
        creq = chained_request(json_input=db.get(vdata['prepid']))
    except Exception as ex:
        self.logger.error('Could not initialize chained_request object. Reason: %s' % (ex))
        return {"results": str(ex)}
    self.logger.log('Attempting to flow to next step for chained_request %s' % (creq.get_attribute('_id')))
    # if the chained_request can flow, do it
    inputds = ''
    inblack = []
    inwhite = []
    if 'input_filename' in vdata:
        inputds = vdata['input_filename']
    if 'block_black_list' in vdata:
        inblack = vdata['block_black_list']
    if 'block_white_list' in vdata:
        inwhite = vdata['block_white_list']
    # FIX: check_stats was only bound when 'force' was present, raising a
    # NameError on the flow_trial call below; default to checking statistics
    check_stats = True
    if 'force' in vdata:
        check_stats = vdata['force'] != 'force'
    if 'reserve' in vdata and vdata["reserve"]:
        return creq.reserve()
    return creq.flow_trial(inputds, inblack, inwhite, check_stats)
def get(self, chained_request_id):
    """Return the setup/test/validation script for the given chained request.

    Request arguments (directory, events, scratch) are read from the parser;
    self.opt selects plain setup, 'test' or 'valid' mode.
    """
    kwargs = self.parser.parse_args()
    crdb = database('chained_requests')
    if not crdb.document_exists(chained_request_id):
        return {
            "results": False,
            "message": "Chained request with prepid {0} does not exist".format(
                chained_request_id)
        }
    cr = chained_request(crdb.get(chained_request_id))
    # 'test' and 'valid' both run; 'valid' additionally enables validation
    for_run = self.opt in ('test', 'valid')
    for_validation = self.opt == 'valid'
    from_scratch = kwargs["scratch"].lower() == 'true'
    return cr.get_setup(directory=kwargs['directory'],
                        run=for_run,
                        events=kwargs['events'],
                        validation=for_validation,
                        scratch=from_scratch)