def notify_batch(self, batch_id, message_notes):
    message = message_notes
    to_who = [settings.get_value('service_account')]
    l_type = locator()
    if l_type.isDev():
        to_who.append(settings.get_value('hypernews_test'))
    else:
        to_who.append(settings.get_value('dataops_announce'))

    single_batch = batch(self.bdb.get(batch_id))
    subject = single_batch.get_subject('[Notification]')
    current_message_id = single_batch.get_attribute('message_id')
    self.logger.info('current msgID: %s' % current_message_id)
    if current_message_id != '':
        result = single_batch.notify(subject,
                                     message,
                                     who=to_who,
                                     sender=None,
                                     reply_msg_ID=current_message_id)
        self.logger.info('result if True : %s' % result)
    else:
        result = single_batch.notify(subject, message, who=to_who, sender=None)
        self.logger.info('result if False : %s' % result)

    notification(
        subject,
        message,
        [],
        group=notification.BATCHES,
        target_role='production_manager',
        action_objects=[single_batch.get_attribute('prepid')],
        object_type='batches',
        base_object=single_batch)
    single_batch.update_history({'action': 'notify', 'step': message})
    single_batch.reload()
    return {'results': result}
def process_request_wrong_time_event(self, mcm_request, member_of_chain=None):
    """
    Returns whether this request should be validated again
    """
    validations_count = mcm_request.get_validations_count()
    max_validations = settings.get_value('max_validations')
    request_prepid = mcm_request.get_attribute('prepid')
    subject = 'Validation will %sbe retried for %s' % (
        "" if validations_count < max_validations else "NOT ",
        request_prepid)
    if member_of_chain is not None:
        message = 'Validation for request %s in chain %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid,
            member_of_chain,
            "" if validations_count < max_validations else "NOT ",
            validations_count,
            max_validations)
    else:
        message = 'Validation for request %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid,
            "" if validations_count < max_validations else "NOT ",
            validations_count,
            max_validations)

    self.logger.info(message)
    notification(
        subject,
        message,
        [],
        group=notification.REQUEST_OPERATIONS,
        action_objects=[request_prepid],
        object_type='requests',
        base_object=mcm_request)
    mcm_request.notify(subject, message)
    return validations_count < max_validations
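# A minimal, hypothetical refactoring sketch: the "will/will NOT be retried"
# phrasing above is built three times, so a small helper could centralize it.
# The name `retry_phrase` is an assumption, not part of the existing code.
def retry_phrase(validations_count, max_validations):
    """Return '' when another retry is allowed, 'NOT ' otherwise."""
    return "" if validations_count < max_validations else "NOT "

# Example usage (hypothetical):
#   subject = 'Validation will %sbe retried for %s' % (
#       retry_phrase(validations_count, max_validations), request_prepid)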
def multiple_inspect(self, cid, in_statuses=['submitted', 'approved']):
    clist = list(set(cid.rsplit(',')))
    res = []
    rdb = database('requests')
    index = 0
    self.logger.error("Chain inspect begin. Number of chains to be inspected: %s" % (len(clist)))
    try:
        while len(clist) > index:
            yield dumps({"current cr element": "%s/%s" % (index, len(clist))}, indent=2)
            query = rdb.construct_lucene_complex_query([
                ('member_of_campaign', {'value': clist[index: index + 1]}),
                ('status', {'value': in_statuses})
            ])
            # do another loop over the requests themselves
            req_page = 0
            request_res = rdb.full_text_search('search', query, page=req_page)
            while len(request_res) > 0:
                self.logger.info("inspecting single requests. page: %s" % (req_page))
                for r in request_res:
                    self.logger.info("running inspect on request: %s" % (r['prepid']))
                    mcm_r = request(r)
                    if mcm_r:
                        # making it as a stream
                        yield dumps(mcm_r.inspect(), indent=4)
                    else:
                        # making it as a stream
                        yield dumps({"prepid": r,
                                     "results": False,
                                     'message': '%s does not exist' % (r)}, indent=4)

                req_page += 1
                request_res = rdb.full_text_search('search', query, page=req_page)
                time.sleep(0.5)

            index += 1
            time.sleep(1)
    except Exception as e:
        subject = "Exception while inspecting request "
        message = "Request: %s \n %s traceback: \n %s" % (
            mcm_r.get_attribute('prepid'), str(e), traceback.format_exc())
        self.logger.error(subject + message)
        notification(
            subject,
            message,
            [],
            group=notification.REQUEST_OPERATIONS,
            action_objects=[mcm_r.get_attribute('prepid')],
            object_type='requests',
            base_object=mcm_r)
        mcm_r.notify(subject, message, accumulate=True)

    self.logger.info("Campaign inspection finished!")
def streaming_function():
    mccms_db = database('mccms')
    users_db = database('users')
    __query = mccms_db.construct_lucene_query({'status': 'new'})
    mccms_tickets = mccms_db.full_text_search('search', __query, page=-1)
    non_gen_contact_authors = set()
    authors_tickets_dict = dict()
    emails_prepids = dict()
    for ticket in mccms_tickets:
        yield '\nProcessing ticket %s' % (ticket['prepid'])
        mccm_ticket = mccm(json_input=ticket)
        authors = mccm_ticket.get_actors(what='author_email')
        for author_email in authors:
            if author_email in authors_tickets_dict:
                authors_tickets_dict[author_email].append(ticket['prepid'])
            elif author_email not in non_gen_contact_authors:
                __role_query = users_db.construct_lucene_query({'email': author_email})
                result = users_db.full_text_search('search', __role_query, page=-1, include_fields='role,prepid')
                time.sleep(0.5)  # we don't want to crash DB with a lot of single queries
                if result and result[0]['role'] == 'generator_contact':
                    authors_tickets_dict[author_email] = [ticket['prepid']]
                    emails_prepids[author_email] = result[0]['prepid']
                else:
                    non_gen_contact_authors.add(author_email)

            yield '.'

    subject_part1 = 'Gentle reminder on %s '
    subject_part2 = ' to be operated by you'
    message_part1 = 'Dear GEN Contact, \nPlease find below the details of %s MccM '
    message_part2 = ' in status "new". Please present them in next MccM googledoc or cancel tickets if these are not needed anymore.\n\n'
    base_url = locator().baseurl()
    mail_communicator = communicator()
    for author_email, ticket_prepids in authors_tickets_dict.iteritems():
        num_tickets = len(ticket_prepids)
        full_message = (message_part1 % (num_tickets)) + ('ticket' if num_tickets == 1 else 'tickets') + message_part2
        for ticket_prepid in ticket_prepids:
            full_message += 'Ticket: %s \n' % (ticket_prepid)
            full_message += '%smccms?prepid=%s \n\n' % (base_url, ticket_prepid)
            yield '.'

        full_message += '\n'
        subject = (subject_part1 % (num_tickets)) + ('ticket' if num_tickets == 1 else 'tickets') + subject_part2
        notification(
            subject,
            full_message,
            [emails_prepids[author_email]],
            group=notification.REMINDERS,
            action_objects=ticket_prepids,
            object_type='mccms')
        mail_communicator.sendMail([author_email], subject, full_message)
        yield '\nEmail sent to %s\n' % (author_email)
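# A minimal, hypothetical sketch of the grouping pattern used above: tickets are
# bucketed per author e-mail before a single reminder goes to each GEN contact.
# `collections.defaultdict` avoids the explicit membership check; the names here
# are illustrative and not part of the McM code.
from collections import defaultdict

def group_tickets_by_author(ticket_authors):
    """ticket_authors: iterable of (ticket_prepid, author_email) pairs."""
    grouped = defaultdict(list)
    for ticket_prepid, author_email in ticket_authors:
        grouped[author_email].append(ticket_prepid)
    return dict(grouped)

# Example (hypothetical data):
#   group_tickets_by_author([('TIC-001', 'a@cern.ch'), ('TIC-002', 'a@cern.ch')])
#   -> {'a@cern.ch': ['TIC-001', 'TIC-002']}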
def send_email_failure(self, output, error):
    com = communicator()
    users_db = database('users')
    query = users_db.construct_lucene_query({'role': 'production_manager'})
    production_managers = users_db.full_text_search('search', query, page=-1)
    subject = "There was an error while trying to approve workflows"
    text = "Workflows: %s\nOutput:\n%s\nError output: \n%s" % (
        self.workflows, output, error)
    notification(
        subject,
        text,
        [],
        group=notification.REQUEST_OPERATIONS,
        target_role="production_manager")
    com.sendMail(map(lambda u: u['email'], production_managers), subject, text)
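# Hypothetical sketch (not part of the existing code): the recipient list built
# from the production_manager query may contain duplicate addresses, so a small
# de-duplicating helper could be applied before sendMail. Names are illustrative.
def unique_emails(users):
    """Return each user's e-mail once, preserving the original order."""
    seen = set()
    emails = []
    for u in users:
        email = u['email']
        if email not in seen:
            seen.add(email)
            emails.append(email)
    return emails

# Example usage (hypothetical):
#   com.sendMail(unique_emails(production_managers), subject, text)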
def get(self, block_threshold=0):
    """
    Send a reminder to the production managers for existing opened mccm documents
    """
    mdb = database('mccms')
    udb = database('users')
    __query = mdb.construct_lucene_query({'status': 'new'})
    mccms = mdb.full_text_search('search', __query, page=-1)
    mccms = filter(lambda m: m['block'] <= block_threshold, mccms)
    mccms = sorted(mccms, key=lambda m: m['block'])
    if len(mccms) == 0:
        return {
            "results": True,
            "message": "nothing to remind of at level %s, %s" % (block_threshold, mccms)}

    l_type = locator()
    com = communicator()
    subject = 'Gentle reminder on %s tickets to be operated by you' % (len(mccms))
    message = '''\
Dear Production Managers,

please find below the details of %s opened MccM tickets that need to be operated.

''' % (len(mccms))
    mccm_prepids = []
    for _mccm in mccms:
        prepid = _mccm['prepid']
        message += 'Ticket : %s (block %s)\n' % (prepid, _mccm['block'])
        message += ' %smccms?prepid=%s \n\n' % (l_type.baseurl(), prepid)
        mccm_prepids.append(prepid)

    message += '\n'
    to_who = [settings.get_value('service_account')]
    to_who.extend(map(lambda u: u['email'],
                      udb.query(query="role==production_manager", page_num=-1)))
    notification(
        subject,
        message,
        [],
        group=notification.REMINDERS,
        action_objects=mccm_prepids,
        object_type='mccms',
        target_role='production_manager')
    com.sendMail(to_who, subject, message)
    return {"results": True, "message": map(lambda m: m['prepid'], mccms)}
def get(self, pwgs):
    """
    Ask for the increase of the role of the current user to the given pwg
    """
    # get who's there
    user_p = user_pack()
    udb = database(self.db_name)
    mcm_u = user(udb.get(user_p.get_username()))
    # get the requested pwgs
    pwgs = pwgs.split(',')
    # set the pwgs to the current user
    current = mcm_u.get_attribute('pwg')
    current = list(set(current + pwgs))
    mcm_u.set_attribute('pwg', current)
    mcm_u.update_history({'action': 'ask role', 'step': pwgs})
    udb.update(mcm_u.json())
    # get the production managers emails
    __query = udb.construct_lucene_query({'role': 'production_manager'})
    production_managers = udb.full_text_search('search', __query, page=-1)
    # send a notification to prod manager + service
    to_who = map(lambda u: u['email'], production_managers) + [
        settings.get_value('service_account')]
    to_who.append(user_p.get_email())
    com = communicator()
    l_type = locator()
    subject = 'Increase role for user %s' % mcm_u.get_attribute('fullname')
    message = 'Please increase the role of the user %s to the next level.\n\n%susers?prepid=%s' % (
        mcm_u.get_attribute('username'),
        l_type.baseurl(),
        mcm_u.get_attribute('username'))
    notification(
        subject,
        message,
        [],
        group=notification.USERS,
        action_objects=[mcm_u.get_attribute('prepid')],
        object_type='users',
        target_role='production_manager')
    com.sendMail(to_who, subject, message)
    return {
        "results": True,
        "message": "user %s in for %s" % (mcm_u.get_attribute('username'), current)}
def process_finished_chain_success(self, prepid, doc_info, job_out, error_out, log_out):
    mcm_chained_request = chained_request(self.chained_request_db.get(prepid))
    requests_in_chain = []
    success = True
    failed_request_prepid = None
    for request_prepid, doc_rev in doc_info[self.CHAIN_REQUESTS].iteritems():
        mcm_request = request(self.request_db.get(request_prepid))
        # Increase counters for all requests, but save error message only for the first failed request
        if success and doc_rev != mcm_request.json()[self.DOC_REV]:
            message = 'The request %s in the chain %s has changed during the run test procedure, preventing it from being saved' % (
                request_prepid, prepid)
            success = False
            failed_request_prepid = request_prepid

        mcm_request.inc_validations_counter()
        mcm_request.reload()

    if not success:
        mcm_chained_request.reset_requests(message, notify_one=failed_request_prepid)
        return

    for request_prepid, doc_rev in doc_info[self.CHAIN_REQUESTS].iteritems():
        self.logger.info('Processing request %s in chain %s' % (request_prepid, prepid))
        mcm_request = request(self.request_db.get(request_prepid))
        success = True
        path = self.test_directory_path + prepid + '/'
        try:
            success, error = mcm_request.pickup_all_performance(path)
            error = 'Error:\n%s\n Error out:\n%s\n' % (error, error_out)
        except request.WrongTimeEvent as ex:
            self.logger.error('Exception: %s' % (ex))
            error = 'Exception:\n%s\n' % (ex)
            success = False
            retry_validation = self.process_request_wrong_time_event(mcm_request, prepid)
            if retry_validation:
                return

        if not success:
            message = 'Error while picking up all the performance for request %s of chain %s: \n Error:\n%s\n Job out:\n%s\n Error out: \n%s\n Log out: \n%s\n' % (
                request_prepid, prepid, error, job_out, error_out, log_out)
            mcm_chained_request.reset_requests(message, notify_one=request_prepid)
            return

        requests_in_chain.append(mcm_request)

    for mcm_request in requests_in_chain:
        mcm_request.set_status(with_notification=True)
        if not self.request_db.update(mcm_request.json()):
            request_prepid = mcm_request.get_attribute('prepid')
            message = "The request %s of chain %s could not be saved after the runtest procedure" % (
                request_prepid, prepid)
            self.logger.error(message)
            # reset it and keep saving requests
            mcm_request.test_failure(
                message=message,
                what='Chain validation run test',
                rewind=True)

    mcm_chained_request.reload(save_current=False)  # setting new requests status changes the chain object
    mcm_chained_request.set_attribute('validate', 0)
    if not self.chained_request_db.update(mcm_chained_request.json()):
        message = 'Problem saving changes in chain %s, set validate = False ASAP!' % prepid
        self.logger.error(message)
        notification(
            'Chained validation run test',
            message,
            [],
            group=notification.CHAINED_REQUESTS,
            action_objects=[mcm_chained_request.get_attribute('prepid')],
            object_type='chained_requests',
            base_object=mcm_chained_request)
        mcm_chained_request.notify('Chained validation run test', message)
        return

    self.logger.info('Validation job for prepid %s SUCCESSFUL!!!' % prepid)
def generate_chained_requests(self, mccm_ticket, request_prepid, mcm_chained_campaign,
                              reserve=False, with_notify=True, special=False):
    try:
        mcm_chained_campaign.reload(save_current=False)
        generated_chained_request = chained_request(mcm_chained_campaign.generate_request(request_prepid))
    except Exception as e:
        # the format string needs a third placeholder for the exception text
        message = "Unable to generate chained request for ticket %s request %s, message: %s" % (
            mccm_ticket.get_attribute('prepid'), request_prepid, str(e))
        self.logger.error(message)
        return {
            "results": False,
            "message": message}

    requests_db = database('requests')
    self.overwrite_action_parameters_from_ticket(generated_chained_request, mccm_ticket)
    mcm_request = request(json_input=requests_db.get(request_prepid))
    generated_chained_request.set_attribute('last_status', mcm_request.get_attribute('status'))
    if generated_chained_request.get_attribute('last_status') in ['submitted', 'done']:
        generated_chained_request.set_attribute('status', 'processing')

    if special:
        generated_chained_request.set_attribute('approval', 'none')

    new_chain_prepid = generated_chained_request.get_attribute('prepid')
    if not generated_chained_request.reload():
        return {
            'results': False,
            'message': 'Unable to save chained request %s' % new_chain_prepid}

    # update the history of chained campaign
    mcm_chained_campaign.save()
    # let the root request know that it is part of a chained request
    chains = mcm_request.get_attribute('member_of_chain')
    chains.append(new_chain_prepid)
    # keep the list of chains unique and sorted
    mcm_request.set_attribute('member_of_chain', sorted(set(chains)))
    mcm_request.update_history({'action': 'join chain', 'step': new_chain_prepid})
    if with_notify:
        subject = "Request %s joined chain" % mcm_request.get_attribute('prepid')
        message = "Request %s has successfully joined chain %s" % (
            mcm_request.get_attribute('prepid'), new_chain_prepid)
        notification(
            subject,
            message,
            [],
            group=notification.REQUEST_OPERATIONS,
            action_objects=[mcm_request.get_attribute('prepid')],
            object_type='requests',
            base_object=mcm_request)
        mcm_request.notify(subject, message, Nchild=0, accumulate=True)

    mcm_request.save()
    # do the reservation of the whole chain ?
    generated_requests = []
    if reserve:
        results_dict = generated_chained_request.reserve(limit=reserve, save_requests=False)
        if results_dict['results'] and 'generated_requests' in results_dict:
            generated_requests = results_dict['generated_requests']
            results_dict.pop('generated_requests')
        else:
            return {
                "results": False,
                "prepid": new_chain_prepid,
                "message": results_dict['message']}

    return {
        "results": True,
        "prepid": new_chain_prepid,
        'generated_requests': generated_requests}
def internal_run(self):
    if not self.lock.acquire(blocking=False):
        self.logger.error("Could not acquire lock for ChainRequestInjector. prepid %s" % (self.prepid))
        return False

    try:
        crdb = database('chained_requests')
        rdb = database('requests')
        batch_name = None
        if not crdb.document_exists(self.prepid):
            # it's a request actually, pick up all chains containing it
            mcm_r = rdb.get(self.prepid)
            # mcm_crs = crdb.query(query="root_request==%s" % self.prepid) ## not only when its the root of
            mcm_crs = crdb.query(query="contains==%s" % self.prepid)
            task_name = 'task_' + self.prepid
            batch_type = 'Task_' + mcm_r['member_of_campaign']
        else:
            mcm_crs = [crdb.get(self.prepid)]
            current_step_prepid = mcm_crs[0]['chain'][mcm_crs[0]['step']]
            mcm_request = rdb.get(current_step_prepid)
            task_name = 'task_' + current_step_prepid
            batch_type = 'Task_' + mcm_request['member_of_campaign']

        if len(mcm_crs) == 0:
            return False

        mcm_rs = []
        # upload all config files to config cache, with "configuration economy" already implemented
        for cr in mcm_crs:
            mcm_cr = chained_request(cr)
            chain = mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]
            for request_prepid in chain:
                mcm_rs.append(request(rdb.get(request_prepid)))
                if self.check_approval and mcm_rs[-1].get_attribute('approval') != 'submit':
                    message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                        request_prepid,
                        mcm_rs[-1].get_attribute('status'),
                        mcm_rs[-1].get_attribute('approval'))
                    self.logger.error(message)
                    subject = '%s injection failed' % mcm_cr.get_attribute('prepid')
                    notification(
                        subject,
                        message,
                        [],
                        group=notification.CHAINED_REQUESTS,
                        action_objects=[mcm_cr.get_attribute('prepid')],
                        object_type='chained_requests',
                        base_object=mcm_cr)
                    mcm_cr.notify(subject, message)
                    return False

                if mcm_rs[-1].get_attribute('status') != 'approved':
                    # change the return format to percolate the error message
                    message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                        request_prepid,
                        mcm_rs[-1].get_attribute('status'),
                        mcm_rs[-1].get_attribute('approval'))
                    self.logger.error(message)
                    subject = '%s injection failed' % mcm_cr.get_attribute('prepid')
                    notification(
                        subject,
                        message,
                        [],
                        group=notification.CHAINED_REQUESTS,
                        action_objects=[mcm_cr.get_attribute('prepid')],
                        object_type='chained_requests',
                        base_object=mcm_cr)
                    mcm_cr.notify(subject, message)
                    return False

                uploader = ConfigMakerAndUploader(prepid=request_prepid, lock=locker.lock(request_prepid))
                if not uploader.internal_run():
                    message = 'Problem with uploading the configuration for request %s' % (request_prepid)
                    notification(
                        'Configuration upload failed',
                        message,
                        [],
                        group=notification.CHAINED_REQUESTS,
                        action_objects=[mcm_cr.get_attribute('prepid')],
                        object_type='chained_requests',
                        base_object=mcm_cr)
                    mcm_cr.notify('Configuration upload failed', message)
                    self.logger.error(message)
                    return False

        mcm_r = mcm_rs[-1]
        batch_name = BatchPrepId().next_batch_id(batch_type, create_batch=True)
        semaphore_events.increment(batch_name)
        self.logger.error('found batch %s' % batch_name)
        with ssh_executor(server='vocms081.cern.ch') as ssh:
            cmd = self.make_command(mcm_r)
            self.logger.error('prepared command %s' % cmd)
            # modify here to have the command to be executed
            _, stdout, stderr = ssh.execute(cmd)
            output = stdout.read()
            error = stderr.read()
            self.logger.info(output)
            self.logger.info(error)

        injected_requests = [l.split()[-1] for l in output.split('\n')
                             if l.startswith('Injected workflow:')]
        if not injected_requests:
            self.injection_error(
                'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s' % (
                    output, error),
                mcm_rs)
            return False

        # what gets printed into the batch object
        added_requests = []
        once = set()
        for mcm_r in mcm_rs:
            if mcm_r.get_attribute('prepid') in once:
                continue
            once.add(mcm_r.get_attribute('prepid'))
            added = [{'name': app_req,
                      'content': {'pdmv_prep_id': mcm_r.get_attribute('prepid')}}
                     for app_req in injected_requests]
            added_requests.extend(added)

        # edit the batch object
        with locker.lock(batch_name):
            bdb = database('batches')
            bat = batch(bdb.get(batch_name))
            bat.add_requests(added_requests)
            bat.update_history({'action': 'updated', 'step': task_name})
            bat.reload()

        # reload the content of all requests as they might have changed already
        added = [{'name': app_req,
                  'content': {'pdmv_prep_id': task_name}}
                 for app_req in injected_requests]
        seen = set()
        for cr in mcm_crs:
            mcm_cr = chained_request(cr)
            chain = mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]
            message = ""
            for rn in chain:
                if rn in seen:
                    continue  # don't do it twice
                seen.add(rn)
                mcm_r = request(rdb.get(rn))
                message += mcm_r.textified()
                message += "\n\n"
                mcm_r.set_attribute('reqmgr_name', added)
                mcm_r.update_history({'action': 'inject', 'step': batch_name})
                if not self.check_approval:
                    mcm_r.set_attribute('approval', 'submit')
                # set the status to submitted
                mcm_r.set_status(step=mcm_r._json_base__status.index('submitted'), with_notification=False)
                mcm_r.reload()
                mcm_cr.set_attribute('last_status', mcm_r.get_attribute('status'))

            # re-get the object
            mcm_cr = chained_request(crdb.get(cr['prepid']))
            # take care of changes to the chain
            mcm_cr.update_history({'action': 'inject', 'step': batch_name})
            mcm_cr.set_attribute('step', len(mcm_cr.get_attribute('chain')) - 1)
            mcm_cr.set_attribute('status', 'processing')
            subject = 'Injection succeeded for %s' % mcm_cr.get_attribute('prepid')
            notification(
                subject,
                message,
                [],
                group=notification.CHAINED_REQUESTS,
                action_objects=[mcm_cr.get_attribute('prepid')],
                object_type='chained_requests',
                base_object=mcm_cr)
            mcm_cr.notify(subject, message)
            mcm_cr.reload()

        return True
    except Exception:
        self.injection_error(
            "Error with injecting chains for %s :\n %s" % (self.prepid, traceback.format_exc()), [])
    finally:
        # we decrement batch id and release lock on prepid+lower semaphore
        if batch_name:  # dirty thing for now. Because batch name can be None for certain use-cases in code above
            semaphore_events.decrement(batch_name)
        self.lock.release()
        self.queue_lock.release()
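# A small, self-contained sketch of the output parsing used above: the injection
# command is expected to print lines of the form "Injected workflow: <name>",
# and the workflow names are collected from the last token of each such line.
# The sample output below is illustrative only.
def parse_injected_workflows(output):
    return [line.split()[-1]
            for line in output.split('\n')
            if line.startswith('Injected workflow:')]

# Example:
#   parse_injected_workflows('Injected workflow: pdmvserv_task_ABC\nsome other line')
#   -> ['pdmvserv_task_ABC']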
def announce(self, notes="", user=""):
    if self.get_attribute('status') != 'new':
        return False
    if len(self.get_attribute('requests')) == 0:
        return False

    current_notes = self.get_attribute('notes')
    if current_notes:
        current_notes += '\n'
    if notes:
        current_notes += notes

    self.set_attribute('notes', current_notes)
    total_events = 0
    content = self.get_attribute('requests')
    total_requests = len(content)
    rdb = database('requests')
    # prepare the announcing message
    (campaign, batchNumber) = self.get_attribute('prepid').split('_')[-1].split('-')
    subject = self.get_subject()
    request_messages = {}
    for r in content:
        # loose binding of the prepid to the request name, might change later on
        if 'pdmv_prep_id' in r['content']:
            pid = r['content']['pdmv_prep_id']
        else:
            pid = r['name'].split('_')[1]

        mcm_r = rdb.get(pid)
        total_events += mcm_r['total_events']
        c = mcm_r['member_of_campaign']
        if c not in request_messages:
            request_messages[c] = ""

        request_messages[c] += " * %s (%s) -> %s\n" % (pid, mcm_r['dataset_name'], r['name'])

    campaigns = sorted(request_messages.keys())
    message = ""
    message += "Dear Data Operation Team,\n\n"
    message += "may you please consider the following batch number %d of %s requests for the campaign%s %s:\n\n" % (
        int(batchNumber),
        total_requests,
        "s" if len(campaigns) > 1 else "",
        ','.join(campaigns))
    for c in campaigns:
        message += request_messages[c]
        message += "\n"

    message += "For a total of %s events\n\n" % (
        re.sub("(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % total_events))
    if self.get_attribute('extension'):
        message += "This batch is for an extension : {0}\n".format(self.get_attribute('extension'))

    if self.get_attribute('version'):
        message += "This batch is a resubmission : v{0}\n".format(self.get_attribute('version') + 1)

    message += "Link to the batch:\n"
    l_type = locator()
    message += '%s/batches?prepid=%s \n\n' % (l_type.baseurl(), self.get_attribute('prepid'))
    if current_notes:
        message += "Additional comments for this batch:\n" + current_notes + '\n'

    self.logger.info('Message sent for batch %s' % (self.get_attribute('prepid')))
    self.get_current_user_role_level()
    to_who = [settings.get_value('service_account')]
    if l_type.isDev():
        to_who.append(settings.get_value('hypernews_test'))
    else:
        to_who.append(settings.get_value('dataops_announce'))

    notification(
        subject,
        message,
        [],
        group=notification.BATCHES,
        target_role='production_manager',
        action_objects=[self.get_attribute('prepid')],
        object_type='batches',
        base_object=self)
    returned_id = self.notify(subject, message, who=to_who)
    self.set_attribute('message_id', returned_id)
    self.reload()
    # toggle the status
    # only when we are sure it functions
    self.set_status()
    self.set_status()
    return True
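# A short sketch of the thousands-separator formatting used in the announcement
# above: the regex inserts a comma before every group of three digits. Python's
# built-in format specification gives the same result and could replace it; the
# function name is illustrative, not part of the existing code.
def with_thousands_separator(total_events):
    return '{:,}'.format(total_events)

# Example: with_thousands_separator(1234567) -> '1,234,567'
# which matches re.sub("(\d)(?=(\d{3})+(?!\d))", r"\1,", "1234567")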
def get(self, chained_request_id):
    """
    Perform test for chained requests
    """
    crdb = database('chained_requests')
    rdb = database('requests')
    settingsDB = database('settings')
    mcm_cr = chained_request(crdb.get(chained_request_id))
    if settingsDB.get('validation_stop')['value']:
        return {
            "results": False,
            'message': ('validation jobs are halted to allow forthcoming mcm '
                        'restart - try again later'),
            "prepid": chained_request_id}

    requires_validation = False
    for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:
        mcm_r = request(rdb.get(rid))
        if not mcm_r.is_root and 'validation' not in mcm_r._json_base__status:
            # We don't care about non-root requests because they are not used in the chain run test
            break

        requires_validation = True
        if mcm_r.get_attribute('status') != 'new' or mcm_r.get_attribute('approval') != 'none':
            return {
                "results": False,
                "prepid": chained_request_id,
                "message": "request %s is in status %s, approval: %s" % (
                    rid, mcm_r.get_attribute('status'), mcm_r.get_attribute('approval'))}

        try:
            mcm_r.ok_to_move_to_approval_validation(for_chain=True)
            mcm_r.update_history({'action': 'approve', 'step': 'validation'})
            mcm_r.set_attribute('approval', 'validation')
            mcm_r.reset_validations_counter()
            mcm_r.reload()
            text = 'Within chain %s \n' % mcm_cr.get_attribute('prepid')
            text += mcm_r.textified()
            subject = 'Approval %s in chain %s for request %s' % (
                'validation', mcm_cr.get_attribute('prepid'), mcm_r.get_attribute('prepid'))
            notification(
                subject,
                text,
                [],
                group=notification.REQUEST_APPROVALS,
                action_objects=[mcm_r.get_attribute('prepid')],
                object_type='requests',
                base_object=mcm_r)
            mcm_r.notify(subject, text, accumulate=True)
        except Exception as e:
            mcm_cr.reset_requests(str(e), notify_one=rid)
            return {
                "results": False,
                "message": str(e),
                "prepid": chained_request_id}

    if not requires_validation:
        return {
            "results": True,
            "message": "No validation required",
            "prepid": chained_request_id}

    mcm_cr.set_attribute('validate', 1)
    if not crdb.update(mcm_cr.json()):
        return {
            "results": False,
            "message": "Failed while trying to update the document in DB",
            "prepid": chained_request_id}

    return {
        "results": True,
        "message": "run test will start soon",
        "prepid": chained_request_id}
def delete_request(self, crid):
    crdb = database('chained_requests')
    rdb = database('requests')
    mcm_cr = chained_request(crdb.get(crid))
    # get all objects
    mcm_r_s = []
    for (i, rid) in enumerate(mcm_cr.get_attribute('chain')):
        mcm_r = request(rdb.get(rid))
        # this is not a valid check as it is allowed to remove a chain around already running requests
        # if mcm_r.get_attribute('status') != 'new':
        #     return {"results": False,
        #             "message": "the request %s part of the chain %s for action %s is not in new status" % (
        #                 mcm_r.get_attribute('prepid'), crid, mcm_a.get_attribute('prepid'))}
        in_chains = mcm_r.get_attribute('member_of_chain')
        in_chains.remove(crid)
        self.logger.debug(
            "Removing ChainAction member_of_chain: %s to request: %s" % (
                mcm_cr.get_attribute("prepid"), mcm_r.get_attribute('prepid')))
        mcm_r.set_attribute('member_of_chain', in_chains)
        if i == 0:
            if len(in_chains) == 0 and mcm_r.get_attribute('status') != 'new':
                return {
                    "results": False,
                    "message": "the request %s, not in status new, at the root of the chain will not be chained anymore" % rid}
        else:
            if len(in_chains) == 0:
                return {
                    "results": False,
                    "message": "the request %s, not at the root of the chain will not be chained anymore" % rid}

        mcm_r.update_history({'action': 'leave', 'step': crid})
        mcm_r_s.append(mcm_r)

    if mcm_cr.get_attribute('action_parameters')['flag']:
        return {
            "results": False,
            "message": "the action for %s is not disabled" % (crid)}

    # then save all changes
    for mcm_r in mcm_r_s:
        if not rdb.update(mcm_r.json()):
            return {
                "results": False,
                "message": "Could not save request " + mcm_r.get_attribute('prepid')}
        else:
            subject = "Request {0} left chain".format(mcm_r.get_attribute('prepid'))
            message = "Request {0} has successfully left chain {1}".format(
                mcm_r.get_attribute('prepid'), crid)
            notification(
                subject,
                message,
                [],
                group=notification.REQUEST_OPERATIONS,
                action_objects=[mcm_r.get_attribute('prepid')],
                object_type='requests',
                base_object=mcm_r)
            mcm_r.notify(subject, message)

    return {"results": crdb.delete(crid)}