def announce(self, ds_to_be_invalidated, r_to_be_rejected):
    """Notify the Data Operation team by email about requests to reject/abort
    and datasets to invalidate, then mark every item as announced and persist
    the change in the invalidation database.

    ds_to_be_invalidated -- invalidation objects for datasets
    r_to_be_rejected     -- invalidation objects for requests
    """
    if not ds_to_be_invalidated and not r_to_be_rejected:
        return
    text = 'Dear Data Operation Team,\n\n'
    if r_to_be_rejected:
        text += 'please reject or abort the following requests:\n'
        text += self.print_invalidations(r_to_be_rejected)
    if ds_to_be_invalidated:
        text += '\nPlease invalidate the following datasets:\n'
        text += self.print_invalidations(ds_to_be_invalidated)
    text += '\nas a consequence of requests being reset.\n'
    to_who = [settings.get_value('service_account')]
    if self.l_type.isDev():
        to_who.append(settings.get_value('hypernews_test'))
    else:
        to_who.append(settings.get_value('dataops_announce'))
    # sender defaults to whoever owns the first invalidation object, if any
    try:
        elem = (r_to_be_rejected + ds_to_be_invalidated)[0]
        sender = elem.current_user_email
    except IndexError:
        sender = None
    self.com.sendMail(to_who, 'Request and Datasets to be Invalidated', text, sender)
    # one database handle for all updates (was re-created on every loop iteration)
    idb = database(self.db_name)
    for to_announce in itertools.chain(r_to_be_rejected, ds_to_be_invalidated):
        to_announce.set_announced()
        idb.update(to_announce.json())
def notify_batch(self, batch_id, message_notes):
    """Email a notification about the given batch, record it in the batch
    history and return the send result."""
    recipients = [settings.get_value('service_account')]
    if locator().isDev():
        recipients.append(settings.get_value('hypernews_test'))
    else:
        recipients.append(settings.get_value('dataops_announce'))
    single_batch = batch(self.bdb.get(batch_id))
    subject = single_batch.get_subject('[Notification]')
    current_message_id = single_batch.get_attribute('message_id')
    self.logger.info('current msgID: %s' % current_message_id)
    # reply in the existing email thread when the batch already has a message id
    if current_message_id != '':
        result = single_batch.notify(subject,
                                     message_notes,
                                     who=recipients,
                                     sender=None,
                                     reply_msg_ID=current_message_id)
        self.logger.info('result if True : %s' % result)
    else:
        result = single_batch.notify(subject,
                                     message_notes,
                                     who=recipients,
                                     sender=None)
        self.logger.info('result if False : %s' % result)
    notification(subject,
                 message_notes,
                 [],
                 group=notification.BATCHES,
                 target_role='production_manager',
                 action_objects=[single_batch.get_attribute('prepid')],
                 object_type='batches',
                 base_object=single_batch)
    single_batch.update_history({'action': 'notify', 'step': message_notes})
    single_batch.reload()
    return {'results': result}
def process_request_wrong_time_event(self, mcm_request, member_of_chain=None):
    """
    Returns whether this request should be validated again
    """
    validations_count = mcm_request.get_validations_count()
    max_validations = settings.get_value('max_validations')
    request_prepid = mcm_request.get_attribute('prepid')
    # single place to decide whether another validation attempt is allowed
    will_retry = validations_count < max_validations
    not_marker = "" if will_retry else "NOT "
    subject = 'Validation will %sbe retried for %s' % (not_marker, request_prepid)
    if member_of_chain is None:
        message = 'Validation for request %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid, not_marker, validations_count, max_validations)
    else:
        message = 'Validation for request %s in chain %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid, member_of_chain, not_marker, validations_count, max_validations)
    self.logger.info(message)
    notification(subject,
                 message,
                 [],
                 group=notification.REQUEST_OPERATIONS,
                 action_objects=[request_prepid],
                 object_type='requests',
                 base_object=mcm_request)
    mcm_request.notify(subject, message)
    return will_retry
def __init__(self):
    """Set up working directories, logging, cached settings and the SSH
    executor used for job submission."""
    self.setup_directories()
    self.setup_logger()
    # NOTE(review): method name looks misspelled ("submmited") — defined elsewhere in this class
    self.get_submmited_prepids()
    self.batch_retry_timeout = settings.get_value('batch_retry_timeout')
    self.check_term_runlimit = settings.get_value('check_term_runlimit')
    try:
        self.ssh_exec = ssh_executor()
    except Exception as e:
        self.ssh_exec = None
        self.logger.error(str(e) + 'traceback %s ' % traceback.format_exc())
        # NOTE(review): this early return leaves self.group unset when SSH setup
        # fails — any later access to self.group would raise AttributeError; confirm intended
        return
    if locator().isDev():
        self.group = '/dev'
    else:
        self.group = '/prod'
def get_meeting_date():
    """Return the date of the next MccM meeting: today's weekday or later this
    week if the configured meeting day has not passed yet, otherwise the
    meeting day of next week."""
    import datetime
    import tools.settings as settings
    today = datetime.date.today()
    meeting_day = int(settings.get_value('mccm_meeting_day'))
    weeks_ahead = 1 if meeting_day < today.weekday() else 0
    return today + datetime.timedelta(days=meeting_day - today.weekday(), weeks=weeks_ahead)
def __init__(self):
    """Set up working directories, logging, cached settings and the SSH
    executor used for job submission."""
    self.setup_directories()
    self.setup_logger()
    # NOTE(review): method name looks misspelled ("submmited") — defined elsewhere in this class
    self.get_submmited_prepids()
    self.batch_retry_timeout = settings.get_value('batch_retry_timeout')
    self.check_term_runlimit = settings.get_value('check_term_runlimit')
    try:
        self.ssh_exec = ssh_executor()
    except Exception as e:
        self.ssh_exec = None
        self.logger.error(
            str(e) + 'traceback %s ' % traceback.format_exc())
        # NOTE(review): this early return leaves self.group unset when SSH setup
        # fails — any later access to self.group would raise AttributeError; confirm intended
        return
    if locator().isDev():
        self.group = '/dev'
    else:
        self.group = '/prod'
def get_pwgs(self):
    """
    return all accessible PWGs for the user
    """
    all_pwgs = settings.get_value('pwg')
    privileged_roles = ('production_manager', 'administrator', 'generator_convener')
    if self.get_attribute('role') in privileged_roles:
        return all_pwgs
    return self.get_attribute('pwg')
def announce_with_text(self, bid, message):
    """Announce batch `bid` with the given message: approve its workflows in
    request manager, announce the batch, bump the priority of single-request
    NanoAOD workflows, and save the batch. Returns a result dict with
    'results', 'message' and 'prepid'."""
    bdb = database('batches')
    # refuse to announce while submissions for this batch are still in flight
    if not semaphore_events.is_set(bid):
        return {"results": False,
                "message": "Batch {0} has on-going submissions.".format(bid),
                "prepid": bid}
    b = batch(bdb.get(bid))
    # comma-separated list of all workflow names in the batch
    workflows = ''
    for dictionary in b.get_attribute('requests'):
        workflows += dictionary['name'] + ','
    workflows = workflows[:-1]
    r = ''
    result = {}
    if workflows != '':
        # approve the workflows first; announce only if approval succeeded
        approver = RequestApprover(bid, workflows)
        result = approver.internal_run()
        if (result['results']):
            r = b.announce(message)
    else:
        # nothing to approve — announce directly
        r = b.announce(message)
    if r:
        # map workflow name -> list of prepids it contains
        map_wf_to_prepid = {}
        for dictionary in b.get_attribute('requests'):
            wf = dictionary.get('name')
            prepid = dictionary.get('content', {}).get('pdmv_prep_id')
            if not wf or not prepid:
                continue
            if wf not in map_wf_to_prepid:
                map_wf_to_prepid[wf] = []
            map_wf_to_prepid[wf].append(prepid)
        rdb = database('requests')
        priority_coeff = settings.get_value('nanoaod_priority_increase_coefficient')
        # single-request NanoAOD workflows get a priority boost
        for wf, requests in map_wf_to_prepid.iteritems():
            if len(requests) == 1 and 'nanoaod' in requests[0].lower():
                for r_prepid in requests:
                    req = request(rdb.get(r_prepid))
                    current_priority = req.get_attribute('priority')
                    new_priority = int(current_priority + priority_coeff * 1000)
                    req.change_priority(new_priority)
        return {
            "results": bdb.update(b.json()),
            "message": r,
            "prepid": bid}
    else:
        # prefer the approver's error message when announcing never happened
        return {
            "results": False,
            "prepid": bid,
            "message": result['message'] if 'message' in result and not r else r}
def notify(self, subject, message, who=None, actors=True, service=True, HN=False, sender=None, Nchild=-1, reply_msg_ID=None, accumulate=False):
    """Send a notification email about this object.

    who          -- extra recipient addresses (list); default none
    actors       -- also notify the authors of the object
    service      -- also notify the service account
    HN           -- also notify the HyperNews test address
    sender       -- from-address; defaults to the current user's email
    Nchild       -- passed through to get_actors to limit actor depth
    reply_msg_ID -- message id to thread the email under, if any
    accumulate   -- passed through to the communicator for batched sending
    Returns whatever the communicator's sendMail returns.
    """
    # avoid the shared mutable default argument; copy so the caller's list is never mutated
    dest = list(who) if who else []
    if actors:
        # add the actors to the object
        dest.extend(self.get_actors(what='author_email', Nchild=Nchild))
    if service:
        # let the service know at any time
        dest.append(settings.get_value('service_account'))
    if HN:
        # back bone HN notification ?
        dest.append(settings.get_value('hypernews_test'))
    # be sure to not have duplicates, and drop explicitly excluded addresses
    dest = set(dest)
    exclude_emails = set(settings.get_value('exclude_from_notify'))
    dest = list(dest - exclude_emails)
    if not dest:
        # never send to nobody: fall back to the service account and flag it
        dest.append(settings.get_value('service_account'))
        subject += '. And no destination was set'
    sender = sender if sender else self.current_user_email
    self.logger.info('Notification %s from %s send to %s [acc:%s]' % (
        subject, sender, ', '.join(dest), accumulate))
    return self.com.sendMail(dest, subject, message, sender, reply_msg_ID, accumulate=accumulate)
def streaming_function():
    """Stream progress (as text chunks) while emailing GEN contacts a reminder
    about the 'new' MccM tickets they should operate on.

    NOTE(review): closure — relies on `self` from the enclosing method scope.
    """
    mccms_db = database('mccms')
    users_db = database('users')
    # collect all generator contacts, their emails, and group them by PWG
    generator_contacts_query = users_db.construct_lucene_query({'role': 'generator_contact'})
    generator_contacts = users_db.full_text_search("search", generator_contacts_query, page=-1)
    generator_contacts_by_pwg = {}
    generator_contacts_emails = set()
    for contact in generator_contacts:
        for pwg in contact.get('pwg', []):
            if pwg not in generator_contacts_by_pwg:
                generator_contacts_by_pwg[pwg] = []
            generator_contacts_by_pwg[pwg].append(contact.get('email'))
        generator_contacts_emails.add(contact.get('email'))
    __query = mccms_db.construct_lucene_query({'status': 'new'})
    mccms_tickets = mccms_db.full_text_search('search', __query, page=-1)
    # email address -> set of ticket prepids to remind about
    authors_tickets_dict = dict()
    yield '<pre>'
    for ticket in mccms_tickets:
        yield 'Processing ticket %s\n' % (ticket['prepid'])
        mccm_ticket = mccm(json_input=ticket)
        pwg = mccm_ticket.get_attribute('pwg')
        authors = mccm_ticket.get_actors(what='author_email')
        yield '%s worked on %s\n' % (authors, ticket['prepid'])
        # keep only addresses that belong to generator contacts
        # (ticket authors plus the contacts of the ticket's PWG)
        authors = filter(lambda e: e in generator_contacts_emails,
                         list(set(authors + generator_contacts_by_pwg.get(pwg, []))))
        yield '%s will be notified about %s\n' % (authors, ticket['prepid'])
        for author_email in authors:
            if author_email in generator_contacts_emails:
                if author_email not in authors_tickets_dict:
                    authors_tickets_dict[author_email] = set()
                authors_tickets_dict[author_email].add(ticket['prepid'])
    subject_template = 'Gentle reminder on %s ticket%s to be operated by you'
    message_template = ('Dear GEN Contact,\nPlease find below the details of %s MccM ticket%s in status "new". ' +
                        'Please present them in next MccM googledoc or cancel tickets if these are not needed anymore.\n\n')
    base_url = locator().baseurl()
    mail_communicator = communicator()
    service_account = settings.get_value('service_account')
    # one email per contact, listing all their tickets
    for author_email, ticket_prepids in authors_tickets_dict.iteritems():
        num_tickets = len(ticket_prepids)
        subject = subject_template % (num_tickets, '' if num_tickets == 1 else 's')
        message = message_template % (num_tickets, '' if num_tickets == 1 else 's')
        for ticket_prepid in ticket_prepids:
            message += 'Ticket: %s\n%smccms?prepid=%s\n\n' % (ticket_prepid, base_url, ticket_prepid)
            yield '.'
        yield '\n'
        message += 'You received this email because you are listed as generator contact of physics group(s) of these tickets.\n'
        self.logger.info('Email:%s\nSubject: %s\nMessage:%s' % (author_email, subject, message))
        mail_communicator.sendMail([author_email, service_account], subject, message)
        yield 'Email sent to %s\n' % (author_email)
def get_pwgs(self):
    """
    return all accessible PWGs for the user
    """
    all_pwgs = settings.get_value('pwg')
    user_role = self.get_attribute('role')
    # non-privileged users only see their own PWGs
    if user_role not in ('production_manager', 'administrator', 'generator_convener'):
        return self.get_attribute('pwg')
    return all_pwgs
def __init__(self, json_input=None):
    """Build the object from json_input, defaulting missing keys from the
    cmsDriver options schema stored in the settings database."""
    json_input = json_input if json_input else {}
    self._json_base__schema = settings.get_value('cmsdriver_options')
    # how to get the options ?
    # in cmssw
    # import Configuration.PyReleaseValidation.Options as opt
    # map( lambda s : s.replace('--','') ,opt.parser._long_opt.keys() )
    # update self according to json_input
    self.__update(json_input)
    self.__validate()
def notify_batch(self, batch_id, message_notes):
    """Email a notification about the given batch (threading into the existing
    email conversation when a message_id is known), record it in the batch
    history and return the send result."""
    message = message_notes
    to_who = [settings.get_value('service_account')]
    l_type = locator()
    if l_type.isDev():
        to_who.append(settings.get_value('hypernews_test'))
    else:
        to_who.append(settings.get_value('dataops_announce'))
    single_batch = batch(self.bdb.get(batch_id))
    subject = single_batch.get_subject('[Notification]')
    current_message_id = single_batch.get_attribute('message_id')
    self.logger.info('current msgID: %s' % current_message_id)
    # reply in the existing thread when the batch already has a message id
    if current_message_id != '':
        result = single_batch.notify(subject, message, who=to_who, sender=None,
                                     reply_msg_ID=current_message_id)
        self.logger.info('result if True : %s' % result)
    else:
        result = single_batch.notify(subject, message, who=to_who, sender=None)
        self.logger.info('result if False : %s' % result)
    notification(subject,
                 message,
                 [],
                 group=notification.BATCHES,
                 target_role='production_manager',
                 action_objects=[single_batch.get_attribute('prepid')],
                 object_type='batches',
                 base_object=single_batch)
    single_batch.update_history({'action': 'notify', 'step': message})
    single_batch.reload()
    return {'results': result}
def __init__(self, directory=None, prepid=None, server=None):
    """Prepare an SSH client towards `server` (defaulting to the configured
    test node) with per-prepid injection logging.

    NOTE(review): the `directory` parameter is not used in this constructor —
    confirm against callers whether it is still needed.
    """
    self.ssh_client = None
    if not server:
        server = settings.get_value("node_for_test")
    self.ssh_server = server
    self.ssh_server_port = 22
    self.ssh_credentials = '/afs/cern.ch/user/p/pdmvserv/private/credentials'
    self.hname = None
    # TO-DO
    # rename logger -> inject_logger
    # error_logger -> logger
    # to be in same naming convention as everywhere else
    self.error_logger = logging.getLogger("mcm_error")
    # injection log lines are tagged with the request prepid
    self.logger = InjectionLogAdapter(logging.getLogger("mcm_inject"), {'handle': prepid})
    self.__build_ssh_client()
def get(self, action):
    """
    Inspect the chained requests of all chained campaigns, requires /all
    """
    if action != 'all':
        return {"results": 'Error: Incorrect argument provided'}
    is_running = settings.get_value('inspect_chained_campaigns_running')
    self.logger.info('InspectChainedRequests is running: %s' % (is_running))
    if is_running:
        return {"results": 'Already running inspection'}
    # force pretify output in browser for multiple lines
    self.representations = {'text/plain': self.output_text}
    campaign_ids = self.listAll()
    shuffle(campaign_ids)
    inspect_stream = self.multiple_inspect(','.join(campaign_ids))
    return flask.Response(flask.stream_with_context(inspect_stream))
def get(self, block_threshold=0):
    """
    Send a reminder to the production managers for existing opened mccm documents
    """
    mdb = database('mccms')
    udb = database('users')
    __query = mdb.construct_lucene_query({'status': 'new'})
    mccms = mdb.full_text_search('search', __query, page=-1)
    # only tickets at or below the given block priority, lowest block first
    mccms = filter(lambda m: m['block'] <= block_threshold, mccms)
    mccms = sorted(mccms, key=lambda m: m['block'])
    if len(mccms) == 0:
        return {"results": True,
                "message": "nothing to remind of at level %s, %s" % (block_threshold, mccms)}
    l_type = locator()
    com = communicator()
    subject = 'Gentle reminder on %s tickets to be operated by you' % (len(mccms))
    message = '''\
Dear Production Managers,

please find below the details of %s opened MccM tickets that need to be operated.

''' % (len(mccms))
    mccm_prepids = []
    for _mccm in mccms:
        prepid = _mccm['prepid']
        message += 'Ticket : %s (block %s)\n' % (prepid, _mccm['block'])
        message += ' %smccms?prepid=%s \n\n' % (l_type.baseurl(), prepid)
        mccm_prepids.append(prepid)
    message += '\n'
    # recipients: service account plus every production manager
    to_who = [settings.get_value('service_account')]
    to_who.extend(map(lambda u: u['email'],
                      udb.query(query="role==production_manager", page_num=-1)))
    notification(
        subject,
        message,
        [],
        group=notification.REMINDERS,
        action_objects=mccm_prepids,
        object_type='mccms',
        target_role='production_manager')
    com.sendMail(
        to_who,
        subject,
        message)
    return {"results": True, "message": map(lambda m: m['prepid'], mccms)}
def get(self, user_id=None):
    """
    Retrieve the pwg of the provided user
    """
    # this could be a specific database in couch, to hold the list, with maybe some added information about whatever the group does...
    all_pwgs = settings.get_value('pwg')
    db = database('users')
    all_pwgs.sort()
    if user_id is None:
        return {"results": all_pwgs}
    if not db.document_exists(user_id):
        return {"results": []}
    mcm_user = user(db.get(user_id))
    return {"results": mcm_user.get_pwgs()}
def get(self, pwgs): """ Ask for the increase of the role of the current user to the given pwg """ # get who's there user_p = user_pack() udb = database(self.db_name) mcm_u = user(udb.get(user_p.get_username())) # get the requested pwgs pwgs = pwgs.split(',') # set the pwgs to the current user current = mcm_u.get_attribute('pwg') current = list(set(current + pwgs)) mcm_u.set_attribute('pwg', current) mcm_u.update_history({'action': 'ask role', 'step': pwgs}) udb.update(mcm_u.json()) # get the production managers emails __query = udb.construct_lucene_query({'role': 'production_manager'}) production_managers = udb.full_text_search('search', __query, page=-1) # send a notification to prod manager + service to_who = map(lambda u: u['email'], production_managers) + [ settings.get_value('service_account') ] to_who.append(user_p.get_email()) com = communicator() l_type = locator() subject = 'Increase role for user %s' % mcm_u.get_attribute('fullname') message = 'Please increase the role of the user %s to the next level.\n\n%susers?prepid=%s' % ( mcm_u.get_attribute('username'), l_type.baseurl(), mcm_u.get_attribute('username')) notification(subject, message, [], group=notification.USERS, action_objects=[mcm_u.get_attribute('prepid')], object_type='users', target_role='production_manager') com.sendMail(to_who, subject, message) return { "results": True, "message": "user %s in for %s" % (mcm_u.get_attribute('username'), current) }
def get(self, action):
    """
    Inspect the chained requests of all chained campaigns, requires /all
    """
    if action != 'all':
        return {"results": 'Error: Incorrect argument provided'}
    # refuse to start a second concurrent inspection
    is_running = settings.get_value('inspect_chained_campaigns_running')
    self.logger.info('InspectChainedRequests is running: %s' % (is_running))
    if is_running:
        return {"results": 'Already running inspection'}
    # force pretify output in browser for multiple lines
    self.representations = {'text/plain': self.output_text}
    # shuffle so repeated runs do not always inspect in the same order
    ccid_list = self.listAll()
    shuffle(ccid_list)
    return flask.Response(
        flask.stream_with_context(
            self.multiple_inspect(','.join(ccid_list))))
def get_ds_input(self, __output_dataset, __seq):
    """Pick the input dataset for a request from the previous step's output
    datasets, using the 'datatier_input' settings to match the first sequence
    step against the preferred datatiers. Falls back to the first output
    dataset, or '' on error."""
    try:
        input_ds = ""
        possible_dt_inputs = settings.get_value('datatier_input')
        # we take sequence 1step datetier
        # check if "step" is a string -> some DR requests has single step string with , in it...
        # some DR requests has it.... most probably the generated ones
        if isinstance(__seq[0]["step"], basestring):
            __step = __seq[0]["step"].split(",")[0].split(":")[0]
        else:
            __step = __seq[0]["step"][0].split(":")[0]
        if __step in possible_dt_inputs:
            __possible_inputs = possible_dt_inputs[__step]
            # highest priority is first.. we should take acording output_ds
            __prev_output = __output_dataset
            # datatier is the last path component of each output dataset name
            __prev_tiers = [el.split("/")[-1] for el in __prev_output]
            for elem in __possible_inputs:
                if elem in __prev_tiers:
                    input_ds = __prev_output[__prev_tiers.index(elem)]
                    # dirty stuff
                    # self.logger.info("get_ds_input found a possible DS: %s" % (input_ds))
                    # self.logger.info("get_ds_input\t elem: %s __possible_inputs %s" % (elem, __possible_inputs))
                    break
        else:
            # if step is not defined in dictionary -> we default to previous logic
            input_ds = __output_dataset[0]
        # if we didn't find anything in for loop above, fall back to previous
        if not input_ds:
            if len(__output_dataset) > 0:
                # in case the output_dataset is ""
                input_ds = __output_dataset[0]
        self.logger.info("get_ds_input returns input_ds: %s" % (input_ds))
        return input_ds
    except Exception:
        self.logger.error("Error looking for input dataset: %s" % (traceback.format_exc()))
        return ""
def process_request_wrong_time_event(self, mcm_request, member_of_chain=None):
    """
    Returns whether this request should be validated again
    """
    validations_count = mcm_request.get_validations_count()
    max_validations = settings.get_value('max_validations')
    request_prepid = mcm_request.get_attribute('prepid')
    # "NOT " is inserted in the messages once the retry budget is exhausted
    subject = 'Validation will %sbe retried for %s' % (
        "" if validations_count < max_validations else "NOT ",
        request_prepid
    )
    if member_of_chain is not None:
        message = 'Validation for request %s in chain %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid,
            member_of_chain,
            "" if validations_count < max_validations else "NOT ",
            validations_count,
            max_validations
        )
    else:
        message = 'Validation for request %s failed. It will %sbe retried. Number of validations done: %d/%d.' % (
            request_prepid,
            "" if validations_count < max_validations else "NOT ",
            validations_count,
            max_validations
        )
    self.logger.info(message)
    notification(
        subject,
        message,
        [],
        group=notification.REQUEST_OPERATIONS,
        action_objects=[request_prepid],
        object_type='requests',
        base_object=mcm_request
    )
    mcm_request.notify(subject, message)
    return validations_count < max_validations
def announce(self, notes="", user=""):
    """Announce this batch to Data Ops: compose and send the announcement
    email listing all requests and their total events, store the email's
    message id, and advance the batch status. Returns False when the batch is
    not 'new' or has no requests, True on success."""
    if self.get_attribute('status') != 'new':
        return False
    if len(self.get_attribute('requests')) == 0:
        return False
    # append the new notes to whatever was already stored
    current_notes = self.get_attribute('notes')
    if current_notes:
        current_notes += '\n'
    if notes:
        current_notes += notes
    self.set_attribute('notes', current_notes)
    total_events = 0
    content = self.get_attribute('requests')
    total_requests = len(content)
    rdb = database('requests')
    # prepare the announcing message
    (campaign, batchNumber) = self.get_attribute('prepid').split('_')[-1].split('-')
    subject = self.get_subject()
    # per-campaign bullet lists of "prepid (dataset) -> workflow"
    request_messages = {}
    for r in content:
        # loose binding of the prepid to the request name, might change later on
        if 'pdmv_prep_id' in r['content']:
            pid = r['content']['pdmv_prep_id']
        else:
            pid = r['name'].split('_')[1]
        mcm_r = rdb.get(pid)
        total_events += mcm_r['total_events']
        c = mcm_r['member_of_campaign']
        if c not in request_messages:
            request_messages[c] = ""
        request_messages[c] += " * %s (%s) -> %s\n" % (
            pid, mcm_r['dataset_name'], r['name'])
    campaigns = sorted(request_messages.keys())
    message = ""
    message += "Dear Data Operation Team,\n\n"
    message += "may you please consider the following batch number %d of %s requests for the campaign%s %s:\n\n" % (
        int(batchNumber),
        total_requests,
        "s" if len(campaigns) > 1 else "",
        ','.join(campaigns))
    for c in campaigns:
        message += request_messages[c]
        message += "\n"
    # regex inserts thousands separators into the event count
    message += "For a total of %s events\n\n" % (re.sub(
        "(\d)(?=(\d{3})+(?!\d))", r"\1,", "%d" % total_events))
    if self.get_attribute('extension'):
        message += "This batch is for an extension : {0}\n".format(
            self.get_attribute('extension'))
    if self.get_attribute('version'):
        message += "This batch is a resubmission : v{0}\n".format(
            self.get_attribute('version') + 1)
    message += "Link to the batch:\n"
    l_type = locator()
    message += '%s/batches?prepid=%s \n\n' % (l_type.baseurl(),
                                              self.get_attribute('prepid'))
    if current_notes:
        message += "Additional comments for this batch:\n" + current_notes + '\n'
    self.logger.info('Message send for batch %s' % (self.get_attribute('prepid')))
    self.get_current_user_role_level()
    to_who = [settings.get_value('service_account')]
    if l_type.isDev():
        to_who.append(settings.get_value('hypernews_test'))
    else:
        to_who.append(settings.get_value('dataops_announce'))
    notification(subject,
                 message,
                 [],
                 group=notification.BATCHES,
                 target_role='production_manager',
                 action_objects=[self.get_attribute('prepid')],
                 object_type='batches',
                 base_object=self)
    # keep the email thread id so later notifications can reply to it
    returned_id = self.notify(subject, message, who=to_who)
    self.set_attribute('message_id', returned_id)
    self.reload()
    # toggle the status
    # only when we are sure it functions
    # NOTE(review): set_status() is called twice — presumably to step through
    # two consecutive status transitions; confirm against batch.set_status
    self.set_status()
    self.set_status()
    return True
def __init__(self):
    """REST resource restricted to the users allowed to acknowledge campaigns."""
    CampaignsRESTResource.__init__(self)
    # access list comes from the 'allowed_to_acknowledge' setting
    self.access_user = settings.get_value('allowed_to_acknowledge')
    self.before_request()
    self.count_call()
def __init__(self):
    # value of the 'priority_per_block' setting; presumably maps MccM block
    # number to a base priority — confirm against the settings database
    self.blocks = settings.get_value("priority_per_block")
def next_batch_id(self, next_campaign, version=0, extension=0, process_string="", flown_with="", create_batch=True):
    """Return the batch prepid to use for the given campaign/flow, reusing the
    newest open batch when it still has room, otherwise allocating the next
    number in the family. Optionally creates the batch document (with notes
    collected from the campaign(s) and flow) when it does not exist yet.
    Runs under a global lock to avoid name clashes."""
    with locker.lock('batch name clashing protection'):
        self.bdb.logger.debug("working on batch prepid")
        if flown_with:
            batchName = flown_with + '_' + next_campaign
        else:
            batchName = next_campaign
        # find the max batch with similar name, descending guarantees that
        # the returned one will be biggest
        __query_options = {"endkey": '"%s-00001"' % (batchName),
                           "startkey": '"%s-99999"' % (batchName),
                           "descending": "true",
                           "limit": 1}
        max_in_batch = settings.get_value('max_in_batch')
        top_batch = self.bdb.raw_query("prepid", __query_options)
        new_batch = True
        if len(top_batch) != 0:
            # we already have some existing batch, check if its fine for appending
            # get a single batch
            single_batch = self.bdb.get(top_batch[0]["id"])
            if single_batch["status"] == "new":
                # check if batch is not locked in other threads.
                if len(single_batch["requests"]) + semaphore_events.count(single_batch['prepid']) < max_in_batch:
                    # we found a needed batch
                    self.bdb.logger.debug("found a matching batch:%s" % (single_batch["prepid"]))
                    batchNumber = int(single_batch["prepid"].split("-")[-1])
                    new_batch = False
            if new_batch:
                # we default to max batch and increment its number
                self.bdb.logger.debug("no new batch. incementing:%s +1" % (single_batch["prepid"]))
                batchNumber = int(top_batch[0]["id"].split("-")[-1]) + 1
        else:
            self.bdb.logger.debug("starting new batch family:%s" % (batchName))
            batchNumber = 1
        batchName += '-%05d' % (batchNumber)
        if not self.bdb.document_exists(batchName) and create_batch:
            newBatch = batch({
                '_id': batchName,
                'prepid': batchName,
                'version': version,
                'extension': extension,
                'process_string': process_string})
            notes = ""
            cdb = database('campaigns')
            cs = []
            # next_campaign may be a plain campaign or a chained campaign
            if not cdb.document_exists(next_campaign):
                ccdb = database('chained_campaigns')
                if ccdb.document_exists(next_campaign):
                    mcm_cc = ccdb.get(next_campaign)
                    for (c, f) in mcm_cc['campaigns']:
                        cs.append(cdb.get(c))
            else:
                cs = [cdb.get(next_campaign)]
            # collect the notes of every involved campaign and of the flow
            for mcm_c in cs:
                if mcm_c['notes']:
                    notes += "Notes about the campaign %s:\n" % mcm_c['prepid'] + mcm_c['notes'] + "\n"
            if flown_with:
                fdb = database('flows')
                mcm_f = fdb.get(flown_with)
                if mcm_f['notes']:
                    notes += "Notes about the flow:\n" + mcm_f['notes'] + "\n"
            if notes:
                newBatch.set_attribute('notes', notes)
            newBatch.update_history({'action': 'created'})
            self.bdb.save(newBatch.json())
        return batchName
class sequence(json_base):
    """A single cmsDriver sequence: holds one value per known cmsDriver option
    and can render itself as a cmsDriver command-line fragment."""

    # class-level default schema; also re-fetched per instance in __init__
    _json_base__schema = settings.get_value('cmsdriver_options')

    def __init__(self, json_input=None):
        json_input = json_input if json_input else {}
        self._json_base__schema = settings.get_value('cmsdriver_options')
        # how to get the options ?
        # in cmssw
        # import Configuration.PyReleaseValidation.Options as opt
        # map( lambda s : s.replace('--','') ,opt.parser._long_opt.keys() )
        # update self according to json_input
        self.__update(json_input)
        self.__validate()

    def __validate(self):
        # every schema key must be present in the stored json
        if not self._json_base__json:
            return
        for key in self._json_base__schema:
            if key not in self._json_base__json:
                raise self.IllegalAttributeName(key)

    # for all parameters in json_input store their values
    # in self._json_base__json
    def __update(self, json_input):
        self._json_base__json = {}
        if not json_input:
            self._json_base__json = deepcopy(self._json_base__schema)
        else:
            # take the caller's value when present, the schema default otherwise
            for key in self._json_base__schema:
                if key in json_input:
                    self._json_base__json[key] = json_input[key]
                else:
                    self._json_base__json[key] = deepcopy(
                        self._json_base__schema[key])

    def srepr(self, arg):
        """Render a value as a string for the cmsDriver command line:
        strings pass through (decoded), ints are stringified, and other
        iterables are joined with commas."""
        if isinstance(arg, basestring):  # Python 3: isinstance(arg, str)
            return arg.decode('utf-8')
        elif isinstance(
                arg, int
        ):  # in case we have int we should make it string for cmsDriver construction
            return str(arg)
        try:
            return ",".join(self.srepr(x) for x in arg)
        except TypeError:  # catch when for loop fails
            return arg.decode('utf-8')  # not a sequence so just return repr

    def to_command_line(self, attribute):
        """Render one attribute as its cmsDriver command-line option, or ''
        when the attribute should be omitted."""
        # 'index' is internal bookkeeping, never emitted
        if attribute == 'index':
            return ''
        if attribute == 'nThreads':
            # single-threaded is the default, no flag needed
            if int(self.get_attribute('nThreads')) <= 1:
                return ''
        if self.get_attribute(attribute) == '':
            return ''
        elif self.get_attribute(attribute) == True:
            # boolean flags have no value
            return "--" + str(attribute)
        elif self.get_attribute(attribute) == False:
            return ''
        elif attribute == 'extra' and self.get_attribute(attribute):
            # 'extra' is pasted verbatim, without a --extra prefix
            return self.get_attribute(attribute)
        elif self.get_attribute(attribute):
            return "--" + attribute + " " + self.srepr(
                self.get_attribute(attribute))
        else:
            return ''

    def to_string(self):
        """Concatenate every key and its value (sorted by key) into one
        string, e.g. for fingerprinting the sequence."""
        text = ''
        keys = self.json().keys()
        keys.sort()
        for key in keys:
            if key in []:
                continue
            text += key + str(self.get_attribute(key))
        return text

    def build_cmsDriver(self):
        """Build the cmsDriver option string for this sequence."""
        # always MC in McM. better to say it
        command = '--mc '
        for key in self.json():
            if key == "inline_custom":
                if int(self.get_attribute(key)) == 0:  # if inline_custom is 0
                    continue  # means that cmssw might not have inline_custom support
            addone = self.to_command_line(key)
            # prevent from having over spaces
            if addone:
                command += addone
                command += ' '
        return command
def next_batch_id(self, next_campaign, version=0, extension=0, process_string="", flown_with="", create_batch=True):
    """Return the batch prepid to use for the given campaign/flow, reusing the
    newest open batch when it still has room, otherwise allocating the next
    number in the family. Optionally creates the batch document (with notes
    collected from the campaign(s) and flow) when it does not exist yet.
    Runs under a global lock to avoid name clashes."""
    with locker.lock('batch name clashing protection'):
        self.bdb.logger.debug("working on batch prepid")
        if flown_with:
            batchName = flown_with + '_' + next_campaign
        else:
            batchName = next_campaign
        # find the max batch with similar name, descending guarantees that
        # the returned one will be biggest
        __query_options = {
            "endkey": '"%s-00001"' % (batchName),
            "startkey": '"%s-99999"' % (batchName),
            "descending": "true",
            "limit": 1
        }
        max_in_batch = settings.get_value('max_in_batch')
        top_batch = self.bdb.raw_query("prepid", __query_options)
        new_batch = True
        if len(top_batch) != 0:
            # we already have some existing batch, check if its fine for appending
            # get a single batch
            single_batch = self.bdb.get(top_batch[0]["id"])
            if single_batch["status"] == "new":
                # check if batch is not locked in other threads.
                if len(single_batch["requests"]) + semaphore_events.count(
                        single_batch['prepid']) < max_in_batch:
                    # we found a needed batch
                    self.bdb.logger.debug("found a matching batch:%s" %
                                          (single_batch["prepid"]))
                    batchNumber = int(
                        single_batch["prepid"].split("-")[-1])
                    new_batch = False
            if new_batch:
                # we default to max batch and increment its number
                self.bdb.logger.debug("no new batch. incementing:%s +1" % (single_batch["prepid"]))
                batchNumber = int(top_batch[0]["id"].split("-")[-1]) + 1
        else:
            self.bdb.logger.debug("starting new batch family:%s" % (batchName))
            batchNumber = 1
        batchName += '-%05d' % (batchNumber)
        if not self.bdb.document_exists(batchName) and create_batch:
            newBatch = batch({
                '_id': batchName,
                'prepid': batchName,
                'version': version,
                'extension': extension,
                'process_string': process_string
            })
            notes = ""
            cdb = database('campaigns')
            cs = []
            # next_campaign may be a plain campaign or a chained campaign
            if not cdb.document_exists(next_campaign):
                ccdb = database('chained_campaigns')
                if ccdb.document_exists(next_campaign):
                    mcm_cc = ccdb.get(next_campaign)
                    for (c, f) in mcm_cc['campaigns']:
                        cs.append(cdb.get(c))
            else:
                cs = [cdb.get(next_campaign)]
            # collect the notes of every involved campaign and of the flow
            for mcm_c in cs:
                if mcm_c['notes']:
                    notes += "Notes about the campaign %s:\n" % mcm_c[
                        'prepid'] + mcm_c['notes'] + "\n"
            if flown_with:
                fdb = database('flows')
                mcm_f = fdb.get(flown_with)
                if mcm_f['notes']:
                    notes += "Notes about the flow:\n" + mcm_f[
                        'notes'] + "\n"
            if notes:
                newBatch.set_attribute('notes', notes)
            newBatch.update_history({'action': 'created'})
            self.bdb.save(newBatch.json())
        return batchName
def get(self, batch_id=None, n_to_go=1):
    """
    Look for batches that are new and with 1 requests or /N and announce them, or /batchid or /batchid/N
    """
    bdb = database('batches')
    res = []
    if settings.get_value('batch_announce'):
        # announce every 'new' batch that reached the request threshold
        __query = bdb.construct_lucene_query({'status': 'new'})
        new_batches = bdb.full_text_search('search', __query, page=-1)
        for new_batch in new_batches:
            if batch_id and new_batch['prepid'] != batch_id:
                continue
            if len(new_batch['requests']) >= n_to_go:
                # it is good to be announced !
                res.append(self.announce_with_text(new_batch['_id'], 'Automatic announcement.'))
    else:
        self.logger.info('Not announcing any batch')
    if settings.get_value('batch_set_done'):
        # check on on-going batches
        rdb = database('requests')
        __query2 = bdb.construct_lucene_query({'status': 'announced'})
        announced_batches = bdb.full_text_search('search', __query2, page=-1)
        for announced_batch in announced_batches:
            if batch_id and announced_batch['prepid'] != batch_id:
                continue
            this_bid = announced_batch['prepid']
            # a batch is done only when every request in it is done (or ignorable)
            all_done = False
            for r in announced_batch['requests']:
                all_done = False
                wma_name = r['name']
                rid = r['content']['pdmv_prep_id']
                if not rdb.document_exists(rid):
                    # it OK like this.
                    # It could happen that a request has been deleted and yet in a batch
                    continue
                mcm_r = rdb.get(rid)
                if mcm_r['status'] == 'done':
                    # if done, it's done
                    all_done = True
                else:
                    if len(mcm_r['reqmgr_name']) == 0:
                        # not done, and no requests in request manager, ignore = all_done
                        all_done = True
                    else:
                        if wma_name != mcm_r['reqmgr_name'][0]['name']:
                            # not done, and a first requests that does not correspond
                            # to the one in the batch, ignore = all_done
                            all_done = True
                if not all_done:
                    # no need to go further
                    break
            if all_done:
                # set the status and save
                mcm_b = batch(announced_batch)
                mcm_b.set_status()
                bdb.update(mcm_b.json())
                res.append({"results": True, "prepid": this_bid, "message": "Set to done"})
            else:
                res.append({"results": False, "prepid": this_bid, "message": "Not completed"})
    else:
        self.logger.info('Not setting any batch to done')
    # anyways return something
    return res
        # NOTE(review): continuation of a ThreadPool method whose `def` line is
        # outside this view — queues a task as a (callable, args, kwargs) tuple
        self.tasks.put((func, args, kargs))

    def wait_completion(self):
        """Wait for completion of all the tasks in the queue"""
        self.tasks.join()

    def get_queue_length(self):
        """Return the number of tasks waiting in the Queue"""
        return self.tasks.qsize()

# END OF THREAD POOL

# module-level pool shared by all submissions; size comes from settings
submit_pool = ThreadPool("submission", settings.get_value('threads_num_submission'))


class Handler():
    """
    A class which manages locks for the resources.
    """
    logger = logging.getLogger("mcm_error")
    hname = ''  # handler's name
    lock = None

    def __init__(self, **kwargs):
        # use the caller-provided lock when given, otherwise a fresh one
        if 'lock' not in kwargs:
            self.lock = Lock()
        else:
            self.lock = kwargs['lock']
def get(self, batch_id=None, n_to_go=1):
    """
    Look for batches that are new and with 1 requests or /N and announce them,
    or /batchid or /batchid/N
    """
    bdb = database('batches')
    results = []

    if not settings.get_value('batch_announce'):
        self.logger.info('Not announcing any batch')
    else:
        new_query = bdb.construct_lucene_query({'status': 'new'})
        for fresh in bdb.full_text_search('search', new_query, page=-1):
            if batch_id and fresh['prepid'] != batch_id:
                continue
            if len(fresh['requests']) < n_to_go:
                continue
            # enough requests accumulated: it is good to be announced !
            results.append(
                self.announce_with_text(fresh['_id'], 'Automatic announcement.'))

    if not settings.get_value('batch_set_done'):
        self.logger.info('Not setting any batch to done')
    else:
        # check on on-going batches
        rdb = database('requests')
        ann_query = bdb.construct_lucene_query({'status': 'announced'})
        for ongoing in bdb.full_text_search('search', ann_query, page=-1):
            if batch_id and ongoing['prepid'] != batch_id:
                continue
            bid = ongoing['prepid']
            finished = False
            for req in ongoing['requests']:
                finished = False
                expected_name = req['name']
                req_prepid = req['content']['pdmv_prep_id']
                if not rdb.document_exists(req_prepid):
                    # deleted requests may linger in a batch; that is OK, skip
                    continue
                mcm_r = rdb.get(req_prepid)
                if mcm_r['status'] == 'done':
                    # if done, it's done
                    finished = True
                elif len(mcm_r['reqmgr_name']) == 0:
                    # not done and never injected in request manager: ignore it
                    finished = True
                elif expected_name != mcm_r['reqmgr_name'][0]['name']:
                    # first reqmgr entry does not match the one recorded in
                    # the batch: ignore it
                    finished = True
                if not finished:
                    # no need to go further
                    break
            if finished:
                # set the status and save
                mcm_b = batch(ongoing)
                mcm_b.set_status()
                bdb.update(mcm_b.json())
                results.append({
                    "results": True,
                    "prepid": bid,
                    "message": "Set to done"
                })
            else:
                results.append({
                    "results": False,
                    "prepid": bid,
                    "message": "Not completed"
                })

    # anyways return something
    return results
def __init__(self):
    # Users allowed to acknowledge, read once from settings.
    # NOTE(review): presumably a list of usernames — confirm against the
    # 'allowed_to_acknowledge' setting's schema.
    self.access_user = settings.get_value('allowed_to_acknowledge')
    # Base-class bookkeeping: request pre-processing first, then the call
    # counter — keep this order.
    self.before_request()
    self.count_call()
def sendMail(self, destination, subject, text, sender=None, reply_msg_ID=None, accumulate=False): if not isinstance(destination, list): print "Cannot send email. destination should be a list of strings" return destination.sort() msg = MIMEMultipart() # it could happen that message are send after forking, threading and there's no current user anymore msg['From'] = sender if sender else '*****@*****.**' # add a mark on the subjcet automatically if locator().isDev(): msg['Subject'] = '[McM-dev] ' + subject destination = ["*****@*****.**" ] # if -dev send only to service account and sender if sender: destination.append(sender) else: msg['Subject'] = '[McM] ' + subject msg['To'] = COMMASPACE.join(destination) msg['Date'] = formatdate(localtime=True) new_msg_ID = make_msgid() msg['Message-ID'] = new_msg_ID if reply_msg_ID is not None: msg['In-Reply-To'] = reply_msg_ID msg['References'] = reply_msg_ID # accumulate messages prior to sending emails com__accumulate = settings.get_value('com_accumulate') force_com_accumulate = settings.get_value('force_com_accumulate') if force_com_accumulate or (accumulate and com__accumulate): with locker.lock('accumulating_notifcations'): # get a subject where the request name is taken out subject_type = " ".join( filter(lambda w: w.count('-') != 2, msg['Subject'].split())) addressees = msg['To'] sendee = msg['From'] key = (subject_type, sendee, addressees) if key in self.cache: self.cache[key]['Text'] += '\n\n' self.cache[key]['Text'] += text self.cache[key]['N'] += 1 else: self.cache[key] = {'Text': text, 'N': 1} # self.logger.info('Got a message in cache %s'% (self.cache.keys())) return new_msg_ID # add a signature automatically text += '\n\n' text += 'McM Announcing service' try: msg.attach(MIMEText(text)) smtpObj = smtplib.SMTP() smtpObj.connect() smtpObj.sendmail(sender, destination, msg.as_string()) smtpObj.quit() return new_msg_ID except Exception as e: print "Error: unable to send email", e.__class__
def notify(self,
           subject,
           message,
           who=None,
           actors=True,
           service=True,
           HN=False,
           sender=None,
           Nchild=-1,
           reply_msg_ID=None,
           accumulate=False):
    """Email a notification about this object.

    who          -- extra recipient addresses (None means none; replaces the
                    old mutable-default [] and behaves identically)
    actors       -- also notify the authors found in this object's history
    service      -- always CC the service account
    HN           -- also CC the hypernews test destination
    sender       -- From address; falls back to the current user's email
    Nchild       -- unused; kept for interface compatibility
    reply_msg_ID -- thread the mail under this Message-ID when given
    accumulate   -- forwarded to sendMail to batch notifications
    Returns whatever communicator.sendMail returns (the new Message-ID).
    """
    # copy the explicit recipients so the caller's list is never mutated
    dest = list(who) if who else []
    if actors:
        auth = authenticator()
        # add the actors of the object's history to the recipients
        for history_entry in self.get_attribute('history'):
            username = history_entry.get('updater', {}).get('author_username')
            if not username:
                continue
            email = history_entry.get('updater', {}).get('author_email')
            if not email:
                continue
            if email in dest:
                # No need to check the same person again
                continue
            action = history_entry.get('action')
            if not action:
                continue
            # plain users are only notified about entries they registered
            user_level, _ = auth.get_user_role_index(username)
            if user_level == access_rights.user and action != 'register':
                continue
            dest.append(email)
    if service:
        # let the service know at any time
        dest.append(settings.get_value('service_account'))
    if HN:
        # back bone HN notification ?
        dest.append(settings.get_value('hypernews_test'))
    # be sure to not have duplicates
    dest = set(dest)
    exclude_emails = set(settings.get_value('exclude_from_notify'))
    dest = list(dest - exclude_emails)
    if not len(dest):
        dest.append(settings.get_value('service_account'))
        subject += '. And no destination was set'
    sender = sender if sender else self.current_user_email
    self.logger.info('Notification %s from %s send to %s [acc:%s]' % (
        subject, sender, ', '.join(dest), accumulate))
    return self.com.sendMail(dest,
                             subject,
                             message,
                             sender,
                             reply_msg_ID,
                             accumulate=accumulate)