Example #1
    def notify_batch(self, batch_id, message_notes):
        message = message_notes
        to_who = [settings().get_value('service_account')]
        l_type = locator()
        if l_type.isDev():
            to_who.append(settings().get_value('hypernews_test'))
        else:
            to_who.append(settings().get_value('dataops_announce'))

        single_batch = batch(self.bdb.get(batch_id))
        subject = single_batch.get_subject('[Notification]')
        current_message_id = single_batch.get_attribute('message_id')

        self.logger.log('current msgID: %s' % current_message_id)
        if current_message_id != '':
            result = single_batch.notify(subject,
                                         message,
                                         who=to_who,
                                         sender=None,
                                         reply_msg_ID=current_message_id)
            self.logger.log('result if True : %s' % result)
        else:
            result = single_batch.notify(subject,
                                         message,
                                         who=to_who,
                                         sender=None)
            self.logger.log('result if False : %s' % result)

        single_batch.update_history({'action': 'notify', 'step': message})
        single_batch.reload()

        return {'results': result}
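
The only branch above is whether the e-mail is threaded as a reply: when the batch already carries a message_id, the notification is sent with reply_msg_ID so it lands in the same thread. A minimal, self-contained sketch of that decision (the helper name and the notify-style keyword arguments are illustrative, not the McM API):

    def build_notify_kwargs(to_who, current_message_id):
        """Thread as a reply only when a previous message id is known."""
        kwargs = {'who': to_who, 'sender': None}
        if current_message_id:  # '' means nothing to reply to yet
            kwargs['reply_msg_ID'] = current_message_id
        return kwargs

    # Both branches of notify_batch collapse into one call site:
    print(build_notify_kwargs(['service@example.org'], ''))
    print(build_notify_kwargs(['service@example.org'], '<id-123>'))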
Example #2
 def GET(self, *args):
     """
     Reset all requests in a batch (or list of) and set the status to reset
     """
     res = []
     bdb = database('batches')
     rdb = database('requests')
     bids = args[0]
     for bid in bids.split(','):
         mcm_b = bdb.get(bid)
         for r in mcm_b['requests']:
             if 'pdmv_prep_id' not in r['content']:
                 continue
             rid = r['content']['pdmv_prep_id']
             if not rdb.document_exists(rid):
                 continue
             mcm_r = request(rdb.get(rid))
             try:
                 mcm_r.reset()
                 rdb.update(mcm_r.json())
             except Exception:
                 continue
         batch_to_update = batch(mcm_b)
         batch_to_update.set_attribute('status', 'reset')
         batch_to_update.update_history({'action': 'set status',
                                         'step': 'reset'})
         bdb.update(batch_to_update.json())
         res.append({'prepid': bid, 'results': True})
     return dumps(res)
Example #3
    def get(self, batch_ids):
        """
        Set batch status to hold (from new) or to new (from hold)
        """
        res = []
        bdb = database('batches')
        for bid in batch_ids.split(','):
            mcm_b = batch(bdb.get(bid))
            if mcm_b.get_attribute('status') == 'new':
                mcm_b.set_attribute('status', 'hold')
                mcm_b.update_history({'action': 'set status', 'step': 'hold'})
            elif mcm_b.get_attribute('status') == 'hold':
                mcm_b.set_attribute('status', 'new')
                mcm_b.update_history({'action': 'set status', 'step': 'new'})
            else:
                res.append({
                    'prepid': bid,
                    'results': False,
                    'message': 'Only status hold or new allowed'
                })
                continue

            bdb.update(mcm_b.json())
            res.append({'prepid': bid, 'results': True})
        return res
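
The status flip here is a two-state toggle with every other state rejected. The same rule as a small pure function, a sketch with illustrative names rather than the McM API:

    def toggle_hold(status):
        """Flip 'new' <-> 'hold'; reject any other status."""
        transitions = {'new': 'hold', 'hold': 'new'}
        if status not in transitions:
            return None, 'Only status hold or new allowed'
        return transitions[status], None

    print(toggle_hold('new'))   # ('hold', None)
    print(toggle_hold('done'))  # (None, 'Only status hold or new allowed')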
Example #4
 def get(self, batch_ids):
     """
     Reset all requests in a batch (or list of) and set the status to reset
     """
     res = []
     bdb = database('batches')
     rdb = database('requests')
     for bid in batch_ids.split(','):
         mcm_b = bdb.get(bid)
         for r in mcm_b['requests']:
             if 'pdmv_prep_id' not in r['content']:
                 continue
             rid = r['content']['pdmv_prep_id']
             if not rdb.document_exists(rid):
                 continue
             mcm_r = request(rdb.get(rid))
             try:
                 mcm_r.reset()
                 rdb.update(mcm_r.json())
             except Exception:
                 continue
         batch_to_update = batch(mcm_b)
         batch_to_update.set_attribute('status', 'reset')
         batch_to_update.update_history({
             'action': 'set status',
             'step': 'reset'
         })
         bdb.update(batch_to_update.json())
         res.append({'prepid': bid, 'results': True})
     return res
Example #5
    def notify_batch(self, batch_id, message_notes):
        message = message_notes
        to_who = [settings().get_value('service_account')]
        l_type = locator()
        if l_type.isDev():
            to_who.append(settings().get_value('hypernews_test'))
        else:
            to_who.append(settings().get_value('dataops_announce'))

        single_batch = batch(self.bdb.get(batch_id))
        subject = single_batch.get_subject('[Notification]')
        current_message_id = single_batch.get_attribute('message_id')

        self.logger.log('current msgID: %s' % current_message_id)
        if current_message_id != '':
            result = single_batch.notify(subject, message, who=to_who, sender=None, reply_msg_ID=current_message_id)
            self.logger.log('result if True : %s' % result)
        else:
            result = single_batch.notify(subject, message, who=to_who, sender=None)
            self.logger.log('result if False : %s' % result)

        single_batch.update_history({'action': 'notify', 'step': message})
        single_batch.reload()

        return {'results': result}
Example #6
    def notify_batch(self, batch_id, message_notes):
        message = message_notes
        to_who = [settings.get_value('service_account')]
        l_type = locator()
        if l_type.isDev():
            to_who.append(settings.get_value('hypernews_test'))
        else:
            to_who.append(settings.get_value('dataops_announce'))

        single_batch = batch(self.bdb.get(batch_id))
        subject = single_batch.get_subject('[Notification]')
        current_message_id = single_batch.get_attribute('message_id')
        self.logger.info('current msgID: %s' % current_message_id)
        if current_message_id != '':
            result = single_batch.notify(subject, message, who=to_who, sender=None, reply_msg_ID=current_message_id)
            self.logger.info('result if True : %s' % result)
        else:
            result = single_batch.notify(subject, message, who=to_who, sender=None)
            self.logger.info('result if False : %s' % result)
        notification(
            subject,
            message,
            [],
            group=notification.BATCHES,
            target_role='production_manager',
            action_objects=[single_batch.get_attribute('prepid')],
            object_type='batches',
            base_object=single_batch
        )
        single_batch.update_history({'action': 'notify', 'step': message})
        single_batch.reload()
        return {'results': result}
Example #7
    def PUT(self):
        """
        Update the content of a batch given the json content
        """
        bdb = database('batches')
        data = threaded_loads(cherrypy.request.body.read().strip())

        mcm_b = batch(data)

        bdb.update(mcm_b.json())
Example #8
 def announce_with_text(self, bid, message):
     bdb = database('batches')
     if not semaphore_events.is_set(bid):
         return {"results": False, "message": "Batch {0} has on-going submissions.".format(bid) , "prepid" : bid}
     b = batch(bdb.get(bid))
     r = b.announce(message)
     if r:
         return {"results":bdb.update(b.json()) , "message" : r , "prepid" : bid}
     else:
         return {"results":False , "prepid" : bid}
Example #9
 def PUT(self):
     """
     Update the content of a batch given the json content
     """
     bdb = database('batches')
     data = loads(cherrypy.request.body.read().strip())
   
     mcm_b = batch(data)
     
     bdb.update(mcm_b.json())
Example #10
    def GET(self, *args):
        """
        Look for batches that are new and with 1 requests or /N and announce them, or /batchid or /batchid/N
        """
        self.N_to_go = 1
        bid = None
        if len(args):
            if args[0].isdigit():
                self.N_to_go = int(args[0])
            else:
                bid = args[0]
            if len(args) == 2:
                self.N_to_go = int(args[1])
        bdb = database('batches')
        res = []
        if settings().get_value('batch_announce'):
            new_batches = bdb.queries(['status==new'])
            for new_batch in new_batches:
                if bid and new_batch['prepid'] != bid:
                    continue
                if len(new_batch['requests']) >= self.N_to_go:
                    ## it is good to be announced !
                    res.append(self.announce_with_text(new_batch['_id'], 'Automatic announcement.'))
        else:
            self.logger.log('Not announcing any batch')
        
        if settings().get_value('batch_set_done'):
            ## check on on-going batches
            rdb = database('requests')
            announced_batches = bdb.queries(['status==announced'])
            for announced_batch in announced_batches:
                if bid and announced_batch['prepid'] != bid:
                    continue
                this_bid = announced_batch['prepid']
                all_done = False
                for r in announced_batch['requests']:
                    wma_name = r['name']
                    rid = r['content']['pdmv_prep_id']
                    mcm_r = rdb.get(rid)
                    all_done = (mcm_r['status'] == 'done')
                    if not all_done:
                        ## no need to go further
                        break
                if all_done:
                    ## set the status and save
                    mcm_b = batch(announced_batch)
                    mcm_b.set_status()
                    bdb.update(mcm_b.json())
                    res.append({"results": True, "prepid": this_bid, "message": "Set to done"})
                else:
                    res.append({"results": False, "prepid": this_bid, "message": "Not completed"})
        else:
            self.logger.log('Not setting any batch to done')

        #anyways return something
        return dumps(res)
Example #11
    def announce_with_text(self, bid, message):
        bdb = database('batches')
        if not semaphore_events.is_set(bid):
            return {"results": False, "message":
                    "Batch {0} has on-going submissions.".format(bid), "prepid": bid}

        b = batch(bdb.get(bid))
        workflows = ''
        for dictionary in b.get_attribute('requests'):
            workflows += dictionary['name'] + ','
        workflows = workflows[:-1]
        r = ''
        result = {}
        if workflows != '':
            approver = RequestApprover(bid, workflows)
            result = approver.internal_run()
            if result['results']:
                r = b.announce(message)
        else:
            r = b.announce(message)
        if r:
            map_wf_to_prepid = {}
            for dictionary in b.get_attribute('requests'):
                wf = dictionary.get('name')
                prepid = dictionary.get('content', {}).get('pdmv_prep_id')
                if not wf or not prepid:
                    continue

                if wf not in map_wf_to_prepid:
                    map_wf_to_prepid[wf] = []

                map_wf_to_prepid[wf].append(prepid)

            rdb = database('requests')
            priority_coeff = settings.get_value('nanoaod_priority_increase_coefficient')
            for wf, requests in map_wf_to_prepid.iteritems():
                if len(requests) == 1 and 'nanoaod' in requests[0].lower():
                    for r_prepid in requests:
                        req = request(rdb.get(r_prepid))
                        current_priority = req.get_attribute('priority')
                        new_priority = int(current_priority + priority_coeff * 1000)
                        req.change_priority(new_priority)

            return {
                "results": bdb.update(b.json()),
                "message": r,
                "prepid": bid
            }
        else:
            return {
                "results": False,
                "prepid": bid,
                "message": result['message'] if 'message' in result and not r else r
            }
Example #12
    def PUT(self):
        """
        Save the content of a batch given the json content
        """
        bdb = database('batches')
        data = threaded_loads(cherrypy.request.body.read().strip())

        data.pop('_rev')

        mcm_b = batch(data)

        bdb.save(mcm_b.json())
Example #13
    def PUT(self):
        """
        Save the content of a batch given the json content
        """
        bdb = database('batches')
        data = loads(cherrypy.request.body.read().strip())
      
        data.pop('_rev')

        mcm_b = batch(data)

        bdb.save(mcm_b.json())
Example #14
 def announce_with_text(self, bid, message):
     bdb = database('batches')
     if not semaphore_events.is_set(bid):
         return {
             "results": False,
             "message": "Batch {0} has on-going submissions.".format(bid),
             "prepid": bid
         }
     b = batch(bdb.get(bid))
     r = b.announce(message)
     if r:
         return {
             "results": bdb.update(b.json()),
             "message": r,
             "prepid": bid
         }
     else:
         return {"results": False, "prepid": bid}
Example #15
    def get(self, batch_ids):
        """
        Set batch status to hold (from new) or to new (from hold)
        """
        res = []
        bdb = database('batches')
        for bid in batch_ids.split(','):
            mcm_b = batch(bdb.get(bid))
            if mcm_b.get_attribute('status') == 'new':
                mcm_b.set_attribute('status', 'hold')
                mcm_b.update_history({'action': 'set status', 'step': 'hold'})
            elif mcm_b.get_attribute('status') == 'hold':
                mcm_b.set_attribute('status', 'new')
                mcm_b.update_history({'action': 'set status', 'step': 'new'})
            else:
                res.append({'prepid': bid, 'results': False, 'message': 'Only status hold or new allowed'})
                continue

            bdb.update(mcm_b.json())
            res.append({'prepid': bid, 'results': True})
        return res
Example #16
    def announce_with_text(self, bid, message):
        bdb = database('batches')
        if not semaphore_events.is_set(bid):
            return {
                "results": False,
                "message": "Batch {0} has on-going submissions.".format(bid),
                "prepid": bid
            }

        b = batch(bdb.get(bid))
        workflows = ''
        for dictionary in b.get_attribute('requests'):
            workflows += dictionary['name'] + ','
        workflows = workflows[:-1]
        r = ''
        result = {}
        if workflows != '':
            approver = RequestApprover(bid, workflows)
            result = approver.internal_run()
            if result['results']:
                r = b.announce(message)
        else:
            r = b.announce(message)
        if r:
            return {
                "results": bdb.update(b.json()),
                "message": r,
                "prepid": bid
            }
        else:
            return {
                "results": False,
                "prepid": bid,
                "message": result['message'] if 'message' in result and not r else r
            }
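
The workflows string above is built by concatenation plus trimming the trailing comma; an equivalent and slightly safer sketch using join, which also copes with entries that have no name:

    def workflow_csv(request_dicts):
        """Comma-join the 'name' field of each request entry."""
        return ','.join(d['name'] for d in request_dicts if d.get('name'))

    print(workflow_csv([{'name': 'wf_A'}, {'name': 'wf_B'}]))  # wf_A,wf_B
    print(workflow_csv([]))  # '' -> announce without calling the approver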
Example #17
    def GET(self, *args):
        """
        Look for batches that are new and with 1 requests or /N and announce them, or /batchid or /batchid/N
        """
        self.N_to_go = 1
        bid = None
        if len(args):
            if args[0].isdigit():
                self.N_to_go = int(args[0])
            else:
                bid = args[0]
            if len(args) == 2:
                self.N_to_go = int(args[1])
        bdb = database('batches')
        res = []
        if settings().get_value('batch_announce'):
            new_batches = bdb.queries(['status==new'])
            for new_batch in new_batches:
                if bid and new_batch['prepid'] != bid: continue
                if len(new_batch['requests']) >= self.N_to_go:
                    ## it is good to be announced !
                    res.append(
                        self.announce_with_text(new_batch['_id'],
                                                'Automatic announcement.'))
        else:
            self.logger.log('Not announcing any batch')

        if settings().get_value('batch_set_done'):
            ## check on on-going batches
            rdb = database('requests')
            announced_batches = bdb.queries(['status==announced'])
            for announced_batch in announced_batches:
                if bid and announced_batch['prepid'] != bid: continue
                this_bid = announced_batch['prepid']

                all_done = False
                for r in announced_batch['requests']:
                    all_done = False
                    wma_name = r['name']
                    rid = r['content']['pdmv_prep_id']
                    if not rdb.document_exists(rid):
                        ## it's OK like this: a request may have been deleted yet still be listed in a batch
                        continue
                    mcm_r = rdb.get(rid)
                    if mcm_r['status'] == 'done':
                        ## if done, it's done
                        all_done = True
                    else:
                        if len(mcm_r['reqmgr_name']) == 0:
                            ## not done, and no requests in request manager, ignore = all_done
                            all_done = True
                        else:
                            if wma_name != mcm_r['reqmgr_name'][0]['name']:
                                ## not done, and a first request that does not correspond to the one in the batch, ignore = all_done
                                all_done = True
                    if not all_done:
                        ## no need to go further
                        break
                if all_done:
                    ## set the status and save
                    mcm_b = batch(announced_batch)
                    mcm_b.set_status()
                    bdb.update(mcm_b.json())
                    res.append({
                        "results": True,
                        "prepid": this_bid,
                        "message": "Set to done"
                    })
                else:
                    res.append({
                        "results": False,
                        "prepid": this_bid,
                        "message": "Not completed"
                    })
        else:
            self.logger.log('Not setting any batch to done')

        #anyways return something
        return dumps(res)
Example #18
    def next_batch_id(self, next_campaign, version=0, extension=0, process_string="",
            flown_with="", create_batch=True):

        with locker.lock('batch name clashing protection'):
            self.bdb.logger.debug("working on batch prepid")
            if flown_with:
                batchName = flown_with + '_' + next_campaign
            else:
                batchName = next_campaign

            # find the max batch with similar name, descending guarantees that
            # the returned one will be biggest
            __query_options = {"endkey": '"%s-00001"' % (batchName),
                    "startkey": '"%s-99999"' % (batchName),
                    "descending": "true", "limit": 1}

            max_in_batch = settings.get_value('max_in_batch')
            top_batch = self.bdb.raw_query("prepid", __query_options)
            new_batch = True

            if len(top_batch) != 0:
                # we already have some existing batch, check if its fine for appending
                # get a single batch
                single_batch = self.bdb.get(top_batch[0]["id"])
                if single_batch["status"] == "new":
                    # check if batch is not locked in other threads.
                    if len(single_batch["requests"]) + semaphore_events.count(single_batch['prepid']) < max_in_batch:
                        # we found a needed batch
                        self.bdb.logger.debug("found a matching batch:%s" % (single_batch["prepid"]))
                        batchNumber = int(single_batch["prepid"].split("-")[-1])
                        new_batch = False
                if new_batch:
                    # we default to max batch and increment its number
                    self.bdb.logger.debug("no new batch. incementing:%s +1" % (single_batch["prepid"]))
                    batchNumber = int(top_batch[0]["id"].split("-")[-1]) + 1
            else:
                self.bdb.logger.debug("starting new batch family:%s" % (batchName))
                batchNumber = 1

            batchName += '-%05d' % (batchNumber)

            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({
                    '_id': batchName,
                    'prepid': batchName,
                    'version': version,
                    'extension': extension,
                    'process_string': process_string})
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c, f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c['prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f['notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
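
Batch prepids follow the pattern <flow>_<campaign>-NNNNN with a zero-padded five-digit counter, which is why the descending raw query between "...-99999" and "...-00001" returns the highest existing number first. A standalone sketch of just the naming rule:

    def format_batch_prepid(next_campaign, number, flown_with=""):
        """Build '<flow>_<campaign>-NNNNN' with a 5-digit zero-padded counter."""
        base = flown_with + '_' + next_campaign if flown_with else next_campaign
        return '%s-%05d' % (base, number)

    print(format_batch_prepid('Summer12', 1))              # Summer12-00001
    print(format_batch_prepid('Summer12', 42, 'flowLHE'))  # flowLHE_Summer12-00042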
Example #19
    def internal_run(self):
        if not self.lock.acquire(blocking=False):
            self.logger.error("Could not acquire lock for ChainRequestInjector. prepid %s" % (
                    self.prepid))

            return False
        try:
            crdb = database('chained_requests')
            rdb = database('requests')
            batch_name = None
            if not crdb.document_exists(self.prepid):
                ## it's a request actually, pick up all chains containing it
                mcm_r = rdb.get(self.prepid)
                #mcm_crs = crdb.query(query="root_request==%s"% self.prepid) ## not only when its the root of
                mcm_crs = crdb.query(query="contains==%s" % self.prepid)
                task_name = 'task_' + self.prepid
                batch_type = 'Task_' + mcm_r['member_of_campaign']
            else:
                mcm_crs = [crdb.get(self.prepid)]
                task_name = self.prepid
                batch_type = mcm_crs[-1]['member_of_campaign']

            if len(mcm_crs) == 0:
                return False
            mcm_rs = []
            ## upload all config files to config cache, with "configuration economy" already implemented
            for cr in mcm_crs:
                mcm_cr = chained_request(cr)
                chain = mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]
                for rn in chain:
                    mcm_rs.append(request(rdb.get(rn)))
                    if self.check_approval and mcm_rs[-1].get_attribute('approval') != 'submit':
                        self.logger.error('request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                                rn, mcm_rs[-1].get_attribute('status'),
                                mcm_rs[-1].get_attribute('approval')))

                        return False

                    if mcm_rs[-1].get_attribute('status') != 'approved':
                        ## change the return format to percolate the error message
                        self.logger.error('request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                                rn, mcm_rs[-1].get_attribute('status'),
                                mcm_rs[-1].get_attribute('approval')))

                        return False

                    uploader = ConfigMakerAndUploader(prepid=rn, lock=locker.lock(rn))
                    uploader.internal_run() ## we run in same thread that we locked in start

            mcm_r = mcm_rs[-1]
            batch_name = BatchPrepId().next_batch_id(batch_type, create_batch=True)
            semaphore_events.increment(batch_name)
            self.logger.error('found batch %s' % batch_name)

            with ssh_executor(server='cms-pdmv-op.cern.ch') as ssh:
                cmd = self.make_command(mcm_r)
                self.logger.error('prepared command %s' % cmd)
                ## modify here to have the command to be executed
                _, stdout, stderr = ssh.execute(cmd)
                output = stdout.read()
                error = stderr.read()
                self.logger.log(output)
                self.logger.log(error)
                injected_requests = [l.split()[-1] for l in output.split('\n') if
                                     l.startswith('Injected workflow:')]

                approved_requests = [l.split()[-1] for l in output.split('\n') if
                                     l.startswith('Approved workflow:')]

                if not injected_requests:
                    self.injection_error('Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s' % (
                            output, error), mcm_rs)

                    return False

                if injected_requests and not approved_requests:
                    self.injection_error("Request %s was injected but could not be approved" % (
                        injected_requests), mcm_rs)

                ## yet again...
                objects_to_invalidate = [
                        {"_id": inv_req, "object": inv_req, "type": "request",
                        "status": "new", "prepid": self.prepid}
                        for inv_req in injected_requests if inv_req not in approved_requests]

                if objects_to_invalidate:
                    self.logger.error("Some requests %s need to be invalidated" % (
                            objects_to_invalidate))

                    invalidation = database('invalidation')
                    saved = invalidation.save_all(objects_to_invalidate)
                    if not saved:
                        self.logger.error('Could not save the invalidations {0}'.format(
                                objects_to_invalidate))

                    return False

                # what gets printed into the batch object
                added_requests = []
                once = set()
                for mcm_r in mcm_rs:
                    if mcm_r.get_attribute('prepid') in once: continue
                    once.add(mcm_r.get_attribute('prepid'))
                    added = [{'name': app_req,
                        'content': {'pdmv_prep_id': mcm_r.get_attribute('prepid')}}
                        for app_req in approved_requests]

                    added_requests.extend(added)

                ##edit the batch object
                with locker.lock(batch_name):
                    bdb = database('batches')
                    bat = batch(bdb.get(batch_name))
                    bat.add_requests(added_requests)
                    bat.update_history({'action': 'updated', 'step': task_name})
                    bat.reload()

                ## reload the content of all requests as they might have changed already
                added = [{'name': app_req, 'content': {'pdmv_prep_id': task_name}}
                    for app_req in approved_requests]

                seen = set()
                for cr in mcm_crs:
                    mcm_cr = chained_request(cr)
                    chain = mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]
                    message = ""
                    for rn in chain:
                        if rn in seen: continue # don't do it twice
                        seen.add(rn)
                        mcm_r = request(rdb.get(rn))
                        message += mcm_r.textified()
                        message += "\n\n"
                        mcm_r.set_attribute('reqmgr_name', added)
                        mcm_r.update_history({'action': 'inject', 'step': batch_name})
                        if not self.check_approval:
                            mcm_r.set_attribute('approval', 'submit')
                        mcm_r.set_status(with_notification=False)
                        mcm_r.reload()
                        mcm_cr.set_attribute('last_status', mcm_r.get_attribute('status'))
                    ## re-get the object
                    mcm_cr = chained_request(crdb.get(cr['prepid']))
                    #take care of changes to the chain
                    mcm_cr.update_history({'action': 'inject', 'step': batch_name})
                    mcm_cr.set_attribute('step', len(mcm_cr.get_attribute('chain')) - 1)
                    mcm_cr.set_attribute('status', 'processing')
                    mcm_cr.notify('Injection succeeded for %s' % mcm_cr.get_attribute('prepid'),
                                  message)

                    mcm_cr.reload()

                return True
        except Exception as e:
            self.injection_error("Error with injecting chains for %s :\n %s" % (
                self.prepid, traceback.format_exc()), [])

        finally:  ## we decrement batch id and release lock on prepid + lower semaphore
            if batch_name:  ## dirty thing for now, because batch name can be None for certain use-cases in code above
                semaphore_events.decrement(batch_name)
            self.lock.release()
            self.queue_lock.release()
Example #20
    def next_batch_id(self,
                      next_campaign,
                      version=0,
                      extension=0,
                      process_string="",
                      flown_with="",
                      create_batch=True):

        with locker.lock('batch name clashing protection'):
            self.bdb.logger.debug("working on batch prepid")
            if flown_with:
                batchName = flown_with + '_' + next_campaign
            else:
                batchName = next_campaign

            # find the max batch with similar name, descending guarantees that
            # the returned one will be biggest
            __query_options = {
                "endkey": '"%s-00001"' % (batchName),
                "startkey": '"%s-99999"' % (batchName),
                "descending": "true",
                "limit": 1
            }

            max_in_batch = settings.get_value('max_in_batch')
            top_batch = self.bdb.raw_query("prepid", __query_options)
            new_batch = True

            if len(top_batch) != 0:
                # we already have some existing batch, check if its fine for appending
                # get a single batch
                single_batch = self.bdb.get(top_batch[0]["id"])
                if single_batch["status"] == "new":
                    # check if batch is not locked in other threads.
                    if len(single_batch["requests"]) + semaphore_events.count(
                            single_batch['prepid']) < max_in_batch:
                        # we found a needed batch
                        self.bdb.logger.debug("found a matching batch:%s" %
                                              (single_batch["prepid"]))
                        batchNumber = int(
                            single_batch["prepid"].split("-")[-1])
                        new_batch = False
                if new_batch:
                    # we default to max batch and increment its number
                    self.bdb.logger.debug("no new batch. incementing:%s +1" %
                                          (single_batch["prepid"]))
                    batchNumber = int(top_batch[0]["id"].split("-")[-1]) + 1
            else:
                self.bdb.logger.debug("starting new batch family:%s" %
                                      (batchName))
                batchNumber = 1

            batchName += '-%05d' % (batchNumber)

            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({
                    '_id': batchName,
                    'prepid': batchName,
                    'version': version,
                    'extension': extension,
                    'process_string': process_string
                })
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c, f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c[
                            'prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f[
                            'notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
Example #21
    def GET(self, *args):
        """                       
        Provides the injection command and does the injection.
        """
        crn = args[0]
        crdb = database('chained_requests')
        mcm_cr = chained_request(crdb.get(crn))
        rdb = database('requests')
        mcm_rs = []
        ## upload all config files to config cache, with "configuration economy" already implemented
        from tools.locker import locker
        from tools.handlers import ConfigMakerAndUploader
        for rn in mcm_cr.get_attribute('chain'):
            mcm_rs.append(request(rdb.get(rn)))
            if self.mode == 'inject' and mcm_rs[-1].get_attribute('status') != 'approved':
                return dumps({"results": False, "message": 'request %s is in "%s" status, requires "approved"' % (rn, mcm_rs[-1].get_attribute('status'))})
            uploader = ConfigMakerAndUploader(prepid=rn, lock=locker.lock(rn))
            uploader.run()

        mcm_r = mcm_rs[-1]
        from rest_api.BatchPrepId import BatchPrepId
        batch_name = BatchPrepId().next_batch_id(mcm_cr.get_attribute('member_of_campaign'), create_batch=(self.mode == 'inject'))
        from tools.locker import semaphore_events, locker
        semaphore_events.increment(batch_name)

        from tools.ssh_executor import ssh_executor
        from tools.locator import locator
        l_type = locator()
        with ssh_executor(server='pdmvserv-test.cern.ch') as ssh:
            cmd = 'cd /afs/cern.ch/cms/PPD/PdmV/work/McM/dev-submit/\n'
            cmd += mcm_r.make_release()
            cmd += 'export X509_USER_PROXY=/afs/cern.ch/user/p/pdmvserv/private/$HOST/voms_proxy.cert\n'
            cmd += 'export PATH=/afs/cern.ch/cms/PPD/PdmV/tools/wmcontrol:${PATH}\n'
            ## until we get into production
            there = '--wmtest --wmtesturl cmsweb-testbed.cern.ch'
            cmd += 'wmcontrol.py --url-dict %s/public/restapi/chained_requests/get_dict/%s %s \n' % (l_type.baseurl(), crn, there)
            if self.mode == 'show':
                cherrypy.response.headers['Content-Type'] = 'text/plain'
                return cmd
            else:
                _, stdout, stderr = ssh.execute(cmd)
                cherrypy.response.headers['Content-Type'] = 'text/plain'
                output = stdout.read()
                error = stderr.read()
                self.logger.log(output)
                self.logger.log(error)

                injected_requests = [l.split()[-1] for l in output.split('\n') if
                                     l.startswith('Injected workflow:')]
                approved_requests = [l.split()[-1] for l in output.split('\n') if
                                     l.startswith('Approved workflow:')]
                if injected_requests and not approved_requests:
                    return dumps({"results" : False, "message" : "Request %s was injected but could not be approved" % ( injected_requests )})

                objects_to_invalidate = [
                    {"_id": inv_req, "object": inv_req, "type": "request", "status": "new", "prepid": self.prepid}
                    for inv_req in injected_requests if inv_req not in approved_requests]
                if objects_to_invalidate:
                    return dumps({"results" : False, "message" : "Some requests %s need to be invalidated"})
                
                added_requests = []
                for mcm_r in mcm_rs:
                    added = [{'name': app_req, 'content': {'pdmv_prep_id': mcm_r.get_attribute('prepid')}} for app_req in approved_requests]
                    added_requests.extend(added)
                
                with locker.lock(batch_name):
                    bdb = database('batches') 
                    bat = batch(bdb.get(batch_name))      
                    bat.add_requests(added_requests)
                    bat.update_history({'action': 'updated', 'step': crn})
                    bat.reload()
                    for mcm_r in mcm_rs:
                        mcm_r.set_attribute('reqmgr_name', added_requests)

                for mcm_r in mcm_rs:
                    added = [{'name': app_req, 'content': {'pdmv_prep_id': mcm_r.get_attribute('prepid')}} for app_req in approved_requests]
                    mcm_r.set_attribute('reqmgr_name', added)
                    mcm_r.update_history({'action': 'inject', 'step': batch_name})
                    mcm_r.set_attribute('approval', 'submit')
                    mcm_r.set_status(with_notification=False) ## maybe change to false
                    mcm_r.reload()
                
                mcm_cr.update_history({'action': 'inject', 'step': batch_name})
                mcm_cr.set_attribute('step', len(mcm_rs) - 1)
                mcm_cr.set_attribute('status', 'processing')
                mcm_cr.set_attribute('last_status', mcm_rs[-1].get_attribute('status'))
                message = ""
                for mcm_r in mcm_rs:
                    message += mcm_r.textified()
                    message += "\n\n"
                mcm_cr.notify('Injection succeeded for %s' % crn,
                              message)
                              message)

                mcm_cr.reload()
                
                return dumps({"results" : True, "message" : "request send to batch %s"% batch_name})
Example #22
    def internal_run(self):
        try:
            if not self.lock.acquire(blocking=False):
                self.injection_error('Couldnt acquire lock', None)
                return False
            try:
                okay, req = self.check_request()
                if not okay:
                    return False

                batch_name = BatchPrepId().next_batch_id(
                    req.get_attribute("member_of_campaign"), create_batch=True)

                semaphore_events.increment(
                    batch_name
                )  # so it's not possible to announce while still injecting
                executor = ssh_executor(server='vocms081.cern.ch')
                try:
                    cmd = req.prepare_submit_command()
                    self.inject_logger.info(
                        "Command being used for injecting request {0}: {1}".
                        format(self.prepid, cmd))
                    _, stdout, stderr = executor.execute(cmd)
                    if not stdout and not stderr:
                        self.injection_error(
                            'ssh error for request {0} injection'.format(
                                self.prepid), req)
                        return False
                    output = stdout.read()
                    error = stderr.read()
                    self.injection_error(output, None)
                    self.injection_error(error, None)
                    if error and not output:  # money on the table that it will break as well?
                        self.injection_error(
                            'Error in wmcontrol: {0}'.format(error), req)
                        return False

                    injected_requests = [
                        l.split()[-1] for l in output.split('\n')
                        if l.startswith('Injected workflow:')
                    ]

                    if not injected_requests:
                        self.injection_error(
                            'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s'
                            % (output, error), req)
                        return False

                    # another great structure
                    added_requests = [{
                        'name': app_req,
                        'content': {
                            'pdmv_prep_id': self.prepid
                        }
                    } for app_req in injected_requests]
                    requests = req.get_attribute('reqmgr_name')
                    requests.extend(added_requests)
                    req.set_attribute('reqmgr_name', requests)
                    # inject to batch
                    with locker.lock(batch_name):
                        bdb = database('batches')
                        bat = batch(bdb.get(batch_name))
                        bat.add_requests(added_requests)
                        bat.update_history({
                            'action': 'updated',
                            'step': self.prepid
                        })
                        saved = bdb.update(bat.json())
                    if not saved:
                        self.injection_error(
                            'There was a problem with registering request in the batch {0}'
                            .format(batch_name), req)
                        return False
                    # and in the end update request in database
                    req.update_history({
                        'action': 'inject',
                        'step': batch_name
                    })
                    req.set_status(
                        step=req._json_base__status.index('submitted'),
                        with_notification=True)
                    saved = self.request_db.update(req.json())
                    if not saved:
                        self.injection_error(
                            'Could not update request {0} in database'.format(
                                self.prepid), req)
                        return False
                    for added_req in added_requests:
                        self.inject_logger.info(
                            'Request {0} sent to {1}'.format(
                                added_req['name'], batch_name))

                    return True
                finally:  # lower batch semaphore, created at submission time
                    semaphore_events.decrement(batch_name)

            finally:  # finally release Submitter lock
                self.lock.release()
                try:
                    executor.close_executor()
                except UnboundLocalError:
                    pass
        except Exception:
            self.injection_error(
                'Error with injecting the {0} request:\n{1}'.format(
                    self.prepid, traceback.format_exc()), None)
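
The nested try/finally blocks guarantee that the batch semaphore and the submitter lock are released in reverse acquisition order even when submission fails mid-way. A compact sketch of the same release discipline (all names are illustrative stand-ins):

    import threading

    class Counter(object):
        """Tiny stand-in for the per-batch semaphore counter."""
        def __init__(self):
            self.n = 0
        def increment(self, key):
            self.n += 1
        def decrement(self, key):
            self.n -= 1

    def guarded_submit(lock, semaphore, batch_name, do_submit):
        """Acquire the lock, bump the batch counter, always release both."""
        if not lock.acquire(False):
            return False  # someone else is already submitting
        try:
            semaphore.increment(batch_name)  # blocks announcing meanwhile
            try:
                return do_submit()
            finally:
                semaphore.decrement(batch_name)
        finally:
            lock.release()

    print(guarded_submit(threading.Lock(), Counter(), 'batch-00001', lambda: True))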
Example #23
    def internal_run(self):
        if not self.lock.acquire(blocking=False):
            self.logger.error(
                "Could not acquire lock for ChainRequestInjector. prepid %s" %
                (self.prepid))
            return False
        try:
            crdb = database('chained_requests')
            rdb = database('requests')
            batch_name = None
            if not crdb.document_exists(self.prepid):
                # it's a request actually, pick up all chains containing it
                mcm_r = rdb.get(self.prepid)
                # mcm_crs = crdb.query(query="root_request==%s"% self.prepid) ## not only when its the root of
                mcm_crs = crdb.query(query="contains==%s" % self.prepid)
                task_name = 'task_' + self.prepid
                batch_type = 'Task_' + mcm_r['member_of_campaign']
            else:
                mcm_crs = [crdb.get(self.prepid)]
                current_step_prepid = mcm_crs[0]['chain'][mcm_crs[0]['step']]
                mcm_request = rdb.get(current_step_prepid)
                task_name = 'task_' + current_step_prepid
                batch_type = 'Task_' + mcm_request['member_of_campaign']

            if len(mcm_crs) == 0:
                return False
            mcm_rs = []
            # upload all config files to config cache, with "configuration economy" already implemented
            for cr in mcm_crs:
                mcm_cr = chained_request(cr)
                chain = mcm_cr.get_attribute(
                    'chain')[mcm_cr.get_attribute('step'):]
                for request_prepid in chain:
                    mcm_rs.append(request(rdb.get(request_prepid)))
                    if self.check_approval and mcm_rs[-1].get_attribute(
                            'approval') != 'submit':
                        message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                            request_prepid, mcm_rs[-1].get_attribute('status'),
                            mcm_rs[-1].get_attribute('approval'))
                        self.logger.error(message)
                        subject = '%s injection failed' % mcm_cr.get_attribute(
                            'prepid')
                        notification(
                            subject,
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify(subject, message)
                        return False

                    if mcm_rs[-1].get_attribute('status') != 'approved':
                        # change the return format to percolate the error message
                        message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                            request_prepid, mcm_rs[-1].get_attribute('status'),
                            mcm_rs[-1].get_attribute('approval'))
                        self.logger.error(message)
                        subject = '%s injection failed' % mcm_cr.get_attribute(
                            'prepid')
                        notification(
                            subject,
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify(subject, message)
                        return False

                    uploader = ConfigMakerAndUploader(
                        prepid=request_prepid,
                        lock=locker.lock(request_prepid))
                    if not uploader.internal_run():
                        message = 'Problem with uploading the configuration for request %s' % (
                            request_prepid)
                        notification(
                            'Configuration upload failed',
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify('Configuration upload failed', message)
                        self.logger.error(message)
                        return False

            mcm_r = mcm_rs[-1]
            batch_name = BatchPrepId().next_batch_id(batch_type,
                                                     create_batch=True)
            semaphore_events.increment(batch_name)
            self.logger.error('found batch %s' % batch_name)
            with ssh_executor(server='vocms081.cern.ch') as ssh:
                cmd = self.make_command(mcm_r)
                self.logger.error('prepared command %s' % cmd)
                # modify here to have the command to be executed
                _, stdout, stderr = ssh.execute(cmd)
                output = stdout.read()
                error = stderr.read()
                self.logger.info(output)
                self.logger.info(error)
                injected_requests = [
                    l.split()[-1] for l in output.split('\n')
                    if l.startswith('Injected workflow:')
                ]
                if not injected_requests:
                    self.injection_error(
                        'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s'
                        % (output, error), mcm_rs)
                    return False
                # what gets printed into the batch object
                added_requests = []
                once = set()
                for mcm_r in mcm_rs:
                    if mcm_r.get_attribute('prepid') in once:
                        continue
                    once.add(mcm_r.get_attribute('prepid'))
                    added = [{
                        'name': app_req,
                        'content': {
                            'pdmv_prep_id': mcm_r.get_attribute('prepid')
                        }
                    } for app_req in injected_requests]
                    added_requests.extend(added)

                # edit the batch object
                with locker.lock(batch_name):
                    bdb = database('batches')
                    bat = batch(bdb.get(batch_name))
                    bat.add_requests(added_requests)
                    bat.update_history({
                        'action': 'updated',
                        'step': task_name
                    })
                    bat.reload()

                # reload the content of all requests as they might have changed already
                added = [{
                    'name': app_req,
                    'content': {
                        'pdmv_prep_id': task_name
                    }
                } for app_req in injected_requests]

                seen = set()
                for cr in mcm_crs:
                    mcm_cr = chained_request(cr)
                    chain = mcm_cr.get_attribute(
                        'chain')[mcm_cr.get_attribute('step'):]
                    message = ""
                    for rn in chain:
                        if rn in seen:
                            continue  # don't do it twice
                        seen.add(rn)
                        mcm_r = request(rdb.get(rn))
                        message += mcm_r.textified()
                        message += "\n\n"
                        mcm_r.set_attribute('reqmgr_name', added)
                        mcm_r.update_history({
                            'action': 'inject',
                            'step': batch_name
                        })
                        if not self.check_approval:
                            mcm_r.set_attribute('approval', 'submit')
                        # set the status to submitted
                        mcm_r.set_status(
                            step=mcm_r._json_base__status.index('submitted'),
                            with_notification=False)
                        mcm_r.reload()
                        mcm_cr.set_attribute('last_status',
                                             mcm_r.get_attribute('status'))
                    # re-get the object
                    mcm_cr = chained_request(crdb.get(cr['prepid']))
                    # take care of changes to the chain
                    mcm_cr.update_history({
                        'action': 'inject',
                        'step': batch_name
                    })
                    mcm_cr.set_attribute(
                        'step',
                        len(mcm_cr.get_attribute('chain')) - 1)
                    mcm_cr.set_attribute('status', 'processing')
                    subject = 'Injection succeeded for %s' % mcm_cr.get_attribute(
                        'prepid')
                    notification(
                        subject,
                        message, [],
                        group=notification.CHAINED_REQUESTS,
                        action_objects=[mcm_cr.get_attribute('prepid')],
                        object_type='chained_requests',
                        base_object=mcm_cr)
                    mcm_cr.notify(subject, message)
                    mcm_cr.reload()

                return True
        except Exception:
            self.injection_error(
                "Error with injecting chains for %s :\n %s" %
                (self.prepid, traceback.format_exc()), [])

        finally:  # we decrement batch id and release lock on prepid+lower semaphore
            if batch_name:  # dirty thing for now, because batch name can be None for certain use-cases in code above
                semaphore_events.decrement(batch_name)
            self.lock.release()
            self.queue_lock.release()
Example #24
    def get(self, batch_id=None, n_to_go=1):
        """
        Look for batches that are new and with 1 requests or /N and announce them,
        or /batchid or /batchid/N
        """
        bdb = database('batches')
        res = []
        if settings.get_value('batch_announce'):
            __query = bdb.construct_lucene_query({'status': 'new'})
            new_batches = bdb.full_text_search('search', __query, page=-1)
            for new_batch in new_batches:
                if batch_id and new_batch['prepid'] != batch_id:
                    continue
                if len(new_batch['requests']) >= n_to_go:
                    # it is good to be announced !
                    res.append(
                        self.announce_with_text(new_batch['_id'],
                                                'Automatic announcement.'))
        else:
            self.logger.info('Not announcing any batch')

        if settings.get_value('batch_set_done'):
            # check on on-going batches
            rdb = database('requests')
            __query2 = bdb.construct_lucene_query({'status': 'announced'})
            announced_batches = bdb.full_text_search('search',
                                                     __query2,
                                                     page=-1)
            for announced_batch in announced_batches:
                if batch_id and announced_batch['prepid'] != batch_id:
                    continue
                this_bid = announced_batch['prepid']
                all_done = False
                for r in announced_batch['requests']:
                    all_done = False
                    wma_name = r['name']
                    rid = r['content']['pdmv_prep_id']
                    if not rdb.document_exists(rid):
                        # it OK like this.
                        # It could happen that a request has been deleted and yet in a batch
                        continue
                    mcm_r = rdb.get(rid)
                    if mcm_r['status'] == 'done':
                        # if done, it's done
                        all_done = True
                    else:
                        if len(mcm_r['reqmgr_name']) == 0:
                            # not done and nothing in request manager: ignore, i.e. count as done
                            all_done = True
                        else:
                            if wma_name != mcm_r['reqmgr_name'][0]['name']:
                                # not done, and the first request manager entry does
                                # not correspond to the one in the batch: ignore, i.e. count as done
                                all_done = True
                    if not all_done:
                        # no need to go further
                        break
                if all_done:
                    # set the status and save
                    mcm_b = batch(announced_batch)
                    mcm_b.set_status()
                    bdb.update(mcm_b.json())
                    res.append({
                        "results": True,
                        "prepid": this_bid,
                        "message": "Set to done"
                    })
                else:
                    res.append({
                        "results": False,
                        "prepid": this_bid,
                        "message": "Not completed"
                    })
        else:
            self.logger.info('Not setting any batch to done')

        # in any case, return something
        return res
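
The completion loop above reads most clearly as a pure predicate. A minimal, side-effect-free sketch, where requests is the batch's request list and lookup maps a prepid to its stored request document (both names are assumptions for illustration):

def batch_is_done(requests, lookup):
    all_done = False
    for r in requests:
        all_done = False
        doc = lookup(r['content']['pdmv_prep_id'])
        if doc is None:
            # a deleted request may still be listed in the batch: skip it
            continue
        if doc['status'] == 'done':
            all_done = True
        elif not doc['reqmgr_name']:
            # nothing registered in request manager: nothing to wait for
            all_done = True
        elif r['name'] != doc['reqmgr_name'][0]['name']:
            # first request manager entry no longer matches the batch: ignore it
            all_done = True
        if not all_done:
            break
    return all_done

# e.g. a batch whose single request is done:
reqs = [{'name': 'wf_1', 'content': {'pdmv_prep_id': 'TOP-Summer12-00001'}}]
docs = {'TOP-Summer12-00001': {'status': 'done', 'reqmgr_name': [{'name': 'wf_1'}]}}
assert batch_is_done(reqs, docs.get)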
Example No. 25
    def internal_run(self):
        try:
            if not self.lock.acquire(blocking=False):
                return False
            req = None
            executor = None  # both may still be unset if check_request() bails out early
            try:
                okay, req = self.check_request()
                if not okay: return False
                batch_name = BatchPrepId().next_id(req.json())
                semaphore_events.increment(batch_name) # so it's not possible to announce while still injecting
                executor = ssh_executor(server='pdmvserv-test.cern.ch')
                try:
                    cmd = req.prepare_submit_command(batch_name)
                    self.logger.inject("Command being used for injecting request {0}: {1}".format(self.prepid, cmd),
                                       handler=self.prepid)
                    _, stdout, stderr = executor.execute(cmd)
                    if not stdout and not stderr:
                        self.injection_error('ssh error for request {0} injection'.format(self.prepid), req)
                        return False
                    output = stdout.read()
                    error = stderr.read()
                    if error and not output: # stderr but no stdout: wmcontrol almost certainly failed
                        self.injection_error('Error in wmcontrol: {0}'.format(error), req)
                        return False
                    injected_requests = [l.split()[-1] for l in output.split('\n') if
                                         l.startswith('Injected workflow:')]
                    approved_requests = [l.split()[-1] for l in output.split('\n') if
                                         l.startswith('Approved workflow:')]
                    if not approved_requests:
                        self.injection_error(
                            'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n{0}\n\nError: \n{1}'.format(
                                output, error), req)
                        return False
                    objects_to_invalidate = [
                        {"_id": inv_req, "object": inv_req, "type": "request", "status": "new", "prepid": self.prepid}
                        for inv_req in injected_requests if inv_req not in approved_requests]
                    if objects_to_invalidate:
                        self.logger.inject(
                            "Some of the workflows had to be invalidated: {0}".format(objects_to_invalidate),
                            handler=self.prepid)
                        invalidation = database('invalidation')
                        saved = invalidation.save_all(objects_to_invalidate)
                        if not saved:
                            self.injection_error('Could not save the invalidations {0}'.format(objects_to_invalidate),
                                                 req)

                    added_requests = [{'name': app_req, 'content': {'pdmv_prep_id': self.prepid}} for app_req in
                                      approved_requests]
                    requests = req.get_attribute('reqmgr_name')
                    requests.extend(added_requests)
                    req.set_attribute('reqmgr_name', requests)

                    #inject to batch
                    with locker.lock(batch_name):
                        bdb = database('batches')
                        bat = batch(bdb.get(batch_name))
                        bat.add_requests(added_requests)
                        bat.update_history({'action': 'updated', 'step': self.prepid})
                        saved = bdb.update(bat.json())
                    if not saved:
                        self.injection_error(
                            'There was a problem with registering request in the batch {0}'.format(batch_name), req)
                        return False

                    #and in the end update request in database
                    req.update_history({'action': 'inject', 'step' : batch_name})
                    req.set_status(with_notification=True)
                    saved = self.request_db.update(req.json())
                    if not saved:
                        self.injection_error('Could not update request {0} in database'.format(self.prepid), req)
                        return False

                    for added_req in added_requests:
                        self.logger.inject('Request {0} sent to {1}'.format(added_req['name'], batch_name),
                                           handler=self.prepid)
                    return True
                finally:
                    semaphore_events.decrement(batch_name)

            finally:
                self.lock.release()
                if executor:
                    executor.close_executor()

        except Exception:
            self.injection_error(
                'Error with injecting the {0} request:\n{1}'.format(self.prepid, traceback.format_exc()), req)
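
The stdout handling above boils down to a small parsing step: wmcontrol is expected to print one 'Injected workflow: <name>' or 'Approved workflow: <name>' line per workflow, and the code keeps the last whitespace-separated token of each. A minimal sketch of that step (the sample workflow names are made up):

def parse_wmcontrol_output(output):
    injected = [l.split()[-1] for l in output.split('\n')
                if l.startswith('Injected workflow:')]
    approved = [l.split()[-1] for l in output.split('\n')
                if l.startswith('Approved workflow:')]
    # injected but never approved: these are the workflows to invalidate
    to_invalidate = [w for w in injected if w not in approved]
    return injected, approved, to_invalidate

sample = ('Injected workflow: pdmvserv_task_A\n'
          'Approved workflow: pdmvserv_task_A\n'
          'Injected workflow: pdmvserv_task_B\n')
assert parse_wmcontrol_output(sample)[2] == ['pdmvserv_task_B']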
Example No. 26
    def internal_run(self):
        try:
            if not self.lock.acquire(blocking=False):
                return False
            req = None  # may still be unset if check_request() raises
            try:
                okay, req = self.check_request()
                if not okay: return False
                batch_name = BatchPrepId().next_id(req.json())
                semaphore_events.increment(batch_name) # so it's not possible to announce while still injecting
                try:
                    cmd = self.prepare_command(req, batch_name)
                    self.logger.inject("Command being used for injecting request {0}: {1}".format(self.prepid, cmd), handler=self.prepid)
                    _, stdout, stderr = self.ssh_executor.execute(cmd)
                    if not stdout and not stderr:
                        self.injection_error('ssh error for request {0} injection'.format(self.prepid), req)
                        return False
                    output = stdout.read()
                    error = stderr.read()
                    if error and not output: # stderr but no stdout: wmcontrol almost certainly failed
                        self.injection_error('Error in wmcontrol: {0}'.format(error), req)
                        return False
                    injected_requests = [l.split()[-1] for l in output.split('\n') if l.startswith('Injected workflow:')]
                    approved_requests = [l.split()[-1] for l in output.split('\n') if l.startswith('Approved workflow:')]
                    if not approved_requests:
                        self.injection_error('Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n{0}\n\nError: \n{1}'.format(output, error), req)
                        return False
                    objects_to_invalidate = [{"_id": inv_req, "object": inv_req, "type": "request", "status": "new" , "prepid": self.prepid}
                                             for inv_req in injected_requests if inv_req not in approved_requests]
                    if objects_to_invalidate:
                        self.logger.inject("Some of the workflows had to be invalidated: {0}".format(objects_to_invalidate), handler=self.prepid)
                        invalidation = database('invalidation')
                        saved = invalidation.save_all(objects_to_invalidate)
                        if not saved:
                            self.injection_error('Could not save the invalidations {0}'.format(objects_to_invalidate), req)

                    added_requests = [{'name': app_req, 'content': {'pdmv_prep_id': self.prepid}} for app_req in approved_requests]
                    requests = req.get_attribute('reqmgr_name')
                    requests.extend(added_requests)
                    req.set_attribute('reqmgr_name', requests)

                    #inject to batch
                    with locker.lock(batch_name):
                        bdb = database('batches')
                        bat = batch(bdb.get(batch_name))
                        bat.add_requests(added_requests)
                        bat.update_history({'action': 'updated', 'step': self.prepid})
                        saved = bdb.update(bat.json())
                    if not saved:
                        self.injection_error('There was a problem with registering request in the batch {0}'.format(batch_name), req)
                        return False

                    #and in the end update request in database
                    req.update_history({'action': 'inject'})
                    req.set_status(with_notification=True)
                    saved = self.request_db.update(req.json())
                    if not saved:
                        self.injection_error('Could not update request {0} in database'.format(self.prepid), req)
                        return False

                    for added_req in added_requests:
                        self.logger.inject('Request {0} sent to {1}'.format(added_req['name'], batch_name), handler=self.prepid)
                    return True
                finally:
                    semaphore_events.decrement(batch_name)
            finally:
                self.lock.release()
        except Exception:
            self.injection_error('Error with injecting the {0} request:\n{1}'.format(self.prepid, traceback.format_exc()), req)
        finally:
            self.ssh_executor.close_executor()
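
Both variants of internal_run funnel the 'inject to batch' step through locker.lock(batch_name), so the read-modify-write on the batch document cannot interleave with a concurrent injection into the same batch. A minimal stand-alone sketch of that pattern, with plain dicts and a hypothetical lock registry standing in for locker and the batches database:

import threading
from contextlib import contextmanager

_locks = {}
_registry_guard = threading.Lock()

@contextmanager
def lock(name):  # hypothetical stand-in for locker.lock(batch_name)
    with _registry_guard:
        named = _locks.setdefault(name, threading.Lock())
    with named:
        yield

def append_requests(batches, batch_name, added_requests):
    with lock(batch_name):
        doc = batches[batch_name]               # bdb.get(batch_name)
        doc['requests'].extend(added_requests)  # bat.add_requests(...)
        batches[batch_name] = doc               # bdb.update(bat.json())

batches = {'Summer12-00001': {'requests': []}}
append_requests(batches, 'Summer12-00001',
                [{'name': 'wf_1', 'content': {'pdmv_prep_id': 'TOP-00001'}}])
assert len(batches['Summer12-00001']['requests']) == 1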
Example No. 27
    def next_id(self, for_request):
        flown_with = for_request['flown_with']
        next_campaign = for_request['member_of_campaign']
        version = for_request['version']
        extension = for_request['extension']
        process_string = for_request['process_string']

        with locker.lock('batch name clashing protection'):
            if flown_with:
                batchName = flown_with+'_'+next_campaign
            else:
                batchName = next_campaign

            #### doing the query by hand
            res = self.bdb.queries([])
            res_this = filter(lambda x: x['prepid'].split('-')[0] == batchName, res)
            ## keep the ones of that family that are new or on hold
            res_new = filter(lambda x: x['status'] == 'new' or x['status'] == 'hold', res_this)

            ## restrict to the same version, extension and process string
            res_new = filter(lambda x: x['version'] == version, res_new)
            res_new = filter(lambda x: x['extension'] == extension, res_new)
            res_new = filter(lambda x: x['process_string'] == process_string, res_new)

            ## keep only the serial numbers of those
            res_new = map(lambda x: int(x['prepid'].split('-')[-1]), res_new)

            ## find out the next one
            if not res_new:
                ## no open batch of this kind
                res_next = filter(lambda x: x['prepid'].split('-')[0].split('_')[-1] == next_campaign, res)
                if not res_next:
                    ## not even a document with *_<campaign>-* existing ---> creating a new family
                    batchNumber = 1
                else:
                    ## pick up the last+1 serial number of the *_<campaign>-* family
                    batchNumber = max(map(lambda x: int(x['prepid'].split('-')[-1]), res_next)) + 1
            else:
                ## pick up the last serial number of that family
                batchNumber = max(res_new)

            batchName += '-%05d' % batchNumber
            if not self.bdb.document_exists(batchName):
                newBatch = batch({'_id': batchName,
                                  'prepid': batchName,
                                  'version': version,
                                  'extension': extension,
                                  'process_string': process_string})
                notes = ""
                cdb = database('campaigns')
                mcm_c = cdb.get(next_campaign)
                if mcm_c['notes']:
                    notes += "Notes about the campaign:\n" + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f['notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
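
A minimal, pure-function sketch of the naming scheme above: batch prepids have the form <flow>_<campaign>-NNNNN (or <campaign>-NNNNN without a flow), an open ('new' or 'hold') batch of the family is reused, and otherwise the family's highest serial plus one starts a new batch. The version/extension/process_string filters are left out for brevity, and existing is a plain list standing in for the database query:

def next_batch_name(existing, next_campaign, flown_with=''):
    family = flown_with + '_' + next_campaign if flown_with else next_campaign
    serial = lambda doc: int(doc['prepid'].split('-')[-1])
    same_family = [d for d in existing if d['prepid'].split('-')[0] == family]
    open_ones = [d for d in same_family if d['status'] in ('new', 'hold')]
    if open_ones:
        number = max(serial(d) for d in open_ones)  # reuse the open batch
    else:
        same_campaign = [d for d in existing
                         if d['prepid'].split('-')[0].split('_')[-1] == next_campaign]
        number = max(serial(d) for d in same_campaign) + 1 if same_campaign else 1
    return '%s-%05d' % (family, number)

docs = [{'prepid': 'flowX_Summer12-00002', 'status': 'done'}]
assert next_batch_name(docs, 'Summer12', 'flowX') == 'flowX_Summer12-00003'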
Example No. 28
    def next_batch_id(self,
                      next_campaign,
                      version=0,
                      extension=0,
                      process_string="",
                      flown_with="",
                      create_batch=True):

        with locker.lock('batch name clashing protection'):
            if flown_with:
                batchName = flown_with + '_' + next_campaign
            else:
                batchName = next_campaign

            #### doing the query by hand
            res = self.bdb.queries([])
            res_this = filter(lambda x: x['prepid'].split('-')[0] == batchName,
                              res)
            ## keep the ones of that family that are new or on hold
            res_new = filter(
                lambda x: x['status'] == 'new' or x['status'] == 'hold',
                res_this)

            ## restrict to the same version, extension and process string
            res_new = filter(lambda x: x['version'] == version, res_new)
            res_new = filter(lambda x: x['extension'] == extension, res_new)
            res_new = filter(lambda x: x['process_string'] == process_string,
                             res_new)

            ## limit the number of entries per batch at name reservation time; this does not help if one submits several at a time
            max_in_batch = settings().get_value('max_in_batch')
            # for existing batches
            res_new = filter(lambda x: len(x['requests']) <= max_in_batch,
                             res_new)
            # for dynamic allocation from locks
            res_new = filter(
                lambda x: semaphore_events.count(x['prepid']) <= max_in_batch,
                res_new)

            ## keep only the serial numbers of those
            res_new = map(lambda x: int(x['prepid'].split('-')[-1]), res_new)

            ## find out the next one
            if not res_new:
                ## no open batch of this kind
                res_next = filter(
                    lambda x: x['prepid'].split('-')[0].split('_')[-1] ==
                    next_campaign, res)
                if not res_next:
                    ## not even a document with *_<campaign>-* existing: ---> creating a new family
                    batchNumber = 1
                else:
                    ## pick up the last+1 serial number of *_<campaign>-*  family
                    batchNumber = max(
                        map(lambda x: int(x['prepid'].split('-')[-1]),
                            res_next)) + 1
            else:
                ## pick up the last serial number of that family
                batchNumber = max(res_new)

            batchName += '-%05d' % (batchNumber)
            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({
                    '_id': batchName,
                    'prepid': batchName,
                    'version': version,
                    'extension': extension,
                    'process_string': process_string
                })
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c, f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c[
                            'prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f[
                            'notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
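
What this version adds over the previous one is the occupancy gate: an open batch is only reused while both its saved requests and its in-flight injections stay within max_in_batch. A minimal sketch of that check, with the in-flight count passed in explicitly (the helper name is an assumption for illustration):

def batch_has_room(batch_doc, in_flight, max_in_batch):
    # requests already registered in the batch document
    if len(batch_doc['requests']) > max_in_batch:
        return False
    # injections currently holding the semaphore for this batch
    if in_flight > max_in_batch:
        return False
    return True

doc = {'prepid': 'Summer12-00001', 'requests': [{}] * 3}
assert batch_has_room(doc, in_flight=1, max_in_batch=10)
assert not batch_has_room(doc, in_flight=11, max_in_batch=10)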
Example No. 29
    def next_batch_id(self, next_campaign, version=0, extension=0, process_string="",
            flown_with="", create_batch=True):

        with locker.lock('batch name clashing protection'):
            if flown_with:
                batchName = flown_with+'_'+next_campaign
            else:
                batchName = next_campaign

            #### doing the query by hand
            res = self.bdb.queries([])
            res_this = filter(lambda x: x['prepid'].split('-')[0] == batchName, res)
            ## keep the ones of that family that are new or on hold
            res_new = filter(lambda x: x['status'] == 'new' or x['status'] == 'hold', res_this)

            ## restrict to the same version, extension and process string
            res_new = filter(lambda x: x['version'] == version, res_new)
            res_new = filter(lambda x: x['extension'] == extension, res_new)
            res_new = filter(lambda x: x['process_string'] == process_string, res_new)

            ## limit the number of entries per batch at name reservation time; this does not help if one submits several at a time
            max_in_batch = settings().get_value('max_in_batch')
            # for existing batches
            res_new = filter(lambda x: len(x['requests']) <= max_in_batch, res_new)
            # for dynamic allocation from locks
            res_new = filter(lambda x: semaphore_events.count(x['prepid']) <= max_in_batch, res_new)

            ## keep only the serial numbers of those
            res_new = map(lambda x: int(x['prepid'].split('-')[-1]), res_new)

            ## find out the next one
            if not res_new:
                ## no open batch of this kind
                res_next = filter(lambda x: x['prepid'].split('-')[0].split('_')[-1] == next_campaign.split('_')[-1], res)
                if not res_next:
                    ## not even a document with *_<campaign>-* existing: ---> creating a new family
                    batchNumber = 1
                else:
                    ## pick up the last+1 serial number of *_<campaign>-*  family
                    batchNumber = max(map(lambda x: int(x['prepid'].split('-')[-1]), res_next)) + 1
            else:
                ## pick up the last serial number of that family
                batchNumber = max(res_new)

            batchName += '-%05d' % (batchNumber)

            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({'_id': batchName,
                                  'prepid': batchName,
                                  'version': version,
                                  'extension': extension,
                                  'process_string': process_string})
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c, f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c['prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f['notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
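
The notes of a freshly created batch are just a concatenation of whatever the campaign (or the campaigns of a chained campaign) and the flow carry. A minimal sketch of that assembly, with plain dicts standing in for the campaign and flow documents:

def build_notes(campaigns, flow=None):
    notes = ""
    for mcm_c in campaigns:
        if mcm_c.get('notes'):
            notes += "Notes about the campaign %s:\n" % mcm_c['prepid'] + mcm_c['notes'] + "\n"
    if flow and flow.get('notes'):
        notes += "Notes about the flow:\n" + flow['notes'] + "\n"
    return notes

campaign = {'prepid': 'Summer12', 'notes': 'Pilot campaign.'}
flow = {'prepid': 'flowX', 'notes': 'Validation flow.'}
assert build_notes([campaign], flow).startswith('Notes about the campaign Summer12:')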
Example No. 30
    def get(self, batch_id=None, n_to_go=1):
        """
        Look for batches that are new and have at least 1 (or /N) requests
        and announce them; optionally restrict to /batchid or /batchid/N
        """
        bdb = database('batches')
        res = []
        if settings.get_value('batch_announce'):
            __query = bdb.construct_lucene_query({'status': 'new'})
            new_batches = bdb.full_text_search('search', __query, page=-1)
            for new_batch in new_batches:
                if batch_id and new_batch['prepid'] != batch_id:
                    continue
                if len(new_batch['requests']) >= n_to_go:
                    # it is good to be announced !
                    res.append(self.announce_with_text(new_batch['_id'], 'Automatic announcement.'))
        else:
            self.logger.info('Not announcing any batch')

        if settings.get_value('batch_set_done'):
            # check on on-going batches
            rdb = database('requests')
            __query2 = bdb.construct_lucene_query({'status': 'announced'})
            announced_batches = bdb.full_text_search('search', __query2, page=-1)
            for announced_batch in announced_batches:
                if batch_id and announced_batch['prepid'] != batch_id:
                    continue
                this_bid = announced_batch['prepid']
                all_done = False
                for r in announced_batch['requests']:
                    all_done = False
                    wma_name = r['name']
                    rid = r['content']['pdmv_prep_id']
                    if not rdb.document_exists(rid):
                        # it is OK like this: a request may have been
                        # deleted while still being listed in a batch
                        continue
                    mcm_r = rdb.get(rid)
                    if mcm_r['status'] == 'done':
                        # if done, it's done
                        all_done = True
                    else:
                        if len(mcm_r['reqmgr_name']) == 0:
                            # not done and nothing in request manager: ignore, i.e. count as done
                            all_done = True
                        else:
                            if wma_name != mcm_r['reqmgr_name'][0]['name']:
                                # not done, and the first request manager entry does
                                # not correspond to the one in the batch: ignore, i.e. count as done
                                all_done = True
                    if not all_done:
                        # no need to go further
                        break
                if all_done:
                    # set the status and save
                    mcm_b = batch(announced_batch)
                    mcm_b.set_status()
                    bdb.update(mcm_b.json())
                    res.append({"results": True, "prepid": this_bid, "message": "Set to done"})
                else:
                    res.append({"results": False, "prepid": this_bid, "message": "Not completed"})
        else:
            self.logger.info('Not setting any batch to done')

        # in any case, return something
        return res
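
Both passes of this handler are gated by their own settings flag, so automatic announcement and automatic completion can be switched off independently. A minimal sketch of that control flow, with a plain dict and injected callables standing in for settings, the announce pass and the set-done pass:

def run_batch_passes(flags, announce_pass, set_done_pass, log):
    res = []
    if flags.get('batch_announce'):
        res.extend(announce_pass())
    else:
        log('Not announcing any batch')
    if flags.get('batch_set_done'):
        res.extend(set_done_pass())
    else:
        log('Not setting any batch to done')
    # in any case, return something, even if both passes are disabled
    return res

out = run_batch_passes({'batch_announce': False, 'batch_set_done': True},
                       lambda: [], lambda: [{'results': True}],
                       lambda msg: None)
assert out == [{'results': True}]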