Code example #1
File: settings.py Project: cms-PdmV/cmsPdmV
def add(label, setting):
    with locker.lock(label):
        result = __db.save(setting)
        if result:
            cache_key = 'settings_' + label
            __cache.set(cache_key, setting)
        return result
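
Note: every example on this page assumes a shared tools.locker module that hands out one lock per string key. The snippet below is only a minimal sketch of that assumed behaviour (one reentrant lock per key, usable as a context manager); it is not the actual cmsPdmV implementation, and the class name is invented for illustration.

import threading
from collections import defaultdict

class _PerKeyLocker(object):
    # Sketch: lock(key) always returns the same RLock for a given key,
    # so "with locker.lock(label):" serializes all work on that label.
    def __init__(self):
        self._guard = threading.Lock()              # protects the registry itself
        self._locks = defaultdict(threading.RLock)  # key -> reentrant lock

    def lock(self, key):
        with self._guard:
            return self._locks[key]

locker = _PerKeyLocker()

Usage then matches the examples: either "with locker.lock('settings_' + label): ..." or an explicit lock.acquire(blocking=False) / lock.release() pair, as in examples #22 and #24.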
Code example #2
File: RestAPIMethod.py Project: srimanob/cmsPdmV
    def default(self, *vpath, **params):


        method = getattr(self, cherrypy.request.method, None)
        if not method:
            raise cherrypy.HTTPError(405, "Method not implemented.")

        if self.access_limit is not None:
            self.logger.log('Setting access limit to access_rights.%s (%s)' % (roles[self.access_limit], self.access_limit))
            self.authenticator.set_limit(self.access_limit)
        elif cherrypy.request.method in self.limit_per_method:
            self.authenticator.set_limit(self.limit_per_method[cherrypy.request.method])
        else:
            raise cherrypy.HTTPError(403, 'You cannot access this page with method %s' % cherrypy.request.method )

        user_p = user_pack()

        l_type = locator()
        if not user_p.get_username():
            #meaning we are going public, only allow GET.
            #if cherrypy.request.method != 'GET' or not l_type.isDev():
            #	raise cherrypy.HTTPError(403, 'User credentials were not provided.')
            if not 'public' in str(cherrypy.url()):
                self.logger.error('From within %s, adfs-login not found: \n %s \n %s' % (self.__class__.__name__, str(cherrypy.request.headers), str(cherrypy.url()) ))
        else:
            if not self.authenticator.can_access(user_p.get_username()):
                raise cherrypy.HTTPError(403, 'You cannot access this page, the limit for the page is {0} ({1})'.format(roles[self.authenticator.get_limit()],
                                                                                                                        self.authenticator.get_limit()))
        # counter for calls
        with locker.lock("rest-call-counter"):
            self.counter[method.im_class.__name__][method.__name__] += 1
        return method(*vpath, **params)
Code example #3
File: mcm_database.py Project: cms-PdmV/cmsPdmV
 def __get_from_cache(self, key):
     if self.cache_enabled:
         with locker.lock(key):
             cache_key = 'mcm_database_' + key
             return self.cache.get(cache_key)
     else:
         return None
Code example #4
File: chained_request.py Project: srimanob/cmsPdmV
    def toggle_last_request(self):

        ## let it toggle the last request to a given approval only if the chained request allows it
        if self.get_attribute('approval') == 'none':
            return 

        ccdb = database('chained_campaigns')
        mcm_cc = ccdb.get(self.get_attribute('member_of_campaign'))
        (next_campaign_id, flow_name) = mcm_cc['campaigns'][self.get_attribute('step')]
        fdb = database('flows')
        mcm_f = flow(fdb.get(flow_name))
        # check whether we have to do something even more subtle with the request
        if mcm_f.get_attribute('approval') == 'submit' or self.get_attribute('approval') == 'submit':
            rdb = database('requests')
            next_request = request(rdb.get(self.get_attribute('chain')[self.get_attribute('step')]))

            current_r_approval = next_request.get_attribute('approval')
            time_out = 0
            #self.logger.error('Trying to move %s from %s to submit'% (next_request.get_attribute('prepid'), current_r_approval))
            while current_r_approval != 'submit' and time_out <= 10:
                time_out += 1
                #get it back from db to avoid _rev issues
                next_request = request(rdb.get(next_request.get_attribute('prepid')))
                with locker.lock('{0}-wait-for-approval'.format( next_request.get_attribute('prepid') )):
                    next_request.approve()
                    request_saved = rdb.save(next_request.json())
                    if not request_saved:
                        raise self.ChainedRequestCannotFlowException(self.get_attribute('_id'),
                                                                     'Could not save the new request %s while trying to move to submit approval' % (
                                next_request.get_attribute('prepid')))
                current_r_approval = next_request.get_attribute('approval')
                pass

        return True
Code example #5
File: json_base.py Project: vlimant/cmsPdmV
 def overwrite(self, json_input):
     """
     Update the document with the input, regardless of revision clash.
     This has to be used with much care
     """
     try:
         if self.__class__.__name__ =="batch":
             db = database(self.__class__.__name__ + "es")
         else:
             db = database(self.__class__.__name__ + "s")
     except (database.DatabaseNotFoundException, database.DatabaseAccessError) as ex:
         self.logger.error("Problem with database creation:\n{0}".format(ex))
         return False
     with locker.lock(self.get_attribute('_id')):
         if not db.document_exists(self.get_attribute('_id')):
             return False
         ## reload the doc with db
         self.__init__(db.get(self.get_attribute('_id')))
         ## add what was provided on top
         self._json_base__json.update( json_input )
         ## save back
         saved = db.update(self.json())
         if not saved:
             return False
         return True
Code example #6
    def next_id(self, pwg, campaign):
        ccamp_db = database(self.ccamp_db_name)
        creq_db = database(self.creq_db_name)
        if not pwg:
            self.logger.error('Physics working group provided is None.')
            return None
        if not campaign:
            self.logger.error('Campaign id provided is None.')
            return None
        with locker.lock("{0}-{1}".format(pwg, campaign)):
            if not ccamp_db.document_exists(campaign):
                self.logger.error('Campaign id {0} does not exist.'.format(campaign))
                return None
            if (campaign, pwg) in self.serial_number_cache:
                sn = self.serial_number_cache[(campaign, pwg)] + 1
            else:
                sn=1
                serial_number_lookup = creq_db.raw_query('serial_number', {'group':True, 'key':[campaign, pwg]})
                if serial_number_lookup:
                    sn = serial_number_lookup[0]['value']+1

            ## construct the new id
            new_prepid = pwg + '-' + campaign + '-' + str(sn).zfill(5)
            if sn==1:
                self.logger.log('Beginning new prepid family: %s' % (new_prepid))

            new_request = chained_request({'_id':new_prepid, 'prepid':new_prepid, 'pwg':pwg, 'member_of_campaign':campaign})
            new_request.update_history({'action':'created'})
            creq_db.save(new_request.json())
            self.serial_number_cache[(campaign, pwg)] = sn
            self.logger.log('New chain id: %s' % new_prepid, level='debug')

            return new_prepid
Code example #7
File: json_base.py Project: cms-PdmV/cmsPdmV
    def overwrite(self, json_input):
        """
        Update the document with the input, regardless of revision clash.
        This has to be used with much care
        """
        db = self.get_database()
        if db is None:
            return False
        with locker.lock(self.get_attribute('_id')):
            if not db.document_exists(self.get_attribute('_id')):
                return False
            # reload the doc with db
            t = db.get(self.get_attribute('_id'))
            self.__init__(t)
            if "_rev" in json_input:
                self.logger.debug("trying to overwrite.DB _rev:%s Doc _rev: %s" % (t["_rev"], json_input["_rev"]))

            else:
                self.logger.debug("trying to overwrite.DB _rev:%s Doc _rev: none" % (t["_rev"]))

            # add what was provided on top
            self._json_base__json.update(json_input)
            # save back
            saved = db.update(self.json())
            if not saved:
                return False
            return True
Code example #8
File: user_management.py Project: cms-PdmV/cmsPdmV
    def get_user_role(cls, username, email=None):
        if not username:
            return 'user'

        with locker.lock(username):
            cache_key = 'authenticator_user_role_' + username
            cached_value = cls.__users_roles_cache.get(cache_key)

            if cached_value is not None:
                return cached_value

            user_role = 'user'
            if cls.__db.document_exists(username):
                user = cls.__db.get(username)

                if email and ('email' not in user or user['email'] != email):
                    user['email'] = email
                    cls.__db.update(user)

                try:
                    user_role = user['role']
                except Exception:
                    cls.logger.error('Error getting role for user "' + username + '". Will use default value "' + user_role + '"')

            cls.__users_roles_cache.set(cache_key, user_role, timeout=cls.CACHE_TIMEOUT)
            return user_role
Code example #9
File: user_management.py Project: srimanob/cmsPdmV
 def get_user_role(self, username, email=None):
     with locker.lock(username):
         if username not in self.__users_roles:
             if not self.__db.document_exists(username):
                 self.__users_roles[username] = 'user'
             else:
                 user = self.__db.get(username)
                 if email and ('email' not in user or user['email'] != email):
                     user['email'] = email
                     self.__db.update(user)
                 role = None
                 while not role:
                     try:
                         role = user['role']
                     except:
                         ## how to print an error from here ?
                         user = self.__db.get(username)
                         pass
                 self.__users_roles[username] = user['role']
         else:
             if self.__lookup_counter[username] == settings().get_value("user_cache_refresh_counter"):
                 if self.__db.document_exists(username):
                     self.__users_roles[username] = self.__db.get(username)['role']
                 self.__lookup_counter[username] = 0
             else:
                 self.__lookup_counter[username] += 1
         return self.__users_roles[username]
Code example #10
File: communicator.py Project: vlimant/cmsPdmV
 def flush(self,Nmin):
     res=[]
     with locker.lock('accumulating_notifcations'):
         for key in self.cache.keys():
             (subject,sender,addressee)=key
             if self.cache[key]['N'] <= Nmin: 
                 ## flush only above a certain amount of messages
                 continue
             destination = addressee.split(COMMASPACE)
             text = self.cache[key]['Text']
             msg = MIMEMultipart()
             
             msg['From'] = sender
             msg['To'] = addressee
             msg['Date'] = formatdate(localtime=True)
             new_msg_ID = make_msgid()  
             msg['Message-ID'] = new_msg_ID 
             msg['Subject'] = subject
             
             ## add a signature automatically
             text += '\n\n'
             text += 'McM Announcing service'
             #self.logger.log('Sending a message from cache \n%s'% (text))
             try:
                 msg.attach(MIMEText(text))
                 smtpObj = smtplib.SMTP()
                 smtpObj.connect()
                 smtpObj.sendmail(sender, destination, msg.as_string())
                 smtpObj.quit()
                 self.cache.pop(key)
                 res.append( subject )
             except Exception as e:
                 print "Error: unable to send email", e.__class__
         return res
Code example #11
    def GET(self, *args):
        """
        Provides the injection command and does the injection.
        """

        if not len(args):
            return dumps({"results" : False, "message" : "no argument was passe"})

        pid = args[0]

        from tools.handlers import ChainRequestInjector, submit_pool

        _q_lock = locker.thread_lock(pid)
        if not locker.thread_acquire(pid, blocking=False):
            return dumps({"prepid": pid, "results": False,
                    "message": "The request {0} request is being handled already".format(
                        pid)})

        thread = ChainRequestInjector(prepid=pid, lock=locker.lock(pid), queue_lock=_q_lock,
                check_approval=False)

        if self.mode == 'show':
            cherrypy.response.headers['Content-Type'] = 'text/plain'
            return thread.make_command()
        else:
            submit_pool.add_task(thread.internal_run)
            #thread.start()
            return dumps({"results" : True,
                    "message" : "chain submission for %s will be forked unless same request is being handled already" % pid,
                    "prepid" : pid})
Code example #12
File: RestAPIMethod.py Project: cms-PdmV/cmsPdmV
 def count_call(self):
     # counter for calls
     method = request.method
     with locker.lock("rest-call-counter"):
        key = self.__class__.__name__ + method
        try:
            RESTResource.call_counters[key] += 1
        except KeyError:
            RESTResource.call_counters[key] = 1
Code example #13
File: settings.py Project: cms-PdmV/cmsPdmV
def get(label):
    with locker.lock(label):
        cache_key = 'settings_' + label
        cached_value = __cache.get(cache_key)
        if cached_value is not None:
            return cached_value
        setting = __db.get(label)
        __cache.set(cache_key, setting)
        return setting
Code example #14
File: settings.py Project: cms-PdmV/cmsPdmV
def set(label, setting):
    with locker.lock(label):
        result = __db.update(setting)
        if result:
            # Maybe it's a better idea to cache the setting immediately instead
            # of getting it from the database?
            new_value = __db.get(label)
            cache_key = 'settings_' + label
            __cache.set(cache_key, new_value)
        return result
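
The settings, mcm_database and user_management examples all talk to a cache through get(key) and set(key, value, timeout=...). The real project presumably uses a library cache; the stand-in below only illustrates that interface and its expiry semantics, and the class name and default timeout are assumptions.

import time

class _TTLCache(object):
    def __init__(self, default_timeout=3600):
        self.default_timeout = default_timeout
        self._store = {}  # key -> (expiry timestamp or None, value)

    def get(self, key):
        if key not in self._store:
            return None
        expires, value = self._store[key]
        if expires is not None and expires < time.time():
            del self._store[key]          # entry outlived its timeout
            return None
        return value

    def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        expires = time.time() + timeout if timeout else None  # timeout=0 means no expiry
        self._store[key] = (expires, value)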
Code example #15
    def GET(self, *args):
        """
        Perform test for chained requests
        """

        if not len(args):
            return dumps({"results" : False, "message" : "no argument provided"})

        from tools.handlers import RunChainValid, validation_pool
        ## now in the core of the api
        runtest = RunChainValid(crid=args[0], lock=locker.lock(args[0]))

        crdb = database('chained_requests')
        rdb = database('requests')
        mcm_cr = chained_request(crdb.get(args[0]))
        mcm_rs = []

        for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:
            mcm_r = request( rdb.get( rid ) )
            if mcm_r.get_attribute('status') in ['approved','submitted','done']:
                return dumps({"results" : False, "prepid" : args[0],
                        "message" : "request %s is in status %s" % (
                                rid, mcm_r.get_attribute('status'))})

        for rid in mcm_cr.get_attribute('chain')[mcm_cr.get_attribute('step'):]:
            mcm_r = request(rdb.get(rid))
            next = 'validation'
            if not mcm_r.is_root:  next = 'approve'
            try:
                if mcm_r.get_attribute('approval')  == 'none':
                    ## no need to try and move it along if already further than that
                    getattr(mcm_r,'ok_to_move_to_approval_%s' % next)(for_chain=True)
                    mcm_r.update_history({'action' : 'approve', 'step' : next})
                    mcm_r.set_attribute('approval', next)
                    mcm_r.reload()
                else:
                    pass
                    ## fail this for the moment. there is no way to handle this yet
                    #text="It is not supported for the moment to test a chain of requests which are partially not new. Please contact an administrator"
                    #runtest.reset_all( text  , notify_one = rid )
                    #return dumps({"results" : False, "message" : text, "prepid" : args[0]})

                text = 'Within chain %s \n'% mcm_cr.get_attribute('prepid')
                text += mcm_r.textified()
                mcm_r.notify('Approval %s in chain %s for request %s' % (next,
                        mcm_cr.get_attribute('prepid'), mcm_r.get_attribute('prepid')),
                        text, accumulate=True)

            except Exception as e:
                runtest.reset_all(str(e), notify_one=rid)
                return dumps({"results" : False, "message" : str(e),"prepid" : args[0]})

        validation_pool.add_task(runtest.internal_run)
        #runtest.start()
        return dumps({"results" : True, "message" : "run test started","prepid" : args[0]})
Code example #16
File: MccmActions.py Project: cms-PdmV/cmsPdmV
 def fill_id(self, pwg, db):
     mccm_id = pwg
     with locker.lock(mccm_id):  # get date and number
         t = mccm.get_meeting_date()
         mccm_id += '-' + t.strftime("%Y%b%d") + '-'  # date
         final_mccm_id = mccm_id + '00001'
         i = 2
         while db.document_exists(final_mccm_id):
             final_mccm_id = mccm_id + str(i).zfill(5)
             i += 1
         return final_mccm_id
Code example #17
File: handlers.py Project: srimanob/cmsPdmV
 def internal_run(self):
     self.logger.inject('## Logger instance retrieved', level='info', handler=self.prepid)
     with locker.lock('{0}-wait-for-approval'.format( self.prepid ) ):
         if not self.lock.acquire(blocking=False):
             return {"prepid": self.prepid, "results": False,
                     "message": "The request with name {0} is being handled already" .format(self.prepid)}
         try:
             if not self.uploader.internal_run():
                 return  {"prepid": self.prepid, "results": False,
                          "message": "Problem with uploading the configuration for request {0}" .format(self.prepid)}
             self.submitter.internal_run()
         finally:
             self.lock.release()
Code example #18
File: json_base.py Project: cms-PdmV/cmsPdmV
 def reload(self, save_current=True):
     """
     Save (if specified) and reloads the object with info from database (new revision)
     """
     if save_current:
         if not self.save():
             return False
     db = self.get_database()
     if db is None:
         return False
     with locker.lock(self.get_attribute('_id')):
         self.__init__(db.get(self.get_attribute('_id')))
         return True
Code example #19
File: json_base.py Project: cms-PdmV/cmsPdmV
 def save(self):
     """
     Updates or creates document in database with name db_name
     """
     db = self.get_database()
     if db is None:
         return False
     with locker.lock(self.get_attribute('_id')):
         if not db.document_exists(self.get_attribute('_id')):
             saved = db.save(self.json())
         else:
             saved = db.update(self.json())
         if not saved:
             return False
     return True
Code example #20
File: chained_request.py Project: srimanob/cmsPdmV
 def request_join(self, req):
     with locker.lock(req.get_attribute('prepid')):
         chain = req.get_attribute("member_of_chain")
         chain.append(self.get_attribute('_id'))
         req.set_attribute("member_of_chain", chain)
     loc = locator()
     req.notify("Request {0} joined chain".format(req.get_attribute('prepid')), 
                "Request {0} has successfully joined chain {1}\n\n{2}\n".format(req.get_attribute('prepid'),
                                                                                       self.get_attribute('_id'),
                                                                                       "/".join([loc.baseurl(), "requests?prepid={0}".format(req.get_attribute('prepid'))])))
     req.update_history({'action': 'join chain', 'step': self.get_attribute('_id')})
     if not req.get_attribute('prepid') in self.get_attribute('chain'):
         chain = self.get_attribute('chain')
         chain.append(req.get_attribute('prepid'))
         self.set_attribute("chain", chain)
         self.update_history({'action': 'add request', 'step': req.get_attribute('prepid')})
Code example #21
File: ControlActions.py Project: franzoni/cmsPdmV
 def GET(self, *args):
     """
     Reset counters
     """
     res = {}
     with locker.lock("rest-call-counter"):
         for arg in args:
              if arg in RESTResource.counter:
                 RESTResource.counter[arg] = 0
                 res[arg] = True
             else:
                 res[arg] = False
         if not args:
             for key in RESTResource.counter:
                 RESTResource.counter[key] = 0
                 res[key] = True
         return dumps(res)
Code example #22
File: MccmActions.py Project: srimanob/cmsPdmV
    def GET(self, *args):
        """
        Operate the chaining for a given MccM document id
        """
        if not args:
            return dumps({"results": 'Error: No arguments were given'})
        mid=args[0]
        reserve=False
        if len(args)>1:
            reserve= (args[1]=='reserve')

        lock = locker.lock(mid)
        if lock.acquire(blocking=False):       
            res= self.generate(mid, reserve)
            lock.release()
            return dumps(res)
        else:
            return dumps({"results" : False, "message" : "%s is already being operated on"% mid} )
Code example #23
File: RequestPrepId.py Project: srimanob/cmsPdmV
 def next_prepid(self, pwg, camp):
     if not pwg or not camp:
         return None
     with locker.lock("{0}-{1}".format(pwg, camp)):
         db = database(self.db_name)
         query_results = db.raw_query('serial_number', {'group':True, 'key':[camp, pwg]})
         sn = 1
         if query_results:
             sn = query_results[0]['value']+1
         pid='%s-%s-%05d'%( pwg, camp , sn)
         if sn==1:
             self.logger.log('Beginning new prepid family: %s-%s' %( pwg, camp))
         db_camp = database('campaigns', cache=True)
         req_camp = campaign(db_camp.get(camp))
         new_request = request(req_camp.add_request({'_id':pid, 'prepid':pid, 'pwg':pwg, 'member_of_campaign':camp}))
         new_request.update_history({'action':'created'})
         db.save(new_request.json())
         self.logger.log('New prepid : %s '%pid)
         return pid
Code example #24
File: MccmActions.py Project: cms-PdmV/cmsPdmV
    def get(self, mccm_id, reserve_input='', limit_campaign_id=''):
        """
        Operate the chaining for a given MccM document id
        """
        reserve = False
        if reserve_input == 'reserve':
            reserve = True
            if limit_campaign_id != '':
                reserve = limit_campaign_id

        lock = locker.lock(mccm_id)
        if lock.acquire(blocking=False):
            try:
                res = self.generate(mccm_id, reserve)
            finally:
                lock.release()
            return res
        else:
            return {
                "results": False,
                "message": "%s is already being operated on" % mccm_id}
Code example #25
File: json_base.py Project: vlimant/cmsPdmV
 def reload(self):
     """
     Updates or creates document in database with name db_name
     and reloads the object with info from database (new revision)
     """
     try:
         if self.__class__.__name__ =="batch":
             db = database(self.__class__.__name__ + "es")
         else:
             db = database(self.__class__.__name__ + "s")
     except (database.DatabaseNotFoundException, database.DatabaseAccessError) as ex:
         self.logger.error("Problem with database creation:\n{0}".format(ex))
         return False
     with locker.lock(self.get_attribute('_id')):
         if not db.document_exists(self.get_attribute('_id')):
             saved = db.save(self.json())
         else:
             saved = db.update(self.json())
         if not saved:
             return False
         self.__init__(db.get(self.get_attribute('_id')))
         return True
Code example #26
    def get(self, chained_request_id):
        """
        Provides the injection command and does the injection.
        """
        from tools.handlers import ChainRequestInjector, submit_pool

        _q_lock = locker.thread_lock(chained_request_id)
        if not locker.thread_acquire(chained_request_id, blocking=False):
            return {"prepid": chained_request_id, "results": False,
                    "message": "The request {0} request is being handled already".format(
                        chained_request_id)}

        thread = ChainRequestInjector(prepid=chained_request_id, lock=locker.lock(chained_request_id), queue_lock=_q_lock,
                check_approval=False)
        if self.mode == 'show':
            self.representations = {'text/plain': self.output_text}
            return thread.make_command()
        else:
            submit_pool.add_task(thread.internal_run)
            return {
                "results": True,
                "message": "chain submission for %s will be forked unless same request is being handled already" % chained_request_id,
                "prepid": chained_request_id}
Code example #27
    def internal_run(self):
        try:
            if not self.lock.acquire(blocking=False):
                return False
            try:
                okay, req = self.check_request()
                if not okay: return False
                batch_name = BatchPrepId().next_id(req.json())
                semaphore_events.increment(batch_name) # so it's not possible to announce while still injecting
                executor = ssh_executor(server='pdmvserv-test.cern.ch')
                try:
                    cmd = req.prepare_submit_command(batch_name)
                    self.logger.inject("Command being used for injecting request {0}: {1}".format(self.prepid, cmd),
                                       handler=self.prepid)
                    _, stdout, stderr = executor.execute(cmd)
                    if not stdout and not stderr:
                        self.injection_error('ssh error for request {0} injection'.format(self.prepid), req)
                        return False
                    output = stdout.read()
                    error = stderr.read()
                    if error and not output: # money on the table that it will break as well?
                        self.injection_error('Error in wmcontrol: {0}'.format(error), req)
                        return False
                    injected_requests = [l.split()[-1] for l in output.split('\n') if
                                         l.startswith('Injected workflow:')]
                    approved_requests = [l.split()[-1] for l in output.split('\n') if
                                         l.startswith('Approved workflow:')]
                    if not approved_requests:
                        self.injection_error(
                            'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n{0}\n\nError: \n{1}'.format(
                                output, error), req)
                        return False
                    objects_to_invalidate = [
                        {"_id": inv_req, "object": inv_req, "type": "request", "status": "new", "prepid": self.prepid}
                        for inv_req in injected_requests if inv_req not in approved_requests]
                    if objects_to_invalidate:
                        self.logger.inject(
                            "Some of the workflows had to be invalidated: {0}".format(objects_to_invalidate),
                            handler=self.prepid)
                        invalidation = database('invalidation')
                        saved = invalidation.save_all(objects_to_invalidate)
                        if not saved:
                            self.injection_error('Could not save the invalidations {0}'.format(objects_to_invalidate),
                                                 req)

                    added_requests = [{'name': app_req, 'content': {'pdmv_prep_id': self.prepid}} for app_req in
                                      approved_requests]
                    requests = req.get_attribute('reqmgr_name')
                    requests.extend(added_requests)
                    req.set_attribute('reqmgr_name', requests)

                    #inject to batch
                    with locker.lock(batch_name):
                        bdb = database('batches')
                        bat = batch(bdb.get(batch_name))
                        bat.add_requests(added_requests)
                        bat.update_history({'action': 'updated', 'step': self.prepid})
                        saved = bdb.update(bat.json())
                    if not saved:
                        self.injection_error(
                            'There was a problem with registering request in the batch {0}'.format(batch_name), req)
                        return False

                    #and in the end update request in database
                    req.update_history({'action': 'inject', 'step' : batch_name})
                    req.set_status(with_notification=True)
                    saved = self.request_db.update(req.json())
                    if not saved:
                        self.injection_error('Could not update request {0} in database'.format(self.prepid), req)
                        return False

                    for added_req in added_requests:
                        self.logger.inject('Request {0} sent to {1}'.format(added_req['name'], batch_name),
                                           handler=self.prepid)
                    return True
                finally:
                    semaphore_events.decrement(batch_name)

            finally:
                self.lock.release()
                executor.close_executor()

        except Exception as e:
            self.injection_error(
                'Error with injecting the {0} request:\n{1}'.format(self.prepid, traceback.format_exc()), req)
Code example #28
def set_value(label, value):
    with locker.lock(label):
        setting = get(label)
        setting['value'] = value
        return set(label, setting)
Code example #29
    def next_batch_id(self,
                      next_campaign,
                      version=0,
                      extension=0,
                      process_string="",
                      flown_with="",
                      create_batch=True):

        with locker.lock('batch name clashing protection'):
            if flown_with:
                batchName = flown_with + '_' + next_campaign
            else:
                batchName = next_campaign

            #### doing the query by hand
            res = self.bdb.queries([])
            res_this = filter(lambda x: x['prepid'].split('-')[0] == batchName,
                              res)
            ## filter to have the ones of that family, that are NEW or on hold
            res_new = filter(
                lambda x: x['status'] == 'new' or x['status'] == 'hold',
                res_this)

            ## add limitation to version, extension and process string
            res_new = filter(lambda x: x['version'] == version, res_new)
            res_new = filter(lambda x: x['extension'] == extension, res_new)
            res_new = filter(lambda x: x['process_string'] == process_string,
                             res_new)

            ## limit to a certain number of entries per batch: at name reservation time, so it does not work if one submits more at a time
            max_in_batch = settings().get_value('max_in_batch')
            # for existing batches
            res_new = filter(lambda x: len(x['requests']) <= max_in_batch,
                             res_new)
            # for dynamic allocation from locks
            res_new = filter(
                lambda x: semaphore_events.count(x['prepid']) <= max_in_batch,
                res_new)

            ##get only the serial number of those
            res_new = map(lambda x: int(x['prepid'].split('-')[-1]), res_new)

            ##find out the next one
            if not res_new:
                ##no open batch of this kind
                res_next = filter(
                    lambda x: x['prepid'].split('-')[0].split('_')[-1] ==
                    next_campaign, res)
                if not res_next:
                    ## not even a document with *_<campaign>-* existing: ---> creating a new family
                    batchNumber = 1
                else:
                    ## pick up the last+1 serial number of *_<campaign>-*  family
                    batchNumber = max(
                        map(lambda x: int(x['prepid'].split('-')[-1]),
                            res_next)) + 1
            else:
                ## pick up the last serial number of that family
                batchNumber = max(res_new)

            batchName += '-%05d' % (batchNumber)
            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({
                    '_id': batchName,
                    'prepid': batchName,
                    'version': version,
                    'extension': extension,
                    'process_string': process_string
                })
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    mcm_cc = ccdb.get(next_campaign)
                    for (c, f) in mcm_cc['campaigns']:
                        cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c[
                            'prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f[
                            'notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
Code example #30
File: mcm_database.py Project: peiffer/cmsPdmV
 def __get_from_cache(self, key):
     from tools.locker import locker
     with locker.lock(key):
         return self.cache_dictionary[key]
Code example #31
 def set(self, label, setting):
     with locker.lock(label):
         result = self.__db.update(setting)
         if result:
             self.cache[label] = self.__db.get(label)
         return result
Code example #32
File: user_management.py Project: peiffer/cmsPdmV
 def set_user_role(self, username, role):
     with locker.lock(username):
         self.__users_roles[username] = role
Code example #33
File: mcm_database.py Project: gourangakole/cmsPdmV
 def __save_to_cache(self, key, value):
     if self.cache_enabled:
         with locker.lock(key):
             cache_key = 'mcm_database_' + key
             self.cache.set(cache_key, value, timeout=self.CACHE_TIMEOUT)
Code example #34
File: mcm_database.py Project: srimanob/cmsPdmV
 def __get_from_cache(self, key):
     from tools.locker import locker
     with locker.lock(key):
         return self.cache_dictionary[key]
Code example #35
    def next_batch_id(self,
                      next_campaign,
                      version=0,
                      extension=0,
                      process_string="",
                      flown_with="",
                      create_batch=True):

        with locker.lock('batch name clashing protection'):
            self.bdb.logger.debug("working on batch prepid")
            if flown_with:
                batchName = flown_with + '_' + next_campaign
            else:
                batchName = next_campaign

            # find the max batch with similar name, descending guarantees that
            # the returned one will be biggest
            __query_options = {
                "endkey": '"%s-00001"' % (batchName),
                "startkey": '"%s-99999"' % (batchName),
                "descending": "true",
                "limit": 1
            }

            max_in_batch = settings.get_value('max_in_batch')
            top_batch = self.bdb.raw_query("prepid", __query_options)
            new_batch = True

            if len(top_batch) != 0:
                # we already have some existing batch, check if its fine for appending
                # get a single batch
                single_batch = self.bdb.get(top_batch[0]["id"])
                if single_batch["status"] == "new":
                    # check if batch is not locked in other threads.
                    if len(single_batch["requests"]) + semaphore_events.count(
                            single_batch['prepid']) < max_in_batch:
                        # we found a needed batch
                        self.bdb.logger.debug("found a matching batch:%s" %
                                              (single_batch["prepid"]))
                        batchNumber = int(
                            single_batch["prepid"].split("-")[-1])
                        new_batch = False
                if new_batch:
                    # we default to max batch and increment its number
                    self.bdb.logger.debug("no new batch. incementing:%s +1" %
                                          (single_batch["prepid"]))
                    batchNumber = int(top_batch[0]["id"].split("-")[-1]) + 1
            else:
                self.bdb.logger.debug("starting new batch family:%s" %
                                      (batchName))
                batchNumber = 1

            batchName += '-%05d' % (batchNumber)

            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({
                    '_id': batchName,
                    'prepid': batchName,
                    'version': version,
                    'extension': extension,
                    'process_string': process_string
                })
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c, f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes += "Notes about the campaign %s:\n" % mcm_c[
                            'prepid'] + mcm_c['notes'] + "\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes += "Notes about the flow:\n" + mcm_f[
                            'notes'] + "\n"
                if notes:
                    newBatch.set_attribute('notes', notes)
                newBatch.update_history({'action': 'created'})
                self.bdb.save(newBatch.json())

            return batchName
Code example #36
 def get(self, label):
     with locker.lock(label):
         if not label in self.cache:
             setting = self.__db.get(label)
             self.cache[label] = setting
         return self.cache[label]
Code example #37
 def set_user_role(cls, username, role):
     with locker.lock(username):
         cls.__users_roles_cache.set(username,
                                     role,
                                     timeout=cls.CACHE_TIMEOUT)
Code example #38
File: mcm_database.py Project: srimanob/cmsPdmV
 def __save_to_cache(self, key, value):
     from tools.locker import locker
     with locker.lock(key):
         self.cache_dictionary[key]=value
Code example #39
    def internal_run(self):
        if not self.lock.acquire(blocking=False):
            self.logger.error(
                "Could not acquire lock for ChainRequestInjector. prepid %s" %
                (self.prepid))
            return False
        try:
            crdb = database('chained_requests')
            rdb = database('requests')
            batch_name = None
            if not crdb.document_exists(self.prepid):
                # it's a request actually, pick up all chains containing it
                mcm_r = rdb.get(self.prepid)
                # mcm_crs = crdb.query(query="root_request==%s"% self.prepid) ## not only when its the root of
                mcm_crs = crdb.query(query="contains==%s" % self.prepid)
                task_name = 'task_' + self.prepid
                batch_type = 'Task_' + mcm_r['member_of_campaign']
            else:
                mcm_crs = [crdb.get(self.prepid)]
                current_step_prepid = mcm_crs[0]['chain'][mcm_crs[0]['step']]
                mcm_request = rdb.get(current_step_prepid)
                task_name = 'task_' + current_step_prepid
                batch_type = 'Task_' + mcm_request['member_of_campaign']

            if len(mcm_crs) == 0:
                return False
            mcm_rs = []
            # upload all config files to config cache, with "configuration economy" already implemented
            for cr in mcm_crs:
                mcm_cr = chained_request(cr)
                chain = mcm_cr.get_attribute(
                    'chain')[mcm_cr.get_attribute('step'):]
                for request_prepid in chain:
                    mcm_rs.append(request(rdb.get(request_prepid)))
                    if self.check_approval and mcm_rs[-1].get_attribute(
                            'approval') != 'submit':
                        message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                            request_prepid, mcm_rs[-1].get_attribute('status'),
                            mcm_rs[-1].get_attribute('approval'))
                        self.logger.error(message)
                        subject = '%s injection failed' % mcm_cr.get_attribute(
                            'prepid')
                        notification(
                            subject,
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify(subject, message)
                        return False

                    if mcm_rs[-1].get_attribute('status') != 'approved':
                        # change the return format to percolate the error message
                        message = 'request %s is in "%s"/"%s" status/approval, requires "approved"/"submit"' % (
                            request_prepid, mcm_rs[-1].get_attribute('status'),
                            mcm_rs[-1].get_attribute('approval'))
                        self.logger.error(message)
                        subject = '%s injection failed' % mcm_cr.get_attribute(
                            'prepid')
                        notification(
                            subject,
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify(subject, message)
                        return False

                    uploader = ConfigMakerAndUploader(
                        prepid=request_prepid,
                        lock=locker.lock(request_prepid))
                    if not uploader.internal_run():
                        message = 'Problem with uploading the configuration for request %s' % (
                            request_prepid)
                        notification(
                            'Configuration upload failed',
                            message, [],
                            group=notification.CHAINED_REQUESTS,
                            action_objects=[mcm_cr.get_attribute('prepid')],
                            object_type='chained_requests',
                            base_object=mcm_cr)
                        mcm_cr.notify('Configuration upload failed', message)
                        self.logger.error(message)
                        return False

            mcm_r = mcm_rs[-1]
            batch_name = BatchPrepId().next_batch_id(batch_type,
                                                     create_batch=True)
            semaphore_events.increment(batch_name)
            self.logger.error('found batch %s' % batch_name)
            with ssh_executor(server='vocms081.cern.ch') as ssh:
                cmd = self.make_command(mcm_r)
                self.logger.error('prepared command %s' % cmd)
                # modify here to have the command to be executed
                _, stdout, stderr = ssh.execute(cmd)
                output = stdout.read()
                error = stderr.read()
                self.logger.info(output)
                self.logger.info(error)
                injected_requests = [
                    l.split()[-1] for l in output.split('\n')
                    if l.startswith('Injected workflow:')
                ]
                if not injected_requests:
                    self.injection_error(
                        'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s'
                        % (output, error), mcm_rs)
                    return False
                # what gets printed into the batch object
                added_requests = []
                once = set()
                for mcm_r in mcm_rs:
                    if mcm_r.get_attribute('prepid') in once:
                        continue
                    once.add(mcm_r.get_attribute('prepid'))
                    added = [{
                        'name': app_req,
                        'content': {
                            'pdmv_prep_id': mcm_r.get_attribute('prepid')
                        }
                    } for app_req in injected_requests]
                    added_requests.extend(added)

                # edit the batch object
                with locker.lock(batch_name):
                    bdb = database('batches')
                    bat = batch(bdb.get(batch_name))
                    bat.add_requests(added_requests)
                    bat.update_history({
                        'action': 'updated',
                        'step': task_name
                    })
                    bat.reload()

                # reload the content of all requests as they might have changed already
                added = [{
                    'name': app_req,
                    'content': {
                        'pdmv_prep_id': task_name
                    }
                } for app_req in injected_requests]

                seen = set()
                for cr in mcm_crs:
                    mcm_cr = chained_request(cr)
                    chain = mcm_cr.get_attribute(
                        'chain')[mcm_cr.get_attribute('step'):]
                    message = ""
                    for rn in chain:
                        if rn in seen:
                            continue  # don't do it twice
                        seen.add(rn)
                        mcm_r = request(rdb.get(rn))
                        message += mcm_r.textified()
                        message += "\n\n"
                        mcm_r.set_attribute('reqmgr_name', added)
                        mcm_r.update_history({
                            'action': 'inject',
                            'step': batch_name
                        })
                        if not self.check_approval:
                            mcm_r.set_attribute('approval', 'submit')
                        # set the status to submitted
                        mcm_r.set_status(
                            step=mcm_r._json_base__status.index('submitted'),
                            with_notification=False)
                        mcm_r.reload()
                        mcm_cr.set_attribute('last_status',
                                             mcm_r.get_attribute('status'))
                    # re-get the object
                    mcm_cr = chained_request(crdb.get(cr['prepid']))
                    # take care of changes to the chain
                    mcm_cr.update_history({
                        'action': 'inject',
                        'step': batch_name
                    })
                    mcm_cr.set_attribute(
                        'step',
                        len(mcm_cr.get_attribute('chain')) - 1)
                    mcm_cr.set_attribute('status', 'processing')
                    subject = 'Injection succeeded for %s' % mcm_cr.get_attribute(
                        'prepid')
                    notification(
                        subject,
                        message, [],
                        group=notification.CHAINED_REQUESTS,
                        action_objects=[mcm_cr.get_attribute('prepid')],
                        object_type='chained_requests',
                        base_object=mcm_cr)
                    mcm_cr.notify(subject, message)
                    mcm_cr.reload()

                return True
        except Exception:
            self.injection_error(
                "Error with injecting chains for %s :\n %s" %
                (self.prepid, traceback.format_exc()), [])

        finally:  # we decrement batch id and release lock on prepid+lower semaphore
            if batch_name:  # dirty thing for now, because batch name can be None for certain use-cases in the code above
                semaphore_events.decrement(batch_name)
            self.lock.release()
            self.queue_lock.release()
Code example #40
File: BatchPrepId.py Project: vlimant/cmsPdmV
    def next_batch_id(self, next_campaign, version=0, extension=0, process_string="",
            flown_with="", create_batch=True):

        with locker.lock('batch name clashing protection'):
            if flown_with:
                batchName = flown_with+'_'+next_campaign
            else:
                batchName = next_campaign

            #### doing the query by hand
            res = self.bdb.queries([])
            res_this = filter(lambda x: x['prepid'].split('-')[0] == batchName, res)
            ## filter to have the ones of that family, that are NEW or on hold
            res_new = filter(lambda x: x['status']=='new' or x['status']=='hold', res_this)

            ## add limitation to version, extension and process string
            res_new = filter(lambda x: x['version'] == version, res_new)
            res_new = filter(lambda x: x['extension'] == extension, res_new)
            res_new = filter(lambda x: x['process_string'] == process_string, res_new)

            ## limit to a certain number of entries per batch: at name reservation time, so it does not work if one submits more at a time
            max_in_batch = settings().get_value('max_in_batch')
            # for existing batches
            res_new = filter(lambda x: len(x['requests']) <= max_in_batch, res_new)
            # for dynamic allocation from locks
            res_new = filter(lambda x: semaphore_events.count(x['prepid']) <= max_in_batch, res_new)


            ##get only the serial number of those
            res_new = map(lambda x: int(x['prepid'].split('-')[-1]), res_new)

            ##find out the next one
            if not res_new:
                ##no open batch of this kind
                res_next = filter(lambda x: x['prepid'].split('-')[0].split('_')[-1] == next_campaign.split('_')[-1] , res)
                if not res_next:
                    ## not even a document with *_<campaign>-* existing: ---> creating a new family
                    batchNumber = 1
                else:
                    ## pick up the last+1 serial number of *_<campaign>-*  family
                    batchNumber = max(map(lambda x: int(x['prepid'].split('-')[-1]), res_next)) + 1
            else:
                ## pick up the last serial number of that family
                batchNumber = max(res_new)

            batchName += '-%05d' % (batchNumber)

            if not self.bdb.document_exists(batchName) and create_batch:
                newBatch = batch({'_id':batchName,
                                  'prepid':batchName,
                                  'version' : version,
                                  'extension' : extension,
                                  'process_string' : process_string})
                notes = ""
                cdb = database('campaigns')
                cs = []
                if not cdb.document_exists(next_campaign):
                    ccdb = database('chained_campaigns')
                    if ccdb.document_exists(next_campaign):
                        mcm_cc = ccdb.get(next_campaign)
                        for (c,f) in mcm_cc['campaigns']:
                            cs.append(cdb.get(c))
                else:
                    cs = [cdb.get(next_campaign)]
                for mcm_c in cs:
                    if mcm_c['notes']:
                        notes+="Notes about the campaign %s:\n"%mcm_c['prepid']+mcm_c['notes']+"\n"
                if flown_with:
                    fdb = database('flows')
                    mcm_f = fdb.get(flown_with)
                    if mcm_f['notes']:
                        notes+="Notes about the flow:\n"+mcm_f['notes']+"\n"
                if notes:
                    newBatch.set_attribute('notes',notes)
                newBatch.update_history({'action':'created'})
                self.bdb.save(newBatch.json())

            return batchName
Code example #41
    def internal_run(self):
        try:
            if not self.lock.acquire(blocking=False):
                self.injection_error("Couldn't acquire lock", None)
                return False
            try:
                okay, req = self.check_request()
                if not okay:
                    return False

                batch_name = BatchPrepId().next_batch_id(
                    req.get_attribute("member_of_campaign"), create_batch=True)

                semaphore_events.increment(
                    batch_name
                )  # so it's not possible to announce while still injecting
                executor = ssh_executor(server='vocms081.cern.ch')
                try:
                    cmd = req.prepare_submit_command()
                    self.inject_logger.info(
                        "Command being used for injecting request {0}: {1}".
                        format(self.prepid, cmd))
                    _, stdout, stderr = executor.execute(cmd)
                    if not stdout and not stderr:
                        self.injection_error(
                            'ssh error for request {0} injection'.format(
                                self.prepid), req)
                        return False
                    output = stdout.read()
                    error = stderr.read()
                    self.injection_error(output, None)
                    self.injection_error(error, None)
                    if error and not output:  # money on the table that it will break as well?
                        self.injection_error(
                            'Error in wmcontrol: {0}'.format(error), req)
                        return False

                    injected_requests = [
                        l.split()[-1] for l in output.split('\n')
                        if l.startswith('Injected workflow:')
                    ]

                    if not injected_requests:
                        self.injection_error(
                            'Injection has succeeded but no request manager names were registered. Check with administrators. \nOutput: \n%s\n\nError: \n%s'
                            % (output, error), req)
                        return False

                    # another great structure
                    added_requests = [{
                        'name': app_req,
                        'content': {
                            'pdmv_prep_id': self.prepid
                        }
                    } for app_req in injected_requests]
                    requests = req.get_attribute('reqmgr_name')
                    requests.extend(added_requests)
                    req.set_attribute('reqmgr_name', requests)
                    # inject to batch
                    with locker.lock(batch_name):
                        bdb = database('batches')
                        bat = batch(bdb.get(batch_name))
                        bat.add_requests(added_requests)
                        bat.update_history({
                            'action': 'updated',
                            'step': self.prepid
                        })
                        saved = bdb.update(bat.json())
                    if not saved:
                        self.injection_error(
                            'There was a problem with registering request in the batch {0}'
                            .format(batch_name), req)
                        return False
                    # and in the end update request in database
                    req.update_history({
                        'action': 'inject',
                        'step': batch_name
                    })
                    req.set_status(
                        step=req._json_base__status.index('submitted'),
                        with_notification=True)
                    saved = self.request_db.update(req.json())
                    if not saved:
                        self.injection_error(
                            'Could not update request {0} in database'.format(
                                self.prepid), req)
                        return False
                    for added_req in added_requests:
                        self.inject_logger.info(
                            'Request {0} sent to {1}'.format(
                                added_req['name'], batch_name))

                    return True
                finally:  # lower batch semaphore, created at submission time
                    semaphore_events.decrement(batch_name)

            finally:  # finally release Submitter lock
                self.lock.release()
                try:
                    executor.close_executor()
                except UnboundLocalError:
                    pass
        except Exception:
            self.injection_error(
                'Error with injecting the {0} request:\n{1}'.format(
                    self.prepid, traceback.format_exc()), None)
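To make the "Injected workflow:" parsing in internal_run above easier to follow, here is a small self-contained sketch with made-up wmcontrol output; only lines starting with "Injected workflow:" contribute, and the last whitespace-separated token is taken as the request manager name.

output = ('Working on the request...\n'
          'Injected workflow: pdmvserv_task_EXA-Campaign-00001_v1\n'
          'Injected workflow: pdmvserv_task_EXA-Campaign-00001_v2\n')
injected_requests = [l.split()[-1] for l in output.split('\n')
                     if l.startswith('Injected workflow:')]
# injected_requests == ['pdmvserv_task_EXA-Campaign-00001_v1',
#                       'pdmvserv_task_EXA-Campaign-00001_v2']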
Code example #43
File: mcm_database.py Project: peiffer/cmsPdmV
    def __save_to_cache(self, key, value):
        from tools.locker import locker
        # guard the shared in-memory cache with a per-key lock while writing
        with locker.lock(key):
            self.cache_dictionary[key] = value
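A matching reader is not shown in this snippet; a possible companion, sketched here under the assumption that lookups use the same cache_dictionary and the same per-key lock (the method name is hypothetical), could look like this.

    def __read_from_cache(self, key):
        from tools.locker import locker
        # take the same per-key lock as the writer so a reader never sees a half-updated entry
        with locker.lock(key):
            return self.cache_dictionary.get(key)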
Code example #44
File: communicator.py Project: vlimant/cmsPdmV
    def sendMail(self,
                 destination,
                 subject,
                 text,
                 sender=None,
                 reply_msg_ID=None,
                 accumulate=False):

        if not isinstance(destination, list):
            print "Cannot send email. destination should be a list of strings"
            return

        destination.sort()
        msg = MIMEMultipart()
        # it can happen that messages are sent after forking or threading, when there is no current user anymore
        msg['From'] = sender if sender else '*****@*****.**'

        ## add a mark on the subject automatically
        if locator().isDev():
            msg['Subject'] = '[McM-dev] ' + subject
            destination = ["*****@*****.**"] # if -dev send only to service account and sender
            if sender:
                destination.append(sender)
        else:
            msg['Subject'] = '[McM] ' + subject

        msg['To'] = COMMASPACE.join(destination)
        msg['Date'] = formatdate(localtime=True)
        new_msg_ID = make_msgid()
        msg['Message-ID'] = new_msg_ID
        
        if reply_msg_ID is not None:
            msg['In-Reply-To'] = reply_msg_ID
            msg['References'] = reply_msg_ID

        ### accumulate messages prior to sending emails
        com__accumulate = settings().get_value('com_accumulate')
        force_com_accumulate = settings().get_value('force_com_accumulate')
        if force_com_accumulate or (accumulate and com__accumulate):
            with locker.lock('accumulating_notifcations'):
                # get a subject where the request name is taken out
                subject_type = " ".join(filter(lambda w: w.count('-') != 2, msg['Subject'].split()))
                addressees = msg['To']
                sendee = msg['From']
                key = (subject_type, sendee, addressees)
                if key in self.cache:
                    self.cache[key]['Text'] += '\n\n'
                    self.cache[key]['Text'] += text
                    self.cache[key]['N'] += 1
                else:
                    self.cache[key] = {'Text': text, 'N': 1}
                #self.logger.log('Got a message in cache %s'% (self.cache.keys()))
                return new_msg_ID


        ## add a signature automatically
        text += '\n\n'
        text += 'McM Announcing service'

        try:
            msg.attach(MIMEText(text))
            smtpObj = smtplib.SMTP()
            smtpObj.connect()
            smtpObj.sendmail(sender, destination, msg.as_string())
            smtpObj.quit()
            return new_msg_ID
        except Exception as e:
            print "Error: unable to send email", e.__class__
Code example #45
    def add(self, label, setting):
        with locker.lock(label):
            result = self.__db.save(setting)
            if result:
                self.cache[label] = setting
            return result
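Finally, a sketch of a possible read path that serves from the in-memory cache first and falls back to the database under the same per-label lock; the get method and the db wrapper's get(label) call are assumptions, not part of the original snippet.

    def get(self, label):
        with locker.lock(label):
            if label in self.cache:
                return self.cache[label]
            setting = self.__db.get(label)  # assumes the db wrapper exposes get(label)
            if setting:
                self.cache[label] = setting
            return setting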