Exemplo n.º 1
0
 def __init__(self, jid, password, host=None, port=5222, logid=None):
     """Initialise the XMPP client and its per-connection logger.

     When *logid* is given, the log adapter tags every record with it;
     otherwise an untagged adapter is used.
     """
     XMPPClient.__init__(self, jid, password, host=host, port=port)
     if logid is None:
         self.log = getLoggerAdapter(log)
     else:
         self.log = getLoggerAdapter(log, id=logid)
     # React to stream-level errors as soon as the stream is set up.
     self.factory.addBootstrap(xmlstream.STREAM_ERROR_EVENT,
                               self._onStreamError)
     # Timestamps used to track authentication failures / connection lifetime.
     self._authFailureTime = None
     self._lastTimeAuth = None
     self._connectedTime = None
Exemplo n.º 2
0
class XMPPKeepAliveSupervisor(Supervisor, Subject):
    """Supervisor that periodically notifies observers to keep XMPP links alive
    and records how long each keep-alive round takes."""
    name = 'XMPP_KEEPALIVE_SUPERVISOR'
    log = getLoggerAdapter(log, id=name)

    def __init__(self):
        Supervisor.__init__(self)
        Subject.__init__(self)
        self.lastTime = datetime.utcnow()
        # Sampled metric: elapsed time of each keep-alive round (scaled to msec).
        self.metric = Metric(name='elapsed_time', value=None, unit='msec',
                             source=self.name, scale=1000, sampling=True)

    def perform_keep_alive(self):
        """Notify all observers and record the elapsed time as a metric."""
        self.lastTime = datetime.utcnow()
        self.notifyObservers()
        delta = datetime.utcnow() - self.lastTime
        elapsed_usecs = delta.seconds * 1000000 + delta.microseconds
        self.log.info('Finished XMPP_KEEP_ALIVE. Elapsed %s usecs', elapsed_usecs)
        self.metric.add(elapsed_usecs)

    def startService(self):
        """Start the supervisor; schedule the keep-alive loop when enabled."""
        Supervisor.startService(self)
        if conf.XMPP_KEEP_ALIVE_TIME <= 0:
            return
        task = LoopingCall(self.perform_keep_alive)
        self.registerTask(task)
        task.start(conf.XMPP_KEEP_ALIVE_TIME, now=False)
Exemplo n.º 3
0
def new_onRosterSet(f):
    """Build a replacement for wokkel's roster-set handler.

    The wrapped handler rejects pushes from unexpected senders and malformed
    iq stanzas; ignored pushes surface as a 'service-unavailable' stanza error.
    """
    log = getLoggerAdapter(getLogger('wokkel_onRosterSet', level='INFO'),
                           id='TWISTED')

    @wraps(f)
    def _onRosterSet(self, iq):
        def _ignored_to_stanza_error(failure):
            # Translate an ignored roster push into a stanza-level error.
            failure.trap(RosterPushIgnored)
            raise error.StanzaError('service-unavailable')

        request = RosterRequest.fromElement(iq)

        sender = request.sender
        untrusted_sender = (not self.allowAnySender and sender and
                            sender.userhostJID() != self.parent.jid.userhostJID())
        if untrusted_sender:
            d = defer.fail(RosterPushIgnored())
        elif request.item is None:
            log.info('_onRosterSet iq malformed. %s', iq.toXml())
            d = defer.fail(RosterPushIgnored())
        elif request.item.remove:
            d = defer.maybeDeferred(self.removeReceived, request)
        else:
            d = defer.maybeDeferred(self.setReceived, request)
        d.addErrback(_ignored_to_stanza_error)
        return d

    return _onRosterSet
Exemplo n.º 4
0
 def __init__(self, application, request, **kwargs):
     """Initialise the request handler and its per-request bookkeeping fields."""
     cyclone.web.RequestHandler.__init__(self, application, request,
                                         **kwargs)
     self.log = getLoggerAdapter(log)
     # Filled in later during request processing (see constructor()).
     self.key = ''
     self.args = ''
     self.response = ''
Exemplo n.º 5
0
 def __init__(self, collectionName, mongourl=None, indexes=None):
     """Bind this model to a mongo collection, reusing the shared pool.

     setup() lazily creates the class-wide connection; self.db/self.pool
     then alias the shared database handle and connection pool.
     """
     self.setup(url=mongourl)
     self.db = self.mongo_db
     self.collection = collectionName
     self.pool = self.mongo_conn
     self.indexes = indexes
     # Logger tagged per collection, e.g. "DATA-USERS".
     logger = getLoggerAdapter(log, id="DATA-%s" % (collectionName.upper()))
     Model.__init__(self, logging=logger)
Exemplo n.º 6
0
 def constructor(self, key, args_class=None, metric=None):
     """Per-request initialisation: logging, metric, parsed args and UA check.

     Raises cyclone.web.HTTPError(403) when the User-Agent is rejected.
     """
     self.log = getLoggerAdapter(log, id=key)
     self.metric = metric
     self.key = key
     if args_class is None:
         self.args = ''
     else:
         self.args = args_class(self).args
     ua_header = self.request.headers.get('User-Agent', '')
     self.user_agent = CheckUserAgent(ua_header)
     if not self.user_agent:
         raise cyclone.web.HTTPError(403)
Exemplo n.º 7
0
 def setup(cls, url=None, log=None):
     """Lazily create the class-wide redis connection pool (idempotent).

     Subsequent calls are no-ops once cls.redis_conn exists. Falls back to
     conf.REDIS_URL when *url* is not given.
     """
     if cls.redis_conn is None:
         if url is None:
             url = conf.REDIS_URL
         # url_parse yields (host, port, db, username, password); username unused.
         hostname, port, db, _, password = url_parse(url, 'redis')
         cls.redis_db = int(db)
         AuthRedisProtocol.password = password
         if log is None:
             log = getLoggerAdapter(getLogger(__name__),
                                    id='REDIS_CONNECTIONPOOL')
         cls.log = AuthRedisProtocol.log = log
         RedisFactory.protocol = AuthRedisProtocol
         cls.redis_conn = redis.lazyConnectionPool(host=hostname,
                                                   port=port,
                                                   dbid=cls.redis_db,
                                                   poolsize=conf.REDIS_POOL,
                                                   reconnect=True)
Exemplo n.º 8
0
class HerokuUnidlingSupervisor(Supervisor):
    """Supervisor that periodically fetches a URL so Heroku never idles the dyno."""
    name = 'HEROKU_UNIDLING_SUPERVISOR'
    log = getLoggerAdapter(log, id=name)

    @defer.inlineCallbacks
    def avoidHerokuUnidling(self, url):
        """Fetch *url* once, logging (but swallowing) any error — best effort."""
        try:
            self.log.info('AVOIDING HEROKU IDLING: %s', url)
            yield cyclone.httpclient.fetch(url)
        except Exception as e:
            self.log.err(e, 'Exception in avoidHerokuUnidling')

    def startService(self):
        """Start the service and, when configured, a 30-minute fetch loop."""
        Supervisor.startService(self)
        # Fixed idiom: "x is not None" instead of "not x is None" (PEP 8 / E714).
        if conf.HEROKU_UNIDLING_URL is not None:
            t = LoopingCall(self.avoidHerokuUnidling, conf.HEROKU_UNIDLING_URL)
            self.registerTask(t)
            t.start(1800, now=True)
Exemplo n.º 9
0
class MetricsSupervisor(Supervisor, Subject):
    """Supervisor that periodically asks its observers — and the global
    MetricsHub — to report their metrics."""
    name = 'METRICS_SUPERVISOR'
    log = getLoggerAdapter(log, id=name)

    def __init__(self):
        Supervisor.__init__(self)
        Subject.__init__(self)

    def report(self):
        """Notify registered observers, then flush the global metrics hub."""
        self.notifyObservers()
        MetricsHub().report()

    def startService(self):
        """Start the service and schedule the periodic report loop."""
        Supervisor.startService(self)
        t = LoopingCall(self.report)
        self.registerTask(t)
        # now=False: first report happens one full interval after startup.
        t.start(conf.METRICS_REPORT_TIME, now=False)

    def stopService(self):
        Supervisor.stopService(self)
Exemplo n.º 10
0
 def __init__(self):
     """Build the cyclone application: routes, settings, logging and metrics."""
     handlers = [
         (r"/1/google/messages/(.+)", v1.handlers.GoogleMessagesHandler),
         (r"/1/google/contacts/(.+)", v1.handlers.GoogleContactsHandler),
         (r"/1/google/(.+)", v1.handlers.GoogleHandler)
     ]
     settings = {'debug': conf.CYCLONE_DEBUG}
     cyclone.web.Application.__init__(self, handlers, **settings)
     self.log = getLoggerAdapter(log)
     # Sampled timing metric for request handling.
     self.metric = Metric(name='time',
                          value=None,
                          unit=v1.handlers.METRIC_UNIT_TIME,
                          source=v1.handlers.METRIC_SOURCE,
                          sampling=True)
     # One request counter per HTTP status class (1xx..5xx), keyed by the
     # leading digit of the status code.
     self.metric_response_codes = {
         status_class: Metric(name='response_%sXX' % status_class,
                              value=None,
                              unit='requests',
                              source=v1.handlers.METRIC_SOURCE)
         for status_class in (1, 2, 3, 4, 5)
     }
Exemplo n.º 11
0
 def setup(cls, url=None, log=None):
     """Lazily create the class-wide mongo connection pool (idempotent).

     Subsequent calls are no-ops once cls.mongo_conn exists. Falls back to
     conf.MONGO_URL when *url* is not given.
     """
     if cls.mongo_conn is None:
         if url is None:
             url = conf.MONGO_URL
         # url_parse yields (host, port, database, username, password).
         hostname, port, cls.mongo_db, username, password = url_parse(
             url, 'mongodb')
         AuthMongoProtocol.database = cls.mongo_db
         AuthMongoProtocol.username = username
         AuthMongoProtocol.password = password
         if log is None:
             log = getLoggerAdapter(getLogger(__name__),
                                    id='MONGO_CONNECTIONPOOL')
         cls.log = AuthMongoProtocol.log = log
         # Configure the factory's retry/backoff policy before the pool exists.
         txmongo._MongoFactory.protocol = AuthMongoProtocol
         txmongo._MongoFactory.maxRetries = conf.BACKEND_MAX_RETRIES
         txmongo._MongoFactory.maxDelay = conf.BACKEND_MAX_DELAY
         cls.mongo_conn = txmongo.lazyMongoConnectionPool(
             host=hostname,
             port=port,
             reconnect=True,
             pool_size=conf.MONGO_POOL)
Exemplo n.º 12
0
class AsynchronousCall(SynchronousCall):
    """SynchronousCall variant intended for asynchronous dispatch.

    When conf.DIST_ASYNC_AS_SYNC is set the call behaves synchronously and
    keeps results for the configured TTL; otherwise results are not retained.
    """

    def __init__(self, queue):
        SynchronousCall.__init__(self, queue)
        self.sync = conf.DIST_ASYNC_AS_SYNC
        # Only keep results when running in synchronous mode.
        self.result_ttl = conf.DIST_DEFAULT_TTL if self.sync else 0


if __name__ == '__main__':
    from katoo import KatooApp
    from katoo.api import API
    from twisted.internet import reactor
    from katoo.rqtwisted.worker import Worker
    from katoo.data import GoogleUser
    from pickle import dumps

    my_log = getLoggerAdapter(getLogger(__name__))

    @defer.inlineCallbacks
    def example():
        job = Job(job_id="123")
        pickled_rv = dumps({'results': None})
        yield job.save()
        yield job.connection.hset(job.key, 'result', pickled_rv)
        res = yield job.result
        my_log.debug('Job result %s', res)

        user = GoogleUser(_userid="1",
                          _token="asdasdf",
                          _refreshtoken="refreshtoken",
                          _resource="unknownresource",
                          _pushtoken="",
Exemplo n.º 13
0
    @AsynchronousCall(conf.DIST_QUEUE_PUSH)
    def sendpush(self, message, token, badgenumber, sound='', **kwargs):
        """Send an APN push notification (dispatched via the push queue)."""
        self.log.debug(
            'SEND_PUSH: %s token: %s, badgenumber: %s, sound: %s kwargs: %s',
            message, token, badgenumber, sound, kwargs)
        return self._sendapn(token, message, sound, badgenumber, **kwargs)


if __name__ == '__main__':
    from twisted.internet import reactor, defer, task
    from katoo import KatooApp
    from katoo.utils.applog import getLoggerAdapter, getLogger
    from katoo.rqtwisted import worker
    import os, translate

    my_log = getLoggerAdapter(getLogger(__name__, level="DEBUG"), id='MYLOG')

    #@defer.inlineCallbacks
    def send():
        my_log.debug('Starting send')
        API('TEST').sendpush(
            message=translate.TRANSLATORS['en']._('disconnected'),
            token=os.getenv('PUSHTOKEN', None),
            badgenumber=0,
            sound='')
        my_log.debug('Finished send')

    @defer.inlineCallbacks
    def close():
        try:
            my_log.debug('Starting close')
Exemplo n.º 14
0
class GlobalSupervisor(Supervisor):
    """Top-level supervisor: recovers users from dead or badly-assigned
    workers, restarts stalled migrations and disconnects long-away users."""
    name = 'GLOBAL_SUPERVISOR'
    log = getLoggerAdapter(log, id=name)
    DISCONNECT_AWAY_METRIC = Metric(name='away_user_disconnected',
                                    value=None,
                                    unit='events',
                                    source='XMPPGOOGLE')

    def __init__(self):
        Supervisor.__init__(self)
        # Serialises checkWorkers / reconnectUsers so they never overlap.
        self.lock = defer.DeferredLock()
        self._checkworkerstasks = [
            self.processDeathWorkers, self.processBadAssignedWorkers,
            self.processOnMigrationUsers, self.checkRunningWorkers
        ]
        self._globalmetrics = [RedisMetrics, MongoMetrics]

    def _attach_global_metrics(self):
        """Register the global metric observers on the metrics supervisor."""
        service = KatooApp().getService(MetricsSupervisor.name)
        for metric in self._globalmetrics:
            service.registerObserver(metric())

    @defer.inlineCallbacks
    def checkRunningWorkers(self):
        """Register as dead any live worker whose heartbeat is stale."""
        workers = yield Worker.getWorkers(Worker.redis_workers_keys)
        if workers:
            self.log.info('CHECKING_RUNNING_WORKERS %s', len(workers))
        for worker in workers:
            name = worker.get('name')
            key = worker.get('key')
            lastTime = worker.get('lastTime')
            if key is None or name is None or lastTime is None:
                self.log.warning('WORKER_DATA_WRONG %s', worker)
                continue
            death = worker.get('death')
            if death is None:
                lastTime = parser.parse(lastTime)
                delta = datetime.utcnow() - lastTime
                if delta.seconds > conf.SUPERVISOR_WORKER_REFRESH_TIME:
                    self.log.warning(
                        'REGISTERING_WORKER_DEATH %s has not been updated since %s second(s)',
                        name, delta.seconds)
                    w = Worker([], name=name)
                    w.log = self.log
                    yield w.register_death()

    @defer.inlineCallbacks
    def processOnMigrationUsers(self):
        """Relogin users whose migration stalled beyond the keep-alive window."""
        onMigration_users = yield GoogleUser.get_onMigration()
        total_users = len(onMigration_users)
        if total_users > 0:
            self.log.info("ON_MIGRATION_USERS %s", total_users)
        now = datetime.utcnow()
        for data in onMigration_users:
            user = GoogleUser(**data)
            delta_time = now - user.onMigrationTime
            if delta_time.seconds < conf.XMPP_KEEP_ALIVE_TIME + 30:
                # Still inside the grace period; let keep-alive try to fix it.
                if 60 < delta_time.seconds < 70:
                    self.log.warning(
                        '[%s] USER_MIGRATION_STOPPED %s second(s) ago. Waiting to fix the problem with XMPP_KEEP_ALIVE. User state: %s',
                        user.userid, delta_time.seconds, user)
                continue
            self.log.warning(
                '[%s] USER_MIGRATION_STOPPED %s second(s) ago. Performing new relogin. User state: %s',
                user.userid, delta_time.seconds, user)
            user.worker = user.userid
            user.onMigrationTime = ''
            yield user.save()
            yield API(user.userid).relogin(user, pending_jobs=[])

    @defer.inlineCallbacks
    def getPendingJobs(self, userid, queue_name):
        """Return job ids in *queue_name* belonging to *userid*.

        Jobs that cannot be fetched are removed from the queue.
        """
        queue = Queue(queue_name)
        job_ids = yield queue.job_ids
        jobs = []
        index = 0
        for job_id in job_ids:
            try:
                job = yield Job.fetch(job_id, connection=queue.connection)
                if job.meta.get('userid') == userid:
                    jobs.append(job_id)
            except Exception as e:
                self.log.err(
                    e,
                    '[%s] Exception fetching job %s with index %s while getPendingJobs in queue %s'
                    % (userid, job_id, index, queue_name))
                yield queue.remove(job_id)
            finally:
                index += 1
        defer.returnValue(jobs)

    @defer.inlineCallbacks
    def processDeathWorkers(self):
        """Reassign users of dead login workers and purge the dead workers."""
        #avoid process death workers when service is not running
        death_workers = yield Worker.getWorkers(
            Worker.redis_death_workers_keys) if self.running else []

        if death_workers:
            self.log.info('DEATH_WORKERS %s',
                          [worker.get('name') for worker in death_workers])
        for worker in death_workers:
            name = worker.get('name')
            if conf.DIST_QUEUE_LOGIN in worker.get('queues', []):
                connected_users = yield GoogleUser.get_connected(name)
                total_users = len(connected_users)
                self.log.info(
                    'Reconnecting %s connected user(s) of death worker %s',
                    total_users, name)
                last_user_index = total_users - 1
                for i in xrange(total_users):
                    try:
                        data = connected_users[i]
                        user = GoogleUser(**data)

                        #Update worker as userid to enqueue new jobs in user own queue
                        user.worker = user.userid
                        yield user.save()

                        #Get pending jobs; remove the worker after the last user
                        reactor.callLater(0, self.reloginUser, user, name,
                                          i == last_user_index)
                        self.log.info(
                            '[%s] Reconnecting %s/%s user(s) of worker %s',
                            user.userid, i + 1, total_users, name)
                    except Exception as e:
                        self.log.err(
                            e, '[%s] Exception while reconnecting' %
                            (data['_userid']))

                #Remove worker and queue when no users were assigned
                if total_users == 0:
                    yield self.removeWorker(name)
            else:
                yield self.removeWorker(name)

    @defer.inlineCallbacks
    def processBadAssignedWorkers(self):
        """Relogin users assigned to workers that are neither alive nor dead."""
        assigned_workers = yield GoogleUser.get_assigned_workers()

        running_workers = yield Worker.getWorkers(Worker.redis_workers_keys)
        running_workers = [
            worker.get('name') for worker in running_workers
            if not worker.get('name') is None
        ]

        death_workers = yield Worker.getWorkers(
            Worker.redis_death_workers_keys)
        death_workers = [
            worker.get('name') for worker in death_workers
            if not worker.get('name') is None
        ]

        registered_workers = set(running_workers + death_workers)
        assigned_workers = set(assigned_workers)
        # Workers referenced by users but unknown to the registry.
        bad_workers = assigned_workers.difference(registered_workers)

        if bad_workers:
            self.log.warning(
                'BAD_WORKERS %s are assigned to users. Running %s Death %s',
                bad_workers, len(running_workers), len(death_workers))
            for worker in bad_workers:
                bad_users = yield GoogleUser.get_connected(worker_name=worker)
                total_bad_users = len(bad_users)
                if total_bad_users > 0:
                    self.log.info(
                        'Reconnecting %s users assigned to bad worker %s',
                        total_bad_users, worker)
                last_user_index = total_bad_users - 1
                for i in xrange(total_bad_users):
                    try:
                        data = bad_users[i]
                        user = GoogleUser(**data)
                        user.worker = user.userid
                        yield user.save()

                        reactor.callLater(0, self.reloginUser, user, worker,
                                          i == last_user_index)
                        self.log.info(
                            '[%s] Reconnecting %s/%s user(s) of worker %s',
                            user.userid, i + 1, total_bad_users, worker)
                    except Exception as e:
                        self.log.err(
                            e, '[%s] Exception while reconnecting' %
                            (data['_userid']))

                #Remove worker and queue when no users were assigned
                if total_bad_users == 0:
                    yield self.removeWorker(worker)

    @defer.inlineCallbacks
    def checkWorkers(self):
        """Run every registered worker-check task while the service is running."""
        try:
            for task in self._checkworkerstasks:
                if self.running:
                    yield task()
                else:
                    self.log.info(
                        'CheckWorkers task %s not launched. Supervisor not running',
                        task)
        except Exception as e:
            self.log.err(e, 'Exception in checkWorkers task %s' % (task))

    @defer.inlineCallbacks
    def disconnectAwayUsers(self):
        """Disconnect users flagged as away and push them a notification."""
        away_users = yield GoogleUser.get_away()
        away_users = [] if not away_users else away_users
        self.log.info('CHECKING_AWAY_USERS: %s', len(away_users))
        for data in away_users:
            try:
                user = GoogleUser(**data)
                API(user.userid, queue=user.worker).disconnect(user.userid)
                APNSAPI(user.userid).sendpush(message=u'{0} {1}'.format(
                    u'\ue252',
                    translate.TRANSLATORS[user.lang]._('disconnected')),
                                              token=user.pushtoken,
                                              badgenumber=user.badgenumber,
                                              sound='')
                self.DISCONNECT_AWAY_METRIC.add(1)
            except Exception as e:
                self.log.err(
                    e, '[%s] Exception disconnecting user' % (data['_userid']))

    @defer.inlineCallbacks
    def reconnectUsers(self):
        """Relogin every connected user, draining each one's previous queue."""
        connected_users = yield GoogleUser.get_connected()
        total_users = len(connected_users)
        self.log.info('reconnectUsers reconnecting %s users', total_users)
        for i in xrange(total_users):
            data = connected_users[i]
            try:
                user = GoogleUser(**data)
                worker, user.worker = user.worker, user.userid
                yield user.save()

                #Enqueing in the next loop iteration of twisted event loop
                # BUGFIX: pass the user's PREVIOUS worker (saved in `worker`)
                # so pending jobs are fetched from the old queue. Previously
                # user.worker — already reset to the userid — was passed and
                # the saved `worker` local was never used, unlike the parallel
                # paths in processDeathWorkers/processBadAssignedWorkers.
                reactor.callLater(0, self.reloginUser, user, worker)
                self.log.info('[%s] Reconnecting %s/%s user(s)', user.userid,
                              i + 1, total_users)
            except Exception as e:
                self.log.err(
                    e, '[%s] Exception while reconnecting' % (data['_userid']))

    @defer.inlineCallbacks
    def reloginUser(self, user, last_worker, removeWorker=False):
        """Relogin *user*, carrying over pending jobs from *last_worker*'s queue.

        When *removeWorker* is true the old worker/queue is purged afterwards.
        """
        try:
            pending_jobs = yield self.getPendingJobs(user.userid, last_worker)
            yield API(user.userid).relogin(user, pending_jobs)
        except Exception as e:
            self.log.err(e,
                         '[%s] Exception while reconnecting' % (user.userid))
        finally:
            if removeWorker:
                yield self.removeWorker(last_worker)

    @defer.inlineCallbacks
    def removeWorker(self, name):
        """Remove a worker's death record and empty its queue."""
        self.log.info("Removing worker/queue %s from system", name)
        #Remove worker from death workers
        worker = Worker(queues=[], name=name)
        yield worker.remove(worker.key)

        #Remove own queue of worker
        queue = Queue(name)
        yield queue.empty()

    def startService(self):
        """Start periodic tasks and schedule the warm-up reconnection pass."""
        Supervisor.startService(self)

        t = LoopingCall(self.disconnectAwayUsers)
        self.registerTask(t)
        t.start(conf.TASK_DISCONNECT_SECONDS, now=False)

        if conf.TASK_RECONNECT_ALL_USERS:
            reactor.callLater(conf.TWISTED_WARMUP, self.lock.run,
                              self.reconnectUsers)
        else:
            reactor.callLater(conf.TWISTED_WARMUP, self.lock.run,
                              self.processDeathWorkers)

        if conf.REDIS_WORKERS > 0:
            t = LoopingCall(self.lock.run, self.checkWorkers)
            self.registerTask(t)
            t.start(conf.TASK_CHECK_WORKERS, now=False)

        reactor.callLater(conf.TWISTED_WARMUP, self._attach_global_metrics)
Exemplo n.º 15
0
 def log(self):
     """Lazily build and cache the keyed logger adapter for this object."""
     if self._log is None:
         self._log = getLoggerAdapter(log, id=self.key)
     return self._log
Exemplo n.º 16
0
 def constructor(self):
     """Initialise timer state: last tick time, logger and check intervals."""
     self._time = datetime.utcnow()
     self.log = getLoggerAdapter(getLogger(__name__, "INFO"), id='TIMER')
     self._interval = conf.TIMER_INTERVAL
     # Tolerate up to three missed intervals before the timer is considered late.
     self._maxinterval = self._interval * 3
Exemplo n.º 17
0
            deferred_list.append(self.user.save())
            deferred_list.append(GoogleMessage.updateRemoveTime(self.user.userid, self.user.lastTimeConnected))
            deferred_list.append(GoogleRosterItem.remove(self.user.userid))
        return defer.DeferredList(deferred_list, consumeErrors=True)
    
    def __str__(self):
        """Debug representation: class, address, handler name and owning user."""
        cls_name = self.__class__.__name__
        return '<%s object at %s. name: %s>(user: %s)' % (
            cls_name, hex(id(self)), self.name, self.user)

if __name__ == '__main__':
    # Manual integration run: boot the app, the APNS service and the
    # keep-alive supervisor, then connect one XMPP client built from env vars.
    import os
    from katoo.data import GoogleUser
    from wokkel_extensions import XMPPClient
    from katoo.utils.applog import getLogger, getLoggerAdapter
    from katoo.apns.api import KatooAPNSService
    from katoo.supervisor import XMPPKeepAliveSupervisor
    
    my_log = getLoggerAdapter(getLogger(__name__, level="INFO"), id='MYLOG')
    
    app = KatooApp().app
    KatooAPNSService().service.setServiceParent(app)
    KatooApp().start()
    XMPPKeepAliveSupervisor().setServiceParent(app)
    
    # Route twisted's log events through the application logger.
    import twisted.python.log
    twisted.python.log.startLoggingWithObserver(KatooApp().log.emit)
    # Credentials/tokens come from the environment (TOKEN, REFRESHTOKEN, JID, PUSHTOKEN).
    user = GoogleUser("1", _token=os.getenv('TOKEN'), _refreshtoken=os.getenv('REFRESHTOKEN'), _resource="asdfasdf", _pushtoken=os.getenv('PUSHTOKEN', None), _jid=os.getenv('JID'), _pushsound='cell1.aif', _favoritesound='cell7.aif', _away=True)
    my_log.info('Instance user: %s', user)
    xmppclient = XMPPGoogle(user, app)
    my_log.info('Instance xmppclient: %s', xmppclient)
    xmppclient.log.info("Instance XMPPGoogle %s. Instance ReauthXMPP %s Instance XMPPClient %s Instance GoogleUser %s", isinstance(xmppclient, XMPPGoogle), isinstance(xmppclient, ReauthXMPPClient), isinstance(xmppclient, XMPPClient), isinstance(xmppclient, GoogleUser))
    reactor.run()
Exemplo n.º 18
0
@author: pvicente
'''
from datetime import timedelta
from katoo import conf
from katoo.data import GoogleMessage, GoogleRosterItem, GoogleUser
from katoo.metrics import Metric
from katoo.rqtwisted.queue import Queue, FailedQueue
from katoo.utils.applog import getLogger, getLoggerAdapter
from katoo.utils.connections import RedisMixin
from katoo.utils.patterns import Observer
from katoo.utils.time import Timer
from twisted.internet import defer


log = getLoggerAdapter(getLogger(__name__),id='GLOBAL_METRICS')

class RedisMetrics(Observer):
    SOURCE='REDIS'
    UNIT='keys'
    def __init__(self):
        self._connection = RedisMixin.redis_conn
        self._keys=Metric(name="keys", value=None, unit=self.UNIT, source=self.SOURCE)
        failed_queue_name = FailedQueue().name
        self._items={conf.DIST_QUEUE_LOGIN: Metric(name='queue_%s'%(conf.DIST_QUEUE_LOGIN), value=None, unit=self.UNIT, source=self.SOURCE),
                     conf.DIST_QUEUE_PUSH: Metric(name='queue_%s'%(conf.DIST_QUEUE_PUSH), value=None, unit=self.UNIT, source=self.SOURCE), 
                     conf.DIST_QUEUE_RELOGIN: Metric(name='queue_%s'%(conf.DIST_QUEUE_RELOGIN), value=None, unit=self.UNIT, source=self.SOURCE),
                     failed_queue_name: Metric(name='queue_%s'%(failed_queue_name), value=None, unit=self.UNIT, source=self.SOURCE)
                     }
    
    @defer.inlineCallbacks
Exemplo n.º 19
0
    os.environ[
        'ADOPTED_STREAM'] = ''  #Avoid to perform Mutlprocess Service in child processes

    if conf.MULTIPROCESS > 0:
        m = MultiProcess(__file__, number=conf.MULTIPROCESS)
        m.setServiceParent(application)

# Attach the metrics and keep-alive supervisors to the twisted application.
metrics_supervisor = MetricsSupervisor()
metrics_supervisor.setServiceParent(application)

xmpp_keepalive_supervisor = XMPPKeepAliveSupervisor()
xmpp_keepalive_supervisor.setServiceParent(application)

if conf.REDIS_WORKERS > 0:
    # Configure the worker module from settings before instantiating.
    worker.LOGGING_OK_JOBS = conf.LOGGING_OK_JOBS
    worker.SLEEP_CALL = sleep
    worker.MAX_RETRIES = conf.BACKEND_MAX_RETRIES
    worker.MAX_DELAY_TIME = conf.BACKEND_MAX_DELAY

    # One worker serving the machine-local queue plus the shared login/relogin queues.
    w = worker.Worker(
        [conf.MACHINEID, conf.DIST_QUEUE_LOGIN, conf.DIST_QUEUE_RELOGIN],
        name=conf.MACHINEID,
        loops=conf.REDIS_WORKERS,
        default_result_ttl=conf.DIST_DEFAULT_TTL,
        default_warmup=conf.WORKER_WARMUP,
        default_enqueue_failed_jobs=conf.DIST_ENQUEUE_FAILED_JOBS,
        default_perform_job_in_thread=conf.DIST_PERFORM_JOB_IN_THREAD,
        default_thread_pool_size=conf.DIST_THREAD_POOL)

    w.log = getLoggerAdapter(getLogger('WORKER', level='INFO'), id='WORKER')
    w.setServiceParent(application)
Exemplo n.º 20
0
 def __init__(self, key=None, queue=None, synchronous_call=False):
     """Remember the call key/queue and whether to run synchronously.

     When distribution is disabled no queue is used at all.
     """
     self.key = key
     if conf.DIST_DISABLED:
         self.queue_name = None
     else:
         self.queue_name = queue
     self.enqueued = False
     self.sync = synchronous_call
     self._log = getLoggerAdapter(log, id=self.key)