Beispiel #1
0
    def work(self):
        '''Main worker loop: pop jobs from our queues and process each one
        in its own greenlet, sleeping between passes when no work is found.
        Returns only on KeyboardInterrupt.'''
        # We should probably open up our own redis client
        self.client = qless.client(self.host, self.port)
        # Replace the configured queue *names* with live queue objects.
        self.queues = [self.client.queues[q] for q in self.queues]

        # Make sure our working directory exists before jobs run in it.
        if not os.path.isdir(self.sandbox):
            os.makedirs(self.sandbox)

        # Imported here (not at module top) so gevent is only required when
        # this greenlet-based worker is actually used.
        from gevent.pool import Pool
        from gevent import sleep, Greenlet
        pool = Pool(self.pool_size)
        while True:
            try:
                # Tracks whether any queue yielded a job during this pass.
                seen = False
                for queue in self.queues:
                    # Wait until a greenlet is available
                    pool.wait_available()
                    job = queue.pop()
                    if job:
                        # For whatever reason, doing imports within a greenlet
                        # (there's one implicitly invoked in job.process), was
                        # throwing exceptions. The relatively ghetto way to get
                        # around this is to force the import to happen before
                        # the greenlet is spawned.
                        _module = job.klass
                        seen = True
                        pool.start(Greenlet(job.process))

                if not seen:
                    # No job on any queue; back off before polling again.
                    logger.debug('Sleeping for %fs' % self.interval)
                    sleep(self.interval)
            except KeyboardInterrupt:
                return
Beispiel #2
0
    def run(self):
        '''Work on jobs.

        Registers signal handlers, monkey-patches (gevent) before any other
        imports happen, then pops jobs from the job generator and processes
        each in its own greenlet until shutdown is requested or the
        generator is exhausted. Always waits for outstanding greenlets
        before returning.'''
        # Register signal handlers
        self.signals()

        # And monkey-patch before doing any imports
        self.patch()

        # Start listening
        with self.listener():
            try:
                generator = self.jobs()
                while not self.shutdown:
                    # Block until the pool can accept another greenlet.
                    self.pool.wait_available()
                    # Use the builtin next() instead of generator.next(),
                    # which exists only on Python 2 (see the identical
                    # sibling implementation that already does this).
                    job = next(generator)
                    if job:
                        # For whatever reason, doing imports within a greenlet
                        # (there's one implicitly invoked in job.process), was
                        # throwing exceptions. The hacky way to get around this
                        # is to force the import to happen before the greenlet
                        # is spawned.
                        job.klass
                        greenlet = gevent.Greenlet(self.process, job)
                        # Track greenlets by job id so other machinery can
                        # find (and e.g. kill) the one running a given job.
                        self.greenlets[job.jid] = greenlet
                        self.pool.start(greenlet)
                    else:
                        logger.debug('Sleeping for %fs' % self.interval)
                        gevent.sleep(self.interval)
            except StopIteration:
                logger.info('Exhausted jobs')
            finally:
                logger.info('Waiting for greenlets to finish')
                self.pool.join()
Beispiel #3
0
 def work(self):
     '''Main worker loop: pop jobs from our queues and run each in its own
     greenlet, sleeping between empty passes. Returns on KeyboardInterrupt.'''
     # We should probably open up our own redis client
     self.client = qless.client(url=self.host)
     # Swap configured queue names for live queue objects.
     self.queues = [self.client.queues[q] for q in self.queues]

     # Ensure the working directory exists before any job runs in it.
     if not os.path.isdir(self.sandbox):
         os.makedirs(self.sandbox)

     # Local import so gevent is only required when this worker is used.
     from gevent.pool import Pool
     from gevent import sleep, Greenlet
     pool = Pool(self.pool_size)
     while True:
         try:
             # Did any queue produce a job this pass?
             seen = False
             for queue in self.queues:
                 # Wait until a greenlet is available
                 pool.wait_available()
                 job = queue.pop()
                 if job:
                     # For whatever reason, doing imports within a greenlet
                     # (there's one implicitly invoked in job.process), was
                     # throwing exceptions. The relatively ghetto way to get
                     # around this is to force the import to happen before
                     # the greenlet is spawned.
                     _module = job.klass
                     seen = True
                     pool.start(Greenlet(job.process))

             if not seen:
                 # Nothing to do; back off before polling again.
                 logger.debug('Sleeping for %fs' % self.interval)
                 sleep(self.interval)
         except KeyboardInterrupt:
             return
Beispiel #4
0
    def run(self):
        '''Work on jobs: register signal handlers, then pop jobs from the
        job generator and process each in its own greenlet until shutdown
        is requested or the generator is exhausted. Always joins the pool
        on the way out.'''
        # Register signal handlers
        self.signals()

        # Start listening
        with self.listener():
            try:
                generator = self.jobs()
                while not self.shutdown:
                    # Block until the pool can accept another greenlet.
                    self.pool.wait_available()
                    job = next(generator)
                    if job:
                        # For whatever reason, doing imports within a greenlet
                        # (there's one implicitly invoked in job.process), was
                        # throwing exceptions. The hacky way to get around this
                        # is to force the import to happen before the greenlet
                        # is spawned.
                        job.klass
                        greenlet = gevent.Greenlet(self.process, job)
                        # Track greenlets by job id so other machinery can
                        # locate the one running a given job.
                        self.greenlets[job.jid] = greenlet
                        self.pool.start(greenlet)
                    else:
                        # No job available; back off before asking again.
                        logger.debug('Sleeping for %fs' % self.interval)
                        gevent.sleep(self.interval)
            except StopIteration:
                logger.info('Exhausted jobs')
            finally:
                logger.info('Waiting for greenlets to finish')
                self.pool.join()
Beispiel #5
0
 def clean(cls, path):
     '''Remove every file and directory directly under the given path.'''
     for entry in os.listdir(path):
         full = os.path.abspath(os.path.join(path, entry))
         if not os.path.isdir(full):
             logger.debug('Removing file %s' % full)
             os.remove(full)
         else:
             logger.debug('Removing directory %s' % full)
             shutil.rmtree(full)
Beispiel #6
0
 def clean(cls, path):
     '''Delete everything (files and subdirectories) found inside path.'''
     targets = (os.path.abspath(os.path.join(path, name))
                for name in os.listdir(path))
     for target in targets:
         if os.path.isdir(target):
             logger.debug('Removing directory %s' % target)
             shutil.rmtree(target)
         else:
             logger.debug('Removing file %s' % target)
             os.remove(target)
Beispiel #7
0
 def title(cls, message=None, level='INFO'):
     '''Set the title of the process, or return the current title.

     :param message: new title suffix; when None, the current process
         title is returned instead of being changed.
     :param level: log level ('DEBUG' or 'INFO') at which to echo the
         message; any other value logs nothing.
     '''
     # Identity comparison -- `message == None` invokes __eq__ and can
     # misbehave for message types with custom equality.
     if message is None:
         return getproctitle()
     else:
         setproctitle('qless-py-worker %s' % message)
         if level == 'DEBUG':
             logger.debug(message)
         elif level == 'INFO':
             logger.info(message)
Beispiel #8
0
 def heartbeat(self):
     '''Renew our lock on this job, pushing any updated user data along.

     Records and returns the new expiration time; raises
     LostLockException when the server refuses the heartbeat.'''
     logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
     try:
         response = self.client(
             'heartbeat', self.jid, self.client.worker_name,
             json.dumps(self.data))
         self.expires_at = float(response or 0)
     except QlessException:
         raise LostLockException(self.jid)
     logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
     return self.expires_at
Beispiel #9
0
 def heartbeat(self):
     '''Ask the server to extend our lock on this job.

     On success, stores and returns the new expiration timestamp; when
     the server rejects the heartbeat, raises LostLockException.'''
     logger.debug('Heartbeating %s (ttl = %s)', self.jid, self.ttl)
     payload = json.dumps(self.data)
     try:
         self.expires_at = float(
             self.client('heartbeat', self.jid,
                         self.client.worker_name, payload) or 0)
     except QlessException:
         raise LostLockException(self.jid)
     logger.debug('Heartbeated %s (ttl = %s)', self.jid, self.ttl)
     return self.expires_at
Beispiel #10
0
 def sandbox(cls, path):
     '''Ensures path exists and is empty before yielding, cleans up after.

     Uses EAFP (try/except OSError) rather than an exists() check so we
     don't race another process creating the same directory between the
     check and makedirs -- matching the sibling implementations of this
     method elsewhere in the codebase.'''
     try:
         os.makedirs(path)
         logger.debug('Making %s' % path)
     except OSError:
         # "Already exists" is fine; anything else is a real failure.
         if not os.path.isdir(path):
             raise
     cls.clean(path)
     # Then yield, but make sure to clean up the directory afterwards
     try:
         yield
     finally:
         cls.clean(path)
Beispiel #11
0
 def sandbox(cls, path):
     '''Guarantee that path exists and is empty on entry, and empty it
     again on exit, no matter how the with-block finishes.'''
     # Create the directory on first use, then start from a clean slate.
     if not os.path.exists(path):
         logger.debug('Making %s' % path)
         os.makedirs(path)
     cls.clean(path)
     try:
         # Hand control back to the caller with a clean sandbox.
         yield
     finally:
         # Always scrub the sandbox, even if the body raised.
         cls.clean(path)
Beispiel #12
0
 def heartbeat(self):
     """Renew the heartbeat, if possible, and optionally update the job's
     user data.

     Records and returns the new expiration timestamp; raises
     LostLockException when the server reports we no longer hold the
     lock."""
     logger.debug("Heartbeating %s (ttl = %s)" % (self.jid, self.ttl))
     try:
         self.expires_at = float(
             self.client("heartbeat", self.jid, self.client.worker_name, json.dumps(self.data)) or 0
         )
     except QlessException:
         # Was a bare Python-2-only `print` statement -- a SyntaxError on
         # Python 3 and debug noise on stdout. Route it through the
         # logger instead.
         logger.debug("Raising exception")
         raise LostLockException(self.jid)
     logger.debug("Heartbeated %s (ttl = %s)" % (self.jid, self.ttl))
     return self.expires_at
Beispiel #13
0
 def work(self):
     '''Main worker loop: first resume any jobs this worker is already
     responsible for (self.jids), then poll the queues forever, processing
     jobs inline. Optionally exits after sustained idleness when
     self.stop_on_idle is set. Returns on KeyboardInterrupt.'''
     # We should probably open up our own redis client
     self.client = qless.client(url=self.host)
     # Swap configured queue names for live queue objects.
     self.queues = [self.client.queues[q] for q in self.queues]

     # Ensure the working directory exists and start from a clean slate.
     if not os.path.isdir(self.sandbox):
         os.makedirs(self.sandbox)
     self.clean()
     # First things first, we should clear out any jobs that
     # we're responsible for off-hand
     while len(self.jids):
         try:
             job = self.client.jobs[self.jids.pop(0)]
             # If we still have access to it, then we should process it
             if job.heartbeat():
                 logger.info('Resuming %s' % job.jid)
                 self.setproctitle('Working %s (%s)' % (job.jid, job.klass_name))
                 job.process()
                 self.clean()
             else:
                 logger.warn('Lost heart on would-be resumed job %s' % job.jid)
         except KeyboardInterrupt:
             return

     # Counts consecutive idle passes; -1 flags "saw a job this pass" so
     # the counter is reset (to 0) instead of incremented below.
     sleep_cycles = 0
     while True:
         try:
             for queue in self.queues:
                 job = queue.pop()
                 if job:
                     sleep_cycles = -1
                     self.setproctitle('Working %s (%s)' % (job.jid, job.klass_name))
                     job.process()
                     # Scrub the sandbox between jobs.
                     self.clean()

             # After several idle passes in a row, optionally give up.
             if self.stop_on_idle and sleep_cycles >= 2:
                 logger.info("Idle for too long, quiting")
                 import sys
                 sys.exit(self.IDLE_EXIT_STATUS)
             if sleep_cycles >= 0:
                 # Idle pass: advertise it, back off, and count it.
                 self.setproctitle('sleeping...')
                 logger.debug('Sleeping for %fs' % self.interval)
                 time.sleep(self.interval)
                 sleep_cycles += 1
             else:
                 # We worked this pass; restart the idle counter.
                 sleep_cycles = 0
         except KeyboardInterrupt:
             return
Beispiel #14
0
 def sandbox(cls, path):
     '''Make sure path exists and is empty before yielding; always empty
     it again afterwards.'''
     # EAFP: attempt the makedirs and tolerate "already exists".
     try:
         os.makedirs(path)
         logger.debug('Making %s' % path)
     except OSError:
         if os.path.isdir(path):
             # Directory was already there -- that's fine.
             pass
         else:
             raise
     finally:
         # Start the caller off with an empty sandbox either way.
         cls.clean(path)
     try:
         yield
     finally:
         # And leave it empty when the caller is done.
         cls.clean(path)
Beispiel #15
0
 def sandbox(cls, path):
     '''Provide an existing, empty directory at path for the duration of
     the with-block, emptying it again on exit.'''
     try:
         # Create first and catch the failure, avoiding a
         # check-then-create race with other workers.
         os.makedirs(path)
         logger.debug('Making %s' % path)
     except OSError:
         if not os.path.isdir(path):
             # Some error other than "already exists" -- propagate it.
             raise
     finally:
         cls.clean(path)
     # Hand control to the caller, then scrub the sandbox no matter what.
     try:
         yield
     finally:
         cls.clean(path)
Beispiel #16
0
    def work(self):
        '''Main worker loop: first resume any jobs this worker is already
        responsible for (self.jids), then poll the queues forever,
        processing jobs inline and sleeping between empty passes. Returns
        on KeyboardInterrupt.'''
        # We should probably open up our own redis client
        self.client = qless.client(self.host, self.port)
        # Swap configured queue names for live queue objects.
        self.queues = [self.client.queues[q] for q in self.queues]

        # Ensure the working directory exists and start from a clean slate.
        if not os.path.isdir(self.sandbox):
            os.makedirs(self.sandbox)
        self.clean()
        # First things first, we should clear out any jobs that
        # we're responsible for off-hand
        while len(self.jids):
            try:
                job = self.client.jobs[self.jids.pop(0)]
                # If we still have access to it, then we should process it
                if job.heartbeat():
                    logger.info('Resuming %s' % job.jid)
                    self.setproctitle('Working %s (%s)' %
                                      (job.jid, job.klass_name))
                    job.process()
                    # Scrub the sandbox between jobs.
                    self.clean()
                else:
                    logger.warn('Lost heart on would-be resumed job %s' %
                                job.jid)
            except KeyboardInterrupt:
                return

        while True:
            try:
                # Did any queue produce a job this pass?
                seen = False
                for queue in self.queues:
                    job = queue.pop()
                    if job:
                        seen = True
                        self.setproctitle('Working %s (%s)' %
                                          (job.jid, job.klass_name))
                        job.process()
                        self.clean()

                if not seen:
                    # Idle pass: advertise it and back off before polling.
                    self.setproctitle('sleeping...')
                    logger.debug('Sleeping for %fs' % self.interval)
                    time.sleep(self.interval)
            except KeyboardInterrupt:
                return
Beispiel #17
0
 def work(self):
     '''Main worker loop: first resume any jobs this worker is already
     responsible for (self.jids), then poll the queues forever, processing
     jobs inline and sleeping between empty passes. Returns on
     KeyboardInterrupt.'''
     # We should probably open up our own redis client
     self.client = qless.client(self.host, self.port, password=self.password)
     # Swap configured queue names for live queue objects.
     self.queues = [self.client.queues[q] for q in self.queues]

     # Ensure the working directory exists and start from a clean slate.
     if not os.path.isdir(self.sandbox):
         os.makedirs(self.sandbox)
     self.clean()
     # First things first, we should clear out any jobs that
     # we're responsible for off-hand
     while len(self.jids):
         try:
             job = self.client.jobs[self.jids.pop(0)]
             # If we still have access to it, then we should process it
             if job.heartbeat():
                 logger.info('Resuming %s' % job.jid)
                 self.setproctitle('Working %s (%s)' % (job.jid, job.klass_name))
                 job.process()
                 # Scrub the sandbox between jobs.
                 self.clean()
             else:
                 logger.warn('Lost heart on would-be resumed job %s' % job.jid)
         except KeyboardInterrupt:
             return

     while True:
         try:
             # Did any queue produce a job this pass?
             seen = False
             for queue in self.queues:
                 job = queue.pop()
                 if job:
                     seen = True
                     self.setproctitle('Working %s (%s)' % (job.jid, job.klass_name))
                     job.process()
                     self.clean()

             if not seen:
                 # Idle pass: advertise it and back off before polling.
                 self.setproctitle('sleeping...')
                 logger.debug('Sleeping for %fs' % self.interval)
                 time.sleep(self.interval)
         except KeyboardInterrupt:
             return