Example #1
 def __init__(self, timezone=None):
     BaseMaster.__init__(self)
     self.jobqueue = RedisJobQueue(self, **settings.JOB_QUEUE_CONFIG)
     self.jobqueue_lock = RLock()
     self.timezone = timezone or get_localzone()
     self._event = Event()
     self._stopped = True
     self.jobstore = MongoJobStore(self, **settings.JOB_STORE_CONFIG)
     # self.jobstore = MemoryJobStore(self)
     self._internal_buffer_queues = {}
     self._job_maximum_buffer_time = settings.JOB_QUEUE_CONFIG['buffer_time']
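
For context, a sketch of the settings module these constructors unpack. Only the three config names and the 'buffer_time' key are implied by the code on this page; every concrete value below is an assumption for illustration.

# settings.py -- hypothetical values; only the config names and the
# 'buffer_time' key are implied by the examples on this page
JOB_QUEUE_CONFIG = {
    'host': 'localhost',   # assumed Redis connection parameters
    'port': 6379,
    'buffer_time': 300,    # seconds a job may wait in a master buffer queue
}
JOB_STORE_CONFIG = {
    'host': 'localhost',   # assumed MongoDB connection parameters
    'port': 27017,
}
DISTRIBUTED_LOCK_CONFIG = {
    'key': 'elric:lock',   # assumed lock name and timeout for distributed_lock()
    'timeout': 10,
}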
Example #2
 def __init__(self, name, listen_keys=None, worker_num=2, timezone=None, logger_name='elric.worker'):
     BaseWorker.__init__(self, name, logger_name)
     self.jobqueue = RedisJobQueue(self, **settings.JOB_QUEUE_CONFIG)
     self.listen_keys = []
     if listen_keys:
         self.listen_keys = ['%s:%s' % (self.name, listen_key) for listen_key in listen_keys]
     self.timezone = timezone or get_localzone()
     self.internal_job_queue = Queue(maxsize=worker_num)
     self.executor = ProcessPoolExecutor(worker_num, self)
     self._stopped = True
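
A minimal instantiation sketch for this constructor (it belongs to the RQWorker class shown in the last example); the worker name and listen keys here are made up:

# hypothetical usage of the worker constructor above
worker = RQWorker('crawler', listen_keys=['fetch', 'parse'], worker_num=4)
# listen keys get namespaced per worker: ['crawler:fetch', 'crawler:parse']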
Example #3
class RQMasterBase(BaseMaster):

    MIN_WAIT_TIME = 5

    def __init__(self, timezone=None):
        BaseMaster.__init__(self)
        self.jobqueue = RedisJobQueue(self, **settings.JOB_QUEUE_CONFIG)
        self.jobqueue_lock = RLock()
        self.timezone = timezone or get_localzone()
        self._event = Event()
        self._stopped = True
        self.jobstore = MongoJobStore(self, **settings.JOB_STORE_CONFIG)
        # self.jobstore = MemoryJobStore(self)
        # per-key overflow queues, created lazily when a redis queue fills up
        self._internal_buffer_queues = {}
        # how long a job may wait in a buffer queue before being discarded
        self._job_maximum_buffer_time = settings.JOB_QUEUE_CONFIG['buffer_time']

    def submit_job(self, job):
        """
            process submit_job request from worker.
            :type job: Job
        """
        self.log.debug('client submit job, id=%s, key=%s' % (job.id, job.job_key))
        # if the job has no trigger, enqueue it into the job queue immediately
        if not job.trigger:
            self._enqueue_job(job.job_key, job.serialize())
        # otherwise persist it to the job store first
        else:
            with distributed_lock(**settings.DISTRIBUTED_LOCK_CONFIG):
                try:
                    self.jobstore.add_job(job)
                except JobAlreadyExist as e:
                    if job.replace_exist:
                        self.update_job(job)
                    else:
                        self.log.error(e)
            # wake the scheduler loop now that a new job is in the job store
            self.wake_up()

    def update_job(self, job):
        """
            update job in jobstore
            :type job: Job
        """
        try:
            self.jobstore.update_job(job)
        except JobDoesNotExist as e:
            self.log.error(e)

    def remove_job(self, job):
        """
            remove job from jobstore
            :type job: Job
        """
        try:
            self.jobstore.remove_job(job)
        except JobDoesNotExist:
            self.log.error('remove job error. job id %s does not exist' % job.id)

    def finish_job(self, job):
        """
            process finish_job request from worker
            :type job: Job
        """
        self.jobstore.save_execute_record(job)

    def _enqueue_buffer_queue(self, key, job):
        self.log.debug("job queue [%s] is full, put job into buffer queue" % key)
        try:
            self._internal_buffer_queues[key].put((job, datetime.now()))
        except KeyError:
            self._internal_buffer_queues[key] = Queue()
            self._internal_buffer_queues[key].put((job, datetime.now()))
            self.start_process_buffer_job(key)

    def _enqueue_job(self, key, job):
        """
            enqueue job into jobqueue
            :type key: str
            :type job: str or xmlrpc.Binary
        """
        self.log.debug('enqueue job key=[%s]' % key)
        with self.jobqueue_lock:
            # check whether job queue is full
            if not self.jobqueue.is_full(key):
                self.jobqueue.enqueue(key, job)
            else:
                self._enqueue_buffer_queue(key, job)

    def start(self):
        """
            Start the elric master: select all due jobs from the jobstore, enqueue them
            into the redis queue, then update each job's next_run_time (or remove it) in the jobstore.
        :return:
        """
        if self.running:
            raise AlreadyRunningException
        self._stopped = False
        self.log.debug('elric master start...')

        self.start_subscribe_thread()

        while True:
            now = datetime.now(self.timezone)
            wait_seconds = None
            with distributed_lock(**settings.DISTRIBUTED_LOCK_CONFIG):
                for job_id, job_key, serialized_job in self.jobstore.get_due_jobs(now):
                    # enqueue due job into redis queue
                    self._enqueue_job(job_key, serialized_job)
                    # update job's information, such as next_run_time
                    job = Job.deserialize(serialized_job)
                    run_times = Job.get_serial_run_times(job, now)
                    if run_times:
                        next_run_time = Job.get_next_trigger_time(job, run_times[-1])
                        if next_run_time:
                            job.next_run_time = next_run_time
                            self.update_job(job)
                        else:
                            # if job has no next run time, then remove it from jobstore
                            self.remove_job(job)

                # get the closest next run time from the jobstore and use it as the wake-up time
                closest_run_time = self.jobstore.get_closest_run_time()

            if closest_run_time is not None:
                wait_seconds = min(max(timedelta_seconds(closest_run_time - now), 0), self.MIN_WAIT_TIME)
                self.log.debug('Next wakeup is due at %s (in %f seconds)' % (closest_run_time, wait_seconds))
            self._event.wait(wait_seconds if wait_seconds is not None else self.MIN_WAIT_TIME)
            self._event.clear()

    def wake_up(self):
        self._event.set()

    @property
    def running(self):
        return not self._stopped

    def start_process_buffer_job(self, job_key):
        """
            Start filter data serialization thread
        """
        self.log.debug('start process buffer job... job key=[%s]' % job_key)
        thd = threading.Thread(target=self.process_buffer_job, args=(job_key, ))
        thd.daemon = True
        thd.start()

    def process_buffer_job(self, job_key):
        while True:
            job, buffer_time = self._internal_buffer_queues[job_key].get()
            with self.jobqueue_lock:
                if not self.jobqueue.is_full(job_key):
                    self.jobqueue.enqueue(job_key, job)
                    continue

            if (datetime.now() - buffer_time).total_seconds() < self._job_maximum_buffer_time:
                self.log.debug("requeue into buffer")
                self._internal_buffer_queues[job_key].put((job, buffer_time))
                time.sleep(1.0)
            else:
                self.log.warning("job exceeded maximum buffer time, discarding...")

    def subscribe_mq(self):
        while self.running:
            try:
                # block until a message arrives on one of the control channels
                key, serialized_job = self.jobqueue.dequeue_any(['__elric_submit_channel__',
                                                                 '__elric_remove_channel__',
                                                                 '__elric_finish_channel__'])
                job = Job.deserialize(serialized_job)
                self.func_map[key](job)
            except TypeError as e:
                self.log.error(e)
                time.sleep(60)
                continue
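
To tie the methods above together, a minimal start-up sketch. func_map and start_subscribe_thread() are referenced but not defined in this snippet, so the sketch assumes a subclass wires func_map to the three control channels, and that start_subscribe_thread() (presumably running subscribe_mq in a thread) comes from BaseMaster or the subclass:

# hypothetical subclass wiring; RQMasterBase itself never defines func_map
class RQMaster(RQMasterBase):
    def __init__(self, timezone=None):
        RQMasterBase.__init__(self, timezone)
        self.func_map = {
            '__elric_submit_channel__': self.submit_job,
            '__elric_remove_channel__': self.remove_job,
            '__elric_finish_channel__': self.finish_job,
        }

master = RQMaster()
master.start()  # blocks: repeatedly pushes due jobs from the jobstore into Redis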
Example #4
class RQWorker(BaseWorker):
    def __init__(self, name, listen_keys=None, worker_num=2, timezone=None, logger_name='elric.worker'):
        BaseWorker.__init__(self, name, logger_name)
        self.jobqueue = RedisJobQueue(self, **settings.JOB_QUEUE_CONFIG)
        self.listen_keys = []
        if listen_keys:
            self.listen_keys = ['%s:%s' % (self.name, listen_key) for listen_key in listen_keys]
        self.timezone = timezone or get_localzone()
        self.internal_job_queue = Queue(maxsize=worker_num)
        self.executor = ProcessPoolExecutor(worker_num, self)
        self._stopped = True

    def start(self):
        if self.running:
            raise AlreadyRunningException

        self._stopped = False
        self.log.debug('elric worker running..')
        while self.running:
            try:
                # grab job from job queue only if internal_job_queue has space
                self.internal_job_queue.put("#", True)
                job_key, serialized_job = self.jobqueue.dequeue_any(self.listen_keys)
                job = Job.deserialize(serialized_job)
                self.log.debug('get job id=[%s] func=[%s] from key %s' % (job.id, job.func, job.job_key))
                self.executor.execute_job(job)
            except TypeError as e:
                self.log.error(e)
                time.sleep(60)
                continue

    def submit_job(self, func, job_key, args=None, kwargs=None, trigger=None, job_id=None,
                   replace_exist=False, need_filter=False, **trigger_args):
        """
            submit job to master through redis queue
            :type func: str or callable obj or unicode
            :type job_key: str or unicode
            :type args: tuple or list
            :type kwargs: dict
            :type trigger: str or unicode
            :type job_id: str or unicode
            :type replace_exist: bool
            :type trigger_args: dict
        """
        # use the worker's timezone if the trigger doesn't provide one
        trigger_args.setdefault('timezone', self.timezone)
        job_in_dict = {
            'id': "%s:%s" % (self.name, job_id) if job_id else None,
            'func': func,
            'args': args,
            'trigger': create_trigger(trigger, trigger_args) if trigger else None,
            'kwargs': kwargs,
            'job_key': '%s:%s' % (self.name, job_key),
            'need_filter': need_filter,
            'replace_exist': replace_exist
        }
        job = Job(**job_in_dict)
        job.check()
        self.jobqueue.enqueue('__elric_submit_channel__', job.serialize())

    def remove_job(self, job_id):
        """
            send remove job request to master through redis queue
            :type job_id: str
        """
        job_in_dict = {
            'id': "%s:%s" % (self.name, job_id)
        }
        job = Job(**job_in_dict)
        self.jobqueue.enqueue('__elric_remove_channel__', job.serialize())

    def finish_job(self, job_id, is_success, details, job_key, need_filter):
        job_in_dict = {
            'id': job_id,
            'job_key': job_key,
            'is_success': is_success,
            'details': details,
            'need_filter': need_filter
        }
        job = Job(**job_in_dict)
        self.jobqueue.enqueue('__elric_finish_channel__', job.serialize())

    @property
    def running(self):
        return not self._stopped

    def stop(self):
        self.log.debug('Worker is quitting')
        self._stopped = True
        self.executor.shutdown()
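
Finally, an end-to-end sketch of the worker API above. The trigger name 'interval' and its seconds argument are assumptions (the code only shows that create_trigger(trigger, trigger_args) is called); the rest follows the signatures shown:

# hypothetical end-to-end usage of RQWorker
def say_hello(name):
    print('hello, %s' % name)

worker = RQWorker('demo', listen_keys=['greet'])
# enqueued on __elric_submit_channel__; the master schedules it via the trigger
worker.submit_job(say_hello, 'greet', args=('elric',),
                  trigger='interval', seconds=30, job_id='hello')
worker.start()  # blocks, pulling jobs from 'demo:greet' and executing them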