Example #1
    def run(self):
        """ 阻塞方式找到任务,并自动调用"""
        # 如果上次有任务在运行还没结束,重新执行
        jobs = ztq_core.get_job_state(CONFIG['server']['alias'])
        if self.name in jobs:
            self.start_job(jobs[self.name])

        # Queue batch-processing mode
        # batch_size: batch threshold; once this many tasks have run, batch_func is called once
        # batch_func:
        #    1. called automatically after a full batch of batch_size tasks has been executed
        #    2. called automatically when fewer than batch_size tasks have run and no further task is available
        batch_config = CONFIG.get("batch_queue", {}).get(self.queue_name, {})
        batch_size = batch_config.get('batch_size', None) or -1
        batch_func = batch_config.get('batch_func', None) or (lambda *args, **kw: -1)

        run_job_index = 0
        queue_timeout = QUEUE_TIMEOUT
        # Main loop: keep pulling and executing tasks
        while not self._stop:
            try:
                task = ztq_core.pop_task(
                        self.queue_name,
                        timeout=queue_timeout,
                        from_right=self.from_right
                        )
            except ztq_core.ConnectionError as e:
                logger.error('ERROR: redis connection error: %s' % str(e))
                time.sleep(3)
                continue
            except ztq_core.ResponseError as e:
                logger.error('ERROR: redis response error: %s' % str(e))
                time.sleep(3)
                continue
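
The batch settings read at the top of the loop come from the worker's CONFIG mapping. A minimal sketch of what such an entry could look like, assuming a hypothetical queue name 'index', alias 'worker-1', and commit_batch function (only the keys 'server', 'alias', 'batch_queue', 'batch_size', and 'batch_func' are taken from the code; whether batch_func is stored as a callable or resolved from a name is not shown above, the sketch simply stores a callable):

# Illustrative sketch only: the queue name 'index', the alias 'worker-1'
# and commit_batch are assumptions; the key names come from the code above.
def commit_batch(*args, **kw):
    # Flush whatever the preceding batch of tasks accumulated,
    # e.g. commit an index or a database transaction.
    print('batch committed')

CONFIG = {
    'server': {'alias': 'worker-1'},       # read by ztq_core.get_job_state() above
    'batch_queue': {
        'index': {                         # per-queue settings, keyed by queue_name
            'batch_size': 20,              # call batch_func after every 20 tasks
            'batch_func': commit_batch,    # also called when the queue drains
        },
    },
}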
Example #2
    def run(self):
        """ 阻塞方式找到任务,并自动调用"""
        # 如果上次有任务在运行还没结束,重新执行
        jobs = ztq_core.get_job_state(safe_get_host('server', 'alias'))
        if self.name in jobs:
            self.start_job(jobs[self.name])

        # Queue batch-processing mode
        # batch_size: batch threshold; once this many tasks have run, batch_func is called once
        # batch_func:
        #    1. called automatically after a full batch of batch_size tasks has been executed
        #    2. called automatically when fewer than batch_size tasks have run and no further task is available
        batch_config = CONFIG.get("batch_queue", {}).get(self.queue_name, {})
        batch_size = batch_config.get('batch_size', None) or -1
        batch_func = batch_config.get('batch_func', None) or (lambda *args, **kw: -1)

        run_job_index = 0
        queue_timeout = QUEUE_TIMEOUT
        # Main loop: keep pulling and executing tasks
        while not self._stop:
            try:
                task = ztq_core.pop_task(
                        self.queue_name,
                        timeout=queue_timeout,
                        from_right=self.from_right
                        )
            except ztq_core.ConnectionError:
                print('ERROR: not connected to the server\n')
                task = None
                time.sleep(3)

            if task is None:
                # No more tasks pending; run batch_func once for the partial batch
                if run_job_index > 0:
                    run_job_index = 0
                    queue_timeout = QUEUE_TIMEOUT
                    batch_func()
                continue

            self.start_job(task)

            if batch_size > 0:
                if run_job_index >= batch_size - 1:
                    # A full batch has been completed; run batch_func
                    run_job_index = 0
                    queue_timeout = QUEUE_TIMEOUT
                    batch_func()
                else:
                    run_job_index += 1
                    queue_timeout = -1

            if self.sleep_time:
                time.sleep(self.sleep_time)
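
The batching logic itself does not depend on Redis. The sketch below reproduces the two paths on which batch_func fires, after every batch_size completed tasks and once more when the queue runs dry with a partial batch pending; fetch_task and handle_task are hypothetical stand-ins for ztq_core.pop_task and self.start_job, and the switching of queue_timeout between QUEUE_TIMEOUT and -1 is left out:

def drain(fetch_task, handle_task, batch_func, batch_size):
    # Standalone sketch of the batch behaviour above (no Redis, no worker thread).
    run_job_index = 0
    while True:
        task = fetch_task()                      # stand-in for ztq_core.pop_task
        if task is None:
            if run_job_index > 0:                # partial batch pending: flush it
                run_job_index = 0
                batch_func()
            break                                # the real worker keeps polling instead
        handle_task(task)                        # stand-in for self.start_job
        if batch_size > 0:
            if run_job_index >= batch_size - 1:  # full batch completed: flush it
                run_job_index = 0
                batch_func()
            else:
                run_job_index += 1

# With batch_size=2 and five tasks, batch_func fires after the second and
# fourth task, and once more for the leftover fifth task when the queue is empty.
def report():
    print('batch_func fired')

tasks = iter([1, 2, 3, 4, 5])
drain(lambda: next(tasks, None), lambda task: None, report, batch_size=2)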