Code Example #1
File: point_tasks.py Project: summer-apple/zazaza
 def task(self):
     """
     !!!! This function is unused; do not run it !!!!
     Parameters:
         year (int|str) – 4-digit year
         month (int|str) – month (1-12)
         day (int|str) – day of the month (1-31)
         week (int|str) – ISO week (1-53)
         day_of_week (int|str) – number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
         hour (int|str) – hour (0-23)
         minute (int|str) – minute (0-59)
         second (int|str) – second (0-59)
         start_date (datetime|str) – earliest possible date/time to trigger on (inclusive)
         end_date (datetime|str) – latest possible date/time to trigger on (inclusive)
         timezone (datetime.tzinfo|str) – time zone to use for the date/time calculations (defaults to scheduler timezone)
     :return:
     """
     scheduler = BlockingScheduler()
     #scheduler.add_job(self.task_func, trigger='cron', day='*/1', hour='1')
     scheduler.add_job(self.task_func, trigger='cron', minute='*/5')
     #scheduler.add_job(func, 'date', run_date='2016-10-25 13:51:30')
     try:
         scheduler.start()
     except Exception as e:
         # TODO: handle scheduler errors properly
         Global.logger.error('Scheduled task error: %s' % e)
         scheduler.shutdown()
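The docstring above reproduces the cron-trigger field reference from the APScheduler documentation. A minimal standalone sketch exercising a few of those fields (the job and the date window are illustrative placeholders, not part of the original project):

from apscheduler.schedulers.blocking import BlockingScheduler

def job():
    print('cron fired')

scheduler = BlockingScheduler()
# Every 5 minutes -- the pattern used in task() above.
scheduler.add_job(job, 'cron', minute='*/5')
# Every day at 01:00, but only within a bounded date window.
scheduler.add_job(job, 'cron', hour=1,
                  start_date='2016-10-01', end_date='2016-12-31')
scheduler.start()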
Code Example #2
File: __init__.py Project: fact-project/pycustos
class ScheduledCheck(Check, metaclass=ABCMeta):
    '''
    An abstract base class for a check that runs based on
    the Scheduler from apscheduler

    Child classes need to implement the check method
    '''
    def __init__(self,
                 queue=None,
                 notify_on_exception=True,
                 name=None,
                 **kwargs):
        '''
        Create a new instance of this Check
        The kwargs are handed over to apscheduler.schedulers.blocking.BlockingScheduler.add_job
        and decide when the checks are run. For example `trigger='cron', hour=8` will
        run this check every day at 8 o'clock
        '''
        super().__init__(queue=queue,
                         notify_on_exception=notify_on_exception,
                         name=name)

        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5 * 60})
        self.scheduler.add_job(self.wrapped_check, **kwargs)

    def run(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
        self.log.info('Check %s stopped', self.__class__.__name__)
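The key idea above is that the constructor forwards its **kwargs straight to add_job, so the caller decides the schedule. A minimal sketch of the same pattern without the pycustos base class (MiniCheck is a hypothetical stand-in, not part of the project):

from apscheduler.schedulers.blocking import BlockingScheduler

class MiniCheck:
    """Forwards **kwargs straight to add_job, as ScheduledCheck does above."""
    def __init__(self, **kwargs):
        self.scheduler = BlockingScheduler(
            job_defaults={'misfire_grace_time': 5 * 60})
        self.scheduler.add_job(self.check, **kwargs)

    def check(self):
        print('check ran')

    def run(self):
        self.scheduler.start()

# trigger='cron', hour=8 -> run the check every day at 8 o'clock.
MiniCheck(trigger='cron', hour=8).run()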
Code Example #3
 def _run(self):
     """ run your fun"""
     scheduler = BlockingScheduler()
     scheduler.add_job(PrintText().start, 'interval', seconds=3)
     # scheduler.add_job(PrintText().start, 'cron',  hour=start_hour, minute=start_minute,second='0')
     try:
         scheduler.start()
     except (KeyboardInterrupt, SystemExit):
         scheduler.shutdown()
         logger.error('Exit The Job!')
Code Example #4
File: container.py Project: eubchain/eubh-tool
 def log(self, container_id, is_watch=False, default_loop_time=20):
     if is_watch:
         scheduler = BlockingScheduler()
         # Pass the callable itself; calling it here would schedule its
         # return value (None) instead of the function.
         scheduler.add_job(
             self.output_log_single,
             'interval',
             seconds=default_loop_time,
             kwargs={'container_id': container_id})
         try:
             scheduler.start()
         except (KeyboardInterrupt, SystemExit):
             scheduler.shutdown()
     else:
         self.output_log_single(container_id=container_id)
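The original code called output_log_single and scheduled its return value, which raises a TypeError in APScheduler. A minimal standalone sketch of the difference (the function and the 'abc123' id are invented placeholders):

from apscheduler.schedulers.blocking import BlockingScheduler

def output_log(container_id):
    print(f'logs for {container_id}')

scheduler = BlockingScheduler()
# Wrong: output_log('abc123') runs once here, and its return value (None)
# is handed to the scheduler, which rejects non-callables.
# scheduler.add_job(output_log('abc123'), 'interval', seconds=20)
# Right: pass the callable and supply its arguments separately.
scheduler.add_job(output_log, 'interval', seconds=20,
                  kwargs={'container_id': 'abc123'})
scheduler.start()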
Code Example #5
def schedule(ctx, hour):
    email = ctx.parent.params['email']
    username = ctx.parent.params['username']
    email_to = ctx.parent.params['email_to']
    password = ctx.obj.get('password', None)
    gsheet = ctx.parent.params['gsheet']
    doc_key = ctx.parent.params['doc_key']

    schedule = BlockingScheduler()
    schedule.add_job(run, kwargs={"email": email, "gsheet": gsheet, "doc_key": doc_key,
                                  "username": username, "email_to": email_to, "password": password}, trigger='cron', hour=hour)
    try:
        schedule.start()
    except (KeyboardInterrupt, SystemExit):
        schedule.shutdown()
Code Example #6
def main():
    log = logging.getLogger("main")
    cameras, light, project = parse_configs()

    storage_dir = Path(project["storage_dir"])
    if not storage_dir.is_dir():
        storage_dir.mkdir(parents=True)

    # We will use queue to connect scheduler thread with I2C communication thread
    connection_queue = Queue()

    # Create separate thread for I2C communication
    log.info("Starting I2C thread")
    i2c_driver = I2CDriver(0x04)
    i2c_thread = Thread(target=i2c_thread_function,
                        args=(i2c_driver, connection_queue))
    i2c_thread.start()

    log.info("Running pipeline for the first time")
    pipeline_executor = PipelineExecutor(storage_dir, cameras, light,
                                         connection_queue, project["pipeline"])

    # For the first time, execute pipeline manually, then schedule it
    pipeline_executor.execute()

    # Create a scheduler and add job to it
    log.info("Scheduling the pipeline")
    scheduler = BlockingScheduler()
    scheduler.add_job(
        func=(lambda executor=pipeline_executor: executor.execute()),
        trigger="interval",
        seconds=project['run_interval_seconds'])
    atexit.register(lambda: scheduler.shutdown())
    scheduler.start()  # Blocks thread
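The `lambda executor=pipeline_executor: executor.execute()` idiom binds the executor at lambda definition time; passing the bound method directly would do the same. A minimal sketch of the same shutdown-on-exit pattern with a trivial placeholder job (run_pipeline is invented):

import atexit
from apscheduler.schedulers.blocking import BlockingScheduler

def run_pipeline():
    print('pipeline tick')

scheduler = BlockingScheduler()
scheduler.add_job(func=run_pipeline, trigger='interval', seconds=60)
# Registering the bound method directly avoids the wrapper lambda.
atexit.register(scheduler.shutdown)
scheduler.start()  # blocks the main thread until shutdown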
Code Example #7
def engage():
    banner.print_banner()
    oneness_scheduler = BlockingScheduler({
        'apscheduler.executors.processpool': {
            'class': 'apscheduler.executors.pool:ProcessPoolExecutor',
            'max_workers': '20'
        },
        # Keys need the 'apscheduler.' prefix to survive configuration;
        # a bare 'job_defaults' key would be silently dropped.
        'apscheduler.job_defaults': {
            'coalesce': False,
            'executor': 'processpool'
        }
    })

    # The 'processpool' executor is already registered via the config above;
    # calling add_executor('processpool') again would raise a ValueError.
    t_retweet = oneness_scheduler.add_job(twitter.retweet.start,
                                          'interval',
                                          minutes=60,
                                          id='twitter_retweet_bot')
    t_follow = oneness_scheduler.add_job(twitter.follow.start,
                                         'interval',
                                         minutes=10,
                                         id='twitter_follow_bot')

    # quoted_im_generator = oneness_scheduler.add_job(
    #     image_generator.quoted_image.start,
    #     'interval', minutes=300,
    #     id='quoted_im_generator',
    #     kwargs={'overlay_flag': True}
    # )

    im_with_quote_generator = oneness_scheduler.add_job(
        image_generator.quoted_image.start,
        'interval',
        minutes=120,
        id='image_with_quote_generator',
        kwargs={'overlay_flag': False})

    try:
        # oneness_scheduler.start()
        for job in oneness_scheduler.get_jobs():
            job.modify(next_run_time=datetime.now())
        oneness_scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        oneness_scheduler.shutdown()
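The loop before start() uses job.modify(next_run_time=...) to force an immediate first run, since interval jobs otherwise wait one full interval. A minimal standalone sketch of that pattern (trivial placeholder job):

from datetime import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def job():
    print('tick')

scheduler = BlockingScheduler()
scheduler.add_job(job, 'interval', minutes=60, id='hourly_job')
# Moving next_run_time up forces an immediate first execution.
for pending in scheduler.get_jobs():
    pending.modify(next_run_time=datetime.now())
scheduler.start()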
Code Example #8
    def watch(self):
        docker_log_watch = BackgroundScheduler()
        docker_log_watch.add_job(self.watch_upload_docker_log,
                                 'interval',
                                 seconds=10)
        try:
            docker_log_watch.start()
        except (KeyboardInterrupt, SystemExit):
            docker_log_watch.shutdown()

        scheduler = BlockingScheduler()
        scheduler.add_job(self.upload_machine_information,
                          'interval',
                          seconds=self.time,
                          max_instances=2)
        try:
            scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()
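Note that the try/except around BackgroundScheduler.start() above rarely fires: start() returns immediately, and only the BlockingScheduler at the end keeps the thread alive. A minimal sketch of the contrast (both jobs are invented placeholders):

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

def watch():
    print('background tick')

def upload():
    print('blocking tick')

background = BackgroundScheduler()
background.add_job(watch, 'interval', seconds=10)
background.start()  # returns immediately; jobs run on a worker thread

blocking = BlockingScheduler()
blocking.add_job(upload, 'interval', seconds=30)
try:
    blocking.start()  # blocks this thread until shutdown
except (KeyboardInterrupt, SystemExit):
    blocking.shutdown()
    background.shutdown()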
Code Example #9
def main():

    logging.basicConfig(filename='test.log', level=logging.ERROR, \
                    format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')

    #Kill gphoto2
    killgphoto()

    #Declaring/Calculating variables needed
    scheduler = BlockingScheduler(timezone='US/Central')
    hourbound = tl.getConfig('START_HOUR') + '-' + tl.getConfig('FINISH_HOUR')

    #Ensure photo_local_root exists
    if not os.path.exists(tl.getConfig('photo_local_root')):
        os.makedirs(tl.getConfig('photo_local_root'))

    #GP2 Log and Camera Setup
    gp.check_result(gp.use_python_logging())
    context = gp.gp_context_new()
    camera = gp.check_result(gp.gp_camera_new())
    gp.check_result(gp.gp_camera_init(camera, context))

    #Adding job to scheduler
    scheduler.add_job(captureandsave, 'cron', args=[camera,context], \
                      day_of_week='mon-sun', second='*/'+str(tl.getConfig('INTERVAL')), \
                      hour=hourbound)
    print('Press Ctrl+{0} to exit'.format( \
          'Break' if os.name == 'nt' else 'C'))

    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
        pass

    #Close Camera
    gp.check_result(gp.gp_camera_exit(camera, context))

    return 0
Code Example #10
File: tasks.py Project: summer-apple/spark
    def daily_task(self):
        def func():
            day = datetime.datetime.now().strftime('%Y-%m-%d')
            # Demand deposits
            self.da.init_balance(day, 1)
            self.logger.info('%s demand-deposit daily balances computed', day)
            # Time deposits
            self.da.init_balance(day, 2)
            self.logger.info('%s time-deposit daily balances computed', day)
            # Wealth-management products
            self.da.init_balance(day, 3)
            self.logger.info('%s wealth-management daily balances computed', day)

        scheduler = BlockingScheduler()
        scheduler.add_job(func, 'cron', day='*', hour='1')  # run every day at 1 AM

        try:
            scheduler.start()
        except Exception as e:
            # TODO: handle scheduler errors properly
            self.logger.error('Daily AUM calculation failed: %s', e)
            scheduler.shutdown()
Code Example #11
File: task_manager.py Project: sword03/schedule
class TaskMgr:
    def __init__(self):
        self.scheduler = BlockingScheduler()
        self.update_time = Moment(hour=0, minute=0, second=0)
        self.list_task = []

    def add_task(self, task):
        self.list_task.append(task)

    def set_moment(self, hour):
        self.update_time.hour = hour

    def _at_the_moment(self):
        now = datetime.now()
        option_moment = datetime(year=now.year, month=now.month, day=now.day, hour=0, minute=0, second=0)
        option_moment = option_moment.replace(hour=self.update_time.hour)
        return self._time_equal(t1=now.timestamp(), t2=option_moment.timestamp(), delta=30)

    def _time_equal(self, t1, t2, delta):
        if t1 > t2 - delta and t1 < t2 + delta:
            return True
        return False

    def _cron(self):
        for task in self.list_task:
            if self._at_the_moment() or not task.flag:
                task.run()

    def start(self, run_immediately=True):
        try:
            if run_immediately:
                for task in self.list_task:
                    task.run()
            self.scheduler.add_job(func=self._cron, trigger='cron', minute=0, second=0)
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown()
Code Example #12
class Scheduler:
    def __init__(self):
        conf = configparser.ConfigParser()
        conf.read("../agent.ini")
        ip = conf.get("redis", "ip")
        port = conf.getint("redis", "port")
        timeout = conf.getint("redis", "timeout")
        self.invoker_id = self._get_invoker_id()
        self.max_tasks = conf.getint("invoker", "max_tasks")
        self.live_seconds = conf.getint("invoker", "live_seconds")
        self.db = SchedulerDb(ip, port, timeout)
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("main")
        executors = {
            'default': {'type': 'processpool', 'max_workers': self.max_tasks + 1}
        }
        # Pass the executor config; the original built it but never used it.
        self.blockScheduler = BlockingScheduler(executors=executors)
        self.jobs = {}
        self.lock = threading.Lock()

    @staticmethod
    def _get_invoker_id():
        hostname = socket.gethostname()
        pid = os.getpid()
        return hostname + "-" + str(pid)

    def task_invoke(self, task_instance, task_param):
        if task_param.cmd.startswith('http'):
            executor = HttpExecutor(self.db, task_instance, task_param)
            executor.execute()
        else:
            pass

    def break_heart(self):
        """
        invoker每隔一段时间就心跳一下,看看是否有新任务,是否有任务需要更新
        :param bs:
        :return:
        """
        # 先看看参数是否有变化的把调度重启或者关闭
        try:
            self.lock.acquire()
            self.refresh_local_invoker()
            self.refresh_other_invokers()
            if len(self.jobs) >= self.max_tasks:
                return

            task_instances, task_params = self.db.query_waiting_run_tasks(self.invoker_id,
                                                                          self.max_tasks - len(self.jobs),
                                                                          True)
            if len(task_instances) == 0:
                return
            for i in range(len(task_instances)):
                task_instance = task_instances[i]
                task_param = task_params[i]
                if task_instance.id not in self.jobs.keys():
                    self.logger.info("分配了新任务%s", task_instance.id)
                    job = self.blockScheduler.add_job(self.task_invoke,
                                                      next_run_time=(
                                                          datetime.datetime.now() + datetime.timedelta(seconds=2)),
                                                      args=[task_instance, task_param], id=task_instance.id)
                    self.jobs[job.id] = job
                    self.db.lock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
                else:
                    self.logger.error("%s任务已经在运行", task_instance.id)
        finally:
            self.lock.release()

    def refresh_local_invoker(self):
        """
        调度的参数是否发生变化,如有需要重启调度
        :param bs:
        :return:
        """

        self.db.update_invoker_time(self.invoker_id, self.jobs.keys(), self.live_seconds)
        self.logger.info("%s心跳更新成功!", self.invoker_id)
        # 看看是否有需要停止的任务再自己这里,释放掉
        stop_tasks = self.db.query_need_stop_tasks(self.invoker_id)
        for stop_task in stop_tasks:
            if stop_task in self.jobs.keys():
                try:
                    job = self.jobs[stop_task]
                    task_instance = job.args[0]
                    task_instance.status = 'off'
                    job.pause()
                    job.remove()
                except Exception as e:
                    self.logger.error(e)
                    self.jobs.pop(stop_task)
                    try:
                        self.blockScheduler.remove_job(stop_task)
                    except Exception as e1:
                        self.logger.error(e1)

            self.logger.info("人工停止了任务%s", stop_task)
            self.db.unlock_invoker_instance(self.invoker_id, stop_task, self.live_seconds)

        # Restart any tasks whose parameters have changed
        c_jobs = copy.copy(self.jobs)
        for key in c_jobs.keys():
            if key not in self.jobs.keys():
                continue
            job = self.jobs[key]
            task_instance = job.args[0]
            old_task_param = job.args[1]
            # Re-run the task if its parameters have changed
            new_task_param = self.db.query_task_param(task_instance.task_param_id)
            if not new_task_param.has_diff(old_task_param):
                continue

            try:
                task_instance.status = 'off'
                job.pause()
                job.remove()
            except Exception as e:
                self.logger.error(e)
                self.jobs.pop(key)
                try:
                    self.blockScheduler.remove_job(key)
                except Exception as e1:
                    self.logger.error(e1)
            self.logger.info("参数变化停止了任务%s", task_instance.id)
            self.db.unlock_invoker_instance(self.invoker_id, task_instance.id, self.live_seconds)
            self.db.add_task_waiting_run(task_instance.id)

    def refresh_other_invokers(self):
        """
        遍历所有的invoker,判断invoker是否超过存活期
        :return:
        """
        invokers = self.db.query_all_invokers()
        for invoker_id in invokers.keys():
            if not self.db.invoker_is_live(invoker_id):
                task_instance_list = self.db.query_invoker_tasks(invoker_id)
                for task_instance_id in task_instance_list:
                    self.db.add_task_waiting_run(task_instance_id)

    def main(self):
        try:
            self.db.register_invoker(self.invoker_id, self.max_tasks, self.live_seconds)
            self.blockScheduler.add_listener(self._job_listener,
                                             events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)

            self.blockScheduler.add_job(self.break_heart, "interval", seconds=self.live_seconds / 2,
                                        id="break_heart")
            self.logger.info("开始启动调度...")
            self.blockScheduler.start()
            self.logger.info("启动调度成功!")
        except KeyboardInterrupt as e:
            self.logger.info(e)
            self.blockScheduler.shutdown()

    def _job_listener(self, ev):
        """
        监听job的事件,job完成后再发起下次调用,对于异常也要处理
        :param ev:
        :return:
        """
        if ev.code == events.EVENT_JOB_ERROR:
            self.logger.error(ev.exception)
            self.logger.error(ev.traceback)
        else:
            pass
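A minimal standalone sketch of the add_listener pattern used in main() above (flaky_job is an invented placeholder that always fails):

from apscheduler import events
from apscheduler.schedulers.blocking import BlockingScheduler

def flaky_job():
    raise RuntimeError('boom')

def job_listener(ev):
    # ev.code tells event types apart when listening for several kinds.
    if ev.code == events.EVENT_JOB_ERROR:
        print('job failed:', ev.exception)

scheduler = BlockingScheduler()
scheduler.add_listener(job_listener,
                       events.EVENT_JOB_ERROR | events.EVENT_JOB_MISSED)
scheduler.add_job(flaky_job, 'interval', seconds=5)
scheduler.start()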
Code Example #13
class ActionScheduler:
    def __init__(self):
        self.scheduler = BlockingScheduler(jobstores=APSCHEDULER_SETTINGS['jobstores'],
                                           executors=APSCHEDULER_SETTINGS['executors'],
                                           job_defaults=APSCHEDULER_SETTINGS['job_defaults'],
                                           timezone=TIMEZONE_PST8PDT)
        pass

    def start(self):
        self._add_event_listener()
        # self._add_example_jobs()
        self._add_jobs()
        self.scheduler.start()

    def shutdown(self):
        # self.scheduler.remove_all_jobs()  # save all jobs into sqlite, do not remove them
        self.scheduler.shutdown()

    def _add_event_listener(self):
        self.scheduler.add_listener(ActionScheduler.listener_jobs_status, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.add_listener(ActionScheduler.listener_all_jobs_finished, EVENT_ALL_JOBS_REMOVED)

    # examples
    def _add_example_jobs(self):
        import datetime
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["cron", ], trigger='cron', second='*/5',
                               misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, id="cron")
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["interval", ], trigger='interval', seconds=60,
                               misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, id="interval")
        self.scheduler.add_job(func=ActionScheduler.job_example, args=["date", ], trigger='date',
                               run_date=get_cur_time()+datetime.timedelta(seconds=12), id="date")

    # examples
    @staticmethod
    def job_example(job_type):
        print("job_example: {}".format(job_type))

    def _add_jobs(self):
        # add reap alerts immediate job TODO test
        # self.scheduler.add_job(id="reap_alerts_immediate", func=ActionScheduler.job_reap_alerts_and_start_action_tasks, args=[],
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add reap alerts interval job
        # self.scheduler.add_job(id="reap_alerts", func=ActionScheduler.job_reap_alerts_and_start_action_tasks,
        #                        args=[], trigger='interval', seconds=REAP_INTERVAL_SECONDS,
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add gather & retry failed action tasks immediate job TODO test
        # self.scheduler.add_job(id="check_tasks_immediate", func=ActionScheduler.job_gather_and_retry_failed_action_tasks, args=[],
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        # add gather & retry failed action tasks interval job
        # self.scheduler.add_job(id="check_tasks", func=ActionScheduler.job_gather_and_retry_failed_action_tasks,
        #                        args=[], trigger='interval', seconds=GATHER_FAILED_TASKS_INTERVAL_SECONDS,
        #                        misfire_grace_time=DEFAULT_MISFIRE_GRACE_TIME, replace_existing=True, )
        pass

    @staticmethod
    def listener_all_jobs_finished(event):  # this would hardly be invoked
        logger_.info('All jobs are done.')

    @staticmethod
    def listener_jobs_status(event):
        if event.exception:
            logger_.warn('Job {} crashed.'.format(event.job_id))
        else:
            logger_.info('Job {} executed.'.format(event.job_id))
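A minimal standalone sketch of the trigger/option combinations exercised by _add_example_jobs above (the 300-second grace period is an illustrative value standing in for DEFAULT_MISFIRE_GRACE_TIME):

import datetime
from apscheduler.schedulers.blocking import BlockingScheduler

def job(kind):
    print('job ran:', kind)

scheduler = BlockingScheduler()
# misfire_grace_time bounds how late a missed run may still fire;
# replace_existing allows re-registering the same id after a restart
# when jobs are persisted in a jobstore.
scheduler.add_job(job, trigger='cron', second='*/5', args=['cron'], id='cron',
                  replace_existing=True, misfire_grace_time=300)
scheduler.add_job(job, trigger='date', args=['date'], id='date',
                  run_date=datetime.datetime.now() + datetime.timedelta(seconds=12))
scheduler.start()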
Code Example #14
File: auto.py Project: caizhengxin/copy-u
class AutoCopy(object):

    config_class = Config

    default_config = ImmutableDict({
        'DEBUG': False,
        'DRIVE_LIST': [],
        'DST_PATH': 'C:\\autocopy',
        'EXTENSION': ['.txt'],
        'FILE_SIZE': "10 MB",
        'SECONDS': 2,
        'UNIT': {
            "KB": 1024,
            "MB": 1024 * 1024,
            "GB": 1024 * 1024 * 1024
        }
    })

    def __init__(self, import_name, *args, **kwargs):
        self.import_name = import_name
        self.get_logical_drives = win32file.GetLogicalDrives
        self.config = self.make_config()
        self.scheduler = BlockingScheduler()

    def init(self):
        self.drive_list = self.config.get('DRIVE_LIST')
        self.dst_path = self.config.get('DST_PATH')
        self.unit = self.config.get('UNIT')
        self.size = self.config.get('FILE_SIZE')
        self.extension = self.config.get('EXTENSION')
        self.seconds = self.config.get('SECONDS')
        self.is_dst_path()

    def make_config(self):
        return self.config_class(self.default_config)

    def is_u_disk(self, drive, drive_num=2):
        # DRIVE_REMOVABLE == 2 in the Windows drive-type constants
        return win32file.GetDriveType(drive) == drive_num

    def get_u_disk(self, drive_list=None):
        sign = self.get_logical_drives()
        drive_list = drive_list or self.drive_list

        drives = (drive_list[i] for i in range(len(drive_list))
                  if (sign & 1 << i and self.is_u_disk(drive_list[i])))

        return drives

    def is_dst_path(self, dst_path=None):
        dst_path = dst_path or self.dst_path

        if not os.path.exists(dst_path) or not os.path.isdir(dst_path):

            try:
                os.mkdir(dst_path)
            except Exception:
                os.remove(dst_path)
                os.mkdir(dst_path)

        return None

    def _get_size(self, size, unit='KB'):
        units = self.unit

        if isinstance(size, integer_types):
            return size * units.get(unit, 1024)

        try:
            size, unit = size.split(' ')
        except Exception:
            pass
        else:
            return int(size) * units.get(unit, 1024)

        return 1024

    def is_file_size(self, file, size=None):
        if not isinstance(file, basestring):
            raise TypeError("This is not a string.")

        size = size or self.size

        return os.path.getsize(file) < self._get_size(size=size)

    def _copyfile(self, path, dst_path=None, extension=None):

        for path, _, file_list in os.walk(path):
            for file in file_list:
                _, ext = os.path.splitext(file)
                file = os.path.join(path, file)
                if ext in extension and self.is_file_size(file):
                    try:
                        shutil.copy(file, dst_path)
                    except Exception:
                        self.is_dst_path()

    def copyfile(self, dst_path=None, extension=None):
        extension = extension or self.extension
        dst_path = dst_path or self.dst_path
        drives = self.get_u_disk()

        for drive in drives:
            self._copyfile(drive, dst_path, extension)

    def _run(self, *args, **kwargs):
        self.copyfile(*args, **kwargs)

    def run(self, timer=True, seconds=None, *args, **kwargs):
        self.init()

        if not timer:
            return self._run

        trigger = IntervalTrigger(seconds=seconds or self.seconds)
        self.scheduler.add_job(self._run, trigger)
        # self.scheduler.add_job()

        try:
            self.scheduler.start()
        except (KeyboardInterrupt, SystemExit):
            self.scheduler.shutdown()
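AutoCopy.run builds an explicit IntervalTrigger instead of passing the 'interval' alias string; the two are equivalent. A minimal standalone sketch (copy_job is an invented placeholder):

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.triggers.interval import IntervalTrigger

def copy_job():
    print('copying...')

scheduler = BlockingScheduler()
# Equivalent to scheduler.add_job(copy_job, 'interval', seconds=2)
scheduler.add_job(copy_job, IntervalTrigger(seconds=2))
try:
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()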
Code Example #15
File: base.py Project: timff/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix='timers')
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            LOG.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type,
                   'run_date') and datetime.now(tzutc()) > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)

    def _emit_trigger_instance(self, trigger):
        LOG.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()),
                 trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._trigger_dispatcher.dispatch(trigger, payload)

    def _register_timer_trigger_types(self):
        return container_utils.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
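The `IntervalTrigger(**{unit: value, 'timezone': time_zone})` construction in _add_job_to_scheduler builds the keyword name dynamically from the trigger parameters. A minimal standalone sketch of the three trigger constructions, with invented parameter values:

from apscheduler.triggers.cron import CronTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.interval import IntervalTrigger

# st2.IntervalTimer style: unit and delta arrive as data, so the kwarg
# name itself is built at runtime.
time_spec = {'unit': 'seconds', 'delta': 30}
interval = IntervalTrigger(**{time_spec['unit']: time_spec['delta'],
                              'timezone': None})

# st2.CronTimer style: the whole parameter dict becomes CronTrigger kwargs.
cron = CronTrigger(**{'minute': '*/5', 'timezone': None})

# st2.DateTimer style: a date string is parsed into a one-shot trigger.
date = DateTrigger('2016-10-25 13:51:30')
print(interval, cron, date, sep='\n')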
Code Example #16
File: base.py Project: dnjsakf/lotto
class JobLauncher(object):
    def __init__(self, background=False, daemon=True, **kwargs):
        logging.basicConfig(format="[%(asctime)s] %(message)s",
                            datefmt="%Y-%m-%d %H:%M:%S")
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)

        if background:
            self.sched = BackgroundScheduler(daemon=daemon)  # background
        else:
            self.sched = BlockingScheduler(daemon=daemon)  # foreground

        # TODO: Read from configuration file.
        self.sched.configure(
            jobstores={
                # "sqlite": SQLAlchemyJobStore(url='sqlite:///app/database/example.db'),
                # "default": MemoryJobStore()
                "default":
                SQLAlchemyJobStore(url='sqlite:///app/database/example.db')
            },
            executors={
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 3
            },
            timezone=get_localzone()  # Asia/Seoul
        )

        self.retried = 0
        self.logger = logging.getLogger('apscheduler')

        super(JobLauncher, self).__init__()

    def start(self):
        try:
            if self.sched.state != STATE_RUNNING:
                self.printJobs(jobstore='default')
                started = self.sched.start()

        except ConflictingIdError as e:
            traceback.print_exc()

        except KeyboardInterrupt as e:
            traceback.print_exc()

        finally:
            pass
            # Remove all remained store.
            # self.sched.remove_all_jobs()
            # for job in self.getJobs():
            #   if job.pending:
            #     job.pause()

            self.logger.info('Finished')
            self.logger.info(self.getJobs())
            self.printJobs()

    def stop(self, wait=False):
        if self.sched.state == STATE_RUNNING:
            self.sched.shutdown(wait=wait)

    def resume(self):
        if self.sched.state == STATE_RUNNING:
            self.sched.resume()

    def pause(self):
        if self.sched.state == STATE_RUNNING:
            self.sched.pause()

    def addListener(self, listener, types):
        self.sched.add_listener(listener, types)

    def addJob(self, job, **kwargs):
        execute, trigger, options = job.build(**kwargs)

        added_job = self.sched.add_job(execute, trigger, **options)

        self.printJobs()

        return added_job

    def getJob(self, job_id):
        return self.sched.get_job(job_id)

    def getJobs(self, jobstore=None):
        return self.sched.get_jobs(jobstore=jobstore)

    def removeJob(self, job_id, jobstore=None):
        return self.sched.remove_job(job_id, jobstore=jobstore)

    def removeAllJob(self, jobstore=None):
        return self.sched.remove_all_jobs(jobstore=jobstore)

    def printJobs(self, jobstore=None, out=None):
        return self.sched.print_jobs(jobstore=jobstore, out=out)

    def getJobState(self, job_id=None, jobstore=None):
        state = list()

        if job_id is not None:
            job = self.sched.get_job(job_id, jobstore=jobstore)

            if job is not None:
                temp = dict()
                temp[job.id] = {
                    "next_run_time": job.next_run_time,
                    "state": job.pending,
                }
                state.append(temp)

        else:
            for job in self.sched.get_jobs(jobstore=jobstore):
                temp = dict()
                temp[job.id] = {
                    "next_run_time": job.next_run_time,
                    "state": job.pending,
                }
                state.append(temp)

        return state
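JobLauncher persists jobs in an SQLAlchemyJobStore, which is why start() can hit ConflictingIdError on restart. A minimal standalone sketch of that configuration (the sqlite path and job are illustrative values):

from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

scheduler = BackgroundScheduler(
    jobstores={'default': SQLAlchemyJobStore(url='sqlite:///jobs.db')},
    job_defaults={'coalesce': False, 'max_instances': 3})
# With a persistent store, replace_existing=True keeps restarts from
# failing with ConflictingIdError on an already-persisted job id.
scheduler.add_job(tick, 'interval', minutes=1, id='tick',
                  replace_existing=True)
scheduler.start()
scheduler.print_jobs()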
Code Example #17
File: background.py Project: deepakr6242/Mayrepo
# sched.add_job(build_add, 'date', run_date='2018-03-08 18:01:50',misfire_grace_time=10000,max_instances=3)
# # # sched = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

# # print "scheduled job from store",sched.get_jobs('default')

# date='2018-03-08 18:05:58'

# @sched.scheduled_job('date', run_date=date)
# def build_scheduled():
#     # supplying the values  for kwargs

#      # server_c = jenkins.Jenkins('http://10.144.169.116:8080', username='******', password='******')
#      # print request.POST['CARD/CPS/Authorization']
#      # url=request.POST['url2']
#      # print url
#      try:
#       # build=server_c.build_job('POC/ET_Trail');
#       print "job build initiated successfully"
#       # return HttpResponse(simplejson.dumps(build), content_type='application/json')
#      except Exception as e:
#         print e
#      with open("test.txt", "a") as myfile:
#          myfile.write("2017-03-07 13:37:40' printed")

print "scheduled job from store", sched.get_jobs('default')
# print "scheduled jobs are ",sched.get_jobs(pending='True')

sched.start()
sched.wakeup()
sched.shutdown(wait=True)
Code Example #18
class TaskExecutor:
    def __init__(self, db, task_instance, task_param):
        self.task_instance = task_instance
        self.task_param = task_param
        self.db = db
        # invoke log
        self.invoke_log_map = {}
        self.jobs = {}
        logging.config.fileConfig("../logger.ini")
        self.logger = logging.getLogger("taskExecutor")
        invoke_count = int(self.task_param.get_invoke_args()['invoke_count'])
        executors = {
            'default': {
                'type': 'threadpool',
                'max_workers': invoke_count + 1
            }
        }
        self.scheduler = BlockingScheduler(executors=executors)

    def execute(self):
        self.scheduler.add_listener(
            self._job_listener,
            events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR
            | events.EVENT_JOB_ADDED | events.EVENT_JOB_MISSED)

        # periodically upload the invoke_log_map to the server
        self.scheduler.add_job(self._invoke_break_heart, "interval", seconds=2)
        try:
            self.scheduler.start()
        except Exception as e:
            print(e)
            self.scheduler.shutdown(wait=True)

    def _job_listener(self, ev):
        """
        监听job的事件,job完成后再发起下次调用,对于异常也要处理
        :param ev:
        :return:
        """
        if self.task_instance.status == 'off':
            return
        if ev.code == events.EVENT_JOB_ADDED:
            self.jobs[ev.job_id] = self.scheduler.get_job(ev.job_id)
        elif ev.code == events.EVENT_JOB_EXECUTED or ev.code == events.EVENT_JOB_ERROR:
            if ev.code == events.EVENT_JOB_ERROR:
                self.logger.error(ev.exception)
                self.logger.error(ev.traceback)
            job = self.jobs[ev.job_id]
            self.scheduler.add_job(
                job.func,
                next_run_time=(datetime.datetime.now() +
                               datetime.timedelta(seconds=1)),
                id=ev.job_id,
                args=job.args)
        else:
            pass

    def _invoke_break_heart(self):
        if self.task_instance.status == 'off':
            jobs = self.scheduler.get_jobs()
            for job in jobs:
                try:
                    job.pause()
                    job.remove()
                except Exception as e:
                    self.logger.error(e)
        self.db.save_task_logs(self.invoke_log_map)
Code Example #19
File: feeds.py Project: mpucci92/NewsLab
class Feeds(Thread):

    WINDOW = 1000

    def __init__(self, sources, feeds, sleep, logger):

        Thread.__init__(self)

        self.sleep = sleep
        self.logger = logger

        self.coords = deque([(source.strip(), feed.strip())
                             for source, feed in zip(sources, feeds)])

        self.entries = []
        self.last_45 = {feed: [] for _, feed in self.coords}

        socket.setdefaulttimeout(3)

    def run(self):

        job_defaults = {'coalesce': True, 'max_instances': 1}
        self.blocker = BlockingScheduler(job_defaults=job_defaults)
        self.blocker.add_job(self.parse_feed,
                             'cron',
                             second=f'*/{self.sleep}',
                             id='parse_feed')

        self.blocker.start()

    def on_close(self):

        self.blocker.shutdown()
        self.join()

    def parse_feed(self):

        self.coords.rotate()
        self.source, self.feed = self.coords[0]

        try:
            response = feedparser.parse(self.feed)
        except Exception as e:
            self.logger.warning(f"Status,{self.source},{self.feed},{e}")
            return

        status = response.get('status', None)
        if not status:
            self.logger.warning(f"Status,{self.source},{self.feed},None")
            return

        if status != 200:
            self.logger.warning(f"Status,{self.source},{self.feed},{status}")
            return

        entries = response.get('entries', None)
        if not entries:
            self.logger.warning(f"Entries,{self.source},{self.feed},None")
            return

        for entry in entries:

            entry_str = json.dumps(entry).encode()
            entry_hash = sha256(entry_str).hexdigest()

            if entry_hash in self.last_45[self.feed]:
                break

            self.last_45[self.feed].append(entry_hash)
            self.last_45[self.feed] = self.last_45[self.feed][-self.WINDOW:]

            utc_now = datetime.now(
                tz=timezone.utc).strftime("%Y-%d-%m %H:%M:%S.%f")
            entry['oscrap_acquisition_datetime'] = utc_now
            entry['oscrap_source'] = self.source

            print(self.source)
            self.entries.append(entry)

        if len(self.entries) > 0:

            with open(f"{DIR}/news_data/{str(uuid.uuid4())}.txt", "w") as file:
                file.write(json.dumps(self.entries))
            self.entries = []
Code Example #20
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = list(TIMER_TRIGGER_TYPES.keys())
        self._trigger_watcher = TriggerWatcher(
            create_handler=self._handle_create_trigger,
            update_handler=self._handle_update_trigger,
            delete_handler=self._handle_delete_trigger,
            trigger_types=self._trigger_types,
            queue_suffix=self.__class__.__name__,
            exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            util_schema.validate(instance=trigger['parameters'],
                                 schema=trigger_type['parameters_schema'],
                                 cls=util_schema.CustomValidator,
                                 use_default=True,
                                 allow_default_none=True)
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'],
                      e,
                      exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' %
                                     (self._get_trigger_type_name(trigger),
                                      trigger.get('name',
                                                  uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger,
                                          payload,
                                          trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(
            list(TIMER_TRIGGER_TYPES.values()))

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)' %
                  (trigger.type))
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = TriggerAPI.from_model(trigger).to_dict()
        return sanitized
Code Example #21
# ch19/example1.py

from datetime import datetime

# BlockingScheduler lives in the blocking module, not background
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    print(f'Tick! The time is: {datetime.now()}')

if __name__ == '__main__':
    scheduler = BlockingScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)

    try:
        scheduler.start()
        print('Printing in the main thread.')
    except KeyboardInterrupt:
        pass

    scheduler.shutdown()
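Because BlockingScheduler.start() blocks, the print above is reached only after the scheduler stops. A minimal sketch of the same program with BackgroundScheduler, where the main thread keeps running (the sleep loop is an added keep-alive, not part of the original):

import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print(f'Tick! The time is: {datetime.now()}')

if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()  # returns immediately
    print('Printing in the main thread.')  # actually prints now
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        scheduler.shutdown()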
Code Example #22
class St2TimerSensor(Sensor):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, sensor_service=None):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._sensor_service = sensor_service
        self._log = self._sensor_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def run(self):
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        id = trigger['id']

        try:
            job_id = self._jobs[id]
        except KeyError:
            self._log.info('Job not found: %s', id)
            return

        self._scheduler.remove_job(job_id)

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type, 'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s', str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._sensor_service.dispatch(trigger, payload)
Code Example #23
File: feeds.py Project: zQuantz/NewsLab
class Feeds(Thread):

    WINDOW = 10_000

    def __init__(self, sources, feeds, sleep, logger):

        Thread.__init__(self)

        self.sleep = sleep
        self.logger = logger

        self.coords = deque([(source.strip(), feed.strip())
                             for source, feed in zip(sources, feeds)])

        self.entries = []
        self.last = {feed: [] for _, feed in self.coords}

        socket.setdefaulttimeout(3)

    def run(self):

        job_defaults = {'coalesce': True, 'max_instances': 1}
        self.blocker = BlockingScheduler(job_defaults=job_defaults)
        self.blocker.add_job(self.parse_feed,
                             'cron',
                             second=f'*/{self.sleep}',
                             id='parse_feed')

        self.blocker.start()

    def on_close(self):

        self.blocker.shutdown()
        self.join()

    def parse_feed(self):

        self.coords.rotate()
        self.source, self.feed = self.coords[0]

        try:
            response = feedparser.parse(self.feed)
        except Exception as e:
            self.logger.warning(f"Status,{self.source},{self.feed},{e}")
            return

        status = response.get('status', None)
        if not status:
            self.logger.warning(f"Status,{self.source},{self.feed},None")
            return

        if status != 200:
            self.logger.warning(f"Status,{self.source},{self.feed},{status}")
            return

        entries = response.get('entries', None)
        if not entries:
            self.logger.warning(f"Entries,{self.source},{self.feed},None")
            return

        for entry in entries:

            _id = entry['id'] if self.source == 'Google' else get_id(
                entry.copy())
            if _id in self.last[self.feed]:
                continue

            self.last[self.feed].append(_id)
            self.last[self.feed] = self.last[self.feed][-self.WINDOW:]

            entry['acquisition_datetime'] = datetime.utcnow().isoformat()[:19]
            entry['feed_source'] = self.source
            entry['_source'] = 'rss'
            entry['_id'] = _id

            print(self.source)
            self.entries.append(entry)

        if len(self.entries) > 0:

            with open(f"{DIR}/news_data/{str(uuid.uuid4())}.json",
                      "w") as file:
                file.write(json.dumps(self.entries))

            self.entries = []
Code Example #24
File: base.py Project: hejin/st2
class St2Timer(object):
    """
    A timer interface that uses APScheduler 3.0.
    """
    def __init__(self, local_timezone=None):
        self._timezone = local_timezone
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}
        self._trigger_types = TIMER_TRIGGER_TYPES.keys()
        self._trigger_watcher = TriggerWatcher(create_handler=self._handle_create_trigger,
                                               update_handler=self._handle_update_trigger,
                                               delete_handler=self._handle_delete_trigger,
                                               trigger_types=self._trigger_types,
                                               queue_suffix=self.__class__.__name__,
                                               exclusive=True)
        self._trigger_dispatcher = TriggerDispatcher(LOG)

    def start(self):
        self._register_timer_trigger_types()
        self._trigger_watcher.start()
        self._scheduler.start()

    def cleanup(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            LOG.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        del self._jobs[trigger_id]

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        utc_now = date_utils.get_datetime_utc_now()
        if hasattr(time_type, 'run_date') and utc_now > time_type.run_date:
            LOG.warning('Not scheduling expired timer: %s : %s',
                        trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)
        return time_type

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            LOG.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            LOG.error('Exception scheduling timer: %s, %s',
                      trigger['parameters'], e, exc_info=True)

    def _emit_trigger_instance(self, trigger):
        utc_now = date_utils.get_datetime_utc_now()
        # debug logging is reasonable for this one. A high resolution timer will end up
        # trashing standard logs.
        LOG.debug('Timer fired at: %s. Trigger: %s', str(utc_now), trigger)

        payload = {
            'executed_at': str(utc_now),
            'schedule': trigger['parameters'].get('time')
        }

        trace_context = TraceContext(trace_tag='%s-%s' % (self._get_trigger_type_name(trigger),
                                                          trigger.get('name', uuid.uuid4().hex)))
        self._trigger_dispatcher.dispatch(trigger, payload, trace_context=trace_context)

    def _get_trigger_type_name(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TIMER_TRIGGER_TYPES[trigger_type_ref]
        return trigger_type['name']

    def _register_timer_trigger_types(self):
        return trigger_services.add_trigger_models(TIMER_TRIGGER_TYPES.values())

    ##############################################
    # Event handler methods for the trigger events
    ##############################################

    def _handle_create_trigger(self, trigger):
        LOG.debug('Calling "add_trigger" method (trigger.type=%s)', trigger.type)
        trigger = self._sanitize_trigger(trigger=trigger)
        self.add_trigger(trigger=trigger)

    def _handle_update_trigger(self, trigger):
        LOG.debug('Calling "update_trigger" method (trigger.type=%s)', trigger.type)
        trigger = self._sanitize_trigger(trigger=trigger)
        self.update_trigger(trigger=trigger)

    def _handle_delete_trigger(self, trigger):
        LOG.debug('Calling "remove_trigger" method (trigger.type=%s)', trigger.type)
        trigger = self._sanitize_trigger(trigger=trigger)
        self.remove_trigger(trigger=trigger)

    def _sanitize_trigger(self, trigger):
        sanitized = trigger._data
        if 'id' in sanitized:
            # Friendly objectid rather than the MongoEngine representation.
            sanitized['id'] = str(sanitized['id'])
        return sanitized
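The branching in `_add_job_to_scheduler` boils down to mapping a parameters dict onto one of APScheduler's three trigger classes; a standalone sketch of that mapping (the parameter values are invented for illustration):

from dateutil.parser import parse as parse_date
from apscheduler.triggers.interval import IntervalTrigger
from apscheduler.triggers.date import DateTrigger
from apscheduler.triggers.cron import CronTrigger

# st2.IntervalTimer: {'unit': 'seconds', 'delta': 30} becomes IntervalTrigger(seconds=30)
interval_trigger = IntervalTrigger(**{'seconds': 30})

# st2.DateTimer: a date string is parsed into a one-shot DateTrigger
date_trigger = DateTrigger(parse_date('2030-01-01 09:00:00'))

# st2.CronTimer: the parameters dict is unpacked straight into cron fields
cron_trigger = CronTrigger(**{'hour': 8, 'minute': 30})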
コード例 #26
class St2TimerSensor(object):
    '''
    A timer sensor that uses APScheduler 3.0.
    '''
    def __init__(self, container_service):
        self._timezone = 'America/Los_Angeles'  # Whatever TZ local box runs in.
        self._container_service = container_service
        self._log = self._container_service.get_logger(self.__class__.__name__)
        self._scheduler = BlockingScheduler(timezone=self._timezone)
        self._jobs = {}

    def setup(self):
        pass

    def start(self):
        self._scheduler.start()

    def stop(self):
        self._scheduler.shutdown(wait=True)

    def add_trigger(self, trigger):
        self._add_job_to_scheduler(trigger)

    def update_trigger(self, trigger):
        self.remove_trigger(trigger)
        self.add_trigger(trigger)

    def remove_trigger(self, trigger):
        trigger_id = trigger['id']

        try:
            job_id = self._jobs[trigger_id]
        except KeyError:
            self._log.info('Job not found: %s', trigger_id)
            return

        self._scheduler.remove_job(job_id)
        # Drop the mapping too, otherwise a later remove/update would look up
        # a job id that no longer exists in the scheduler.
        del self._jobs[trigger_id]

    def get_trigger_types(self):
        return list(six.itervalues(TRIGGER_TYPES))

    def _get_trigger_type(self, ref):
        pass

    def _add_job_to_scheduler(self, trigger):
        trigger_type_ref = trigger['type']
        trigger_type = TRIGGER_TYPES[trigger_type_ref]
        try:
            jsonschema.validate(trigger['parameters'],
                                trigger_type['parameters_schema'])
        except jsonschema.ValidationError as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'],
                            e,
                            exc_info=True)
            raise  # Or should we just return?

        time_spec = trigger['parameters']
        time_zone = aps_utils.astimezone(trigger['parameters'].get('timezone'))

        time_type = None

        if trigger_type['name'] == 'st2.IntervalTimer':
            unit = time_spec.get('unit', None)
            value = time_spec.get('delta', None)
            time_type = IntervalTrigger(**{unit: value, 'timezone': time_zone})
        elif trigger_type['name'] == 'st2.DateTimer':
            # Raises an exception if date string isn't a valid one.
            dat = date_parser.parse(time_spec.get('date', None))
            time_type = DateTrigger(dat, timezone=time_zone)
        elif trigger_type['name'] == 'st2.CronTimer':
            cron = time_spec.copy()
            cron['timezone'] = time_zone

            time_type = CronTrigger(**cron)

        if hasattr(time_type,
                   'run_date') and datetime.now(tzutc()) > time_type.run_date:
            self._log.warning('Not scheduling expired timer: %s : %s',
                              trigger['parameters'], time_type.run_date)
        else:
            self._add_job(trigger, time_type)

    def _add_job(self, trigger, time_type, replace=True):
        try:
            job = self._scheduler.add_job(self._emit_trigger_instance,
                                          trigger=time_type,
                                          args=[trigger],
                                          replace_existing=replace)
            self._log.info('Job %s scheduled.', job.id)
            self._jobs[trigger['id']] = job.id
        except Exception as e:
            self._log.error('Exception scheduling timer: %s, %s',
                            trigger['parameters'],
                            e,
                            exc_info=True)

    def _emit_trigger_instance(self, trigger):
        self._log.info('Timer fired at: %s. Trigger: %s',
                       str(datetime.utcnow()), trigger)

        payload = {
            'executed_at': str(datetime.utcnow()),
            'schedule': trigger['parameters'].get('time')
        }
        self._container_service.dispatch(trigger, payload)
コード例 #27
ファイル: timer_util.py プロジェクト: guoyy2017/py_demo
class TimerWork:

    def __init__(self):
        self.jobs = []
        self.sched = BlockingScheduler()
        self.sched.add_listener(self.listener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)

    def listener(self, event):
        if event.exception:
            print('JOB :<')
        else:
            print('JOB :>')

    def exit(self):
        print('shutdown now')
        self.sched.shutdown(wait=True)
        print('shutdown ok')

    def pause(self):
        self.sched.pause()

    def resume(self):
        self.sched.resume()

    def add_job(self, func, gap_time=0, first_run_time=None, args=None, kwargs=None):
        '''
        Add a scheduled job.
        :param func: callable to execute
        :param gap_time: interval in seconds; ignored if <= 0
        :param first_run_time: datetime.datetime (or int offset in seconds) for the
                               first run; the scheduler default applies if unset
        :param args: positional arguments for func
        :param kwargs: keyword arguments for func
        :return:
        '''
        self.jobs.append(func)
        if isinstance(first_run_time, int):
            first_run_time = datetime.datetime.now() + datetime.timedelta(seconds=first_run_time)
        if gap_time > 0:
            if first_run_time:
                self.sched.add_job(func, 'interval', seconds=gap_time,
                                   next_run_time=first_run_time, args=args, kwargs=kwargs)
            else:
                self.sched.add_job(func, 'interval', seconds=gap_time,
                                   args=args, kwargs=kwargs)
        else:
            if first_run_time:
                self.sched.add_job(func, next_run_time=first_run_time, args=args, kwargs=kwargs)
            else:
                self.sched.add_job(func, args=args, kwargs=kwargs)

    def run(self):
        if len(self.jobs) > 0:
            self.sched.start()
        else:
            print('no jobs added, nothing to run')
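A minimal usage sketch for the wrapper above (the `heartbeat` job and the intervals are invented for illustration):

import datetime

def heartbeat():
    print('tick', datetime.datetime.now())

worker = TimerWork()
# Run every 10 seconds; the integer first_run_time is converted to
# "now + 5 seconds" inside add_job.
worker.add_job(heartbeat, gap_time=10, first_run_time=5)
worker.run()  # blocks until shutdown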
コード例 #28
class Monitor:

    def __init__ (self, bay = 2, temperature_ctrl = None, wait_email = 20, wait_T_readout = 30):
        self._bay = bay
        self._name = 'bay'+str(bay)
        self._notifications = True
        self._wait_email = wait_email
        self._wait_T_readout = wait_T_readout
        self._offset = 100
        self._pwd = None

        self._temperature_ctrl = temperature_ctrl

        if temperature_ctrl:
            self._Tctrl = temperature_ctrl
        else:
            self._Tctrl = None
            print ("No temperature controller!")

        self._max_T = 10

        self._scheduler = BlockingScheduler()
        self._scheduler.configure(timezone='UTC')
        self._scheduler.add_job(self._check_email, 'interval', seconds=self._wait_email)
        self._scheduler.add_job(self._get_temperature, 'interval', seconds=self._wait_T_readout)

    def login (self):
        try:
            print ("Enter password...")
            self._pwd = getpass.getpass()
            self._email = QPLemail.QPLmail(bay=self._bay, password=self._pwd)
        except Exception:
            print ("Login failed!")

    def set_max_temperature (self, T=10):
        self._max_T = T

    def set_channel (self, channel):
        self._channel = channel

    def _check_email (self):
        msg_dict = self._email.fetch_unread()

        for msg in msg_dict:
            body = msg['body'][0].as_string()
            #print (msg)
            sender = msg['mail_from'][0]
            sender_addr = msg['mail_from'][1]
            #print (sender)
            #print (sender_addr)

            if 'notifications-off' in body:
                self._deactivate(sender_addr)
            elif 'notifications-on' in body:
                self._activate(sender_addr)
            elif 'get-temperature' in body:
                self._get_temperature()  # refreshes self._curr_T
                # here I need to extract the sender email address, not the name
                self._email.send (to=[sender_addr],
                            subject='Temperature readout',
                            message='Current temperature: '+str(self._curr_T)+'K')
            elif 'send-report' in body:
                self._send_report()
            else:
                print ("None")

    def _send_alarm_email (self):
        email_to = ['*****@*****.**', '*****@*****.**'] 
        #email_to = ['*****@*****.**'] 
        self._email.send (to=email_to, subject='Help!', 
                                message='Current temperature: '+str(self._curr_T)+'K')
        print ("ALARM: temperature = "+str(self._curr_T)+ "K. Email sent to: ")
        print (email_to)

    def _activate(self, sender):
        self._notifications = True
        print ("Notifications activated, as requested by: "+sender)
        self._email.send (to=['*****@*****.**', sender], subject='Settings change', 
                            message='Notifications activated, as requested by '+sender)

    def _deactivate(self, sender):
        self._notifications = False
        print ("Notifications de-activated, as requested by: "+sender)
        self._email.send (to=['*****@*****.**', sender], subject='Settings change', 
                            message='Notifications de-activated, as requested by '+sender)

    def _get_temperature (self, overrule_notifications=False):
        self._curr_T = self._temperature_ctrl.get_kelvin(channel = self._channel)
        #print ("Read temperature: ", self._curr_T)

        if (self._curr_T>self._max_T):
            if (self._notifications):
                self._send_alarm_email()
        return self._curr_T

    def _send_report (self):
        pass

    def start (self):

        print('Press Ctrl+C to exit')

        try:
            # BlockingScheduler.start() runs the scheduler in this thread and
            # only returns once it shuts down, so the loop below is effectively
            # unreachable here; it would be needed with a BackgroundScheduler.
            self._scheduler.start()

            while True:
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            # Not strictly necessary if daemonic mode is enabled but should be done if possible
            self._scheduler.shutdown()
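A hedged sketch of wiring the monitor up; the stub controller class below is an assumption, since the real object only needs the `get_kelvin(channel=...)` method used in `_get_temperature`:

class FakeTemperatureController:
    # Stand-in for the real temperature controller.
    def get_kelvin(self, channel):
        return 4.2

monitor = Monitor(bay=2, temperature_ctrl=FakeTemperatureController(),
                  wait_email=20, wait_T_readout=30)
monitor.set_channel('A')
monitor.set_max_temperature(T=10)
monitor.login()  # prompts for the mailbox password
monitor.start()  # blocks until Ctrl+C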
コード例 #29
ファイル: core.py プロジェクト: Esiravegna/domus
get_coverage = openweather.run

if __name__ == '__main__':
    cba = timezone('America/Argentina/Cordoba')

    log.info("Starting Domus core...")

    jobstores = {
        'mysql': SQLAlchemyJobStore(url=JOB_STORAGE)
    }
    executors = {
        'default': ThreadPoolExecutor(20),
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 5
    }
    log.info("Starting core...")
    log.debug("Connecting to job store...")
    scheduler = BlockingScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=cba)
    log.debug("Creating Jobs...")
    scheduler.add_job(cache_data, 'interval', minutes=20, id='data_from_wunderground')
    scheduler.add_job(get_coverage, 'interval', minutes=5, id='data_from_openwrt')
    scheduler.add_job(do_forecast, trigger='cron', minute='30', hour='8,13', id='twitting forecast')
    try:
        scheduler.start()
    except (KeyboardInterrupt, SystemExit):
        print("quitting")
        scheduler.shutdown(wait=False)
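One thing worth noting about the job-store configuration above: add_job targets the 'default' store alias unless told otherwise, so with the store registered under the alias 'mysql' the jobs would need jobstore='mysql' to actually be persisted. A minimal sketch of the persistent pattern, assuming a local SQLite URL; replace_existing=True lets the same job id be re-registered across restarts:

from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

def cache_data():
    print('caching data')

scheduler = BlockingScheduler(
    jobstores={'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')})
# The explicit id plus replace_existing=True makes restarts idempotent.
scheduler.add_job(cache_data, 'interval', minutes=20,
                  id='data_from_wunderground', replace_existing=True)
scheduler.start()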
コード例 #30
class Scheduler:
    def __init__(self, main_config):
        self.main_config = main_config
        self.create_dirs()
        self.logger = get_logger(main_config['project_name'],
                                 file=main_config['logs_dir'],
                                 level=main_config['log_level'])
        self.board = None
        self.scheduler = BlockingScheduler()
        self.setup()
        atexit.register(self._exit)

    def create_dirs(self):
        try:
            Path(self.main_config['logs_dir']).mkdir(parents=True,
                                                     exist_ok=True)
            Path(self.main_config['data_dir']).mkdir(parents=True,
                                                     exist_ok=True)

            Path(self.main_config['data_dir']).joinpath('sensors/').mkdir(
                parents=True, exist_ok=True)

            cameras_config = read_json(self.main_config['cameras_config'])
            for camera in cameras_config:
                Path(self.main_config['data_dir'])\
                    .joinpath('images/' + str(camera['type'] + '_' + str(camera['id'])))\
                    .mkdir(parents=True, exist_ok=True)
        except Exception as e:
            self.logger.error('Error creating file structure: %s', str(e))

    def setup(self):
        try:
            board_scheme = read_json(self.main_config['board'])
            sensors = read_json(self.main_config['sensors_config'])
            board = Board(board_scheme, sensors, self.logger)
            self.board = board
        except Exception as e:
            self.logger.warning(
                'No board specified in config or some error in Board init')
            self.logger.warning(str(e))
            raise UserWarning(str(e))

        for p in self.main_config['pipelines']:
            pipeline = read_json(p)
            pipeline_executor = PipelineExecutor(
                logger=get_logger(self.main_config['project_name'] + '.' +
                                  pipeline['name'],
                                  file=self.main_config['logs_dir'],
                                  level=self.main_config['log_level']),
                pipeline=pipeline['pipeline'],
                main_config=self.main_config,
                pipeline_name=pipeline['name'],
                board=self.board)
            self.scheduler.add_job(
                # Bind the executor as a default argument so each closure
                # keeps its own PipelineExecutor instead of the loop's last one.
                func=(lambda executor=pipeline_executor: executor.execute()),
                **pipeline['run_interval'])

    def start(self):
        try:
            self.logger.info(self.main_config['project_name'] + ' started')
            self.scheduler.start()
        except Exception as e:
            self.logger.error('Error starting scheduler!')
            self.logger.error(str(e))

    def _exit(self):
        self.board.exit()
        print('EXITING!!!')
        self.logger.info('System exited normally')
        self.scheduler.shutdown()
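The default-argument binding used in setup() guards against Python's late-binding closures; a self-contained illustration of the pitfall (the job names are invented):

from apscheduler.schedulers.blocking import BlockingScheduler

scheduler = BlockingScheduler()

for name in ['alpha', 'beta', 'gamma']:
    # Without the `name=name` default, every lambda would see the loop's
    # final value ('gamma') at call time; the default freezes it per job.
    scheduler.add_job(lambda name=name: print('running', name),
                      'interval', seconds=30, id=name)

# scheduler.start()  # would block and run the three jobs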
コード例 #31
class TaskerDo:
    '''
    scheduled generation of a config file from a template, with the source
    ip list kept in redis
    '''
    def __init__(self, conf_file):
        self.ip_list = []
        self.get_configuration(conf_file)
        self.client = redis.Redis(**self.config['settings']['redis'])
        self.set_logging(self.config['settings']['logfile'])
        self.sched = BlockingScheduler()
        self.set_scheduler(self.run, self.config['generate']['schedule'])

    def set_scheduler(self, job, schedule, args=None):
        '''
        register a scheduled job
        :param schedule: dict containing 'name', 'trigger' ('cron', 'interval' or any
                         other apscheduler trigger) and 'params' (trigger fields such
                         as minutes or seconds; see the apscheduler docs)
        :return: None
        '''
        self.sched.add_job(job, args=args, trigger=schedule['trigger'],
                           name=schedule['name'], **schedule['params'])


    def start(self):
        self.sched.start()

    def stop(self):
        self.sched.shutdown()

    def run(self):
        '''
        This is where the magic happens and all the jobs are executed:
          optionally push self.ip_list into redis,
          get ips from redis -> generate file -> run reload commands
        :return: nothing yet
        '''
        if len(self.ip_list) > 0:
            self.redis_push_ip_list(self.ip_list, expiration=self.config['bad_ip_expiration'])
            self.ip_list = []
        ip_list = self.redis_get_ip_list()
        logging.info("%s items in database: %s", len(ip_list), ",".join(ip_list))
        if len(ip_list) > 0:
            generate = self.config['generate']
            status = self.file_generate(template_file=generate['template'],
                                        generated_file=generate['file'], ip_list=ip_list)
            if status:
                logging.info("configuration file: %s was created", generate['file'])
                for command in self.config['generate']['reload_commands']:
                    logging.info("executing command: %s", command)
                    try:
                        # reload_services expects a list of command strings.
                        if self.reload_services([command]):
                            logging.info("Success")
                    except CommandFail as e:
                        self.logger.error(e)
                        break
            else:
                logging.info('reload not needed')

    def is_diff(self, content_str, current_file):
        '''
        compare the new version to the existing one
        :param content_str: string, new version content string
        :param current_file: file, existing file
        :return: bool, true if different
        '''
        try:
            with open(current_file) as f:
                current_str = f.read()
        except FileNotFoundError:
            return True
        return content_str != current_str

    def get_configuration(self, conf_file):
        '''
        :param conf_file: yaml configuration file with report data
        :return: configuration data
        '''
        with open(conf_file) as f:
            yaml_data = f.read()
        self.config = yaml.safe_load(yaml_data)  # safe_load avoids executing arbitrary tags

    def set_logging(self, log_file):
        self.logger = logging.getLogger()
        hdlr = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)

    def file_generate(self, template_file, generated_file, **kwargs):
        '''
        generate application config file using template
        :param template_file: file, the template file to generate using Jinja2 template engine
        :param generated_file: file, the file to generate
        :param kwargs: the argument to send to templates
        :return: bool status
        '''
        with open(template_file) as t:
            template_str = t.read()
        template = Template(template_str)
        content_str = template.render(**kwargs)
        is_diff = self.is_diff(content_str, generated_file)
        if is_diff:
            logging.info("New version exists")
            return self.write_content(content_str, generated_file)
        else:
            logging.info("No changes in current file, generation not needed")
            return False

    def redis_push_ip_list(self, ip_list, expiration):
        '''
        push list of ip's to redis and set expiration
        :param ip_list: list of strings
        :param expiration: int second
        :return: bool status
        '''
        ip_list = [ip for ip in ip_list if not ipaddress.ip_address(ip).is_private]
        if len(ip_list) == 0:
            raise ValueError('Empty list supplied')
        for key in ip_list:
            # redis-py rejects None values, so store an empty string as a marker
            _ = self.client.set(key, '', ex=expiration)
        return True

    def redis_get_ip_list(self):
        '''
        get the list of ips from redis
        :return: list
        '''
        ips = self.client.keys('*')
        return [i.decode() for i in ips]

    def write_content(self, content_str, generated_file, backup=True):
        '''
        helper for file_generate: write the data to file and optionally create a backup
        :param content_str: string, the generated content
        :param generated_file: file, the target file to write
        :param backup: bool
        :return: bool, status
        '''
        if backup and os.path.exists(generated_file):
            date = datetime.now().strftime('%y%m%d%H%M%S')
            os.rename(generated_file, generated_file + '.' + date)
        with open(generated_file, 'w') as conf_file:
            _ = conf_file.write(content_str)
        return True

    def reload_services(self, commands_list):
        '''
        execute a list of shell commands, stopping if one fails
        :param commands_list: list of shell command strings
        :return: bool, status
        '''
        for command_str in commands_list:
            exit_status, output = subprocess.getstatusoutput(command_str)
            if exit_status > 0:
                raise CommandFail("Command %s failed with status %s output: %s" % (command_str, exit_status, output))
        return True
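For reference, a hypothetical configuration shaped the way TaskerDo reads it, shown as the dict yaml.safe_load would produce (every name and value below is invented):

config = {
    'settings': {
        'redis': {'host': 'localhost', 'port': 6379},
        'logfile': '/var/log/tasker.log',
    },
    'bad_ip_expiration': 86400,
    'generate': {
        'template': 'blacklist.j2',
        'file': '/etc/app/blacklist.conf',
        'reload_commands': ['systemctl reload nginx'],
        'schedule': {
            'name': 'generate-blacklist',
            'trigger': 'interval',
            'params': {'minutes': 5},
        },
    },
}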