Example 1
class TaskScheduler(object):
    def __init__(self, client: MongoClient) -> None:
        super().__init__()
        self.store = MongoDBJobStore(client=client)
        self.scheduler = AsyncIOScheduler()
        self.scheduler.add_jobstore(self.store)
        self.jobs: list = []
        self.jobLengthMax = 0

    def addJob(self,
               job: Callable,
               year: str,
               month: str,
               dayOfWeek: str,
               day: str,
               hour: str,
               minute: str,
               second: str,
               target: str,
               args: List = None) -> Job:
        trigger = CronTrigger(year=year,
                              month=month,
                              day_of_week=dayOfWeek,
                              day=day,
                              hour=hour,
                              minute=minute,
                              second=second)
        try:
            nextLength = self.jobLengthMax + 1
            # avoid rebinding the `job` parameter to the returned Job object
            scheduledJob = self.scheduler.add_job(job,
                                                  args=args,
                                                  trigger=trigger,
                                                  id=f"{target}-{nextLength}")
            self.jobLengthMax = nextLength
            return scheduledJob
        except Exception as e:
            print(e)

    def removeJob(self, id: str) -> None:
        self.scheduler.remove_job(id)

    def setupJobs(self) -> None:
        jobs = self.scheduler.get_jobs()
        for job in jobs:
            idSplit = job.id.split("-")
            if len(idSplit) < 2:
                continue
            if self.jobLengthMax < int(idSplit[1]):
                self.jobLengthMax = int(idSplit[1])
            self.jobs.append({"target": idSplit[0], "numId": idSplit[1]})

    def getJobs(self) -> list:
        return self.scheduler.get_jobs()

    def start(self) -> None:
        self.scheduler.start()
        self.setupJobs()
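
A minimal usage sketch for TaskScheduler, assuming a local MongoDB instance and a throwaway `tick` coroutine (the connection string, job function, and cron fields are illustrative, not part of the original):

import asyncio

from pymongo import MongoClient

async def tick() -> None:
    print("tick")

async def main() -> None:
    scheduler = TaskScheduler(MongoClient("mongodb://localhost:27017"))
    scheduler.start()  # also runs setupJobs() to rebuild state from MongoDB
    scheduler.addJob(tick, year="*", month="*", dayOfWeek="*", day="*",
                     hour="*", minute="*", second="*/10", target="tick")
    await asyncio.sleep(60)  # keep the loop alive so jobs can fire

asyncio.run(main())

Note that MongoDBJobStore persists jobs by reference, so the scheduled callable must be importable (a module-level function, not a lambda).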
Example 2
class TimerJob:
    _scheduler = None

    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        self._scheduler.configure(timezone=utc)  # use UTC as the scheduler's timezone

    def init_timer(self):
        print("启动调度器...")
        self._scheduler.start()

    def close_timer(self):
        self._scheduler.shutdown(wait=True)
        print("关闭调度器...")

    def new_job(self, j_id: str, func: object, args: tuple, cron: str):
        """添加定时任务"""
        return self._scheduler.add_job(id=j_id,
                                       func=func,
                                       args=args,
                                       trigger=CronTrigger.from_crontab(cron))

    def delete_job(self, j_id: str):
        """删除定时任务"""
        return self._scheduler.remove_job(job_id=j_id)

    def stop_job(self, j_id: str):
        """暂停任务"""
        return self._scheduler.pause_job(job_id=j_id)

    def replay_job(self, j_id: str):
        """恢复任务"""
        return self._scheduler.resume_job(job_id=j_id)

    def modify_job(self, j_id: str, func: object, args: tuple, cron: str):
        """更新任务"""
        return self._scheduler.modify_job(
            job_id=j_id,
            func=func,
            args=args,
            trigger=CronTrigger.from_crontab(cron))

    def get_job(self, j_id: str):
        """获取定时任务信息"""
        return self._scheduler.get_job(job_id=j_id)

    def get_all(self):
        """List all jobs."""
        return self._scheduler.get_jobs()
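
A usage sketch for TimerJob, assuming a throwaway `report` coroutine and an illustrative crontab string:

import asyncio

async def report(name: str) -> None:
    print(f"report for {name}")

async def main() -> None:
    timer = TimerJob()
    # standard five-field crontab: fire every five minutes
    timer.new_job("report-1", report, args=("demo",), cron="*/5 * * * *")
    timer.init_timer()
    await asyncio.sleep(600)  # keep the loop alive so the job can fire
    timer.close_timer()

asyncio.run(main())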
Example 3
def list_scheduled_jobs(scheduler: AsyncIOScheduler,
                        team_id: Text) -> List[Job]:
    """
    Return all the jobs matching team_id.
    """
    id_re = re.compile(rf"^{team_id}\-U[A-Z0-9]+\-[a-f0-9]{{40}}$")
    return [job for job in scheduler.get_jobs() if id_re.match(job.id)]
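
The regex above matches ids of the form `<team_id>-U<member>-<40-char sha1 hex digest>`. A small sketch that builds such an id and checks the filter (all ids here are made up):

import hashlib

from apscheduler.schedulers.asyncio import AsyncIOScheduler

team_id = "T0001"
digest = hashlib.sha1(b"daily standup").hexdigest()  # 40 lowercase hex chars
job_id = f"{team_id}-U0ABC99-{digest}"

scheduler = AsyncIOScheduler()
scheduler.add_job(lambda: None, "interval", minutes=30, id=job_id)
assert [job.id for job in list_scheduled_jobs(scheduler, team_id)] == [job_id]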
Example 4
    async def test_watch(self):
        sched = AsyncIOScheduler(timezone=cfg.tz)
        monitor = MonitorManager()
        monitor.init(sched)

        plot = 'momentum'
        freq = 3
        trade_time_only = True

        code = '000001.XSHE'
        flag = 'both'
        frame_type = '1d'
        mom = 0.01

        with patch('omicron.core.timeframe.tf.is_trade_day',
                   side_effect=[True]):
            await monitor.watch(plot,
                                freq=freq,
                                trade_time_only=trade_time_only,
                                code=code,
                                flag=flag,
                                mom=mom,
                                frame_type=frame_type)
            job_id = ":".join((plot, code, frame_type, flag))
            self.assertEqual(3, len(sched.get_jobs()))
            recs = await monitor.resume_monitors()
            self.assertDictEqual(
                {
                    job_id: {
                        "jobinfo": {
                            "freq": 3,
                            "plot": plot,
                            "trade_time_only": trade_time_only
                        },
                        "kwargs": {
                            "mom": mom,
                            'code': code,
                            'flag': flag,
                            'frame_type': frame_type
                        }
                    }
                }, recs)
            await monitor.remove(plot, code, frame_type, flag)
            self.assertEqual(1, len(sched.get_jobs()))
Example 5
class Application(object):
    def __init__(self):
        self.scheduler = None

    async def init(self, app, loop):
        logger.info("init alpha...")
        self.scheduler = AsyncIOScheduler({'event_loop': loop},
                                          timezone='Asia/Shanghai')
        self.scheduler.start()
        await omicron.init()
        await emit.start(emit.Engine.REDIS,
                         dsn=cfg.redis.dsn,
                         start_server=False)

        mm.init(self.scheduler)
        start_plot_scan(self.scheduler)

        app.add_route(handlers.plot_command_handler,
                      '/plot/<cmd>',
                      methods=['POST'])
        app.add_route(handlers.add_monitor, '/monitor/add', methods=['POST'])
        app.add_route(handlers.remove_monitor,
                      '/monitor/remove',
                      methods=['POST'])
        app.add_route(handlers.list_monitors, '/monitor/list', methods=['GET'])
        app.add_route(self.jobs, '/jobs/<cmd>', methods=['POST'])
        app.add_route(handlers.get_stock_pool, '/stock_pool', methods=['GET'])
        app.add_route(handlers.fuzzy_match,
                      '/common/fuzzy-match',
                      methods=['GET'])

    async def jobs(self, request, cmd):
        if cmd == 'list':
            result = self.list_jobs()
            return response.json(result, status=200)

    def list_jobs(self):
        result = []
        for job in self.scheduler.get_jobs():
            result.append([job.name, str(job.trigger), str(job.next_run_time)])

        return result
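
A standalone sketch of the construction pattern Application.init relies on, binding the scheduler to an explicit loop and timezone (here the loop is passed via the `event_loop` keyword option, which APScheduler 3.x recognizes; the interval job is illustrative):

import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def main() -> None:
    loop = asyncio.get_running_loop()
    scheduler = AsyncIOScheduler(event_loop=loop, timezone='Asia/Shanghai')
    scheduler.start()
    scheduler.add_job(lambda: print("hello"), 'interval', seconds=5)
    await asyncio.sleep(12)  # let the job fire twice
    scheduler.shutdown()

asyncio.run(main())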
Example 6
class Scheduler(abstract.Scheduler):
    apscheduler: AsyncIOScheduler

    def __init__(self) -> None:
        super().__init__()
        self.apscheduler = AsyncIOScheduler()
        self.apscheduler.start()
        logger.info("Started scheduler")

    def add_scheduled_feeding(self, feeding_schedule: abstract.Schedule,
                              feeding_callback: Callable) -> time:
        kwargs = {
            "trigger": "cron",
            "id": feeding_schedule.get_id(),
            "name": "Scheduled Feeding",
            "misfire_grace_time": 3600,
            "coalesce": True,
            "max_instances": 1,
        }
        cron_args = feeding_schedule.get_cron_args()
        kwargs.update(cron_args)
        job = self.apscheduler.add_job(feeding_callback, **kwargs)
        logger.info("Added scheduled feeding: {}", job)
        return job.next_run_time

    def remove_scheduled_feeding(
            self, feeding_schedule: abstract.Schedule) -> Optional[time]:
        job = self.apscheduler.get_job(feeding_schedule.get_id())
        logger.info("Removing scheduled job: {}", job)
        if job:
            t = job.next_run_time
            job.remove()
            return t
        return None

    def list_scheduled_feedings(self) -> List[Tuple[str, time]]:
        return sorted(
            [(job.id, job.next_run_time)
             for job in self.apscheduler.get_jobs()],
            key=lambda x: x[1],
        )
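
A sketch of the `Schedule` contract this class consumes: only `get_id()` and `get_cron_args()` are needed, and `get_cron_args()` must return keyword arguments valid for APScheduler's cron trigger. The stand-in class and times below are assumptions, not the real `abstract.Schedule`:

import asyncio

class FixedSchedule:
    """Hypothetical stand-in for abstract.Schedule."""

    def get_id(self) -> str:
        return "feeding-08-00"

    def get_cron_args(self) -> dict:
        return {"hour": 8, "minute": 0}  # every day at 08:00

def feed() -> None:
    print("feeding now")

async def main() -> None:
    sched = Scheduler()  # starts its AsyncIOScheduler on the running loop
    next_run = sched.add_scheduled_feeding(FixedSchedule(), feed)
    print("next feeding at", next_run)

asyncio.run(main())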
Example 7
class Service:
    name: str
    plugin: "Plugin"
    logger: Logger

    def __init__(
        self,
        name=None,
        *,
        use_priv=Privilege.DEFAULT,
        manage_priv=Privilege.ADMIN,
        enable_on_default=None,
        visible=None,
    ):
        self.name = name or 'base'
        self.plugin = _tmp_current_plugin
        self.logger = self.plugin.logger.getChild(self.name)

        config = _load_service_config(self.key)

        self.use_priv = config.get('use_priv', use_priv)
        self.manage_priv = config.get('manage_priv', manage_priv)
        self.enable_on_default = config.get('enable_on_default')
        if self.enable_on_default is None:
            if enable_on_default is None:
                self.enable_on_default = name is None
            else:
                self.enable_on_default = enable_on_default
        self.visible = config.get('visible')
        if self.visible is None:
            if visible is None:
                self.visible = name is not None
            else:
                self.visible = visible
        self.enable_group = set(config.get('enable_group', []))
        self.disable_group = set(config.get('disable_group', []))
        self.user_privs = dict(config.get('user_privs', []))

        # self._node_key = PredicateFunction(lambda event: self.check_priv(event), notation=self)
        # self._node = PredicateNode(self._node_key)
        self._sv_node = router.meta_plugin_is(self.plugin)
        self._terminals: Set[TerminalNode] = set()

        self._scheduler = Scheduler()

        @self.on_loaded()
        def _start_scheduler():
            if (self._scheduler and not self._scheduler.running
                    and self._scheduler.get_jobs()):
                self._scheduler.configure(ajenga.config.APSCHEDULER_CONFIG)
                self._scheduler.start()

        @self.on_unload()
        def _on_unload():
            self.logger.info(
                f'Unloading... Unsubscribe all {len(self._terminals)} subscribers.'
            )
            app.engine.unsubscribe_terminals(self._terminals)

            # Stop scheduler
            if self._scheduler and self._scheduler.running:
                self._scheduler.remove_all_jobs()

        # Add to service list
        if self.key in _loaded_services:
            self.logger.warning(f"Service {self} already exists")
        _loaded_services[self.key] = self
        self.plugin.add_service(self)

    def check_priv(self,
                   event: Event,
                   required_priv: Union[int, Callable[[int], bool]] = None):
        if event.type in MessageEventTypes:
            required_priv = self.use_priv if required_priv is None else required_priv
            user_priv = self.get_user_priv(event)

            if isinstance(event, GroupMessageEvent):
                if not self.check_enabled(event.group):
                    return False
            if isinstance(required_priv, int):
                return bool(user_priv >= required_priv)
            elif isinstance(required_priv, Callable):
                return required_priv(user_priv)
            else:
                return False
        else:
            return True

    @property
    def key(self):
        if self.plugin is None:
            self.logger.error('Access key when service initializing!')
        else:
            return f'{self.plugin.name}.{self.name}'

    def __str__(self):
        return f'<Service: {self.key}>'

    @property
    def scheduler(self) -> Scheduler:
        return self._scheduler

    def on(self,
           graph=std.true,
           *,
           priv: Union[int, Callable[[int], bool]] = None):
        return ServiceGraphImpl(self) & graph & PredicateNode(
            PredicateFunction(
                lambda event: self.check_priv(event, required_priv=priv),
                notation=self))

    def on_message(self,
                   graph=std.true,
                   *,
                   priv: Union[int, Callable[[int], bool]] = None):
        return self.on(ajenga.router.message.is_message & graph, priv=priv)

    def on_load(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginLoad) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    def on_loaded(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginLoaded) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    def on_unload(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginUnload) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    @staticmethod
    def check_block_group(group: int):
        if group in _black_list_group and datetime.now() > _black_list_group[group]:
            del _black_list_group[group]  # blacklist period expired
            return False
            return False
        return bool(group in _black_list_group)

    @staticmethod
    def check_block_user(qq: int):
        if qq in _black_list_user and datetime.now() > _black_list_user[qq]:
            del _black_list_user[qq]  # blacklist period expired
            return False
        return bool(qq in _black_list_user)

    @staticmethod
    def get_priv_from_event(event: Event):
        if isinstance(event, GroupMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            elif event.sender.permission == GroupPermission.OWNER:
                return Privilege.OWNER
            elif event.sender.permission == GroupPermission.ADMIN:
                return Privilege.ADMIN
            elif event.sender.permission:
                return Privilege.GROUP
        elif isinstance(event, FriendMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            else:
                return Privilege.PRIVATE_FRIEND
        elif isinstance(event, TempMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            else:
                return Privilege.PRIVATE_GROUP
        return Privilege.DEFAULT

    async def get_user_priv_in_group(self, qq: ContactIdType,
                                     event: GroupMessageEvent, api: Api):
        priv = self.get_user_priv(qq)
        if priv == Privilege.BLACK:
            return priv
        member_info = await api.get_group_member_info(group=event.group, qq=qq)
        if member_info.ok:
            if member_info.data.permission == GroupPermission.OWNER:
                return max(priv, Privilege.OWNER)
            elif member_info.data.permission == GroupPermission.ADMIN:
                return max(priv, Privilege.ADMIN)
            else:
                return max(priv, Privilege.GROUP)

    def get_user_priv(self, qq_or_event: Union[ContactIdType,
                                               MessageEvent]) -> int:
        if isinstance(qq_or_event, ContactIdType):
            if qq_or_event in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            else:
                return self.user_privs.get(qq_or_event, Privilege.DEFAULT)
        elif isinstance(qq_or_event, MessageEvent):
            qq = qq_or_event.sender.qq
            ev_priv = self.get_priv_from_event(qq_or_event)
            sv_priv = self.user_privs.get(qq, Privilege.DEFAULT)
            if qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif ev_priv == Privilege.BLACK or sv_priv == Privilege.BLACK:
                return Privilege.BLACK
            else:
                return max(ev_priv, sv_priv)
        else:
            self.logger.error(f'Unknown qq_or_event {qq_or_event}')
            return Privilege.DEFAULT

    def set_user_priv(self, qq_or_event: Union[ContactIdType, MessageEvent],
                      priv: int):
        # print(self.user_privs)
        if isinstance(qq_or_event, int):
            self.user_privs[qq_or_event] = priv
        elif isinstance(qq_or_event, MessageEvent):
            self.user_privs[qq_or_event.sender.qq] = priv
        else:
            self.logger.error(f'Unknown qq_or_event {qq_or_event}')
        _save_service_config(self)

    def set_enable(self, group: ContactIdType):
        self.enable_group.add(group)
        self.disable_group.discard(group)
        _save_service_config(self)
        self.logger.info(f'Service {self.name} is enabled at group {group}')

    def set_disable(self, group: ContactIdType):
        self.enable_group.discard(group)
        self.disable_group.add(group)
        _save_service_config(self)
        self.logger.info(f'Service {self.name} is disabled at group {group}')

    def check_enabled(self, group: ContactIdType):
        return bool(
            (group in self.enable_group)
            or (self.enable_on_default and group not in self.disable_group))

    async def get_enabled_groups(self) -> dict:
        ret = {}
        for qq, ses in app.get_sessions().items():
            group_list = await ses.api.get_group_list()
            for group in group_list.data:
                if self.check_enabled(group.id):
                    ret[group.id] = qq
        return ret

    def scheduled_job(self, *args, **kwargs) -> Callable:
        kwargs.setdefault('timezone', pytz.timezone('Asia/Shanghai'))
        kwargs.setdefault('misfire_grace_time', 60)
        kwargs.setdefault('coalesce', True)

        def deco(func: Callable) -> Callable:
            @wraps(func)
            async def wrapper():
                try:
                    self.logger.info(f'Scheduled job {func.__name__} started.')
                    await func()
                except Exception as e:
                    self.logger.exception(e)
                    self.logger.error(
                        f'{type(e)} occurred when doing scheduled job {func.__name__}.'
                    )

            return self.scheduler.scheduled_job(*args, **kwargs)(wrapper)

        return deco

    def scheduled(self, *args, **kwargs) -> Callable:
        def deco(func: Callable) -> Callable:
            uid = str(uuid.uuid4())

            async def _sched():
                await app.handle_event(_scheduler_source,
                                       SchedulerEvent(id=uid))

            self.scheduler.scheduled_job(*args, **kwargs)(_sched)

            return self.on(
                router.event_type_is(EventType.Scheduler)
                & router.std.if_(lambda ev: ev.id == uid))(func)

        return deco

    async def broadcast(self, *messages: Message_T, interval=0.2):
        groups = await self.get_enabled_groups()
        for group, qq in groups.items():
            try:
                for message in messages:
                    await app.get_session(qq).api.send_group_message(
                        group=group, message=message)
                    await asyncio.sleep(interval)
            except Exception as e:
                self.logger.exception(e)
                self.logger.error(f"Failed to broadcast to group {group}")
Example 8
class Scheduler(object):
    def __init__(self, app=None):
        self.scheduler = AsyncIOScheduler()
        self.id = 'controller'
        self.app = app

    def schedule_workflows(self, task_id, executable, workflow_ids, trigger):
        """
        Schedules a workflow for execution

        Args:
            task_id (UUID): Id of the scheduled task
            executable (func): A callable to execute; must take one argument -- a workflow id
            workflow_ids (iterable(UUID)): An iterable of workflow ids
            trigger (Trigger): The trigger to use for this scheduled task
        """

        # def execute(id_):
        #     with self.app.app_context():
        #         executable(id_)

        for workflow_id in workflow_ids:
            self.scheduler.add_job(executable,
                                   args=(workflow_id, ),
                                   id=construct_task_id(task_id, workflow_id),
                                   trigger=trigger,
                                   replace_existing=True)

    def get_all_scheduled_workflows(self):
        """
        Gets all the scheduled workflows

        Returns:
             (dict{str: list[str]}) A dict of task_id to workflow execution ids
        """
        tasks = {}
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task not in tasks:
                tasks[task] = [workflow_execution_id]
            else:
                tasks[task].append(workflow_execution_id)
        return tasks

    def get_scheduled_workflows(self, task_id):
        """
        Gets all the scheduled workflow for a given task id

        Args:
            task_id (str): The task id

        Returns:
            (list[str]) A list of workflow execution ids associated with this task id
        """
        tasks = []
        for job in self.scheduler.get_jobs():
            task, workflow_execution_id = split_task_id(job.id)
            if task == task_id:
                tasks.append(workflow_execution_id)
        return tasks

    def update_workflows(self, task_id, trigger):
        """
        Updates the workflows for a given task id to use a different trigger

        Args:
            task_id (str|int): The task id to update
            trigger (Trigger): The new trigger to use
        """
        existing_tasks = {
            construct_task_id(task_id, workflow_execution_id)
            for workflow_execution_id in self.get_scheduled_workflows(task_id)
        }
        for job_id in existing_tasks:
            self.scheduler.reschedule_job(job_id=job_id, trigger=trigger)

    def unschedule_workflows(self, task_id, workflow_execution_ids):
        """
        Unschedule a workflow

        Args:
            task_id (UUID): The task ID to unschedule
            workflow_execution_ids (list[UUID]): The list of workflow execution IDs to update
        """
        for workflow_execution_id in workflow_execution_ids:
            try:
                self.scheduler.remove_job(
                    construct_task_id(task_id, workflow_execution_id))
            except JobLookupError:
                logger.warning('Cannot delete task {}. '
                               'No task found in scheduler'.format(
                                   construct_task_id(task_id,
                                                     workflow_execution_id)))

    def start(self):
        """Starts the scheduler for active execution. This function must be called before any workflows are executed.

        Returns:
            The state of the scheduler if successful, error message if scheduler is in "stopped" state.
        """
        if self.scheduler.state == STATE_STOPPED:
            logger.info('Starting scheduler')
            self.scheduler.start()
        else:
            logger.warning(
                'Cannot start scheduler. Scheduler is already running or is paused'
            )
            # return "Scheduler already running."
            raise InvalidInputException(
                "start",
                "Scheduler",
                "",
                errors={"error": "Scheduler is already started"})

        return self.scheduler.state

    def stop(self, wait=True):
        """Stops active execution.

        Args:
            wait (bool, optional): Boolean to synchronously or asynchronously wait for the scheduler to shutdown.
                Default is True.

        Returns:
            The state of the scheduler if successful, error message if scheduler is already in "stopped" state.
        """
        if self.scheduler.state != STATE_STOPPED:
            logger.info('Stopping scheduler')
            self.scheduler.shutdown(wait=wait)
            self.scheduler.state = STATE_STOPPED
        else:
            logger.warning(
                'Cannot stop scheduler. Scheduler is already stopped')
            # return "Scheduler already stopped."
            raise InvalidInputException(
                "stopped",
                "Scheduler",
                "",
                errors={"error": "Scheduler is already stopped"})

        return self.scheduler.state

    def pause(self):
        """Pauses active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "running" state.
        """
        if self.scheduler.state == STATE_RUNNING:
            logger.info('Pausing scheduler')
            self.scheduler.pause()
        elif self.scheduler.state == STATE_PAUSED:
            logger.warning(
                'Cannot pause scheduler. Scheduler is already paused')
            # return "Scheduler already paused."
            raise InvalidInputException(
                "pause",
                "Scheduler",
                "",
                errors={"error": "Scheduler is already paused"})
        elif self.scheduler.state == STATE_STOPPED:
            logger.warning('Cannot pause scheduler. Scheduler is stopped')
            # return "Scheduler is in STOPPED state and cannot be paused."
            raise InvalidInputException(
                "pause",
                "Scheduler",
                "",
                errors={"error": "Scheduler is  stopped"})
        return self.scheduler.state

    def resume(self):
        """Resumes active execution.

        Returns:
            The state of the scheduler if successful, error message if scheduler is not in the "paused" state.
        """
        if self.scheduler.state == STATE_PAUSED:
            logger.info('Resuming scheduler')
            self.scheduler.resume()
        else:
            logger.warning(
                "Scheduler is not in PAUSED state and cannot be resumed.")
            # return "Scheduler is not in PAUSED state and cannot be resumed."
            raise InvalidInputException(
                "resume",
                "Scheduler",
                "",
                errors={"error": "Scheduler already running."})
        return self.scheduler.state

    def pause_workflows(self, task_id, workflow_execution_ids):
        """
        Pauses some workflows associated with a task

        Args:
            task_id (int|str): The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to pause
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.pause_job(job_id=job_id)
                logger.info('Paused job {0}'.format(job_id))
            except JobLookupError:
                logger.warning(
                    'Cannot pause scheduled workflow {}. Workflow ID not found'
                    .format(job_id))

    def resume_workflows(self, task_id, workflow_execution_ids):
        """
        Resumes some workflows associated with a task

        Args:
            task_id: The id of the task to pause
            workflow_execution_ids (list[str]): The list of workflow execution IDs to resume
        """
        for workflow_execution_id in workflow_execution_ids:
            job_id = construct_task_id(task_id, workflow_execution_id)
            try:
                self.scheduler.resume_job(job_id=job_id)
                logger.info('Resumed job {0}'.format(job_id))
            except JobLookupError:
                logger.warning(
                    'Cannot resume scheduled workflow {}. Workflow ID not found'
                    .format(job_id))
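
A minimal sketch of driving the class above: schedule two workflows on one interval trigger, then inspect the task map. The ids and the 15-minute interval are illustrative, and `construct_task_id`/`split_task_id` are assumed to come from the same module as the class:

import asyncio
from uuid import uuid4

from apscheduler.triggers.interval import IntervalTrigger

def run_workflow(workflow_id):
    print("executing", workflow_id)

async def main():
    sched = Scheduler()
    sched.start()  # must be called before any workflow executes
    task_id = uuid4()
    sched.schedule_workflows(task_id, run_workflow, [uuid4(), uuid4()],
                             IntervalTrigger(minutes=15))
    print(sched.get_all_scheduled_workflows())
    await asyncio.sleep(1800)  # keep the loop alive for the first runs

asyncio.run(main())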
Example 9
class RPCServer:
    def __init__(self,
                 ethpconnect='127.0.0.1',
                 ethpport=9500,
                 rpcconnect='127.0.0.1',
                 rpcport=8545,
                 ipcconnect=None,
                 blocknotify=None,
                 walletnotify=None,
                 alertnotify=None,
                 tls=False,
                 *,
                 loop=None):
        self._loop = loop or asyncio.get_event_loop()
        self._app = Sanic(__name__,
                          log_config=None,
                          error_handler=SentryErrorHandler())
        self._host = ethpconnect
        self._port = ethpport
        self._rpc_host = rpcconnect
        self._rpc_port = rpcport
        self._unix_socket = ipcconnect
        self._blocknotify = blocknotify
        self._walletnotify = walletnotify
        self._alertnotify = alertnotify
        self._tls = tls
        self._log = logging.getLogger('rpc_server')
        self.routes()

    @property
    def endpoint(self):
        schema = 'http'
        if self._tls:
            schema += 's'

        return ('{0}://{1}:{2}'.format(schema, self._rpc_host, self._rpc_port)
                if not self._unix_socket else 'unix://{0}'.format(
                    self._unix_socket))

    @property
    def cmds(self):
        cmds = {}
        if self._blocknotify:
            cmds['blocknotify'] = self._blocknotify
        if self._walletnotify:
            cmds['walletnotify'] = self._walletnotify
        if self._alertnotify:
            cmds['alertnotify'] = self._alertnotify
        return cmds

    def before_server_start(self):
        @self._app.listener('before_server_start')
        async def initialize_scheduler(app, loop):
            self._proxy = await create_ethereumd_proxy(self.endpoint,
                                                       loop=loop)
            self._poller = Poller(self._proxy, self.cmds, loop=loop)
            self._scheduler = AsyncIOScheduler({'event_loop': loop})
            if self._poller.has_blocknotify:
                self._scheduler.add_job(self._poller.blocknotify,
                                        'interval',
                                        id='blocknotify',
                                        seconds=1)
            if self._poller.has_walletnotify:
                self._scheduler.add_job(self._poller.walletnotify,
                                        'interval',
                                        id='walletnotify',
                                        seconds=1)
            if self._scheduler.get_jobs():
                self._scheduler.start()

        return initialize_scheduler

    def routes(self):
        self._app.add_route(self.handler_index, '/', methods=['POST'])
        self._app.add_route(self.handler_log,
                            '/_log/',
                            methods=['GET', 'POST'])

    async def handler_index(self, request):
        data = request.json
        try:
            id_, method, params, _ = data['id'], \
                data['method'], data['params'], data['jsonrpc']
        except KeyError:
            return response.json({
                'id': data.get('id', 0),
                'result': None,
                'error': {
                    'message': 'Invalid rpc 2.0 structure',
                    'code': -32602
                }
            })
        try:
            result = (await getattr(self._proxy, method)(*params))
        except AttributeError as e:
            self._log.exception(e)
            return response.json({
                'id': id_,
                'result': None,
                'error': {
                    'message': 'Method not found',
                    'code': -32601
                }
            })
        except TypeError as e:
            self._log.exception(e)
            return response.json({
                'id': id_,
                'result': None,
                'error': {
                    'message': e.args[0],
                    'code': -1
                }
            })
        except BadResponseError as e:
            return response.json({
                'id': id_,
                'result': None,
                'error': {
                    'message': e.msg,
                    'code': e.code
                }
            })
        else:
            return response.json({'id': id_, 'result': result, 'error': None})

    async def handler_log(self, request):
        self._log.warning('\nRequest args: %s;\nRequest body: %s',
                          request.args, request.body)
        return response.json({'status': 'OK'})

    def serve(self):
        self.before_server_start()
        self._log.info(GREETING)
        server_settings = self._app._helper(host=self._host,
                                            port=self._port,
                                            debug=True,
                                            loop=self._loop,
                                            backlog=100,
                                            run_async=True,
                                            has_log=False)
        return serve(**server_settings)

    def run(self):
        self._loop.run_until_complete(self.serve())
        try:
            self._log.warning('Starting server on http://%s:%s/...',
                              self._host, self._port)
            self._loop.run_forever()
        except Exception:
            self._log.warning('Stopping server...')
            self._poller.stop()
Example 10
    async def test_load_bars_sync_jobs(self):
        origin = cfg.omega.sync.bars
        try:
            cfg.omega.sync.bars = [
                {
                    "frame": "1m",
                    "start": "2020-01-02",
                    "stop": "2020-01-02",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "5m",
                    "start": "2020-01-2",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "15m",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "30m",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "60m",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE 000004.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "1d",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "1W",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
                {
                    "frame": "1M",
                    "start": "2020-01-02",
                    "stop": "2020-01-03",
                    "delay": 3,
                    "cat": [],
                    "include": "000001.XSHE",
                    "exclude": "000001.XSHG",
                },
            ]

            scheduler = AsyncIOScheduler(timezone=cfg.tz)
            syncjobs.load_bars_sync_jobs(scheduler)

            actual = set([job.name for job in scheduler.get_jobs()])
            expected = set([
                "1m:9:31-59",
                "1m:10:*",
                "1m:11:0-30",
                "1m:13-14:*",
                "1m:15:00",
                "5m:9:35-55/5",
                "5m:10:*/5",
                "5m:11:0-30/5",
                "5m:13-14:*/5",
                "5m:15:00",
                "15m:9:45",
                "15m:10:*/5",
                "15m:11:15,30",
                "15m:13-14:*/15",
                "15m:15:00",
                "30m:10-11:*/30",
                "30m:13:30",
                "30m:14-15:*/30",
                "60m:10:30",
                "60m:11:30",
                "60m:14-15:00",
                "1d:15:00",
                "1M:15:00",
            ])
            self.assertSetEqual(expected, actual)
        finally:
            cfg.omega.sync.bars = origin
Example 11
class Scheduler:
    def __init__(self):
        scheduler_config = json.loads(
            config.httpdb.scheduling.scheduler_config)
        self._scheduler = AsyncIOScheduler(gconfig=scheduler_config,
                                           prefix=None)
        # this should be something that would never make sense inside a project name or job name
        self._job_id_separator = "-_-"
        # we don't allow scheduling a job to run more than once per X
        # NOTE this cannot be less than one minute - see _validate_cron_trigger
        self._min_allowed_interval = config.httpdb.scheduling.min_allowed_interval
        self._secrets_provider = schemas.SecretProviderName.kubernetes

        self._store_schedule_credentials_in_secrets = (
            mlrun.mlconf.httpdb.scheduling.
            schedule_credentials_secrets_store_mode == "enabled"
            or (mlrun.mlconf.httpdb.scheduling.
                schedule_credentials_secrets_store_mode == "auto"
                and mlrun.mlconf.httpdb.authorization.mode == "opa"))

    async def start(self, db_session: Session):
        logger.info("Starting scheduler")
        self._scheduler.start()
        # the scheduler shutdown and start operations are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

        # don't fail the start on re-scheduling failure
        try:
            self._reload_schedules(db_session)
        except Exception as exc:
            logger.warning("Failed reloading schedules", exc=exc)

    async def stop(self):
        logger.info("Stopping scheduler")
        self._scheduler.shutdown()
        # the scheduler shutdown and start operations are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

    def create_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Union[Dict, Callable],
        cron_trigger: Union[str, schemas.ScheduleCronTrigger],
        labels: Dict = None,
        concurrency_limit: int = config.httpdb.scheduling.
        default_concurrency_limit,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Creating schedule",
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        self._store_schedule_secrets(auth_info, project, name)
        get_db().create_schedule(
            db_session,
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            labels,
        )
        self._create_schedule_in_scheduler(
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            auth_info,
        )

    def update_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
        scheduled_object: Union[Dict, Callable] = None,
        cron_trigger: Union[str, schemas.ScheduleCronTrigger] = None,
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        if cron_trigger is not None:
            self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Updating schedule",
            project=project,
            name=name,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        self._store_schedule_secrets(auth_info, project, name)
        get_db().update_schedule(
            db_session,
            project,
            name,
            scheduled_object,
            cron_trigger,
            labels,
            concurrency_limit,
        )
        db_schedule = get_db().get_schedule(db_session, project, name)
        updated_schedule = self._transform_and_enrich_db_schedule(
            db_session, db_schedule)

        self._update_schedule_in_scheduler(
            project,
            name,
            updated_schedule.kind,
            updated_schedule.scheduled_object,
            updated_schedule.cron_trigger,
            updated_schedule.concurrency_limit,
            auth_info,
        )

    def list_schedules(
        self,
        db_session: Session,
        project: str = None,
        name: str = None,
        kind: str = None,
        labels: str = None,
        include_last_run: bool = False,
    ) -> schemas.SchedulesOutput:
        logger.debug("Getting schedules",
                     project=project,
                     name=name,
                     labels=labels,
                     kind=kind)
        db_schedules = get_db().list_schedules(db_session, project, name,
                                               labels, kind)
        schedules = []
        for db_schedule in db_schedules:
            schedule = self._transform_and_enrich_db_schedule(
                db_session, db_schedule, include_last_run)
            schedules.append(schedule)
        return schemas.SchedulesOutput(schedules=schedules)

    def get_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        logger.debug("Getting schedule", project=project, name=name)
        db_schedule = get_db().get_schedule(db_session, project, name)
        return self._transform_and_enrich_db_schedule(db_session, db_schedule,
                                                      include_last_run)

    def delete_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
    ):
        logger.debug("Deleting schedule", project=project, name=name)
        self._remove_schedule_scheduler_resources(project, name)
        get_db().delete_schedule(db_session, project, name)

    def delete_schedules(
        self,
        db_session: Session,
        project: str,
    ):
        schedules = self.list_schedules(
            db_session,
            project,
        )
        logger.debug("Deleting schedules", project=project)
        for schedule in schedules.schedules:
            self._remove_schedule_scheduler_resources(schedule.project,
                                                      schedule.name)
        get_db().delete_schedules(db_session, project)

    def _remove_schedule_scheduler_resources(self, project, name):
        self._remove_schedule_from_scheduler(project, name)
        self._remove_schedule_secrets(project, name)

    def _remove_schedule_from_scheduler(self, project, name):
        job_id = self._resolve_job_id(project, name)
        # don't fail on delete if job doesn't exist
        job = self._scheduler.get_job(job_id)
        if job:
            self._scheduler.remove_job(job_id)

    async def invoke_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
    ):
        logger.debug("Invoking schedule", project=project, name=name)
        db_schedule = await fastapi.concurrency.run_in_threadpool(
            get_db().get_schedule, db_session, project, name)
        function, args, kwargs = self._resolve_job_function(
            db_schedule.kind,
            db_schedule.scheduled_object,
            project,
            name,
            db_schedule.concurrency_limit,
            auth_info,
        )
        return await function(*args, **kwargs)

    def _store_schedule_secrets(
        self,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud

        if self._store_schedule_credentials_in_secrets:
            # sanity
            if not auth_info.session:
                raise mlrun.errors.MLRunAccessDeniedError(
                    "Session is required to create schedules in OPA authorization mode"
                )
            secret_key = mlrun.api.crud.Secrets().generate_schedule_secret_key(
                name)
            secret_key_map = (mlrun.api.crud.Secrets().
                              generate_schedule_key_map_secret_key())
            mlrun.api.crud.Secrets().store_secrets(
                project,
                schemas.SecretsData(
                    provider=self._secrets_provider,
                    secrets={secret_key: auth_info.session},
                ),
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )

    def _remove_schedule_secrets(
        self,
        project: str,
        name: str,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud

        if self._store_schedule_credentials_in_secrets:
            # sanity
            secret_key = mlrun.api.crud.Secrets().generate_schedule_secret_key(
                name)
            secret_key_map = (mlrun.api.crud.Secrets().
                              generate_schedule_key_map_secret_key())
            mlrun.api.crud.Secrets().delete_secret(
                project,
                self._secrets_provider,
                secret_key,
                allow_secrets_from_k8s=True,
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )

    def _validate_cron_trigger(
        self,
        cron_trigger: schemas.ScheduleCronTrigger,
        # accepting now from outside for testing purposes
        now: datetime = None,
    ):
        """
        Enforce no more than one job per min_allowed_interval
        """
        logger.debug("Validating cron trigger")
        apscheduler_cron_trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = now or datetime.now(apscheduler_cron_trigger.timezone)
        next_run_time = None
        second_next_run_time = now

        # doing 60 checks to allow one-minute precision; if the _min_allowed_interval is less than one minute,
        # validation won't fail in certain scenarios where it should. See test_validate_cron_trigger_multi_checks
        # for a detailed explanation
        for index in range(60):
            next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                None, second_next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=1999
            if next_run_time is None:
                return
            second_next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                next_run_time, next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=2050
            if second_next_run_time is None:
                return
            min_allowed_interval_seconds = humanfriendly.parse_timespan(
                self._min_allowed_interval)
            if second_next_run_time < next_run_time + timedelta(
                    seconds=min_allowed_interval_seconds):
                logger.warn(
                    "Cron trigger too frequent. Rejecting",
                    cron_trigger=cron_trigger,
                    next_run_time=next_run_time,
                    second_next_run_time=second_next_run_time,
                    delta=second_next_run_time - next_run_time,
                )
                raise ValueError(
                    f"Cron trigger too frequent. no more then one job "
                    f"per {self._min_allowed_interval} is allowed")

    def _create_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Adding schedule to scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind, scheduled_object, project, name, concurrency_limit,
            auth_info)

        # we use max_instances as well as our logic in the run wrapper for concurrent jobs
        # in order to allow concurrency for triggering the jobs (max_instances), and concurrency
        # of the jobs themselves (our logic in the run wrapper).
        self._scheduler.add_job(
            function,
            self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
                cron_trigger),
            args,
            kwargs,
            job_id,
            max_instances=concurrency_limit,
        )

    def _update_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Updating schedule in scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind, scheduled_object, project, name, concurrency_limit,
            auth_info)
        trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = datetime.now(self._scheduler.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self._scheduler.modify_job(
            job_id,
            func=function,
            args=args,
            kwargs=kwargs,
            trigger=trigger,
            next_run_time=next_run_time,
        )

    def _reload_schedules(self, db_session: Session):
        logger.info("Reloading schedules")
        db_schedules = get_db().list_schedules(db_session)
        for db_schedule in db_schedules:
            # don't let one failure fail the rest
            try:
                # import here to avoid circular imports
                import mlrun.api.crud

                session = None
                if self._store_schedule_credentials_in_secrets:
                    schedule_secret_key = mlrun.api.crud.Secrets(
                    ).generate_schedule_secret_key(db_schedule.name)
                    secret_key_map = (mlrun.api.crud.Secrets().
                                      generate_schedule_key_map_secret_key())
                    session = mlrun.api.crud.Secrets().get_secret(
                        db_schedule.project,
                        self._secrets_provider,
                        schedule_secret_key,
                        allow_secrets_from_k8s=True,
                        allow_internal_secrets=True,
                        key_map_secret_key=secret_key_map,
                    )
                self._create_schedule_in_scheduler(
                    db_schedule.project,
                    db_schedule.name,
                    db_schedule.kind,
                    db_schedule.scheduled_object,
                    db_schedule.cron_trigger,
                    db_schedule.concurrency_limit,
                    mlrun.api.schemas.AuthInfo(session=session),
                )
            except Exception as exc:
                logger.warn(
                    "Failed rescheduling job. Continuing",
                    exc=str(exc),
                    traceback=traceback.format_exc(),
                    db_schedule=db_schedule,
                )

    def _transform_and_enrich_db_schedule(
        self,
        db_session: Session,
        schedule_record: schemas.ScheduleRecord,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        schedule_dict = schedule_record.dict()
        schedule_dict["labels"] = {
            label["name"]: label["value"]
            for label in schedule_dict["labels"]
        }
        schedule = schemas.ScheduleOutput(**schedule_dict)

        job_id = self._resolve_job_id(schedule_record.project,
                                      schedule_record.name)
        job = self._scheduler.get_job(job_id)
        if job:
            schedule.next_run_time = job.next_run_time

        if include_last_run:
            schedule = self._enrich_schedule_with_last_run(
                db_session, schedule)

        return schedule

    @staticmethod
    def _enrich_schedule_with_last_run(
            db_session: Session, schedule_output: schemas.ScheduleOutput):
        if schedule_output.last_run_uri:
            run_project, run_uid, iteration, _ = RunObject.parse_uri(
                schedule_output.last_run_uri)
            run_data = get_db().read_run(db_session, run_uid, run_project,
                                         iteration)
            schedule_output.last_run = run_data
        return schedule_output

    def _resolve_job_function(
        self,
        scheduled_kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        project_name: str,
        schedule_name: str,
        schedule_concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ) -> Tuple[Callable, Optional[Union[List, Tuple]], Optional[Dict]]:
        """
        :return: a tuple (function, args, kwargs) to be used with the APScheduler.add_job
        """

        if scheduled_kind == schemas.ScheduleKinds.job:
            scheduled_object_copy = copy.deepcopy(scheduled_object)
            return (
                Scheduler.submit_run_wrapper,
                [
                    self,
                    scheduled_object_copy,
                    project_name,
                    schedule_name,
                    schedule_concurrency_limit,
                    auth_info,
                ],
                {},
            )
        if scheduled_kind == schemas.ScheduleKinds.local_function:
            return scheduled_object, [], {}

        # sanity
        message = "Scheduled object kind missing implementation"
        logger.warn(message, scheduled_object_kind=scheduled_kind)
        raise NotImplementedError(message)

    def _list_schedules_from_scheduler(self, project: str):
        jobs = self._scheduler.get_jobs()
        return [
            job for job in jobs if self._resolve_job_id(project, "") in job.id
        ]

    def _resolve_job_id(self, project, name) -> str:
        """
        :return: the identifier used for the job inside APScheduler
        """
        return self._job_id_separator.join([project, name])

    @staticmethod
    async def submit_run_wrapper(
        scheduler,
        scheduled_object,
        project_name,
        schedule_name,
        schedule_concurrency_limit,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud
        from mlrun.api.api.utils import submit_run

        # remove the schedule from the body, otherwise when the scheduler submits this task it will
        # go into an endless scheduling loop
        scheduled_object.pop("schedule", None)

        # remove the uid from the task metadata so that a new uid is generated for every run;
        # otherwise all runs would share the same uid
        scheduled_object.get("task", {}).get("metadata", {}).pop("uid", None)

        if "task" in scheduled_object and "metadata" in scheduled_object[
                "task"]:
            scheduled_object["task"]["metadata"].setdefault("labels", {})
            scheduled_object["task"]["metadata"]["labels"][
                schemas.constants.LabelNames.schedule_name] = schedule_name

        db_session = create_session()

        active_runs = mlrun.api.crud.Runs().list_runs(
            db_session,
            state=RunStates.non_terminal_states(),
            project=project_name,
            labels=f"{schemas.constants.LabelNames.schedule_name}={schedule_name}",
        )
        if len(active_runs) >= schedule_concurrency_limit:
            logger.warn(
                "Schedule exceeded concurrency limit, skipping this run",
                project=project_name,
                schedule_name=schedule_name,
                schedule_concurrency_limit=schedule_concurrency_limit,
                active_runs=len(active_runs),
            )
            return

        # If credentials are needed but missing (happens for schedules created on upgrade from a
        # scheduler that didn't store credentials to one that does), enrich them.
        # Note that we rely on the "knowledge" that submit_run only requires the session of the auth info
        if not auth_info.session and scheduler._store_schedule_credentials_in_secrets:
            # import here to avoid circular imports
            import mlrun.api.utils.auth
            import mlrun.api.utils.singletons.project_member

            logger.info(
                "Schedule missing auth info which is required. Trying to fill from project owner",
                project_name=project_name,
                schedule_name=schedule_name,
            )

            project_member = mlrun.api.utils.singletons.project_member.get_project_member()
            project_owner = project_member.get_project_owner(db_session, project_name)
            # Update the schedule with the new auth info so we won't need to do the above again in the next run
            scheduler.update_schedule(
                db_session,
                mlrun.api.schemas.AuthInfo(session=project_owner.session),
                project_name,
                schedule_name,
            )

        response = await submit_run(db_session, auth_info, scheduled_object)

        run_metadata = response["data"]["metadata"]
        run_uri = RunObject.create_uri(run_metadata["project"],
                                       run_metadata["uid"],
                                       run_metadata["iteration"])
        get_db().update_schedule(
            db_session,
            run_metadata["project"],
            schedule_name,
            last_run_uri=run_uri,
        )

        close_session(db_session)

        return response

    @staticmethod
    def transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
        cron_trigger: schemas.ScheduleCronTrigger,
    ):
        return APSchedulerCronTrigger(
            cron_trigger.year,
            cron_trigger.month,
            cron_trigger.day,
            cron_trigger.week,
            cron_trigger.day_of_week,
            cron_trigger.hour,
            cron_trigger.minute,
            cron_trigger.second,
            cron_trigger.start_date,
            cron_trigger.end_date,
            cron_trigger.timezone,
            cron_trigger.jitter,
        )
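
A side note (not from the original source): the transform above depends on APScheduler's CronTrigger positional parameter order, which in APScheduler 3.x is (year, month, day, week, day_of_week, hour, minute, second, start_date, end_date, timezone, jitter). A minimal sketch checking that the positional call lines up with the keyword form:

from apscheduler.triggers.cron import CronTrigger

# Both triggers fire on weekdays at 09:30:00; equal string forms show the
# positional arguments landed on the intended fields.
positional = CronTrigger(None, None, None, None, "mon-fri", 9, 30, 0)
keyword = CronTrigger(day_of_week="mon-fri", hour=9, minute=30, second=0)
assert str(positional) == str(keyword)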
Example No. 12
class Break(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.scheduler = AsyncIOScheduler({'apscheduler.timezone': 'Europe/Helsinki'})
        self.scheduler.start()
        self.setup_in_progress = False

    @commands.group(name='break', help='Handles reminders for break time')
    async def _break(self, ctx):
        if ctx.subcommand_passed is None:
            await ctx.send_help(ctx.command)

    @_break.command(help='Shows the breaks that have been set up')
    async def show(self, ctx):
        jobs = self.scheduler.get_jobs()
        if len(jobs) == 0:
            await ctx.send('No job setup. Schedule on with \'!break setup\'.')
        else:
            jobs_str = [self.job_tostring(j, f'Break #{i}', j.id)
                        for i, j in enumerate(jobs)]
            await ctx.send('\n'.join(jobs_str))

    @_break.command(help='Removes a break by id')
    async def remove(self, ctx, id):
        if self.scheduler.get_job(id) is None:
            await ctx.send(f'No break with id \'{id}\' exists.')
        else:
            self.scheduler.remove_job(id)
            await ctx.send(f'Break with id \'{id}\' removed successfully.')

    @_break.command(help='Removes all breaks.')
    async def clear(self, ctx):
        self.scheduler.remove_all_jobs()
        await ctx.send('All breaks have been removed successfully.')

    @_break.command(help='Sets up the break time interactively, use \'!break abort\' to abort')
    async def setup(self, ctx, id=None):
        if self.setup_in_progress:
            await ctx.send('Another break setup is in progress, please wait for it to finish.')
            return
        self.setup_in_progress = True

        job_id = id if id is not None else f'break_{len(self.scheduler.get_jobs()) + 1}'

        def check_context(m):
            return m.channel == ctx.channel and m.author == ctx.author

        def check_command(m):
            # Only allow '!break abort' through
            return m.content == '!break abort' or not m.content.startswith(ctx.prefix)

        def check_range(m, lower_inc, upper_inc):
            try:
                num = int(m.content)
                return num >= lower_inc and num <= upper_inc
            except ValueError:
                return False

        def check_message(m):
            return check_context(m) and check_command(m)

        def check_weekday(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            if m.content in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']:
                return True
            return check_range(m, 0, 6)

        def check_hour(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            return check_range(m, 0, 23)

        def check_minute(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            return check_range(m, 0, 59)

        timeout_err_msg = 'Took too long to answer, aborting break time setup.'

        msg = await self._prompt(ctx, 'Message?', check_message, timeout_err_msg, 60.0)
        if msg is None:
            return

        weekday = await self._prompt(ctx, 'Week day?', check_weekday, timeout_err_msg, 60.0)
        if weekday is None:
            return

        hour = await self._prompt(ctx, 'Hour(s)?', check_hour, timeout_err_msg, 60.0)
        if hour is None:
            return

        minute = await self._prompt(ctx, 'Minute(s)?', check_minute, timeout_err_msg, 60.0)
        if minute is None:
            return

        try:
            self.scheduler.add_job(send_message, 'cron', args=[ctx, msg], name=msg,
                                         id=job_id, replace_existing=True,
                                         day_of_week=weekday, hour=hour, minute=minute)
            await ctx.send('Break setup successfully.')
        except ValueError:
            await ctx.send('Invalid argument format(s)! Try again.')

        self.setup_in_progress = False

    async def _prompt(self, ctx, msg, check, err_msg=None, timeout_sec=60.0):
        await ctx.send(msg)
        try:
            reply = await self.bot.wait_for('message', check=check, timeout=timeout_sec)
            if reply.content == '!break abort':
                await self._abort_setup(ctx, 'Setup aborted.')
                return None
            return reply.content
        except asyncio.TimeoutError:
            await self._abort_setup(ctx, err_msg)
            return None

    async def _abort_setup(self, ctx, msg=None):
        if msg is not None:
            await ctx.send(msg)
        self.setup_in_progress = False

    def job_tostring(self, job, title, id):
        t = job.trigger
        fields = {f.name: str(f) for f in t.fields}
        time = datetime.time(hour=int(fields['hour']),
                             minute=int(fields['minute']))
        # The original snippet is cut off inside this f-string; this is a
        # minimal completion that renders the schedule and the message.
        return f'''{title} (id: {id})
{fields['day_of_week']} at {time}: {job.name}'''
Example No. 13
class ApSchedulerAdapter(SchedulerAdapter):
    def __init__(self, dsn, exec_func, log_service):
        super().__init__(exec_func=exec_func, log_service=log_service)
        self.dsn = dsn

        self.scheduler = None

        self.create_scheduler()

    def create_scheduler(self):
        self.scheduler = AsyncIOScheduler()

        self.scheduler.configure(
            jobstores={
                'default': SQLAlchemyJobStore(url=self.dsn),
            },
            executors={
                'default': AsyncIOExecutor(),
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 1,
                'misfire_grace_time': (60 * 60)
            },
            timezone="UTC",
        )

    async def start(self):
        self.scheduler.start()

    async def stop(self):
        self.scheduler.shutdown(wait=True)

    async def add_cron_job(
            self,
            id,
            name,
            args,
            kwargs,
            crontab_expr,
            weekdays,
            hour,
            minute,
            second,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        weekdays = weekdays if weekdays is not None else []

        trigger = None

        if crontab_expr is not None:
            trigger = \
                CronTrigger.from_crontab(
                    crontab_expr,
                    timezone=timezone,
                )
        else:
            trigger = \
                CronTrigger(
                    day_of_week=
                    ",".join(
                        [w[0:3].upper() for w in weekdays]
                    )
                    if len(weekdays) > 0
                    else None,
                    hour=hour,
                    minute=minute,
                    second=second,
                    start_date=start_date,
                    end_date=end_date,
                    timezone=timezone,
                )

        await self._add_job(
            id=id,
            name=name,
            func=self.exec_func,
            kwargs=kwargs,
            trigger=trigger,
            coalesce=coalesce,
            misfire_grace_time=misfire_grace_time,
            replace_existing=replace_existing,
        )

    async def add_date_job(
            self,
            id,
            name,
            args,
            kwargs,
            date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def add_interval_job(
            self,
            id,
            name,
            args,
            kwargs,
            interval,
            days,
            hours,
            minutes,
            seconds,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def get_job(self, job_id):
        return self.scheduler.get_job(job_id)

    async def remove_job(self, job_id, raises=False):
        try:
            self.scheduler.remove_job(job_id)
        except JobLookupError as e:
            if "No job by the id of" in str(e):
                self.log_service.error(
                    f"Tried to remove apscheduler job with ID '{job_id}' but "
                    f"it wasn't found. Ignoring but you should look this "
                    f"up since it should never happen.")
                if raises:
                    raise JobNotFound(job_id)
            else:
                raise

    async def remove_all_jobs(self):
        """
        Removes all scheduled jobs.
        """
        for job in self.scheduler.get_jobs():
            self.scheduler.remove_job(job.id)

    # Helpers

    async def _add_job(self, **kwargs):
        """
        Convenience method for adding a job.
        """
        self.scheduler.add_job(**kwargs)
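
A hypothetical usage sketch for the adapter above; the DSN, the exec function and the log service here are illustrative stand-ins, not taken from the original project:

import asyncio
import logging

async def run_task(**kwargs):
    # Placeholder exec_func; the adapter passes the job's kwargs through.
    logging.info("task fired with %s", kwargs)

async def main():
    adapter = ApSchedulerAdapter(
        dsn="sqlite:///jobs.sqlite",    # any SQLAlchemy URL
        exec_func=run_task,
        log_service=logging.getLogger("scheduler"),
    )
    await adapter.start()
    await adapter.add_cron_job(
        id="nightly-report",
        name="Nightly report",
        args=(),
        kwargs={"report": "daily"},
        crontab_expr=None,              # or e.g. "30 2 * * mon,fri"
        weekdays=["monday", "friday"],  # becomes day_of_week="MON,FRI"
        hour=2, minute=30, second=0,
        start_date=None, end_date=None,
        timezone="UTC",
    )

# asyncio.run(main())  # drive this from your service's event loop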
Example No. 14
class Scheduler:
    def __init__(self,
                 clan_tag: str,
                 db: DB,
                 web_refresher: Refresher,
                 bot: commands.bot = None) -> None:
        self.clan_tag: str = clan_tag
        self.scheduler = AsyncIOScheduler()
        self.db = db
        self.web_refresher = web_refresher
        self.bot = bot

        # Start the scheduler
        self.scheduler.start()

    def add_jobs(self) -> None:
        # track the war once per hour starting now-ish
        self.scheduler.add_job(
            self.war_tracking,
            "interval",
            next_run_time=pendulum.now("UTC").add(seconds=5),
            minutes=60,
            timezone="UTC",
        )

        self.scheduler.add_job(
            Tracker.track_war_battles,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=3),
            minutes=30,
            timezone="UTC",
            id="track_war_battles",
            name="Track war battles",
        )

        self.scheduler.add_job(
            Tracker.track_clan,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=2),
            minutes=30,
            jitter=350,
            timezone="UTC",
            id="track_clan",
            name="Track clan data",
        )

        self.scheduler.add_job(
            Tracker.track_war_logs,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=60),
            hours=4,
            jitter=350,
            timezone="UTC",
            id="track_war_logs",
            name="Track war logs",
        )

        self.scheduler.add_job(
            self.inactive_members,
            "interval",
            args=[self.clan_tag],
            next_run_time=pendulum.now("UTC").end_of("week").add(minutes=5),
            weeks=1,
            timezone="UTC",
            id="inactives",
            name="Inactive players",
        )

        self.scheduler.add_job(
            self.top_donators,
            "interval",
            args=[self.clan_tag],
            next_run_time=pendulum.now("UTC")._end_of_week().subtract(
                minutes=1),
            weeks=1,
            timezone="UTC",
            id="top_donators",
            name="Top donators",
        )

        self.scheduler.add_job(
            self.web_refresher.run_no_spin,
            "interval",
            next_run_time=pendulum.now("UTC").add(minutes=3),
            hours=6,
            jitter=350,
            timezone="UTC",
            id="web_refresh",
            name="Refresh clan member battles on http://royaleapi.com",
        )

    async def war_tracking(self):
        current_war = await Tracker.track_war(self.clan_tag, self.db)

        if current_war["state"] == "warDay" or current_war[
                "state"] == "collectionDay":
            self.schedule_end_of_war_jobs(current_war)

    def schedule_end_of_war_jobs(self, war):
        war_end_timestamp = war.get("collectionEndTime", None) or war.get(
            "warEndTime", None)
        war_end_date = pendulum.from_timestamp(war_end_timestamp, tz="UTC")

        # War tracking jobs to collected war data
        t_minus_jobs = [1, 5, 10, 20, 30]
        for t_minus in t_minus_jobs:
            schedule_time = war_end_date.subtract(minutes=t_minus)
            job_id = self.get_job_id(
                war, "Tminus{}-{:.0f}".format(t_minus,
                                              schedule_time.timestamp()))
            self.schedule_war_job(
                self.war_tracking,
                schedule_time,
                job_id,
                "End of war job for {}".format(job_id),
            )

        # Job to fetch war logs if this is a War Day
        if war["state"] == "warDay":
            war_log_time = war_end_date.add(seconds=10)
            job_id = self.get_job_id(war, "war_logs")
            self.schedule_war_job(
                self.war_logs,
                war_log_time,
                job_id,
                "War logs job for {}".format(job_id),
                [war["clan"]["tag"]],
            )

        # Job to have bot print war summary
        summary_time = war_end_date.add(seconds=15)
        job_id = self.get_job_id(war, "war_summary")
        self.schedule_war_job(
            self.war_summary,
            summary_time,
            job_id,
            "War summary job for {}".format(job_id),
            [war["clan"]["tag"]],
        )

        log.debug(self.scheduler.get_jobs())

    def schedule_war_job(self, func, date, job_id, name=None, args=None):
        if not self.scheduler.get_job(job_id):
            self.scheduler.add_job(func,
                                   "date",
                                   id=job_id,
                                   name=name or job_id,
                                   run_date=date,
                                   args=args)

    async def war_logs(self, clan_tag):
        await Tracker.track_war_logs(clan_tag, self.db)

    async def war_summary(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").war_summary_auto(clan_tag)

    async def inactive_members(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").inactives_auto(clan_tag)

    async def top_donators(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").top_donators_auto(clan_tag)

    @staticmethod
    def get_job_id(war, suffix):
        end_time = war.get("collectionEndTime", None) or war.get(
            "warEndTime", None)
        return "{}-{}-{}-{}".format(war["clan"]["tag"], war["state"], end_time,
                                    suffix)
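
The deterministic ids produced by get_job_id make the end-of-war scheduling idempotent: schedule_war_job only adds a job when no job with that id exists yet, so repeated war_tracking runs cannot stack duplicate jobs. The same pattern in isolation (a sketch, not from the original source):

from apscheduler.schedulers.asyncio import AsyncIOScheduler

def schedule_once(scheduler: AsyncIOScheduler, func, run_date, job_id: str):
    """Add a one-shot date job only if this id is not scheduled yet."""
    if scheduler.get_job(job_id) is None:
        scheduler.add_job(func, "date", id=job_id, run_date=run_date)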
Example No. 15
class JobQueue:
    """This class allows you to periodically perform tasks with the bot. It is a convenience
    wrapper for the APScheduler library.

    Attributes:
        scheduler (:class:`apscheduler.schedulers.asyncio.AsyncIOScheduler`): The scheduler.

            .. versionchanged:: 20.0
                Uses :class:`~apscheduler.schedulers.asyncio.AsyncIOScheduler` instead of
                :class:`~apscheduler.schedulers.background.BackgroundScheduler`


    """

    __slots__ = ("_application", "scheduler", "_executor")
    _CRON_MAPPING = ("sun", "mon", "tue", "wed", "thu", "fri", "sat")

    def __init__(self) -> None:
        self._application: "Optional[weakref.ReferenceType[Application]]" = None
        self._executor = AsyncIOExecutor()
        self.scheduler = AsyncIOScheduler(timezone=pytz.utc, executors={"default": self._executor})

    def _tz_now(self) -> datetime.datetime:
        return datetime.datetime.now(self.scheduler.timezone)

    @overload
    def _parse_time_input(self, time: None, shift_day: bool = False) -> None:
        ...

    @overload
    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime, datetime.time],
        shift_day: bool = False,
    ) -> datetime.datetime:
        ...

    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime, datetime.time, None],
        shift_day: bool = False,
    ) -> Optional[datetime.datetime]:
        if time is None:
            return None
        if isinstance(time, (int, float)):
            return self._tz_now() + datetime.timedelta(seconds=time)
        if isinstance(time, datetime.timedelta):
            return self._tz_now() + time
        if isinstance(time, datetime.time):
            date_time = datetime.datetime.combine(
                datetime.datetime.now(tz=time.tzinfo or self.scheduler.timezone).date(), time
            )
            if date_time.tzinfo is None:
                date_time = self.scheduler.timezone.localize(date_time)
            if shift_day and date_time <= datetime.datetime.now(pytz.utc):
                date_time += datetime.timedelta(days=1)
            return date_time
        return time

    def set_application(self, application: "Application") -> None:
        """Set the application to be used by this JobQueue.

        Args:
            application (:class:`telegram.ext.Application`): The application.

        """
        self._application = weakref.ref(application)
        if isinstance(application.bot, ExtBot) and application.bot.defaults:
            self.scheduler.configure(
                timezone=application.bot.defaults.tzinfo or pytz.utc,
                executors={"default": self._executor},
            )

    @property
    def application(self) -> "Application":
        """The application this JobQueue is associated with."""
        if self._application is None:
            raise RuntimeError("No application was set for this JobQueue.")
        application = self._application()
        if application is not None:
            return application
        raise RuntimeError("The application instance is no longer alive.")

    def run_once(
        self,
        callback: JobCallback,
        when: Union[float, datetime.timedelta, datetime.datetime, datetime.time],
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` instance that runs once and adds it to the queue.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            when (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                         \
                  :obj:`datetime.datetime` | :obj:`datetime.time`):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (:attr:`datetime.datetime.tzinfo`) is
                  :obj:`None`, the default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (:attr:`datetime.time.tzinfo`) is :obj:`None`, the
                  default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.

            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)
        date_time = self._parse_time_input(when, shift_day=True)

        j = self.scheduler.add_job(
            job.run,
            name=name,
            trigger="date",
            run_date=date_time,
            args=(self.application,),
            timezone=date_time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_repeating(
        self,
        callback: JobCallback,
        interval: Union[float, datetime.timedelta],
        first: Union[float, datetime.timedelta, datetime.datetime, datetime.time] = None,
        last: Union[float, datetime.timedelta, datetime.datetime, datetime.time] = None,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` instance that runs at specified intervals and adds it to the
        queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            interval (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta`): The interval in which
                the job will run. If it is an :obj:`int` or a :obj:`float`, it will be interpreted
                as seconds.
            first (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (:attr:`datetime.datetime.tzinfo`) is
                  :obj:`None`, the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (:attr:`datetime.time.tzinfo`) is :obj:`None`, the
                  default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.

                Defaults to :paramref:`interval`
            last (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Latest possible time for the job to run. This parameter will be interpreted
                depending on its type. See :paramref:`first` for details.

                If :paramref:`last` is :obj:`datetime.datetime` or :obj:`datetime.time` type
                and ``last.tzinfo`` is :obj:`None`, the default timezone of the bot will be
                assumed, which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.

                Defaults to :obj:`None`.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        dt_first = self._parse_time_input(first)
        dt_last = self._parse_time_input(last)

        if dt_last and dt_first and dt_last < dt_first:
            raise ValueError("'last' must not be before 'first'!")

        if isinstance(interval, datetime.timedelta):
            interval = interval.total_seconds()

        j = self.scheduler.add_job(
            job.run,
            trigger="interval",
            args=(self.application,),
            start_date=dt_first,
            end_date=dt_last,
            seconds=interval,
            name=name,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_monthly(
        self,
        callback: JobCallback,
        when: datetime.time,
        day: int,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` that runs on a monthly basis and adds it to the queue.

        .. versionchanged:: 20.0
            The ``day_is_strict`` argument was removed. Instead one can now pass ``-1`` to the
            :paramref:`day` parameter to have the job run on the last day of the month.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            when (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``when.tzinfo``) is :obj:`None`, the default timezone of the bot will be used,
                which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.
            day (:obj:`int`): Defines the day of the month whereby the job would run. It should
                be within the range of ``1`` and ``31``, inclusive. If a month has fewer days than
                this number, the job will not run in this month. Passing ``-1`` leads to the job
                running on the last day of the month.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(
            job.run,
            trigger="cron",
            args=(self.application,),
            name=name,
            day="last" if day == -1 else day,
            hour=when.hour,
            minute=when.minute,
            second=when.second,
            timezone=when.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )
        job.job = j
        return job

    def run_daily(
        self,
        callback: JobCallback,
        time: datetime.time,
        days: Tuple[int, ...] = tuple(range(7)),
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` that runs on a daily basis and adds it to the queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            time (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (:obj:`datetime.time.tzinfo`) is :obj:`None`, the default timezone of the bot will
                be used, which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.
            days (Tuple[:obj:`int`], optional): Defines on which days of the week the job should
                run (where ``0-6`` correspond to sunday - saturday). By default, the job will run
                every day.

                .. versionchanged:: 20.0
                    Changed day of the week mapping of 0-6 from monday-sunday to sunday-saturday.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        # TODO: After v20.0, we should remove this warning.
        warn(
            "Prior to v20.0 the `days` parameter was not aligned to that of cron's weekday scheme."
            "We recommend double checking if the passed value is correct.",
            stacklevel=2,
        )
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(
            job.run,
            name=name,
            args=(self.application,),
            trigger="cron",
            day_of_week=",".join([self._CRON_MAPPING[d] for d in days]),
            hour=time.hour,
            minute=time.minute,
            second=time.second,
            timezone=time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_custom(
        self,
        callback: JobCallback,
        job_kwargs: JSONDict,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
    ) -> "Job":
        """Creates a new custom defined :class:`Job`.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            job_kwargs (:obj:`dict`): Arbitrary keyword arguments. Used as arguments for
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job`.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(job.run, args=(self.application,), name=name, **job_kwargs)

        job.job = j
        return job

    async def start(self) -> None:
        # this method is async just in case future versions need it
        """Starts the :class:`~telegram.ext.JobQueue`."""
        if not self.scheduler.running:
            self.scheduler.start()

    async def stop(self, wait: bool = True) -> None:
        """Shuts down the :class:`~telegram.ext.JobQueue`.

        Args:
            wait (:obj:`bool`, optional): Whether to wait until all currently running jobs
                have finished. Defaults to :obj:`True`.

        """
        # the interface methods of AsyncIOExecutor are currently not really asyncio-compatible
        # so we apply some small tweaks here to try and smoothen the integration into PTB
        # TODO: When APS 4.0 hits, we should be able to remove the tweaks
        if wait:
            # Unfortunately AsyncIOExecutor just cancels them all ...
            await asyncio.gather(
                *self._executor._pending_futures,  # pylint: disable=protected-access
                return_exceptions=True,
            )
        if self.scheduler.running:
            self.scheduler.shutdown(wait=wait)
            # scheduler.shutdown schedules a task in the event loop but immediately returns
            # so give it a tiny bit of time to actually shut down.
            await asyncio.sleep(0.01)

    def jobs(self) -> Tuple["Job", ...]:
        """Returns a tuple of all *scheduled* jobs that are currently in the :class:`JobQueue`."""
        return tuple(
            Job._from_aps_job(job)  # pylint: disable=protected-access
            for job in self.scheduler.get_jobs()
        )

    def get_jobs_by_name(self, name: str) -> Tuple["Job", ...]:
        """Returns a tuple of all *pending/scheduled* jobs with the given name that are currently
        in the :class:`JobQueue`.
        """
        return tuple(job for job in self.jobs() if job.name == name)
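
A sketch of how this JobQueue is typically driven from a python-telegram-bot v20 application; the token, chat id and callback body are placeholders:

import datetime
import pytz
from telegram.ext import Application, ContextTypes

async def morning_ping(context: ContextTypes.DEFAULT_TYPE) -> None:
    await context.bot.send_message(chat_id=123456, text="Good morning!")

app = Application.builder().token("TOKEN").build()
# days uses the sunday-first mapping from _CRON_MAPPING (0=sun ... 6=sat),
# so (1, 2, 3, 4, 5) means Monday through Friday.
app.job_queue.run_daily(
    morning_ping,
    time=datetime.time(hour=8, tzinfo=pytz.utc),
    days=(1, 2, 3, 4, 5),
)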
Example No. 16
class AlamoScheduler(object):
    message_queue = None
    loop = handler = None

    def __init__(self, loop=None):
        kw = dict()
        if loop:
            kw['event_loop'] = loop

        self.scheduler = AsyncIOScheduler(**kw)

    def setup(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
        self.loop = loop
        self.message_queue = ZeroMQQueue(
            settings.ZERO_MQ_HOST,
            settings.ZERO_MQ_PORT
        )
        self.message_queue.connect()
        self.scheduler.add_listener(
            self.event_listener,
            EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES
        )

    @aiostats.increment()
    def _schedule_check(self, check):
        """Schedule check."""
        logger.info(
            'Check `%s:%s` scheduled!', check['uuid'], check['name']
        )

        check['scheduled_time'] = datetime.now(tz=pytz_utc).isoformat()
        self.message_queue.send(check)

    def remove_job(self, job_id):
        """Remove job."""
        try:
            logger.info('Removing job for check id=`%s`', job_id)
            self.scheduler.remove_job(str(job_id))
        except JobLookupError:
            pass

    def schedule_check(self, check):
        """Schedule check with proper interval based on `frequency`.

        :param dict check: Check definition
        """
        try:
            frequency = check['fields']['frequency'] = int(
                check['fields']['frequency']
            )
            logger.info(
                'Scheduling check `%s` with id `%s` and interval `%s`',
                check['name'], check['id'], frequency
            )
            jitter = random.randint(0, frequency)
            first_run = datetime.now() + timedelta(seconds=jitter)
            kw = dict(
                seconds=frequency,
                id=str(check['uuid']),
                next_run_time=first_run,
                args=(check,)
            )
            self.schedule_job(self._schedule_check, **kw)

        except KeyError as e:
            logger.exception('Failed to schedule check: %s. Exception: %s',
                             check, e)

    def schedule_job(self, method, **kwargs):
        """Add new job to scheduler.

        :param method: reference to method that should be scheduled
        :param kwargs: additional kwargs passed to `add_job` method
        """
        try:
            self.scheduler.add_job(
                method, 'interval',
                misfire_grace_time=settings.JOBS_MISFIRE_GRACE_TIME,
                max_instances=settings.JOBS_MAX_INSTANCES,
                coalesce=settings.JOBS_COALESCE,
                **kwargs
            )
        except ConflictingIdError as e:
            logger.error(e)

    def event_listener(self, event):
        """React on events from scheduler.

        :param apscheduler.events.JobExecutionEvent event: job execution event
        """
        if event.code == EVENT_JOB_MISSED:
            aiostats.increment.incr('job.missed')
            logger.warning("Job %s scheduler for %s missed.", event.job_id,
                           event.scheduled_run_time)
        elif event.code == EVENT_JOB_ERROR:
            aiostats.increment.incr('job.error')
            logger.error("Job %s scheduled for %s failed. Exc: %s",
                         event.job_id,
                         event.scheduled_run_time,
                         event.exception)
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            aiostats.increment.incr('job.max_instances')
            logger.warning(
                'Job `%s` could not be submitted. '
                'Maximum number of running instances was reached.',
                event.job_id
            )

    @aiostats.increment()
    def get_jobs(self):
        return [job.id for job in self.scheduler.get_jobs()]

    async def checks(self, request=None):
        uuid = request.match_info.get('uuid', None)
        if uuid is None:
            jobs = self.get_jobs()
            return json_response(data=dict(count=len(jobs), results=jobs))
        job = self.scheduler.get_job(uuid)
        if job is None:
            return json_response(
                data={'detail': 'Check does not exist.'}, status=404
            )

        check, = job.args
        return json_response(data=check)

    @aiostats.timer()
    async def update(self, request=None):
        check = await request.json()
        check_uuid = check.get('uuid')
        check_id = check.get('id')

        message = dict(status='ok')

        if not check_id or not check_uuid:
            return json_response(status=400)

        if check_id % settings.SCHEDULER_COUNT != settings.SCHEDULER_NR:
            return json_response(data=message, status=202)

        job = self.scheduler.get_job(str(check_uuid))

        if job:
            scheduled_check, = job.args
            timestamp = scheduled_check.get('timestamp', 0)

            if timestamp > check['timestamp']:
                return json_response(data=message, status=202)
            message = dict(status='deleted')
            self.remove_job(check_uuid)

        if any([trigger['enabled'] for trigger in check['triggers']]):
            self.schedule_check(check)
            message = dict(status='scheduled')

        return json_response(data=message, status=202)

    def wait_and_kill(self, sig):
        logger.warning('Got `%s` signal. Preparing scheduler to exit ...', sig)
        self.scheduler.shutdown()
        self.loop.stop()

    def register_exit_signals(self):
        for sig in ['SIGQUIT', 'SIGINT', 'SIGTERM']:
            logger.info('Registering handler for `%s` signal '
                        'in current event loop ...', sig)
            self.loop.add_signal_handler(
                getattr(signal, sig),
                self.wait_and_kill, sig
            )

    def start(self, loop=None):
        """Start scheduler."""
        self.setup(loop=loop)
        self.register_exit_signals()
        self.scheduler.start()

        logger.info(
            'Press Ctrl+%s to exit.', 'Break' if os.name == 'nt' else 'C'
        )
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Scheduler was stopped!')
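
The jitter in schedule_check above is worth calling out: delaying each check's first run by a random offset inside one full interval spreads many same-frequency jobs evenly instead of firing them all at once. The same idea in isolation (a sketch, not from the original source):

import random
from datetime import datetime, timedelta

def jittered_first_run(frequency_seconds: int) -> datetime:
    # A uniform 0..frequency offset de-synchronizes jobs sharing an interval.
    offset = random.randint(0, frequency_seconds)
    return datetime.now() + timedelta(seconds=offset)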
Example No. 17
if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    jobs = get_jobs()
    for j in jobs:
        job_args = [j['job_name'], j['job_command']]
        scheduler.add_job(func=build_job, args=job_args, name=j['job_name'],
                          misfire_grace_time=3600,
                          trigger=IntervalTrigger(
                              weeks=j['job_trigger'].get("weeks", 0),
                              days=j['job_trigger'].get("days", 0),
                              hours=j['job_trigger'].get("hours", 0),
                              minutes=j['job_trigger'].get("minutes", 0),
                              seconds=j['job_trigger'].get("seconds", 0),
                              start_date=j['job_trigger'].get("start_date", None),
                              end_date=j['job_trigger'].get("end_date", None),
                              timezone=j['job_trigger'].get("timezone", None)
                          ))
    pending_jobs = scheduler.get_jobs()
    print("Job_Num: {}, Job List:".format(len(pending_jobs)))
    for x in pending_jobs:
        print("Job_name:{}, Job_command:{}, job_trigger:{}".format(x.name, x.args, x.trigger))
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
    except Exception as e:
        print(e)
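
The snippet assumes get_jobs() returns records with job_name, job_command and a job_trigger mapping of IntervalTrigger keyword arguments. One plausible record shape (illustrative only; the real definition is not shown in the snippet):

example_job = {
    "job_name": "cleanup-temp",
    "job_command": "find /tmp -mtime +7 -delete",
    "job_trigger": {
        "hours": 6,                          # any IntervalTrigger kwarg
        "start_date": "2023-01-01 00:00:00",
        "timezone": "UTC",
    },
}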

Example No. 18
class Scheduler(commands.Cog):
    def __init__(self, bot):
        self.bot = bot

        self.scheduler = AsyncIOScheduler(timezone=utc)
        self.scheduler.start()

        # Connect to Google Drive API
        credentials = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT, scopes=SCOPES)
        self.drive_service = discovery.build('drive',
                                             'v3',
                                             credentials=credentials)

        self.add_random_pic_job()

    @commands.group()
    @commands.is_owner()
    async def schedule(self, context):
        if context.invoked_subcommand is None:
            await context.send('Invalid schedule command passed...')

    @schedule.command(name='running',
                      description='Lists all currently running jobs.',
                      pass_context=True)
    async def running(self, context):
        jobs = self.scheduler.get_jobs()

        if not jobs:
            await context.send('There are no currently running jobs.')
            return

        message = '```'
        message += 'JOB ID | NEXT RUN TIME\n\n'
        for job in jobs:
            message += f'{job.id} | {job.next_run_time}\n'
        message += '```'

        await context.send(message)

    @schedule.command(
        name='add',
        description='Adds the job of the given id to the scheduler.')
    async def add(self, context, job_id: str):
        if job_id == RANDOM_PIC_JOB_ID:
            self.add_random_pic_job()
            await context.send(
                f'Successfully added job with id {RANDOM_PIC_JOB_ID}.')
        else:
            await context.send('Job id not recognized!')

    @schedule.command(
        name='remove',
        description='Removes the job of the given id from the scheduler.')
    async def remove(self, context, job_id: str):
        jobs = self.scheduler.get_jobs()

        for job in jobs:
            if job.id == job_id:
                self.scheduler.remove_job(job_id)
                await context.send(
                    f'Successfully removed job with id {job.id}.')
                return

        await context.send(f'There is no job with id {job_id}!')

    def add_random_pic_job(self):
        now = datetime.datetime.now(utc).time()
        midnight = datetime.time(hour=0)
        dawn = datetime.time(hour=6)
        midday = datetime.time(hour=12)
        dusk = datetime.time(hour=18)

        start_time = None

        if midnight < now < dawn:
            start_time = datetime.datetime.now(utc).replace(hour=dawn.hour,
                                                            minute=dawn.minute,
                                                            second=dawn.second)
        elif dawn < now < midday:
            start_time = datetime.datetime.now(utc).replace(
                hour=midday.hour, minute=midday.minute, second=midday.second)
        elif midday < now < dusk:
            start_time = datetime.datetime.now(utc).replace(hour=dusk.hour,
                                                            minute=dusk.minute,
                                                            second=dusk.second)
        elif dusk < now:
            start_time = (datetime.datetime.now(utc) +
                          datetime.timedelta(days=1)).replace(
                              hour=midnight.hour,
                              minute=midnight.minute,
                              second=midnight.second)

        self.scheduler.add_job(self.post_random_pic,
                               'interval',
                               hours=6,
                               next_run_time=start_time,
                               id=RANDOM_PIC_JOB_ID)

    async def post_random_pic(self):
        random_q = ''
        rnd.seed()

        random_int = rnd.randint(1, 60)
        if 1 <= random_int <= 10:
            random_q = GROUP_PIC_Q
        elif 11 <= random_int <= 20:
            random_q = SOLJI_PIC_Q
        elif 21 <= random_int <= 30:
            random_q = LE_PIC_Q
        elif 31 <= random_int <= 40:
            random_q = HANI_PIC_Q
        elif 41 <= random_int <= 50:
            random_q = HYELIN_PIC_Q
        else:
            random_q = JEONGHWA_PIC_Q

        response = {}
        page_token = None
        rnd.seed()

        while True:
            try:
                param = {}
                param['q'] = random_q
                param['spaces'] = 'drive'
                param['fields'] = 'nextPageToken, files(id, name)'
                param['pageSize'] = 100
                if page_token:
                    param['pageToken'] = page_token

                response = self.drive_service.files().list(**param).execute()
                page_token = response.get('nextPageToken')

                if not page_token or rnd.randint(0, 100) >= 30:
                    break

            except Exception as e:
                print("An error occured: {}".format(e))
                return

        pics = response.get('files', [])
        if not pics:
            # No files matched the query; nothing to post.
            return
        random_pic = rnd.choice(pics)

        request = self.drive_service.files().get_media(fileId=random_pic['id'])
        file_name = random_pic['name']
        fh = io.FileIO(random_pic['name'], 'wb')
        downloader = MediaIoBaseDownload(fh, request)
        done = False
        while not done:
            status, done = downloader.next_chunk()

        exid_channel = self.bot.get_guild(EXID_GUILD_ID).get_channel(
            EXID_CHAN_ID)

        await exid_channel.send(file=discord.File(file_name))
        await asyncio.sleep(5)
        fh.close()
        os.remove(file_name)
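A minimal sketch of how a scheduler like the one in the cog above might be wired up on its own; the stand-in post_random_pic coroutine, the 'random-pic-post' id, and the bare asyncio loop are illustrative assumptions, not part of the original example:

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

async def post_random_pic():
    # Stand-in for the cog coroutine above.
    print('posting a random picture...')

async def main():
    scheduler = AsyncIOScheduler()
    # Same shape as add_random_pic_job: an interval trigger every six hours.
    scheduler.add_job(post_random_pic, 'interval', hours=6,
                      id='random-pic-post')
    scheduler.start()  # requires a running asyncio event loop
    await asyncio.Event().wait()  # keep the loop alive, as the bot would

asyncio.run(main())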
Esempio n. 19
0
t = time()  # start timestamp for the startup timing logged below

main_admin = config.admin
channel_id = config.channel
bot = Bot(token=config.token)
storage = MemoryStorage()
dp = Dispatcher(bot, storage=storage)
dp.middleware.setup(LoggingMiddleware())
logging.info('BOT STARTED IN %f', time() - t)

t = time()
scheduler = AsyncIOScheduler(
    {
        'apscheduler.jobstores.default': {
            'type': 'sqlalchemy',
            'url': 'sqlite:///jobs.sqlite'
        }
    },
    job_defaults={'misfire_grace_time': 5 * 60})
scheduler.start()
jobs = scheduler.get_jobs()
names = [job.name for job in jobs]
if 'update_sales' in names:
    logging.info('TASK ALREADY CREATED')
else:
    scheduler.add_job(update_sales, 'cron', hour=3)

logging.info('SCHEDULER STARTED IN %f', time() - t)
asyncio.get_event_loop().run_until_complete(get_admins())
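The name scan above guards against scheduling update_sales twice across restarts with a persistent job store. APScheduler can express the same dedupe directly by giving the job a stable id; a minimal sketch, assuming the same scheduler object (the id string is illustrative):

scheduler.add_job(update_sales, 'cron', hour=3,
                  id='update_sales', replace_existing=True)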
Esempio n. 20
0
class AScheduler(object):
    def __init__(self,
                 jobstores,
                 job_defaults,
                 JOB_ID,
                 LOCAL_TZ_OBJ,
                 loop,
                 remove_old_jobs=True):
        self.JOB_ID = JOB_ID

        self.timezone = LOCAL_TZ_OBJ

        self.scheduler = AsyncIOScheduler(jobstores=jobstores,
                                          job_defaults=job_defaults,
                                          timezone=LOCAL_TZ_OBJ,
                                          event_loop=loop)

        # Caveat: APScheduler 3.x only opens its job stores on start(), so on
        # this still-stopped scheduler remove_all_jobs()/get_jobs() act on
        # pending jobs only; purge a persistent store after start() instead.
        if remove_old_jobs:
            self.scheduler.remove_all_jobs()
            self.job_ids = set()
        else:
            self.job_ids = {job.id for job in self.scheduler.get_jobs()}

    def start(self, logging_running_job_state=False):
        self.scheduler.start()
        logging.info(self.scheduler.get_jobs())
        if not logging_running_job_state:
            # noinspection PyProtectedMember
            self.scheduler._executors['default']._logger.addFilter(
                NoRunningFilter())

    def add_cron_job(self,
                     func,
                     func_kwargs,
                     year=None,
                     month=None,
                     day=None,
                     week=None,
                     day_of_week=None,
                     hour=None,
                     minute=None,
                     second=None):
        """
        If you schedule jobs in a persistent job store during your application’s initialization,
        you MUST define an explicit ID for the job and use replace_existing=True
        or you will get a new copy of the job every time your application restarts!

        :param func_kwargs:
        :param func: function to execute cron
        :param year (int|str) – 4-digit year
        :param month (int|str) – month (1-12)
        :param day (int|str) – day of the (1-31)
        :param week (int|str) – ISO week (1-53)
        :param day_of_week (int|str) – number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
        :param hour (int|str) – hour (0-23)
        :param minute (int|str) – minute (0-59)
        :param second (int|str) – second (0-59)

        :return:
        """
        job_id = self.JOB_ID.format(func=func.__name__)
        job = self.scheduler.add_job(func=func,
                                     kwargs=func_kwargs,
                                     id=job_id,
                                     replace_existing=True,
                                     trigger='cron',
                                     year=year,
                                     month=month,
                                     day=day,
                                     week=week,
                                     day_of_week=day_of_week,
                                     hour=hour,
                                     minute=minute,
                                     second=second,
                                     timezone=self.timezone)
        self.job_ids.add(job.id)
        return job

    def stop(self):
        # self.scheduler.remove_all_jobs()

        # shutdown() also closes the MongoDB connection used by the job store
        self.scheduler.shutdown()
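A usage sketch for the class above; the MongoDB job store, UTC timezone, job-id template, and nightly_report function are assumptions for illustration:

import asyncio
import pytz
from apscheduler.jobstores.mongodb import MongoDBJobStore

def nightly_report(label):
    print(f'running {label}')

loop = asyncio.get_event_loop()
sched = AScheduler(
    jobstores={'default': MongoDBJobStore(host='localhost', port=27017)},
    job_defaults={'misfire_grace_time': 60},
    JOB_ID='cron-{func}',  # add_cron_job fills {func} with the function name
    LOCAL_TZ_OBJ=pytz.timezone('UTC'),
    loop=loop)
sched.add_cron_job(nightly_report, {'label': 'nightly'}, hour=0, minute=0)
# True skips the NoRunningFilter branch, which this snippet does not define.
sched.start(logging_running_job_state=True)
loop.run_forever()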