Example #1
def update_picker_rotation(scheduler: AsyncIOScheduler,
                           event: JobExecutionEvent) -> None:
    """
    When a job finishes, we update its `previous_user_picks`
    parameter so that next time it runs, it will be able to
    pick users that were never picked.
    """
    job = scheduler.get_job(event.job_id)
    new_kwargs = deepcopy(job.kwargs)
    new_kwargs["previous_user_picks"] = event.retval
    job.modify(kwargs=new_kwargs)
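A sketch of how this listener might be wired up, assuming the picker job returns its updated pick history; EVENT_JOB_EXECUTED and add_listener are real APScheduler API, the wiring itself is illustrative.

from copy import deepcopy
from functools import partial

from apscheduler.events import EVENT_JOB_EXECUTED
from apscheduler.schedulers.asyncio import AsyncIOScheduler

scheduler = AsyncIOScheduler()
# Only EVENT_JOB_EXECUTED carries a retval, so listen for that event alone.
scheduler.add_listener(partial(update_picker_rotation, scheduler),
                       EVENT_JOB_EXECUTED)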
Example #2
class TimerJob:
    _scheduler = None

    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        self._scheduler.configure(timezone=utc)  # use UTC as the scheduler's timezone

    def init_timer(self):
        print("启动调度器...")
        self._scheduler.start()

    def close_timer(self):
        self._scheduler.shutdown(wait=True)
        print("关闭调度器...")

    def new_job(self, j_id: str, func: object, args: tuple, cron: str):
        """添加定时任务"""
        return self._scheduler.add_job(id=j_id,
                                       func=func,
                                       args=args,
                                       trigger=CronTrigger.from_crontab(cron))

    def delete_job(self, j_id: str):
        """删除定时任务"""
        return self._scheduler.remove_job(job_id=j_id)

    def stop_job(self, j_id: str):
        """暂停任务"""
        return self._scheduler.pause_job(job_id=j_id)

    def replay_job(self, j_id: str):
        """恢复任务"""
        return self._scheduler.resume_job(job_id=j_id)

    def modify_job(self, j_id: str, func: object, args: tuple, cron: str):
        """更新任务"""
        return self._scheduler.modify_job(
            job_id=j_id,
            func=func,
            args=args,
            trigger=CronTrigger.from_crontab(cron))

    def get_job(self, j_id: str):
        """获取定时任务信息"""
        return self._scheduler.get_job(job_id=j_id)

    def get_all(self):
        """所有任务"""
        self._scheduler.get_jobs()
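A minimal usage sketch for TimerJob, assuming the usual imports (AsyncIOScheduler, CronTrigger, pytz.utc) and an async job function; the crontab string and job id are placeholders.

import asyncio

async def ping(name: str):
    print(f"ping {name}")

timer = TimerJob()
timer.init_timer()
timer.new_job(j_id="ping-job", func=ping, args=("demo",),
              cron="*/5 * * * *")  # every five minutes
asyncio.get_event_loop().run_forever()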
Example #3
class Scheduler_ex():
    def __init__(self, loop):
        self.loop = loop
        self.scheduler = AsyncIOScheduler({
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '100'
            }
        })
        self.scheduler.start()

    def afs(self, func, params: dict, call=None):
        def result(res):
            if call:
                call(res.result())

        asyncio.ensure_future(func(**params),
                              loop=self.loop).add_done_callback(result)

    def add_job(self, hours, minutes, seconds, args, id):
        self.scheduler.add_job(self.afs,
                               trigger='interval',
                               hours=hours,
                               minutes=minutes,
                               seconds=seconds,
                               args=args,
                               id=str(id))

    def add_job_for_pins(self, hour: int, minute: int, args: list, id=None):
        self.scheduler.add_job(self.afs,
                               trigger='cron',
                               hour=hour,
                               minute=minute,
                               args=args,
                               id=id)

    def remove_job(self, id):
        self.scheduler.remove_job(str(id))

    def get_job(self, id):
        res = self.scheduler.get_job(str(id))
        return res
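Because the scheduled target is afs itself, the args tuple passed to add_job must supply afs's own parameters: the coroutine function, its keyword dict, and an optional result callback. A hedged sketch:

import asyncio

async def fetch(user_id: int):
    return user_id * 2

loop = asyncio.get_event_loop()
ex = Scheduler_ex(loop)
# Run fetch(user_id=21) every 30 seconds and print its result.
ex.add_job(hours=0, minutes=0, seconds=30,
           args=(fetch, {"user_id": 21}, print),
           id="fetch-job")
loop.run_forever()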
Example #4
class Scheduler(abstract.Scheduler):
    apscheduler: AsyncIOScheduler

    def __init__(self) -> None:
        super().__init__()
        self.apscheduler = AsyncIOScheduler()
        self.apscheduler.start()
        logger.info("Started scheduler")

    def add_scheduled_feeding(self, feeding_schedule: abstract.Schedule,
                              feeding_callback: Callable) -> time:
        kwargs = {
            "trigger": "cron",
            "id": feeding_schedule.get_id(),
            "name": "Scheduled Feeding",
            "misfire_grace_time": 3600,
            "coalesce": True,
            "max_instances": 1,
        }
        cron_args = feeding_schedule.get_cron_args()
        kwargs.update(cron_args)
        job = self.apscheduler.add_job(feeding_callback, **kwargs)
        logger.info("Added scheduled feeding: {}", job)
        return job.next_run_time

    def remove_scheduled_feeding(
            self, feeding_schedule: abstract.Schedule) -> Optional[time]:
        job = self.apscheduler.get_job(feeding_schedule.get_id())
        logger.info("Removing scheduled job: {}", job)
        if job:
            t = job.next_run_time
            job.remove()
            return t
        return None

    def list_scheduled_feedings(self) -> List[Tuple[str, time]]:
        return sorted(
            [(job.id, job.next_run_time)
             for job in self.apscheduler.get_jobs()],
            key=lambda x: x[1],
        )
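abstract.Schedule is project code; a hypothetical stand-in only needs get_id() and get_cron_args() returning CronTrigger keyword fields:

class DailyFeeding:  # hypothetical implementation of abstract.Schedule
    def get_id(self) -> str:
        return "feeding-0800"

    def get_cron_args(self) -> dict:
        # Merged into the add_job kwargs above: fire every day at 08:00.
        return {"hour": 8, "minute": 0}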
Example #5
class Reminder(commands.Cog):
    """Reminder messages"""
    def __init__(self, bot, db, eos):
        self.bot = bot
        self.db = db
        self.eos = eos
        self.scheduler = AsyncIOScheduler()
        self.latest_proposal_id = None
        self.latest_cycle_id = None

    async def notify_vote_duration(self):
        now = arrow.utcnow()
        config = self.eos.get_config()
        cycle = self.eos.get_cycle(config['current_cycle'])
        started_at = arrow.get(cycle['start_time'])

        vote_duration = arrow.get(started_at.timestamp() +
                                  config['cycle_voting_duration_sec'])
        vote_duration_str = vote_duration.humanize(
            now, granularity=["day", "hour", "minute"])
        vote_duration_dt = vote_duration.format("D MMMM YYYY HH:mm:ss ZZZ")

        channel = self.bot.get_channel(CHANNEL_IDS['DISCORD_DAO_SPAM_CHANNEL'])

        await channel.send(
            f"The current vote duration is almost over! **VOTE** while you still can on https://dao.effect.network/proposals\nThe vote duration ends at {vote_duration_dt} (**{vote_duration_str}**)"
        )

    async def check_new_proposals(self):
        logger.info(
            'Latest proposal id known: {0}. Checking for new proposals...'.
            format(self.latest_proposal_id))

        while True:
            proposal = self.eos.get_proposal(id=self.latest_proposal_id + 1,
                                             ipfs=False)
            if proposal:
                # NOTIFY
                channel = self.bot.get_channel(
                    CHANNEL_IDS['DISCORD_DAO_SPAM_CHANNEL'])
                await channel.send(
                    "A new proposal has been **made**! Click here https://dao.effect.network/proposals/{0} to see the new proposal."
                    .format(proposal[0]['id']))
                self.latest_proposal_id += 1
                logger.info(
                    'Found new proposal! id: {0}. Checking for new proposals...'
                    .format(proposal[0]['id']))
            else:
                logger.info('No new proposals found.')
                break

    async def check_new_cycle(self):
        logger.info(
            'Latest cycle id known: {0}. Checking for a new cycle...'.format(
                self.latest_cycle_id))

        while True:
            config = self.eos.get_config()
            cycle = self.eos.get_cycle(self.latest_cycle_id + 1)
            if cycle:
                # NOTIFY
                channel = self.bot.get_channel(
                    CHANNEL_IDS['DISCORD_DAO_SPAM_CHANNEL'])
                await channel.send(
                    "A new Cycle has **started**! Go to https://dao.effect.network/proposals to vote on proposals!"
                )

                self.latest_cycle_id += 1
                logger.info(
                    'Found new cycle! id: {0}. Checking for a new cycle...'.
                    format(cycle['id']))
            else:
                logger.info('No new cycle found.')
                break

    async def notify_dao_call(self):

        channel = self.bot.get_channel(CHANNEL_IDS['DISCORD_DAO_SPAM_CHANNEL'])
        await channel.send(
            f":warning:The weekly DAO CALL is starting:bangbang: Join us in the voice channel:warning:"
        )

    @commands.command(hidden=True)
    async def reschedule(self,
                         ctx,
                         trigger='cron | date | interval',
                         job_id="job_id",
                         *args):
        """Reschedule reminders to a different time."""
        func = None

        if not Admin._sender_is_effect_member(ctx):
            return

        job = self.scheduler.get_job(job_id=job_id)

        if not job:
            if job_id == "dao_call_notify": func = self.notify_dao_call
            elif job_id == "dao_vote_notify": func = self.notify_vote_duration
            elif job_id == "dao_new_proposals_notify":
                func = self.check_new_proposals
            elif job_id == "dao_new_cycle_notify":
                func = self.check_new_cycle
            else:
                return

        try:
            if trigger == 'cron':
                day_of_week, hour, minute = args

                # CronTrigger only accepts the first 3 chars of the weekdays.
                if len(day_of_week) < 3: return
                day_of_week = day_of_week[0:3]

                # create job when there is no job, else reschedule.
                if not job:
                    self.scheduler.add_job(func,
                                           trigger='cron',
                                           day_of_week=day_of_week,
                                           hour=hour,
                                           minute=minute,
                                           id=job_id)
                    return await ctx.send(
                        f"Job did not exist, created new one: **{job_id}**")

                else:
                    self.scheduler.reschedule_job(job_id,
                                                  trigger='cron',
                                                  day_of_week=day_of_week,
                                                  hour=hour,
                                                  minute=minute)

            elif trigger == 'date':
                run_date, = args

                if not job:
                    self.scheduler.add_job(func=func,
                                           trigger='date',
                                           run_date=run_date,
                                           id=job_id)
                    return await ctx.send(
                        f"Job did not exist, created new one: **{job_id}**")
                else:
                    self.scheduler.reschedule_job(job_id,
                                                  trigger='date',
                                                  run_date=run_date)

            elif trigger == 'interval':
                weeks, days, hours, minutes, seconds = args

                if not job:
                    self.scheduler.add_job(func=func,
                                           trigger='interval',
                                           weeks=weeks,
                                           days=days,
                                           hours=hours,
                                           minutes=minutes,
                                           seconds=seconds,
                                           id=job_id)
                    return await ctx.send(
                        f"Job did not exist, created new one: **{job_id}**")
                else:
                    self.scheduler.reschedule_job(job_id,
                                                  trigger='interval',
                                                  weeks=weeks,
                                                  days=days,
                                                  hours=hours,
                                                  minutes=minutes,
                                                  seconds=seconds)

        except Exception:  # EVENT_JOB_ERROR is an event mask, not an exception class
            return await ctx.send("something went wrong with rescheduling...")

        return await ctx.send("changed schedule for {0}.".format(job_id))

    @commands.Cog.listener()
    async def on_ready(self):

        config = self.eos.get_config()
        cycle = self.eos.get_cycle(config['current_cycle'])

        self.latest_cycle_id = int(cycle[0]['id'])
        self.latest_proposal_id = int(self.eos.get_latest_proposal()['id'])

        started_at = arrow.get(cycle[0]['start_time'])
        vote_duration = arrow.get(started_at.timestamp() +
                                  config['cycle_voting_duration_sec'])
        # set vote duration a day earlier.
        vote_duration = vote_duration.shift(days=-1)

        #starting the scheduler
        self.scheduler.start()

        # DAO call notification on discord.
        self.scheduler.add_job(self.notify_dao_call,
                               trigger='cron',
                               day_of_week='wed',
                               hour=15,
                               minute=0,
                               id="dao_call_notify")
        self.scheduler.add_job(self.notify_vote_duration,
                               'date',
                               run_date=vote_duration.datetime,
                               id="dao_vote_notify")
        self.scheduler.add_job(self.check_new_cycle,
                               trigger='interval',
                               hours=1,
                               id="dao_new_cycle_notify")
        self.scheduler.add_job(self.check_new_proposals,
                               trigger='interval',
                               hours=1,
                               id="dao_new_proposals_notify")
Example #6
File: fifo.py Project: glx33/Fox-V3
class FIFO(commands.Cog):
    """
    Simple Scheduling Cog

    Named after the simplest scheduling algorithm: First In First Out
    """
    def __init__(self, bot: Red):
        super().__init__()
        self.bot = bot
        self.config = Config.get_conf(self,
                                      identifier=70737079,
                                      force_registration=True)

        default_global = {"jobs": []}
        default_guild = {"tasks": {}}

        self.config.register_global(**default_global)
        self.config.register_guild(**default_guild)

        self.scheduler: Optional[AsyncIOScheduler] = None
        self.jobstore = None

        self.tz_cog = None

    async def red_delete_data_for_user(self, **kwargs):
        """Nothing to delete"""
        return

    def cog_unload(self):
        # self.scheduler.remove_all_jobs()
        if self.scheduler is not None:
            self.scheduler.shutdown()

    async def initialize(self):

        job_defaults = {
            # Multiple missed triggers within the grace time will only fire once
            "coalesce": True,
            # This is probably way too high, should likely only be one
            "max_instances": 5,
            # 15 seconds ain't much, but it's honest work
            "misfire_grace_time": 15,
            # Very important for persistent data
            "replace_existing": True,
        }

        # executors = {"default": AsyncIOExecutor()}

        # Default executor is already AsyncIOExecutor
        self.scheduler = AsyncIOScheduler(job_defaults=job_defaults,
                                          logger=schedule_log)

        from .redconfigjobstore import RedConfigJobStore  # Wait to import to prevent cyclic import

        self.jobstore = RedConfigJobStore(self.config, self.bot)
        await self.jobstore.load_from_config()
        self.scheduler.add_jobstore(self.jobstore, "default")

        self.scheduler.start()

    async def _check_parsable_command(self, ctx: commands.Context,
                                      command_to_parse: str):
        message: discord.Message = ctx.message

        message.content = ctx.prefix + command_to_parse
        message.author = ctx.author

        new_ctx: commands.Context = await self.bot.get_context(message)

        return new_ctx.valid

    async def _delete_task(self, task: Task):
        job: Union[Job, None] = await self._get_job(task)
        if job is not None:
            job.remove()

        await task.delete_self()

    async def _process_task(self, task: Task):
        # None of this is necessary, we have `replace_existing` already
        # job: Union[Job, None] = await self._get_job(task)
        # if job is not None:
        #     combined_trigger_ = await task.get_combined_trigger()
        #     if combined_trigger_ is None:
        #         job.remove()
        #     else:
        #         job.reschedule(combined_trigger_)
        #     return job
        return await self._add_job(task)

    async def _get_job(self, task: Task) -> Job:
        return self.scheduler.get_job(
            _assemble_job_id(task.name, task.guild_id))

    async def _add_job(self, task: Task):
        combined_trigger_ = await task.get_combined_trigger()
        if combined_trigger_ is None:
            return None

        return self.scheduler.add_job(
            _execute_task,
            kwargs=task.__getstate__(),
            id=_assemble_job_id(task.name, task.guild_id),
            trigger=combined_trigger_,
            name=task.name,
            replace_existing=True,
        )

    async def _resume_job(self, task: Task):
        job: Union[Job, None] = await self._get_job(task)
        if job is not None:
            job.resume()
        else:
            job = await self._process_task(task)
        return job

    async def _pause_job(self, task: Task):
        try:
            return self.scheduler.pause_job(
                job_id=_assemble_job_id(task.name, task.guild_id))
        except JobLookupError:
            return False

    async def _remove_job(self, task: Task):
        try:
            self.scheduler.remove_job(
                job_id=_assemble_job_id(task.name, task.guild_id))
        except JobLookupError:
            pass

    async def _get_tz(
            self, user: Union[discord.User,
                              discord.Member]) -> Union[None, tzinfo]:
        if self.tz_cog is None:
            self.tz_cog = self.bot.get_cog("Timezone")
            if self.tz_cog is None:
                self.tz_cog = False  # only try once to get the timezone cog

        if not self.tz_cog:
            return None
        try:
            usertime = await self.tz_cog.config.user(user).usertime()
        except AttributeError:
            return None

        if usertime:
            return await TimezoneConverter().convert(None, usertime)
        else:
            return None

    @checks.is_owner()
    @commands.guild_only()
    @commands.command()
    async def fifoclear(self, ctx: commands.Context):
        """Debug command to clear all current fifo data"""
        self.scheduler.remove_all_jobs()
        await self.config.guild(ctx.guild).tasks.clear()
        await self.config.jobs.clear()
        # await self.config.jobs_index.clear()
        await ctx.tick()

    @checks.is_owner()  # Will be reduced when I figure out permissions later
    @commands.guild_only()
    @commands.group()
    async def fifo(self, ctx: commands.Context):
        """
        Base command for handling scheduling of tasks
        """
        if ctx.invoked_subcommand is None:
            pass

    @fifo.command(name="wakeup")
    async def fifo_wakeup(self, ctx: commands.Context):
        """Debug command to fix missed executions.

        If you see a negative "Next run time" when adding a trigger, this may help resolve it.
        Check the logs when using this command.
        """

        self.scheduler.wakeup()
        await ctx.tick()

    @fifo.command(name="checktask", aliases=["checkjob", "check"])
    async def fifo_checktask(self, ctx: commands.Context, task_name: str):
        """Returns the next 10 scheduled executions of the task"""
        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        job = await self._get_job(task)
        if job is None:
            await ctx.maybe_send_embed("No job scheduled for this task")
            return
        now = datetime.now(job.next_run_time.tzinfo)

        times = [
            humanize_timedelta(timedelta=x - now)
            for x in itertools.islice(_get_run_times(job), 10)
        ]
        await ctx.maybe_send_embed("\n\n".join(times))

    @fifo.command(name="set")
    async def fifo_set(
        self,
        ctx: commands.Context,
        task_name: str,
        author_or_channel: Union[discord.Member, discord.TextChannel],
    ):
        """
        Sets a different author or in a different channel for execution of a task.
        """
        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        if isinstance(author_or_channel, discord.Member):
            if task.author_id == author_or_channel.id:
                await ctx.maybe_send_embed("Already executing as that member")
                return

            await task.set_author(author_or_channel)  # also saves
        elif isinstance(author_or_channel, discord.TextChannel):
            if task.channel_id == author_or_channel.id:
                await ctx.maybe_send_embed("Already executing in that channel")
                return

            await task.set_channel(author_or_channel)
        else:
            await ctx.maybe_send_embed("Unsupported result")
            return

        await ctx.tick()

    @fifo.command(name="resume")
    async def fifo_resume(self,
                          ctx: commands.Context,
                          task_name: Optional[str] = None):
        """
        Provide a task name to resume execution of a task.

        Otherwise resumes execution of all tasks on all guilds
        If the task isn't currently scheduled, will schedule it
        """
        if task_name is None:
            if self.scheduler.state == STATE_PAUSED:
                self.scheduler.resume()
                await ctx.maybe_send_embed(
                    "All task execution for all guilds has been resumed")
            else:
                await ctx.maybe_send_embed(
                    "Task execution is not paused, can't resume")
        else:
            task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
            await task.load_from_config()

            if task.data is None:
                await ctx.maybe_send_embed(
                    f"Task by the name of {task_name} is not found in this guild"
                )
                return

            if await self._resume_job(task):
                await ctx.maybe_send_embed(
                    f"Execution of {task_name=} has been resumed")
            else:
                await ctx.maybe_send_embed(f"Failed to resume {task_name=}")

    @fifo.command(name="pause")
    async def fifo_pause(self,
                         ctx: commands.Context,
                         task_name: Optional[str] = None):
        """
        Provide a task name to pause execution of a task

        Otherwise pauses execution of all tasks on all guilds
        """
        if task_name is None:
            if self.scheduler.state == STATE_RUNNING:
                self.scheduler.pause()
                await ctx.maybe_send_embed(
                    "All task execution for all guilds has been paused")
            else:
                await ctx.maybe_send_embed(
                    "Task execution is not running, can't pause")
        else:
            task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
            await task.load_from_config()

            if task.data is None:
                await ctx.maybe_send_embed(
                    f"Task by the name of {task_name} is not found in this guild"
                )
                return

            if await self._pause_job(task):
                await ctx.maybe_send_embed(
                    f"Execution of {task_name=} has been paused")
            else:
                await ctx.maybe_send_embed(f"Failed to pause {task_name=}")

    @fifo.command(name="details")
    async def fifo_details(self, ctx: commands.Context, task_name: str):
        """
        Provide all the details on the specified task name
        """
        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        embed = discord.Embed(title=f"Task: {task_name}")

        embed.add_field(name="Task command",
                        value=f"{ctx.prefix}{task.get_command_str()}",
                        inline=False)

        guild: discord.Guild = self.bot.get_guild(task.guild_id)

        if guild is not None:
            author: discord.Member = guild.get_member(task.author_id)
            channel: discord.TextChannel = guild.get_channel(task.channel_id)
            embed.add_field(name="Server", value=guild.name)
            if author is not None:
                embed.add_field(name="Author", value=author.mention)
            if channel is not None:
                embed.add_field(name="Channel", value=channel.mention)

        else:
            embed.add_field(name="Server",
                            value="Server not found",
                            inline=False)
        triggers, expired_triggers = await task.get_triggers()

        trigger_str = "\n".join(str(t) for t in triggers)
        expired_str = "\n".join(str(t) for t in expired_triggers)
        if trigger_str:
            embed.add_field(name="Triggers", value=trigger_str, inline=False)
        if expired_str:
            embed.add_field(name="Expired Triggers",
                            value=expired_str,
                            inline=False)

        job = await self._get_job(task)
        if job and job.next_run_time:
            embed.timestamp = job.next_run_time

        await ctx.send(embed=embed)

    @fifo.command(name="list")
    async def fifo_list(self, ctx: commands.Context, all_guilds: bool = False):
        """
        Lists all current tasks and their triggers.

        Do `[p]fifo list True` to see tasks from all guilds
        """
        if all_guilds:
            pass  # TODO: All guilds
        else:
            out = ""
            all_tasks = await self.config.guild(ctx.guild).tasks()
            for task_name, task_data in all_tasks.items():
                out += f"{task_name}: {task_data}\n\n"

            if out:
                if len(out) > 2000:
                    for page in pagify(out):
                        await ctx.maybe_send_embed(page)
                else:
                    await ctx.maybe_send_embed(out)
            else:
                await ctx.maybe_send_embed("No tasks to list")

    @fifo.command(name="printschedule")
    async def fifo_printschedule(self, ctx: commands.Context):
        """
        Print the current schedule of execution.

        Useful for debugging.
        """
        cp = CapturePrint()
        self.scheduler.print_jobs(out=cp)

        out = cp.string

        if out:
            if len(out) > 2000:
                for page in pagify(out):
                    await ctx.maybe_send_embed(page)
            else:
                await ctx.maybe_send_embed(out)
        else:
            await ctx.maybe_send_embed("Failed to get schedule from scheduler")

    @fifo.command(name="add")
    async def fifo_add(self, ctx: commands.Context, task_name: str, *,
                       command_to_execute: str):
        """
        Add a new task to this guild's task list
        """
        if (await self.config.guild(ctx.guild).tasks.get_raw(
                task_name, default=None)) is not None:
            await ctx.maybe_send_embed(f"Task already exists with {task_name=}")
            return

        if "_" in task_name:  # See _disassemble_job_id
            await ctx.maybe_send_embed("Task name cannot contain underscores")
            return

        if not await self._check_parsable_command(ctx, command_to_execute):
            await ctx.maybe_send_embed(
                "Failed to parse command. Make sure not to include the prefix")
            return

        task = Task(task_name, ctx.guild.id, self.config, ctx.author.id,
                    ctx.channel.id, self.bot)
        await task.set_commmand_str(command_to_execute)
        await task.save_all()
        await ctx.tick()

    @fifo.command(name="delete")
    async def fifo_delete(self, ctx: commands.Context, task_name: str):
        """
        Deletes a task from this guild's task list
        """
        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        await self._delete_task(task)
        await ctx.maybe_send_embed(
            f"Task[{task_name}] has been deleted from this guild")

    @fifo.command(name="cleartriggers", aliases=["cleartrigger"])
    async def fifo_cleartriggers(self, ctx: commands.Context, task_name: str):
        """
        Removes all triggers from specified task

        Useful to start over with new trigger
        """

        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        await task.clear_triggers()
        await self._remove_job(task)
        await ctx.tick()

    @fifo.group(name="addtrigger", aliases=["trigger"])
    async def fifo_trigger(self, ctx: commands.Context):
        """
        Add a new trigger for a task from the current guild.
        """
        if ctx.invoked_subcommand is None:
            pass

    @fifo_trigger.command(name="interval")
    async def fifo_trigger_interval(self, ctx: commands.Context,
                                    task_name: str, *,
                                    interval_str: TimedeltaConverter):
        """
        Add an interval trigger to the specified task
        """

        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()  # Will set the channel and author

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        result = await task.add_trigger("interval", interval_str)
        if not result:
            await ctx.maybe_send_embed(
                "Failed to add an interval trigger to this task, see console for logs"
            )
            return
        await task.save_data()
        job: Job = await self._process_task(task)
        delta_from_now: timedelta = job.next_run_time - datetime.now(
            job.next_run_time.tzinfo)
        await ctx.maybe_send_embed(
            f"Task `{task_name}` added interval of {interval_str} to its scheduled runtimes\n\n"
            f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)"
        )

    @fifo_trigger.command(name="relative")
    async def fifo_trigger_relative(self, ctx: commands.Context,
                                    task_name: str, *,
                                    time_from_now: TimedeltaConverter):
        """
        Add a "run once" trigger at a time relative from now to the specified task
        """

        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        time_to_run = datetime.now(pytz.utc) + time_from_now

        result = await task.add_trigger("date", time_to_run,
                                        time_to_run.tzinfo)
        if not result:
            await ctx.maybe_send_embed(
                "Failed to add a date trigger to this task, see console for logs"
            )
            return

        await task.save_data()
        job: Job = await self._process_task(task)
        delta_from_now: timedelta = job.next_run_time - datetime.now(
            job.next_run_time.tzinfo)
        await ctx.maybe_send_embed(
            f"Task `{task_name}` added {time_to_run} to its scheduled runtimes\n"
            f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)"
        )

    @fifo_trigger.command(name="date")
    async def fifo_trigger_date(self, ctx: commands.Context, task_name: str, *,
                                datetime_str: DatetimeConverter):
        """
        Add a "run once" datetime trigger to the specified task
        """

        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        maybe_tz = await self._get_tz(ctx.author)

        result = await task.add_trigger("date", datetime_str, maybe_tz)
        if not result:
            await ctx.maybe_send_embed(
                "Failed to add a date trigger to this task, see console for logs"
            )
            return

        await task.save_data()
        job: Job = await self._process_task(task)
        delta_from_now: timedelta = job.next_run_time - datetime.now(
            job.next_run_time.tzinfo)
        await ctx.maybe_send_embed(
            f"Task `{task_name}` added {datetime_str} to its scheduled runtimes\n"
            f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)"
        )

    @fifo_trigger.command(name="cron")
    async def fifo_trigger_cron(
        self,
        ctx: commands.Context,
        task_name: str,
        optional_tz: Optional[TimezoneConverter] = None,
        *,
        cron_str: CronConverter,
    ):
        """
        Add a cron "time of day" trigger to the specified task

        See https://crontab.guru/ for help generating the cron_str
        """
        task = Task(task_name, ctx.guild.id, self.config, bot=self.bot)
        await task.load_from_config()

        if task.data is None:
            await ctx.maybe_send_embed(
                f"Task by the name of {task_name} is not found in this guild")
            return

        if optional_tz is None:
            optional_tz = await self._get_tz(ctx.author)  # might still be None

        result = await task.add_trigger("cron", cron_str, optional_tz)
        if not result:
            await ctx.maybe_send_embed(
                "Failed to add a cron trigger to this task, see console for logs"
            )
            return

        await task.save_data()
        job: Job = await self._process_task(task)
        delta_from_now: timedelta = job.next_run_time - datetime.now(
            job.next_run_time.tzinfo)
        await ctx.maybe_send_embed(
            f"Task `{task_name}` added cron_str to its scheduled runtimes\n"
            f"Next run time: {job.next_run_time} ({delta_from_now.total_seconds()} seconds)"
        )
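_assemble_job_id and _disassemble_job_id live elsewhere in fifo.py; given the "no underscores in task names" rule enforced above, a plausible minimal form is:

def _assemble_job_id(task_name: str, guild_id: int) -> str:
    # Underscore-delimited, which is why task names may not contain "_".
    return f"{task_name}_{guild_id}"

def _disassemble_job_id(job_id: str):
    return job_id.split("_")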
Example #7
async def refresh_task(loop, scheduler: AsyncIOScheduler):
    """
    任务获取
    :param loop: 协程loop
    :param scheduler: apscheduler
    :return:
    """
    req = await fetch_resource_list(loop=loop)
    result = req.json()
    for resource in result:
        is_enabled = resource.get("is_enabled")
        if not is_enabled:
            continue

        spider_class = importlib.import_module(
            f'.{resource.get("spider_type").get("filename")}', package="backend.spiders"
        )
        link = resource.get("link")
        resource_id = resource.get("id")
        default_category_id = resource.get("default_category")
        default_tag_id = resource.get("default_tag")
        gap = resource.get("refresh_gap")
        status = resource.get("refresh_status")
        last_refresh_time = parse_datetime(resource.get("last_refresh_time"))
        proxy = resource.get("proxy")
        auth = resource.get("auth")

        if auth:
            username = auth.get("username")
            password = auth.get("password")
            cookie = auth.get("cookie")
            auth_header = auth.get("auth_header")
            auth_value = auth.get("auth_value")
        else:
            username, password, cookie, auth_header, auth_value = (None,) * 5

        task_data = {
            "loop": loop,
            "init_url": link,
            "resource_id": resource_id,
            "default_category_id": default_category_id,
            "default_tag_id": default_tag_id,
            "proxy": proxy,
            "username": username,
            "password": password,
            "cookie": cookie,
            "auth_header": auth_header,
            "auth_value": auth_value,
        }

        if status in (
            enums.ResourceRefreshStatus.NEVER.value,
            enums.ResourceRefreshStatus.FAIL.value,
        ):
            next_run_time = datetime.datetime.now(tz=pytz.UTC)
        elif status == enums.ResourceRefreshStatus.RUNNING.value:
            continue
        else:
            next_run_time = last_refresh_time + datetime.timedelta(hours=gap)

        task_id = f'{resource.get("name")}-({resource.get("spider_type").get("id")},{resource_id})'

        job = scheduler.get_job(job_id=task_id)
        if job:
            job.modify(
                next_run_time=max(next_run_time, datetime.datetime.now(tz=pytz.UTC))
            )
        else:
            scheduler.add_job(
                func=spider_class.get_spider(**task_data),
                trigger="date",
                next_run_time=max(next_run_time, datetime.datetime.now(tz=pytz.UTC)),
                id=task_id,
                name=resource.get("name"),
                misfire_grace_time=600,
                coalesce=True,
                replace_existing=True,
            )
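refresh_task only (re)schedules spider jobs for the resources it sees on one pass, so something must invoke it periodically; a hedged wiring sketch, assuming the imports used above:

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

loop = asyncio.get_event_loop()
scheduler = AsyncIOScheduler(event_loop=loop, timezone="UTC")
# Re-scan the resource list every 10 minutes (the interval is illustrative).
scheduler.add_job(refresh_task, "interval", minutes=10,
                  kwargs={"loop": loop, "scheduler": scheduler})
scheduler.start()
loop.run_forever()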
Example #8
class Reminders:
    """Reminders functionality for the guild bot"""
    def __init__(self, reminders_database_path: str):
        self.reminders_database_path = reminders_database_path
        self.scheduler: AsyncIOScheduler = None

    def start(self, event_loop):
        """Start the reminder service."""
        self.scheduler = AsyncIOScheduler(event_loop=event_loop, timezone='UTC')
        self.scheduler.add_jobstore(SQLAlchemyJobStore(url='sqlite:///' + self.reminders_database_path))
        self.scheduler.start()

    def stop(self):
        """Stop the reminder service immediately."""
        self.scheduler.shutdown(wait=False)
        del self.scheduler

    def createOrResetPeriodicStatusUpdateCallback(self, callback: callable):
        """Create or reset the status update callback for the entire bot."""
        self.scheduler.add_job(callback, trigger='interval', minutes=1, id='periodic-status', name='periodic-status',
            coalesce=True, max_instances=1, replace_existing=True)

    def addDailyReminder(self, owner_name: str, owner_id: str, callback: callable, callback_args: List[str]):
        """Add a daily reminder that is fired within a short time window after daily reset based on WotV world time (midnight at UTC -8).

        args:
        owner_name                  name of the owner, used in the description of the task.
        owner_id                    unique ID of the owner, used to construct IDs for the reminders.
        callback                    the callback function (must be a callable) to be invoked for the reminder.
        callback_args               positional arguments to be passed to the callback.

        The reminder's job ID is "<owner_id>#daily", i.e. if the owner_id is "bob" then the ID of the reminder is "bob#daily"
        """
        job_id = owner_id + '#daily'
        job_desc = '#daily reminder for ' + owner_name + ' (id=' + owner_id + ')'
        self.scheduler.add_job(callback, id=job_id, name=job_desc, coalesce=True, max_instances=1, replace_existing=True, args=callback_args,
            trigger='cron', # Cron scheduler to get easy daily scheduling
            hour=8,    # World time is at UTC-8 so execute at midnight World Time
            minute=5,  # ... but actually spread it out a bit over 5 minutes by anchoring at 5 minutes past the hour and...
            jitter=300) # Jittering the time by as much as 300 seconds (5 minutes) in any direction.

    def getDailyReminder(self, owner_id: str) -> apscheduler.job.Job:
        """Return the specified ownwer's daily reminder job."""
        return self.scheduler.get_job(owner_id + '#daily')

    def hasDailyReminder(self, owner_id: str) -> bool:
        """Return true if the specified user has a daily reminder configured."""
        return self.getDailyReminder(owner_id) is not None

    def cancelDailyReminder(self, owner_id: str):
        """Cancel any daily reminder configured for the specified owner."""
        job: apscheduler.job.Job = self.scheduler.get_job(owner_id + '#daily')
        if job:
            job.remove()

    def addWhimsyReminder(self, owner_name: str, owner_id: str, nrg_reminder_callback: callable, nrg_reminder_args: List[str], spawn_reminder_callback: callable,
        spawn_reminder_args: List[str], nrg_time_ms_override: int = None, spawn_time_ms_override: int = None):
        """Add a whimsy shop reminder. Actually a pair of reminders, one for NRG spending and one for whimsy spawning.

        The first reminder is set for 30 minutes after now, and reminds the user that they can now start spending NRG.
        The second reminder is set for 60 minutes after now, and reminds the user that they can now spawn a new Whimsy shop.

        args:
        owner_name                  name of the owner, used in the description of the task.
        owner_id                    unique ID of the owner, used to construct IDs for the reminders.
        nrg_reminder_callback       the callback function (must be a callable) to be invoked for the nrg reminder.
        nrg_reminder_args           positional arguments to be passed to the nrg_reminder_callback.
        spawn_reminder_callback     the callback function (must be a callable) to be invoked for the whimsy spawn reminder.
        spawn_reminder_args         positional arguments to be passed to the spawn_reminder_callback.
        nrg_time_ms_override        if specified, overrides the amount of time before the nrg reminder fires from 30 minutes to the specified number of ms
        spawn_time_ms_override      if specified, overrides the amount of time before the spawn reminder fires from 60 minutes to the specified number of ms

        The nrg reminder's job ID is "<owner_id>#whimsy-nrg", i.e. if the owner_id is "bob" then the ID of the reminder is "bob#whimsy-nrg"
        The spawn reminder's job ID is "<owner_id>#whimsy-spawn", i.e. if the owner_id is "bob" then the ID of the reminder is "bob#whimsy-spawn"
        """
        nrg_job_id = owner_id + '#whimsy-nrg'
        nrg_job_desc = '#whimsy-nrg reminder for ' + owner_name + ' (id=' + owner_id + ')'
        now = datetime.datetime.now(tz=utc)
        nrg_execute_at = now + datetime.timedelta(minutes=30)
        if nrg_time_ms_override:
            nrg_execute_at = now + datetime.timedelta(milliseconds=nrg_time_ms_override)
        spawn_job_id = owner_id + '#whimsy-spawn'
        spawn_job_desc = '#whimsy-spawn reminder for ' + owner_name + ' (id=' + owner_id + ')'
        spawn_execute_at = now + datetime.timedelta(hours=1)
        if spawn_time_ms_override:
            spawn_execute_at = now + datetime.timedelta(milliseconds=spawn_time_ms_override)
        self.scheduler.add_job(nrg_reminder_callback, trigger='date', run_date=nrg_execute_at, args=nrg_reminder_args, kwargs=None,
            id=nrg_job_id, name=nrg_job_desc, misfire_grace_time=30*60, coalesce=True, max_instances=1, replace_existing=True)
        self.scheduler.add_job(spawn_reminder_callback, trigger='date', run_date=spawn_execute_at, args=spawn_reminder_args, kwargs=None,
            id=spawn_job_id, name=spawn_job_desc, misfire_grace_time=30*60, coalesce=True, max_instances=1, replace_existing=True)

    def getWhimsyReminders(self, owner_id: str) -> Dict[str, apscheduler.job.Job]:
        """Fetch any whimsy reminders outstanding for the specified owner id.

        The returned dictionary contains 2 entries:
            'nrg':  <the NRG reminder, or None if there is no such reminder or the reminder has expired.>
            'spawn': <the spawn reminder, or None if there is no such reminder or the reminder has expired.>
        """
        return {
            'nrg': self.scheduler.get_job(owner_id + '#whimsy-nrg'),
            'spawn': self.scheduler.get_job(owner_id + '#whimsy-spawn'),
        }

    def hasPendingWhimsyNrgReminder(self, owner_id: str) -> bool:
        """Return true if the specified user has a pending whimsy shop NRG reminder."""
        scheduled: Dict[str, apscheduler.job.Job] = self.getWhimsyReminders(owner_id)
        return scheduled and 'nrg' in scheduled and scheduled['nrg'] and scheduled['nrg'].next_run_time and scheduled['nrg'].next_run_time > datetime.datetime.now(tz=utc)

    def hasPendingWhimsySpawnReminder(self, owner_id: str) -> bool:
        """Return true if the specified user has a pending whimsy shop spawn reminder."""
        scheduled: Dict[str, apscheduler.job.Job] = self.getWhimsyReminders(owner_id)
        return scheduled and 'spawn' in scheduled and scheduled['spawn'] and scheduled['spawn'].next_run_time and scheduled['spawn'].next_run_time > datetime.datetime.now(tz=utc)

    def timeTillWhimsyNrgReminder(self, owner_id: str) -> int:
        """If the specified user has a whimsy-shop NRG reminder in the future, return the number of seconds until that reminder fires; else return None."""
        scheduled: Dict[str, apscheduler.job.Job] = self.getWhimsyReminders(owner_id)
        if scheduled and 'nrg' in scheduled and scheduled['nrg'] and scheduled['nrg'].next_run_time and scheduled['nrg'].next_run_time > datetime.datetime.now(tz=utc):
            next_run_time: datetime.datetime = scheduled['nrg'].next_run_time
            return (next_run_time - datetime.datetime.now(tz=utc)).total_seconds()
        return None

    def timeTillWhimsySpawnReminder(self, owner_id: str) -> int:
        """If the specified user has a whimsy-shop spawn reminder in the future, return the number of seconds until that reminder fires; else return None."""
        scheduled: Dict[str, apscheduler.job.Job] = self.getWhimsyReminders(owner_id)
        if scheduled and 'spawn' in scheduled and scheduled['spawn'] and scheduled['spawn'].next_run_time and scheduled['spawn'].next_run_time > datetime.datetime.now(tz=utc):
            next_run_time: datetime.datetime = scheduled['spawn'].next_run_time
            return (next_run_time - datetime.datetime.now(tz=utc)).total_seconds()
        return None

    def cancelWhimsyReminders(self, owner_id: str):
        """Cancels any and all oustanding whimsy reminders for the specified owner."""
        job: apscheduler.job.Job = None
        job = self.scheduler.get_job(owner_id + '#whimsy-nrg')
        if job:
            job.remove()
        job = self.scheduler.get_job(owner_id + '#whimsy-spawn')
        if job:
            job.remove()
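A usage sketch for the whimsy-reminder pair, assuming an asyncio loop, APScheduler's SQLAlchemy extras, and an async callback; the names, path, and id are illustrative.

import asyncio

async def dm_user(message: str):
    print(message)

reminders = Reminders('reminders.sqlite3')
reminders.start(asyncio.get_event_loop())
reminders.addWhimsyReminder(
    owner_name='bob', owner_id='1234',
    nrg_reminder_callback=dm_user, nrg_reminder_args=['Spend your NRG!'],
    spawn_reminder_callback=dm_user, spawn_reminder_args=['Spawn a new shop!'])
print(reminders.hasPendingWhimsyNrgReminder('1234'))  # True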
Example #9
class Scheduler:
    """
    Nice class for scheduled jobs

    Example usage

    1. Set functions
    >>> core.scheduler.set_functions({
    ...    'test_notify_function': test_notify_function
    ... })

    2. Restore jobs from db
    >>> core.scheduler.restore_jobs()

    You need a key-generating function defined:
    >>> def getJobId(label, chat_id):
    ...     job_id = "{}:{}".format(chat_id, label)
    ...     return job_id

    Then you can do what you want.

    3. Create new job
    >>> core.scheduler.add_job(
    ...     label='test_notify_function',
    ...     func_args=[chat_id],
    ...     trigger='cron',
    ...     trigger_params={'second': '*/5'},
    ...     job_id=getJobId('test_notify', chat_id)
    ... )

    4. Remove existing job
    >>> core.scheduler.remove_job(getJobId('test_notify_function', chat_id))

    """

    scheduler = None

    COLLECTION_JOBS = 'app_jobs'

    functions = {}

    def __init__(self, core):
        self.core = core
        self.scheduler = AsyncIOScheduler()
        self.scheduler.start()
        self.table = self.core.db[self.COLLECTION_JOBS]

    def set_functions(self, functions={}):
        """
        Dictionary "label" -> "function"

        >>> core.scheduler.set_functions({
        ...    'test_notify_function': test_notify_function
        ... })

        :param functions dict:
            'print': print
            'checkFeeds': test.getFeed
        """
        for label in functions:
            self.functions[label] = functions[label]

    def run_job(self, params):
        label = params['label']
        job_function = self.functions[label]
        trigger = params['trigger']
        func_args = params['func_args']
        trigger_params = params['trigger_params']
        job_id = params['job_id']

        return self.scheduler.add_job(job_function, trigger, **trigger_params, args=func_args, id=job_id)


    def add_job(self, label, func_args=None, trigger='', trigger_params=None, job_id=''):
        """
        core.scheduler.add_job(
            label='message',
            func_args=['blet'],
            trigger='cron',
            trigger_params={'minute': '*'},
            job_id=getJobId('message', chat_id)
        )
        """
        try:
            job_params = {
                'label': label,
                'func_args': func_args or [],
                'trigger': trigger,
                'trigger_params': trigger_params or {},
                'job_id': job_id
            }


            if self.scheduler.get_job(job_id):
                self.remove_job(job_id)

            # run job
            self.run_job(job_params)

            # save job to db
            self.table.insert_one(job_params)
        except Exception as e:
            self.core.logger.error(e, exc_info=e)

    def remove_job(self, job_id):
        """
        Remove running job

        >>> core.scheduler.remove_job(getJobId('message', chat_id))
        """
        try:
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
                self.table.delete_one({'job_id': job_id})  # Collection.remove was removed in PyMongo 4
        except Exception as e:
            self.core.logger.error(e, exc_info=e)

    def restore_jobs(self):
        """
        Restore jobs from db

        >>> core.scheduler.restore_jobs()
        """
        try:
            # get jobs from db
            jobs = self.table.find()

            # run jobs
            for job in jobs:
                self.run_job(job)

        except Exception as e:
            self.core.logger.error(e, exc_info=e)
Example #10
class Scheduler:
    def __init__(self,
                 clan_tag: str,
                 db: DB,
                 web_refresher: Refresher,
                 bot: commands.bot = None) -> None:
        self.clan_tag: str = clan_tag
        self.scheduler = AsyncIOScheduler()
        self.db = db
        self.web_refresher = web_refresher
        self.bot = bot

        # Start the scheduler
        self.scheduler.start()

    def add_jobs(self) -> None:
        # track the war once per hour starting now-ish
        self.scheduler.add_job(
            self.war_tracking,
            "interval",
            next_run_time=pendulum.now("UTC").add(seconds=5),
            minutes=60,
            timezone="UTC",
        )

        self.scheduler.add_job(
            Tracker.track_war_battles,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=3),
            minutes=30,
            timezone="UTC",
            id="track_war_battles",
            name="Track war battles",
        )

        self.scheduler.add_job(
            Tracker.track_clan,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=2),
            minutes=30,
            jitter=350,
            timezone="UTC",
            id="track_clan",
            name="Track clan data",
        )

        self.scheduler.add_job(
            Tracker.track_war_logs,
            "interval",
            args=[self.clan_tag, self.db],
            next_run_time=pendulum.now("UTC").add(seconds=60),
            hours=4,
            jitter=350,
            timezone="UTC",
            id="track_war_logs",
            name="Track war logs",
        )

        self.scheduler.add_job(
            self.inactive_members,
            "interval",
            args=[self.clan_tag],
            next_run_time=pendulum.now("UTC").end_of("week").add(minutes=5),
            weeks=1,
            timezone="UTC",
            id="inactives",
            name="Inactive players",
        )

        self.scheduler.add_job(
            self.top_donators,
            "interval",
            args=[self.clan_tag],
            next_run_time=pendulum.now("UTC").end_of("week").subtract(
                minutes=1),
            weeks=1,
            timezone="UTC",
            id="top_donators",
            name="Top donators",
        )

        self.scheduler.add_job(
            self.web_refresher.run_no_spin,
            "interval",
            next_run_time=pendulum.now("UTC").add(minutes=3),
            hours=6,
            jitter=350,
            timezone="UTC",
            id="web_refresh",
            name="Refresh clan member battles on http://royaleapi.com",
        )

    async def war_tracking(self):
        current_war = await Tracker.track_war(self.clan_tag, self.db)

        if current_war["state"] == "warDay" or current_war[
                "state"] == "collectionDay":
            self.schedule_end_of_war_jobs(current_war)

    def schedule_end_of_war_jobs(self, war):
        war_end_timestamp = war.get("collectionEndTime", None) or war.get(
            "warEndTime", None)
        war_end_date = pendulum.from_timestamp(war_end_timestamp, tz="UTC")

        # War tracking jobs to collect war data
        t_minus_jobs = [1, 5, 10, 20, 30]
        for t_minus in t_minus_jobs:
            schedule_time = war_end_date.subtract(minutes=t_minus)
            job_id = self.get_job_id(
                war, "Tminus{}-{:.0f}".format(t_minus,
                                              schedule_time.timestamp()))
            self.schedule_war_job(
                self.war_tracking,
                schedule_time,
                job_id,
                "End of war job for {}".format(job_id),
            )

        # Job to fetch war logs if this is a War Day
        if war["state"] == "warDay":
            war_log_time = war_end_date.add(seconds=10)
            job_id = self.get_job_id(war, "war_logs")
            self.schedule_war_job(
                self.war_logs,
                war_log_time,
                job_id,
                "War logs job for {}".format(job_id),
                [war["clan"]["tag"]],
            )

        # Job to have bot print war summary
        summary_time = war_end_date.add(seconds=15)
        job_id = self.get_job_id(war, "war_summary")
        self.schedule_war_job(
            self.war_summary,
            summary_time,
            job_id,
            "War summary job for {}".format(job_id),
            [war["clan"]["tag"]],
        )

        log.debug(self.scheduler.get_jobs())

    def schedule_war_job(self, func, date, job_id, name=None, args=None):
        if not self.scheduler.get_job(job_id):
            self.scheduler.add_job(func,
                                   "date",
                                   id=job_id,
                                   name=name or job_id,
                                   run_date=date,
                                   args=args)

    async def war_logs(self, clan_tag):
        await Tracker.track_war_logs(clan_tag, self.db)

    async def war_summary(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").war_summary_auto(clan_tag)

    async def inactive_members(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").inactives_auto(clan_tag)

    async def top_donators(self, clan_tag):
        if not self.bot:
            return

        if not self.bot.is_ready() or not self.bot.get_cog("WarLog"):
            return

        await self.bot.get_cog("WarLog").top_donators_auto(clan_tag)

    @staticmethod
    def get_job_id(war, suffix):
        end_time = war.get("collectionEndTime", None) or war.get(
            "warEndTime", None)
        return "{}-{}-{}-{}".format(war["clan"]["tag"], war["state"], end_time,
                                    suffix)
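
The `schedule_war_job` helper above only adds a job when no job with that id already exists, so repeated `war_tracking` runs never duplicate the end-of-war jobs. A minimal standalone sketch of the same guard, assuming a hypothetical callback and job id (not the original code):

from apscheduler.schedulers.asyncio import AsyncIOScheduler
import pendulum

scheduler = AsyncIOScheduler(timezone="UTC")

async def end_of_war_check():
    # hypothetical stand-in for self.war_tracking
    print("re-checking war state")

job_id = "#CLANTAG-warDay-1234567890-Tminus10"
if not scheduler.get_job(job_id):
    # a "date" trigger fires exactly once at run_date, as in schedule_war_job
    scheduler.add_job(end_of_war_check, "date",
                      run_date=pendulum.now("UTC").add(minutes=10),
                      id=job_id, name=job_id)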
Example #11
class AlamoScheduler(object):
    message_queue = None
    loop = handler = None

    def __init__(self, loop=None):
        kw = dict()
        if loop:
            kw['event_loop'] = loop

        self.scheduler = AsyncIOScheduler(**kw)

    def setup(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
        self.loop = loop
        self.message_queue = ZeroMQQueue(
            settings.ZERO_MQ_HOST,
            settings.ZERO_MQ_PORT
        )
        self.message_queue.connect()
        self.scheduler.add_listener(
            self.event_listener,
            EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES
        )

    @aiostats.increment()
    def _schedule_check(self, check):
        """Schedule check."""
        logger.info(
            'Check `%s:%s` scheduled!', check['uuid'], check['name']
        )

        check['scheduled_time'] = datetime.now(tz=pytz_utc).isoformat()
        self.message_queue.send(check)

    def remove_job(self, job_id):
        """Remove job."""
        try:
            logger.info('Removing job for check id=`%s`', job_id)
            self.scheduler.remove_job(str(job_id))
        except JobLookupError:
            pass

    def schedule_check(self, check):
        """Schedule check with proper interval based on `frequency`.

        :param dict check: Check definition
        """
        try:
            frequency = check['fields']['frequency'] = int(
                check['fields']['frequency']
            )
            logger.info(
                'Scheduling check `%s` with id `%s` and interval `%s`',
                check['name'], check['id'], frequency
            )
            jitter = random.randint(0, frequency)
            first_run = datetime.now() + timedelta(seconds=jitter)
            kw = dict(
                seconds=frequency,
                id=str(check['uuid']),
                next_run_time=first_run,
                args=(check,)
            )
            self.schedule_job(self._schedule_check, **kw)

        except KeyError as e:
            logger.exception('Failed to schedule check: %s. Exception: %s',
                             check, e)

    def schedule_job(self, method, **kwargs):
        """Add new job to scheduler.

        :param method: reference to method that should be scheduled
        :param kwargs: additional kwargs passed to `add_job` method
        """
        try:
            self.scheduler.add_job(
                method, 'interval',
                misfire_grace_time=settings.JOBS_MISFIRE_GRACE_TIME,
                max_instances=settings.JOBS_MAX_INSTANCES,
                coalesce=settings.JOBS_COALESCE,
                **kwargs
            )
        except ConflictingIdError as e:
            logger.error(e)

    def event_listener(self, event):
        """React on events from scheduler.

        :param apscheduler.events.JobExecutionEvent event: job execution event
        """
        if event.code == EVENT_JOB_MISSED:
            aiostats.increment.incr('job.missed')
            logger.warning("Job %s scheduler for %s missed.", event.job_id,
                           event.scheduled_run_time)
        elif event.code == EVENT_JOB_ERROR:
            aiostats.increment.incr('job.error')
            logger.error("Job %s scheduled for %s failed. Exc: %s",
                         event.job_id,
                         event.scheduled_run_time,
                         event.exception)
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            aiostats.increment.incr('job.max_instances')
            logger.warning(
                'Job `%s` could not be submitted. '
                'Maximum number of running instances was reached.',
                event.job_id
            )

    @aiostats.increment()
    def get_jobs(self):
        return [job.id for job in self.scheduler.get_jobs()]

    async def checks(self, request=None):
        uuid = request.match_info.get('uuid', None)
        if uuid is None:
            jobs = self.get_jobs()
            return json_response(data=dict(count=len(jobs), results=jobs))
        job = self.scheduler.get_job(uuid)
        if job is None:
            return json_response(
                data={'detail': 'Check does not exist.'}, status=404
            )

        check, = job.args
        return json_response(data=check)

    @aiostats.timer()
    async def update(self, request=None):
        check = await request.json()
        check_uuid = check.get('uuid')
        check_id = check.get('id')

        message = dict(status='ok')

        if not check_id or not check_uuid:
            return json_response(status=400)

        if check_id % settings.SCHEDULER_COUNT != settings.SCHEDULER_NR:
            return json_response(data=message, status=202)

        job = self.scheduler.get_job(str(check_uuid))

        if job:
            scheduled_check, = job.args
            timestamp = scheduled_check.get('timestamp', 0)

            if timestamp > check['timestamp']:
                return json_response(data=message, status=202)
            message = dict(status='deleted')
            self.remove_job(check_uuid)

        if any([trigger['enabled'] for trigger in check['triggers']]):
            self.schedule_check(check)
            message = dict(status='scheduled')

        return json_response(data=message, status=202)

    def wait_and_kill(self, sig):
        logger.warning('Got `%s` signal. Preparing scheduler to exit ...', sig)
        self.scheduler.shutdown()
        self.loop.stop()

    def register_exit_signals(self):
        for sig in ['SIGQUIT', 'SIGINT', 'SIGTERM']:
            logger.info('Registering handler for `%s` signal '
                        'in current event loop ...', sig)
            self.loop.add_signal_handler(
                getattr(signal, sig),
                self.wait_and_kill, sig
            )

    def start(self, loop=None):
        """Start scheduler."""
        self.setup(loop=loop)
        self.register_exit_signals()
        self.scheduler.start()

        logger.info(
            'Press Ctrl+%s to exit.', 'Break' if os.name == 'nt' else 'C'
        )
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Scheduler was stopped!')
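
`setup()` above registers a single listener for three event types by OR-ing their bit masks, and `event_listener` branches on `event.code`. A minimal self-contained sketch of that pattern, with plain prints standing in for the stats and logging calls:

from apscheduler.events import (EVENT_JOB_ERROR, EVENT_JOB_MISSED,
                                EVENT_JOB_MAX_INSTANCES)
from apscheduler.schedulers.asyncio import AsyncIOScheduler

scheduler = AsyncIOScheduler()

def listener(event):
    # event.code identifies which of the subscribed event types fired
    if event.code == EVENT_JOB_MISSED:
        print("missed:", event.job_id, event.scheduled_run_time)
    elif event.code == EVENT_JOB_ERROR:
        print("failed:", event.job_id, event.exception)
    elif event.code == EVENT_JOB_MAX_INSTANCES:
        print("max instances reached:", event.job_id)

scheduler.add_listener(
    listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES)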
Example #12
class scheduling:
    def __init__(self, client, cursor, zone):
        self.scheduler = AsyncIOScheduler()
        self.scheduler.start()
        self.client = client
        self.cursor = cursor
        self.timezone = timezone(zone)

        self.cursor.execute('''SELECT * FROM Guilds''')

        rows = self.cursor.fetchall()

        for row in rows:
            self.cursor.execute('''SELECT * FROM {0}'''.format(row[1]))
            events = self.cursor.fetchall()
            
            self.cursor.execute('''SELECT * FROM Guilds WHERE id='{0}';'''.format(row[0]))
            guild = self.cursor.fetchone()

            for event in events:
                # Create the job and tell the scheduler to send a message at a certain time
                eventid = event[0]
                date = standardtime(ttype='string', string=event[3])
                repeat = event[7]
                if repeat == 'daily':
                    self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(event[6]))).astimezone(self.timezone), days=1, id=eventid, kwargs={'gid': guild[0],'eventid': eventid})
                elif repeat == 'weekly':
                    self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(event[6]))).astimezone(self.timezone), weeks=1, id=eventid, kwargs={'gid': guild[0],'eventid': eventid})
                else:
                    self.scheduler.add_job(self.notifier, 'date', run_date=(date.datetime - timedelta(hours=int(event[6]))).astimezone(self.timezone), id=eventid, kwargs={'gid': guild[0],'eventid': eventid})

        print('Currently setting up all events again')

    async def events(self, message, chunks):
        self.cursor.execute('''SELECT * FROM G_{0}'''.format(message.guild.id))
        rows = self.cursor.fetchall()

        for row in rows:
            title = '***{0}***\n\n'.format(row[1])
            description = '' if (row[2] == 'N/A') else 'Description: {0}\n'.format(row[2])
            zone = standardtime(ttype='string', string=row[3])
            time = datetime.now(self.timezone).astimezone(zone.datetime.tzinfo)
            time = time + timedelta(hours=int(row[6]))
            time = time.strftime('Time: on %m-%d-%Y at %H:%M\n')
            location = '' if (row[4] == 'N/A') else 'Location: {0}\n'.format(row[4])
            link = '' if (row[5] == 'N/A') else 'Link: {0}\n'.format(row[5])

            await message.channel.send('{0}{1}{2}{3}{4}'.format(title, description, time, location, link))
            await message.channel.send('========================================')

    ## Allows for the creation of a new recurring event
    async def create(self, message, chunks):

        # If the command is used incorrectly, tell the user what they are doing wrong
        if len(chunks) == 0:
            await message.channel.send('Usage: sudo create <title> [options]\n\n' \
            'Where options include:\n\n' \
            '-d <description> to set a description\n' \
            '-t <date/time> indicate what day and time to start at using the following format MM-DD-YYYYThh:mm\n' \
            '-a <location> indicate where the event will take place\n' \
            '-l <link> indicate a link to which members can refer for information about the event\n' \
            '-r <weekly, daily, none> specify how often you want the event to repeat\n' \
            '-b <hours> the number of hours before an event that people should be notified\n' \
            '-c <channel> ping a channel to indicate which channel the announcement should be provided in')

            return
        
        # Don't crash the bot if something goes wrong
        try:
            title = await self.parseTitle(chunks)
            eventid = 'G_{0}_{1}'.format(message.guild.id, title)

            self.cursor.execute(''' SELECT * FROM Guilds WHERE id='{0}';'''.format(message.guild.id))
            row = self.cursor.fetchone()
            # Check if server has already been added to guilds
            if row is None:
                self.cursor.execute('''INSERT INTO Guilds VALUES ('{0}', 'G_{0}', '+0000')'''.format(message.guild.id))
            self.cursor.execute('''CREATE TABLE IF NOT EXISTS G_{0}(id TEXT NOT NULL PRIMARY KEY, title TEXT, description TEXT, date TEXT, location TEXT, link TEXT, before INT, repeat TEXT, channel INT, role INT);'''.format(message.guild.id))

            # Get information from the array of inputs
            description, date, location, link, before, repeat, channel = await self.parseOptions(chunks, message, ['N/A'], standardtime(ttype="time"), ['N/A'], 'N/A', 6, 'weekly', message.channel.id)

            role = await message.guild.create_role(name='{0} Participants'.format(title), mentionable=True)
            # Add the data to a SQL table
            self.cursor.execute('''INSERT INTO G_{0} VALUES ('{1}', '{2}', '{3}', '{4}', '{5}', '{6}', '{7}', '{8}', '{9}', '{10}');'''.format(message.guild.id, eventid, title, description, str(date), location, link, before, repeat, channel, role.id))
            self.cursor.execute('''CREATE TABLE IF NOT EXISTS `{0}`(member_id INT);'''.format(eventid))

            # Create the job and tell the scheduler to send a message at a certain time
            if repeat == 'daily':
                self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), days=1, id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})
            elif repeat == 'weekly':
                self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), weeks=1, id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})
            else:
                self.scheduler.add_job(self.notifier, 'date', run_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})

            # Confirm successful creation of the event
            await message.channel.send('Your event {0}, has been created'.format(title))
        except Exception as inst:
            print(inst)
            await message.channel.send('There was an error creating your event, please retry')
    
    async def edit(self, message, chunks):
        # If the command is used incorrectly, tell the user what they are doing wrong
        if len(chunks) == 0:
            await message.channel.send('Usage: sudo edit <title> [options]\n\n' \
            'Where options include:\n\n' \
            '-d <description> to set a description\n' \
            '-t <date/time> indicate what day and time to start at using the following format MM-DD-YYYYThh:mm\n' \
            '-a <location> indicate where the event will take place\n' \
            '-l <link> indicate a link to which members can refer for information about the event\n' \
            '-r <weekly, daily, none> specify how often you want the event to repeat\n' \
            '-b <hours> the number of hours before an event that people should be notified\n' \
            '-c <channel> ping a channel to indicate which channel the announcement should be provided in')

            return

        try:
            title = await self.parseTitle(chunks)
            eventid = 'G_{0}_{1}'.format(message.guild.id, title)
            self.cursor.execute('''SELECT * FROM G_{0} WHERE id='{1}';'''.format(message.guild.id, eventid))

            row = self.cursor.fetchone()

            description, date, location, link, before, repeat, channel = await self.parseOptions(chunks, message, [row[2]], standardtime(ttype="string", string=row[3]), [row[4]], row[5], row[6], row[7], row[8])

            # Update the sqlite3 database
            self.cursor.execute('''UPDATE G_{0} SET description='{1}', date='{2}', location='{3}', link='{4}', before='{5}', repeat='{6}', channel='{7}' WHERE id='{8}';'''.format(message.guild.id, description, str(date), location, link, before, repeat, channel, eventid))

            if self.scheduler.get_job(eventid) is not None:
                self.scheduler.remove_job(eventid)

            self.cursor.execute('''SELECT * FROM Guilds WHERE id='{0}';'''.format(message.guild.id))
            guild = self.cursor.fetchone()

            if guild is not None:
                # Create the job and tell the scheduler to send a message at a certain time
                if repeat == 'daily':
                    self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), days=1, id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})
                elif repeat == 'weekly':
                    self.scheduler.add_job(self.notifier, 'interval', start_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), weeks=1, id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})
                else:
                    self.scheduler.add_job(self.notifier, 'date', run_date=(date.datetime - timedelta(hours=int(before))).astimezone(self.timezone), id=eventid, kwargs={'gid': message.guild.id,'eventid': eventid})

            await message.channel.send('Event successfully updated')
        except Exception as inst:
            print(inst)
            await message.channel.send('There was an error editing your event, please retry')

    ## Removes a recurring event from the list of recurring events
    async def remove(self, message, chunks):
        # Checks whether or not the command is valid
        if len(chunks) == 0:
            await message.channel.send('Usage: sudo remove <title>')
        
            return
        
        try:
            # Gets the title from the command
            title = await self.parseTitle(chunks)
            eventid = 'G_{0}_{1}'.format(message.guild.id, title)

            self.cursor.execute('''SELECT * FROM G_{0} WHERE id='{1}';'''.format(message.guild.id, eventid))
            event = self.cursor.fetchone()
            role = message.guild.get_role(int(event[9]))
            if role is not None:
                await role.delete()

            # Removes the event from the sqlite3 database
            self.cursor.execute('''DELETE FROM G_{0} WHERE id='{1}';'''.format(message.guild.id, eventid))
            self.cursor.execute('''DROP TABLE IF EXISTS `{0}`'''.format(eventid))

            if self.scheduler.get_job(eventid) is not None:
                self.scheduler.remove_job(eventid)

            await message.channel.send('Event successfully removed')
        except Exception as inst:
            print(inst)
            await message.channel.send('There was an error removing your event, please retry')

    ## Subscribes the user to a role which gets pinged with an event
    async def subscribe(self, message, chunks):
        # Checks whether or not the command is valid
        if len(chunks) == 0:
            await message.channel.send('Usage: sudo subscribe <title>')
        
            return
        
        try:
            # Gets the title from the command
            title = await self.parseTitle(chunks)
            eventid = 'G_{0}_{1}'.format(message.guild.id, title)

            self.cursor.execute(''' SELECT * FROM `{0}` WHERE member_id='{1}';'''.format(eventid, message.author.id))
            row = self.cursor.fetchone()
            # Check if server has already been added to guilds
            if row is None:
                self.cursor.execute('''INSERT INTO `{0}` VALUES ('{1}')'''.format(eventid, message.author.id))

            self.cursor.execute('''SELECT * FROM G_{0} WHERE id='{1}';'''.format(message.guild.id, eventid))
            row = self.cursor.fetchone()

            if row is not None:
                await message.author.add_roles(message.guild.get_role(int(row[9])))

            await message.channel.send('You have subscribed to the event')
        except Exception as inst:
            print(inst)
            await message.channel.send('You could not subscribe to the event')

    ## Unsubscribes a user from receiving messages
    async def unsubscribe(self, message, chunks):
        # Checks whether or not the command is valid
        if len(chunks) == 0:
            await message.channel.send('Usage: sudo unsubscribe <title>')
        
            return
        
        try:
            # Gets the title from the command
            title = await self.parseTitle(chunks)
            eventid = 'G_{0}_{1}'.format(message.guild.id, title)

            self.cursor.execute(''' SELECT * FROM `{0}` WHERE member_id='{1}';'''.format(eventid, message.author.id))
            row = self.cursor.fetchone()
            # Check if server has already been added to guilds
            if row is not None:
                self.cursor.execute('''DELETE FROM `{0}` WHERE member_id='{1}';'''.format(eventid, message.author.id))

            self.cursor.execute('''SELECT * FROM G_{0} WHERE id='{1}';'''.format(message.guild.id, eventid))
            row = self.cursor.fetchone()

            if row is not None:
                await message.author.remove_roles(message.guild.get_role(int(row[9])))

            await message.channel.send('You have unsubscribed from the event')
        except Exception as inst:
            print(inst)
            await message.channel.send('You could not unsubscribe from the event')

    ## Sends a notification about the event to all users subscribed to it; this coroutine is what the scheduler invokes
    async def notifier(self, gid, eventid):
        try:
            self.cursor.execute('''SELECT * FROM G_{0} WHERE id='{1}';'''.format(gid, eventid))

            row = self.cursor.fetchone()

            title = '***{0}***\n\n'.format(row[1])
            description = '' if (row[2] == 'N/A') else 'Description: {0}\n'.format(row[2])
            zone = standardtime(ttype='string', string=row[3])
            time = datetime.now(self.timezone).astimezone(zone.datetime.tzinfo)
            time = time + timedelta(hours=int(row[6]))
            time = time.strftime('Time: on %m-%d-%Y at %H:%M\n')
            location = '' if (row[4] == 'N/A') else 'Location: {0}\n'.format(row[4])
            link = '' if (row[5] == 'N/A') else 'Link: {0}\n'.format(row[5])

            sent = await self.client.get_channel(int(row[8])).send('{0}{1}{2}{3}{4}\n{5}'.format(title, description, time, location, link, self.client.get_guild(gid).get_role(int(row[9])).mention))
            await sent.add_reaction('\U00002705')
            await sent.add_reaction('\U0000274E')
            await sent.add_reaction('\U00002754')
        except Exception as inst:
            print(inst)
            return

    ## Gets and separates all the important information from a command
    async def parseOptions(self, chunks, message, description, date, location, link, before, repeat, channel):
        # Define what the event is and then add properties based on selected options
        mode = ''

        # Go through all the options and check what data needs to be parsed
        for chunk in chunks:
            if chunk == '-d':
                description = ['N/A']
                mode = '-d'

            elif chunk == '-t':
                mode = '-t'

            elif chunk == '-a':
                location = ['N/A']
                mode = '-a'

            elif chunk == '-l':
                mode = '-l'

            elif chunk == '-b':
                mode = '-b'

            elif chunk == '-r':
                mode = '-r'

            elif chunk == '-c':
                mode = '-c'

            else:
                if mode == '-d':
                    description.append(chunk)

                elif mode == '-t':
                    self.cursor.execute('''SELECT * FROM Guilds WHERE id='{0}';'''.format(message.guild.id))
                    row = self.cursor.fetchone()
                    date = standardtime(ttype='string', string='{0}{1}'.format(chunk, row[2]))

                elif mode == '-a':
                    location.append(chunk)

                elif mode == '-l':
                    link = chunk

                elif mode == '-b':
                    before = int(chunk)

                elif mode == '-r':
                    repeat = chunk

                elif mode == '-c':
                    channel = message.channel_mentions[0].id

        if len(description) > 1:
            if description[0] == 'N/A':
                description = description[1:]

        if len(location) > 1:
            if location[0] == 'N/A':
                location = location[1:]

        # Reformat the data so that it is correct
        description = ' '.join(description)
        location = ' '.join(location)

        return description, date, location, link, before, repeat, channel

    async def parseTitle(self, chunks):
        title = []

        for chunk in chunks:
            if chunk not in ('-d', '-t', '-a', '-l', '-b', '-r', '-c'):
                title.append(chunk)
            else:
                break

        return ' '.join(title) 
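
Example #12 interpolates ids and user-supplied text straight into SQL with str.format, which is fragile and injection-prone. A hedged sketch of the same kind of lookup with sqlite3 parameter binding instead (table names cannot be bound as parameters, so dynamic table names would still need separate validation):

import sqlite3

conn = sqlite3.connect(':memory:')  # assumption: any sqlite3 connection
cursor = conn.cursor()
cursor.execute('CREATE TABLE Guilds (id TEXT, tbl TEXT, tz TEXT)')
cursor.execute('INSERT INTO Guilds VALUES (?, ?, ?)', ('1234', 'G_1234', '+0000'))

guild_id = '1234'
cursor.execute('SELECT * FROM Guilds WHERE id = ?;', (guild_id,))
print(cursor.fetchone())  # ('1234', 'G_1234', '+0000')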
        
Example #13
class Break(commands.Cog):
    def __init__(self, bot):
        self.bot = bot
        self.scheduler = AsyncIOScheduler({'apscheduler.timezone': 'Europe/Helsinki'})
        self.scheduler.start()
        self.setup_in_progress = False

    @commands.group(name='break', help='Handles reminders for break time')
    async def _break(self, ctx):
        if ctx.subcommand_passed is None:
            await ctx.send_help(ctx.command)

    @_break.command(help='Shows the breaks that have been set up')
    async def show(self, ctx):
        jobs = self.scheduler.get_jobs()
        if len(jobs) == 0:
            await ctx.send('No breaks set up. Schedule one with \'!break setup\'.')
        else:
            jobs_str = [self.job_tostring(j, f'Break #{i}', j.id)
                        for i, j in enumerate(jobs)]
            await ctx.send('\n'.join(jobs_str))

    @_break.command(help='Removes a break by id')
    async def remove(self, ctx, id):
        if self.scheduler.get_job(id) is None:
            await ctx.send(f'No break with id \'{id}\' exists.')
        else:
            self.scheduler.remove_job(id)
            await ctx.send(f'Break with id \'{id}\' removed successfully.')

    @_break.command(help='Removes all breaks.')
    async def clear(self, ctx):
        self.scheduler.remove_all_jobs()
        await ctx.send('All breaks have been removed successfully.')

    @_break.command(help='Sets up the break time interactively, use \'!break abort\' to abort')
    async def setup(self, ctx, id=None):
        if self.setup_in_progress:
            await ctx.send('Another break setup is in progress, please wait for it to finish.')
            return
        self.setup_in_progress = True

        job_id = id if id is not None else f'break_{len(self.scheduler.get_jobs()) + 1}'

        def check_context(m):
            return m.channel == ctx.channel and m.author == ctx.author

        def check_command(m):
            # Only allow '!break abort' through
            return m.content == '!break abort' or not m.content.startswith(ctx.prefix)

        def check_range(m, lower_inc, upper_inc):
            try:
                num = int(m.content)
                return num >= lower_inc and num <= upper_inc
            except ValueError:
                return False

        def check_message(m):
            return check_context(m) and check_command(m)

        def check_weekday(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            if m.content in ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']:
                return True
            return check_range(m, 0, 6)

        def check_hour(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            return check_range(m, 0, 23)

        def check_minute(m):
            if not check_context(m):
                return False
            if check_command(m):
                return True
            return check_range(m, 0, 59)

        timeout_err_msg = 'Took too long to answer, aborting break time setup.'

        msg = await self._prompt(ctx, 'Message?', check_message, timeout_err_msg, 60.0)
        if msg is None:
            return

        weekday = await self._prompt(ctx, 'Week day?', check_weekday, timeout_err_msg, 60.0)
        if weekday is None:
            return

        hour = await self._prompt(ctx, 'Hour(s)?', check_hour, timeout_err_msg, 60.0)
        if hour is None:
            return

        minute = await self._prompt(ctx, 'Minute(s)?', check_minute, timeout_err_msg, 60.0)
        if minute is None:
            return

        try:
            self.scheduler.add_job(send_message, 'cron', args=[ctx, msg], name=msg,
                                   id=job_id, replace_existing=True,
                                   day_of_week=weekday, hour=hour, minute=minute)
            await ctx.send('Break setup successfully.')
        except ValueError:
            await ctx.send('Invalid argument format(s)! Try again.')

        self.setup_in_progress = False

    async def _prompt(self, ctx, msg, check, err_msg=None, timeout_sec=60.0):
        await ctx.send(msg)
        try:
            reply = await self.bot.wait_for('message', check=check, timeout=timeout_sec)
            if reply.content == '!break abort':
                await self._abort_setup(ctx, 'Setup aborted.')
                return None
            return reply.content
        except asyncio.TimeoutError:
            await self._abort_setup(ctx, err_msg)
            return None

    async def _abort_setup(self, ctx, msg=None):
        if msg is not None:
            await ctx.send(msg)
        self.setup_in_progress = False

    def job_tostring(self, job, title, id):
        t = job.trigger
        fields = {f.name: str(f) for f in t.fields}
        time = datetime.time(hour=int(fields['hour']),
                             minute=int(fields['minute']))
        return f'''{title} (id: {id})
        {fields['day_of_week']} at {time}'''
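
The cron job registered in `setup()` above calls a module-level `send_message` coroutine that is not shown in this excerpt; a minimal plausible definition (an assumption, not the original) would simply post the stored text back to the captured context:

async def send_message(ctx, msg):
    # hypothetical helper assumed by Break.setup(); not part of the excerpt
    await ctx.send(msg)

Example #14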
class ApSchedulerAdapter(SchedulerAdapter):
    def __init__(self, dsn, exec_func, log_service):
        super().__init__(exec_func=exec_func, log_service=log_service)
        self.dsn = dsn

        self.scheduler = None

        self.create_scheduler()

    def create_scheduler(self):
        self.scheduler = AsyncIOScheduler()

        self.scheduler.configure(
            jobstores={
                'default': SQLAlchemyJobStore(url=self.dsn),
            },
            executors={
                'default': AsyncIOExecutor(),
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 1,
                'misfire_grace_time': (60 * 60)
            },
            timezone="UTC",
        )

    async def start(self):
        self.scheduler.start()

    async def stop(self):
        self.scheduler.shutdown(wait=True)

    async def add_cron_job(
            self,
            id,
            name,
            args,
            kwargs,
            crontab_expr,
            weekdays,
            hour,
            minute,
            second,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        weekdays = weekdays if weekdays is not None else []

        trigger = None

        if crontab_expr is not None:
            trigger = \
                CronTrigger.from_crontab(
                    crontab_expr,
                    timezone=timezone,
                )
        else:
            trigger = \
                CronTrigger(
                    day_of_week=
                    ",".join(
                        [w[0:3].upper() for w in weekdays]
                    )
                    if len(weekdays) > 0
                    else None,
                    hour=hour,
                    minute=minute,
                    second=second,
                    start_date=start_date,
                    end_date=end_date,
                    timezone=timezone,
                )

        await self._add_job(
            id=id,
            name=name,
            func=self.exec_func,
            kwargs=kwargs,
            trigger=trigger,
            coalesce=coalesce,
            misfire_grace_time=misfire_grace_time,
            replace_existing=replace_existing,
        )

    async def add_date_job(
            self,
            id,
            name,
            args,
            kwargs,
            date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def add_interval_job(
            self,
            id,
            name,
            args,
            kwargs,
            interval,
            days,
            hours,
            minutes,
            seconds,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def get_job(self, job_id):
        return self.scheduler.get_job(job_id)

    async def remove_job(self, job_id, raises=False):
        try:
            self.scheduler.remove_job(job_id)
        except JobLookupError as e:
            if "No job by the id of" in str(e):
                self.log_service.error(
                    f"Tried to remove apscheduler job with ID '{job_id}' but "
                    f"it wasn't found. Ignoring but you should look this "
                    f"up since it should never happen.")
                if raises:
                    raise JobNotFound(job_id)
            else:
                raise

    async def remove_all_jobs(self):
        """
        Removes all scheduled jobs.
        """
        for job in self.scheduler.get_jobs():
            self.scheduler.remove_job(job.id)

    # Helpers

    async def _add_job(self, **kwargs):
        """
        Convenience method for adding a job.
        """
        self.scheduler.add_job(**kwargs)
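
A hedged usage sketch for the adapter above; the DSN, the `exec_func` body, and `log_service=None` are assumptions standing in for the application's real wiring:

import asyncio

async def exec_func(**kwargs):
    # stand-in for whatever the application runs on each trigger
    print('job fired with', kwargs)

async def main():
    adapter = ApSchedulerAdapter('sqlite:///jobs.sqlite', exec_func, log_service=None)
    await adapter.start()
    # fires every day at 03:00 UTC; persisted in the SQLAlchemy job store
    await adapter.add_cron_job(
        id='nightly', name='nightly', args=None, kwargs={},
        crontab_expr='0 3 * * *', weekdays=None, hour=None, minute=None,
        second=None, start_date=None, end_date=None, timezone='UTC',
    )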
Example #15
class Scheduler:
    def __init__(self):
        scheduler_config = json.loads(
            config.httpdb.scheduling.scheduler_config)
        self._scheduler = AsyncIOScheduler(gconfig=scheduler_config,
                                           prefix=None)
        # this should be something that would never plausibly appear inside a project name or job name
        self._job_id_separator = "-_-"
        # we don't allow scheduling a job to run more than once per X
        # NOTE this cannot be less than one minute - see _validate_cron_trigger
        self._min_allowed_interval = config.httpdb.scheduling.min_allowed_interval
        self._secrets_provider = schemas.SecretProviderName.kubernetes

    async def start(self, db_session: Session):
        logger.info("Starting scheduler")
        self._scheduler.start()
        # the scheduler shutdown and start operations are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

        # don't fail the start on re-scheduling failure
        try:
            self._reload_schedules(db_session)
        except Exception as exc:
            logger.warning("Failed reloading schedules", exc=exc)

    async def stop(self):
        logger.info("Stopping scheduler")
        self._scheduler.shutdown()
        # the scheduler shutdown and start operations are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

    def create_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Union[Dict, Callable],
        cron_trigger: Union[str, schemas.ScheduleCronTrigger],
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        if concurrency_limit is None:
            concurrency_limit = config.httpdb.scheduling.default_concurrency_limit
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Creating schedule",
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        self._ensure_auth_info_has_access_key(auth_info, kind)
        self._store_schedule_secrets(auth_info, project, name)
        get_db().create_schedule(
            db_session,
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            labels,
        )
        self._create_schedule_in_scheduler(
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            auth_info,
        )

    def update_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
        scheduled_object: Union[Dict, Callable] = None,
        cron_trigger: Union[str, schemas.ScheduleCronTrigger] = None,
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        if cron_trigger is not None:
            self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Updating schedule",
            project=project,
            name=name,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_db().update_schedule(
            db_session,
            project,
            name,
            scheduled_object,
            cron_trigger,
            labels,
            concurrency_limit,
        )
        db_schedule = get_db().get_schedule(db_session, project, name)
        updated_schedule = self._transform_and_enrich_db_schedule(
            db_session, db_schedule)

        self._ensure_auth_info_has_access_key(auth_info, db_schedule.kind)
        self._store_schedule_secrets(auth_info, project, name)
        self._update_schedule_in_scheduler(
            project,
            name,
            updated_schedule.kind,
            updated_schedule.scheduled_object,
            updated_schedule.cron_trigger,
            updated_schedule.concurrency_limit,
            auth_info,
        )

    def list_schedules(
        self,
        db_session: Session,
        project: str = None,
        name: str = None,
        kind: str = None,
        labels: str = None,
        include_last_run: bool = False,
        include_credentials: bool = False,
    ) -> schemas.SchedulesOutput:
        logger.debug("Getting schedules",
                     project=project,
                     name=name,
                     labels=labels,
                     kind=kind)
        db_schedules = get_db().list_schedules(db_session, project, name,
                                               labels, kind)
        schedules = []
        for db_schedule in db_schedules:
            schedule = self._transform_and_enrich_db_schedule(
                db_session, db_schedule, include_last_run, include_credentials)
            schedules.append(schedule)
        return schemas.SchedulesOutput(schedules=schedules)

    def get_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        include_last_run: bool = False,
        include_credentials: bool = False,
    ) -> schemas.ScheduleOutput:
        logger.debug("Getting schedule", project=project, name=name)
        db_schedule = get_db().get_schedule(db_session, project, name)
        return self._transform_and_enrich_db_schedule(db_session, db_schedule,
                                                      include_last_run,
                                                      include_credentials)

    def delete_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
    ):
        logger.debug("Deleting schedule", project=project, name=name)
        self._remove_schedule_scheduler_resources(project, name)
        get_db().delete_schedule(db_session, project, name)

    def delete_schedules(
        self,
        db_session: Session,
        project: str,
    ):
        schedules = self.list_schedules(
            db_session,
            project,
        )
        logger.debug("Deleting schedules", project=project)
        for schedule in schedules.schedules:
            self._remove_schedule_scheduler_resources(schedule.project,
                                                      schedule.name)
        get_db().delete_schedules(db_session, project)

    def _remove_schedule_scheduler_resources(self, project, name):
        self._remove_schedule_from_scheduler(project, name)
        self._remove_schedule_secrets(project, name)

    def _remove_schedule_from_scheduler(self, project, name):
        job_id = self._resolve_job_id(project, name)
        # don't fail on delete if job doesn't exist
        job = self._scheduler.get_job(job_id)
        if job:
            self._scheduler.remove_job(job_id)

    async def invoke_schedule(
        self,
        db_session: Session,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
    ):
        logger.debug("Invoking schedule", project=project, name=name)
        db_schedule = await fastapi.concurrency.run_in_threadpool(
            get_db().get_schedule, db_session, project, name)
        function, args, kwargs = self._resolve_job_function(
            db_schedule.kind,
            db_schedule.scheduled_object,
            project,
            name,
            db_schedule.concurrency_limit,
            auth_info,
        )
        return await function(*args, **kwargs)

    def _ensure_auth_info_has_access_key(
        self,
        auth_info: mlrun.api.schemas.AuthInfo,
        kind: schemas.ScheduleKinds,
    ):
        if (kind not in schemas.ScheduleKinds.local_kinds() and mlrun.api.
                utils.auth.verifier.AuthVerifier().is_jobs_auth_required()
                and (not auth_info.access_key or auth_info.access_key
                     == mlrun.model.Credentials.generate_access_key)):
            auth_info.access_key = mlrun.api.utils.auth.verifier.AuthVerifier(
            ).get_or_create_access_key(auth_info.session)

    def _store_schedule_secrets(
        self,
        auth_info: mlrun.api.schemas.AuthInfo,
        project: str,
        name: str,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud

        if mlrun.api.utils.auth.verifier.AuthVerifier().is_jobs_auth_required(
        ):
            # sanity
            if not auth_info.access_key:
                raise mlrun.errors.MLRunAccessDeniedError(
                    "Access key is required to create schedules in OPA authorization mode"
                )
            access_key_secret_key = mlrun.api.crud.Secrets(
            ).generate_schedule_access_key_secret_key(name)
            # schedule name may be an invalid secret key, therefore we're using the key map feature of our secrets
            # handler
            secret_key_map = (mlrun.api.crud.Secrets().
                              generate_schedule_key_map_secret_key())
            secrets = {
                access_key_secret_key: auth_info.access_key,
            }
            if auth_info.username:
                username_secret_key = mlrun.api.crud.Secrets(
                ).generate_schedule_username_secret_key(name)
                secrets[username_secret_key] = auth_info.username
            mlrun.api.crud.Secrets().store_secrets(
                project,
                schemas.SecretsData(
                    provider=self._secrets_provider,
                    secrets=secrets,
                ),
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )

    def _remove_schedule_secrets(
        self,
        project: str,
        name: str,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud

        if mlrun.api.utils.auth.verifier.AuthVerifier().is_jobs_auth_required(
        ):
            access_key_secret_key = mlrun.api.crud.Secrets(
            ).generate_schedule_access_key_secret_key(name)
            username_secret_key = mlrun.api.crud.Secrets(
            ).generate_schedule_username_secret_key(name)
            secret_key_map = (mlrun.api.crud.Secrets().
                              generate_schedule_key_map_secret_key())
            # TODO: support delete secrets (plural and not only singular) using key map
            mlrun.api.crud.Secrets().delete_secret(
                project,
                self._secrets_provider,
                access_key_secret_key,
                allow_secrets_from_k8s=True,
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )
            mlrun.api.crud.Secrets().delete_secret(
                project,
                self._secrets_provider,
                username_secret_key,
                allow_secrets_from_k8s=True,
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )

    def _get_schedule_secrets(
        self,
        project: str,
        name: str,
        include_username: bool = True
    ) -> typing.Tuple[typing.Optional[str], typing.Optional[str]]:
        # import here to avoid circular imports
        import mlrun.api.crud

        schedule_access_key_secret_key = mlrun.api.crud.Secrets(
        ).generate_schedule_access_key_secret_key(name)
        secret_key_map = mlrun.api.crud.Secrets(
        ).generate_schedule_key_map_secret_key()
        # TODO: support listing (and not only get) secrets using key map
        access_key = mlrun.api.crud.Secrets().get_secret(
            project,
            self._secrets_provider,
            schedule_access_key_secret_key,
            allow_secrets_from_k8s=True,
            allow_internal_secrets=True,
            key_map_secret_key=secret_key_map,
        )
        username = None
        if include_username:
            schedule_username_secret_key = mlrun.api.crud.Secrets(
            ).generate_schedule_username_secret_key(name)
            username = mlrun.api.crud.Secrets().get_secret(
                project,
                self._secrets_provider,
                schedule_username_secret_key,
                allow_secrets_from_k8s=True,
                allow_internal_secrets=True,
                key_map_secret_key=secret_key_map,
            )

        return username, access_key

    def _validate_cron_trigger(
        self,
        cron_trigger: schemas.ScheduleCronTrigger,
        # accepting now from outside for testing purposes
        now: datetime = None,
    ):
        """
        Enforce no more than one job per min_allowed_interval
        """
        logger.debug("Validating cron trigger")
        apscheduler_cron_trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = now or datetime.now(apscheduler_cron_trigger.timezone)
        next_run_time = None
        second_next_run_time = now

        # doing 60 checks to allow one-minute precision; if the _min_allowed_interval is less than one minute,
        # validation won't fail in certain scenarios where it should. See test_validate_cron_trigger_multi_checks
        # for a detailed explanation
        for index in range(60):
            next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                None, second_next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=1999
            if next_run_time is None:
                return
            second_next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                next_run_time, next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=2050
            if second_next_run_time is None:
                return
            min_allowed_interval_seconds = humanfriendly.parse_timespan(
                self._min_allowed_interval)
            if second_next_run_time < next_run_time + timedelta(
                    seconds=min_allowed_interval_seconds):
                logger.warn(
                    "Cron trigger too frequent. Rejecting",
                    cron_trigger=cron_trigger,
                    next_run_time=next_run_time,
                    second_next_run_time=second_next_run_time,
                    delta=second_next_run_time - next_run_time,
                )
                raise ValueError(
                    f"Cron trigger too frequent. no more then one job "
                    f"per {self._min_allowed_interval} is allowed")

    def _create_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Adding schedule to scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind, scheduled_object, project, name, concurrency_limit,
            auth_info)

        # we use max_instances as well as our logic in the run wrapper for concurrent jobs
        # in order to allow concurrency for triggering the jobs (max_instances), and concurrency
        # of the jobs themselves (our logic in the run wrapper).
        self._scheduler.add_job(
            function,
            self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
                cron_trigger),
            args,
            kwargs,
            job_id,
            max_instances=concurrency_limit,
        )

    def _update_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Updating schedule in scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind, scheduled_object, project, name, concurrency_limit,
            auth_info)
        trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = datetime.now(self._scheduler.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self._scheduler.modify_job(
            job_id,
            func=function,
            args=args,
            kwargs=kwargs,
            trigger=trigger,
            next_run_time=next_run_time,
        )

    def _reload_schedules(self, db_session: Session):
        logger.info("Reloading schedules")
        db_schedules = get_db().list_schedules(db_session)
        for db_schedule in db_schedules:
            # don't let one failure fail the rest
            try:
                # import here to avoid circular imports
                import mlrun.api.crud

                access_key = None
                username = None
                if mlrun.api.utils.auth.verifier.AuthVerifier(
                ).is_jobs_auth_required():
                    username, access_key = self._get_schedule_secrets(
                        db_schedule.project, db_schedule.name)
                self._create_schedule_in_scheduler(
                    db_schedule.project,
                    db_schedule.name,
                    db_schedule.kind,
                    db_schedule.scheduled_object,
                    db_schedule.cron_trigger,
                    db_schedule.concurrency_limit,
                    mlrun.api.schemas.AuthInfo(username=username,
                                               access_key=access_key),
                )
            except Exception as exc:
                logger.warn(
                    "Failed rescheduling job. Continuing",
                    exc=str(exc),
                    traceback=traceback.format_exc(),
                    db_schedule=db_schedule,
                )

    def _transform_and_enrich_db_schedule(
        self,
        db_session: Session,
        schedule_record: schemas.ScheduleRecord,
        include_last_run: bool = False,
        include_credentials: bool = False,
    ) -> schemas.ScheduleOutput:
        schedule_dict = schedule_record.dict()
        schedule_dict["labels"] = {
            label["name"]: label["value"]
            for label in schedule_dict["labels"]
        }
        schedule = schemas.ScheduleOutput(**schedule_dict)

        job_id = self._resolve_job_id(schedule_record.project,
                                      schedule_record.name)
        job = self._scheduler.get_job(job_id)
        if job:
            schedule.next_run_time = job.next_run_time

        if include_last_run:
            self._enrich_schedule_with_last_run(db_session, schedule)

        if include_credentials:
            self._enrich_schedule_with_credentials(schedule)

        return schedule

    @staticmethod
    def _enrich_schedule_with_last_run(
            db_session: Session, schedule_output: schemas.ScheduleOutput):
        if schedule_output.last_run_uri:
            run_project, run_uid, iteration, _ = RunObject.parse_uri(
                schedule_output.last_run_uri)
            run_data = get_db().read_run(db_session, run_uid, run_project,
                                         iteration)
            schedule_output.last_run = run_data

    def _enrich_schedule_with_credentials(
            self, schedule_output: schemas.ScheduleOutput):
        _, schedule_output.credentials.access_key = self._get_schedule_secrets(
            schedule_output.project,
            schedule_output.name,
            include_username=False)

    def _resolve_job_function(
        self,
        scheduled_kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        project_name: str,
        schedule_name: str,
        schedule_concurrency_limit: int,
        auth_info: mlrun.api.schemas.AuthInfo,
    ) -> Tuple[Callable, Optional[Union[List, Tuple]], Optional[Dict]]:
        """
        :return: a tuple (function, args, kwargs) to be used with the APScheduler.add_job
        """

        if scheduled_kind == schemas.ScheduleKinds.job:
            scheduled_object_copy = copy.deepcopy(scheduled_object)
            return (
                Scheduler.submit_run_wrapper,
                [
                    self,
                    scheduled_object_copy,
                    project_name,
                    schedule_name,
                    schedule_concurrency_limit,
                    auth_info,
                ],
                {},
            )
        if scheduled_kind == schemas.ScheduleKinds.local_function:
            return scheduled_object, [], {}

        # sanity
        message = "Scheduled object kind missing implementation"
        logger.warn(message, scheduled_object_kind=scheduled_kind)
        raise NotImplementedError(message)

    def _list_schedules_from_scheduler(self, project: str):
        jobs = self._scheduler.get_jobs()
        return [
            job for job in jobs if self._resolve_job_id(project, "") in job.id
        ]

    def _resolve_job_id(self, project, name) -> str:
        """
        :return: returns the identifier that will be used inside the APScheduler
        """
        return self._job_id_separator.join([project, name])
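        # e.g. _resolve_job_id("my-project", "nightly") -> "my-project-_-nightly"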

    @staticmethod
    async def submit_run_wrapper(
        scheduler,
        scheduled_object,
        project_name,
        schedule_name,
        schedule_concurrency_limit,
        auth_info: mlrun.api.schemas.AuthInfo,
    ):
        # import here to avoid circular imports
        import mlrun.api.crud
        from mlrun.api.api.utils import submit_run

        # remove the schedule from the body, otherwise when the scheduler submits this task
        # it will enter an endless scheduling loop
        scheduled_object.pop("schedule", None)

        # remove the uid from the task metadata so that a new uid is generated for every run;
        # otherwise all runs would share the same uid
        scheduled_object.get("task", {}).get("metadata", {}).pop("uid", None)

        if "task" in scheduled_object and "metadata" in scheduled_object["task"]:
            scheduled_object["task"]["metadata"].setdefault("labels", {})
            scheduled_object["task"]["metadata"]["labels"][
                schemas.constants.LabelNames.schedule_name] = schedule_name

        db_session = create_session()

        active_runs = mlrun.api.crud.Runs().list_runs(
            db_session,
            state=RunStates.non_terminal_states(),
            project=project_name,
            labels=f"{schemas.constants.LabelNames.schedule_name}={schedule_name}",
        )
        if len(active_runs) >= schedule_concurrency_limit:
            logger.warn(
                "Schedule exceeded concurrency limit, skipping this run",
                project=project_name,
                schedule_name=schedule_name,
                schedule_concurrency_limit=schedule_concurrency_limit,
                active_runs=len(active_runs),
            )
            return

        # if credentials are needed but missing (this happens for schedules created before the
        # scheduler stored credentials and carried over an upgrade), enrich them.
        # Note that we rely on the knowledge that submit_run only requires the access key of the auth info
        if (not auth_info.access_key and
                mlrun.api.utils.auth.verifier.AuthVerifier().is_jobs_auth_required()):
            # import here to avoid circular imports
            import mlrun.api.utils.auth
            import mlrun.api.utils.singletons.project_member

            logger.info(
                "Schedule missing auth info which is required. Trying to fill from project owner",
                project_name=project_name,
                schedule_name=schedule_name,
            )

            project_member = mlrun.api.utils.singletons.project_member.get_project_member()
            project_owner = project_member.get_project_owner(db_session, project_name)
            # Update the schedule with the new auth info so we won't need to do the above again in the next run
            scheduler.update_schedule(
                db_session,
                mlrun.api.schemas.AuthInfo(username=project_owner.username,
                                           access_key=project_owner.session),
                project_name,
                schedule_name,
            )

        response = await submit_run(db_session, auth_info, scheduled_object)

        run_metadata = response["data"]["metadata"]
        run_uri = RunObject.create_uri(run_metadata["project"],
                                       run_metadata["uid"],
                                       run_metadata["iteration"])
        get_db().update_schedule(
            db_session,
            run_metadata["project"],
            schedule_name,
            last_run_uri=run_uri,
        )

        close_session(db_session)

        return response

    @staticmethod
    def transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
        cron_trigger: schemas.ScheduleCronTrigger, ):
        return APSchedulerCronTrigger(
            cron_trigger.year,
            cron_trigger.month,
            cron_trigger.day,
            cron_trigger.week,
            cron_trigger.day_of_week,
            cron_trigger.hour,
            cron_trigger.minute,
            cron_trigger.second,
            cron_trigger.start_date,
            cron_trigger.end_date,
            cron_trigger.timezone,
            cron_trigger.jitter,
        )
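
A note on the concurrency guard in submit_run_wrapper above: APScheduler's
max_instances only caps how many trigger executions of a job may overlap, so
the wrapper additionally counts non-terminal runs and skips submission once
the limit is reached. Below is a minimal standalone sketch of that guard,
using an in-memory set in place of the database query (active_runs,
CONCURRENCY_LIMIT, and guarded_run are illustrative names, not mlrun API):

import asyncio
import uuid

active_runs = set()
CONCURRENCY_LIMIT = 2

async def guarded_run():
    # skip this invocation entirely when too many runs are still in flight
    if len(active_runs) >= CONCURRENCY_LIMIT:
        print("concurrency limit reached, skipping this run")
        return
    run_id = uuid.uuid4().hex
    active_runs.add(run_id)
    try:
        await asyncio.sleep(5)  # stands in for the actual submitted work
    finally:
        active_runs.discard(run_id)
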
Example #16
0
class Scheduler:
    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        # this should be something that does not make any sense to be inside project name or job name
        self._job_id_separator = "-_-"
        # we don't allow scheduling a job to run more than once per X
        # NOTE this cannot be less than one minute - see _validate_cron_trigger
        self._min_allowed_interval = config.httpdb.scheduling.min_allowed_interval

    async def start(self, db_session: Session):
        logger.info("Starting scheduler")
        self._scheduler.start()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

        # don't fail the start on re-scheduling failure
        try:
            self._reload_schedules(db_session)
        except Exception as exc:
            logger.warning("Failed reloading schedules", exc=exc)

    async def stop(self):
        logger.info("Stopping scheduler")
        self._scheduler.shutdown()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep makes them work
        await asyncio.sleep(0)

    def create_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Union[Dict, Callable],
        cron_trigger: Union[str, schemas.ScheduleCronTrigger],
        labels: Dict = None,
        concurrency_limit: int = config.httpdb.scheduling.default_concurrency_limit,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Creating schedule",
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_project_member().ensure_project(db_session, project)
        get_db().create_schedule(
            db_session,
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            labels,
        )
        self._create_schedule_in_scheduler(
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
        )

    def update_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        scheduled_object: Union[Dict, Callable] = None,
        cron_trigger: Union[str, schemas.ScheduleCronTrigger] = None,
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        if cron_trigger is not None:
            self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Updating schedule",
            project=project,
            name=name,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_db().update_schedule(
            db_session,
            project,
            name,
            scheduled_object,
            cron_trigger,
            labels,
            concurrency_limit,
        )
        db_schedule = get_db().get_schedule(db_session, project, name)
        updated_schedule = self._transform_and_enrich_db_schedule(
            db_session, db_schedule)

        self._update_schedule_in_scheduler(
            project,
            name,
            updated_schedule.kind,
            updated_schedule.scheduled_object,
            updated_schedule.cron_trigger,
            updated_schedule.concurrency_limit,
        )

    def list_schedules(
        self,
        db_session: Session,
        project: str = None,
        name: str = None,
        kind: str = None,
        labels: str = None,
        include_last_run: bool = False,
    ) -> schemas.SchedulesOutput:
        logger.debug("Getting schedules",
                     project=project,
                     name=name,
                     labels=labels,
                     kind=kind)
        db_schedules = get_db().list_schedules(db_session, project, name,
                                               labels, kind)
        schedules = []
        for db_schedule in db_schedules:
            schedule = self._transform_and_enrich_db_schedule(
                db_session, db_schedule, include_last_run)
            schedules.append(schedule)
        return schemas.SchedulesOutput(schedules=schedules)

    def get_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        logger.debug("Getting schedule", project=project, name=name)
        db_schedule = get_db().get_schedule(db_session, project, name)
        return self._transform_and_enrich_db_schedule(db_session, db_schedule,
                                                      include_last_run)

    def delete_schedule(self, db_session: Session, project: str, name: str):
        logger.debug("Deleting schedule", project=project, name=name)
        job_id = self._resolve_job_id(project, name)
        # don't fail on delete if job doesn't exist
        job = self._scheduler.get_job(job_id)
        if job:
            self._scheduler.remove_job(job_id)
        get_db().delete_schedule(db_session, project, name)

    async def invoke_schedule(self, db_session: Session, project: str,
                              name: str):
        logger.debug("Invoking schedule", project=project, name=name)
        db_schedule = await fastapi.concurrency.run_in_threadpool(
            get_db().get_schedule, db_session, project, name)
        function, args, kwargs = self._resolve_job_function(
            db_schedule.kind,
            db_schedule.scheduled_object,
            project,
            name,
            db_schedule.concurrency_limit,
        )
        return await function(*args, **kwargs)

    def _validate_cron_trigger(
        self,
        cron_trigger: schemas.ScheduleCronTrigger,
        # accepting now from outside for testing purposes
        now: datetime = None,
    ):
        """
        Enforce no more than one job per min_allowed_interval
        """
        logger.debug("Validating cron trigger")
        apscheduler_cron_trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = now or datetime.now(apscheduler_cron_trigger.timezone)
        next_run_time = None
        second_next_run_time = now

        # doing 60 checks to allow one-minute precision; if _min_allowed_interval is less than one
        # minute, validation won't fail in certain scenarios where it should. See
        # test_validate_cron_trigger_multi_checks for a detailed explanation
        for index in range(60):
            next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                None, second_next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=1999
            if next_run_time is None:
                return
            second_next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                next_run_time, next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=2050
            if second_next_run_time is None:
                return
            min_allowed_interval_seconds = humanfriendly.parse_timespan(
                self._min_allowed_interval)
            if second_next_run_time < next_run_time + timedelta(
                    seconds=min_allowed_interval_seconds):
                logger.warn(
                    "Cron trigger too frequent. Rejecting",
                    cron_trigger=cron_trigger,
                    next_run_time=next_run_time,
                    second_next_run_time=second_next_run_time,
                    delta=second_next_run_time - next_run_time,
                )
                raise ValueError(
                    f"Cron trigger too frequent. No more than one job "
                    f"per {self._min_allowed_interval} is allowed")

    def _create_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Adding schedule to scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )

        # we control concurrency on two levels: max_instances limits how many
        # trigger executions of the same job may overlap, while the logic in the
        # run wrapper limits how many of the runs themselves may be in flight.
        self._scheduler.add_job(
            function,
            self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
                cron_trigger),
            args,
            kwargs,
            job_id,
            max_instances=concurrency_limit,
        )

    def _update_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Updating schedule in scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )
        trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = datetime.now(self._scheduler.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self._scheduler.modify_job(
            job_id,
            func=function,
            args=args,
            kwargs=kwargs,
            trigger=trigger,
            next_run_time=next_run_time,
        )

    def _reload_schedules(self, db_session: Session):
        logger.info("Reloading schedules")
        db_schedules = get_db().list_schedules(db_session)
        for db_schedule in db_schedules:
            # don't let one failure fail the rest
            try:
                self._create_schedule_in_scheduler(
                    db_schedule.project,
                    db_schedule.name,
                    db_schedule.kind,
                    db_schedule.scheduled_object,
                    db_schedule.cron_trigger,
                    db_schedule.concurrency_limit,
                )
            except Exception as exc:
                logger.warn(
                    "Failed rescheduling job. Continuing",
                    exc=str(exc),
                    db_schedule=db_schedule,
                )

    def _transform_and_enrich_db_schedule(
        self,
        db_session: Session,
        schedule_record: schemas.ScheduleRecord,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        schedule_dict = schedule_record.dict()
        schedule_dict["labels"] = {
            label["name"]: label["value"]
            for label in schedule_dict["labels"]
        }
        schedule = schemas.ScheduleOutput(**schedule_dict)

        job_id = self._resolve_job_id(schedule_record.project,
                                      schedule_record.name)
        job = self._scheduler.get_job(job_id)
        if job:
            schedule.next_run_time = job.next_run_time

        if include_last_run:
            schedule = self._enrich_schedule_with_last_run(
                db_session, schedule)

        return schedule

    @staticmethod
    def _enrich_schedule_with_last_run(
            db_session: Session, schedule_output: schemas.ScheduleOutput):
        if schedule_output.last_run_uri:
            run_project, run_uid, iteration, _ = RunObject.parse_uri(
                schedule_output.last_run_uri)
            run_data = get_db().read_run(db_session, run_uid, run_project,
                                         iteration)
            schedule_output.last_run = run_data
        return schedule_output

    def _resolve_job_function(
        self,
        scheduled_kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        project_name: str,
        schedule_name: str,
        schedule_concurrency_limit: int,
    ) -> Tuple[Callable, Optional[Union[List, Tuple]], Optional[Dict]]:
        """
        :return: a tuple (function, args, kwargs) to be used with the APScheduler.add_job
        """

        if scheduled_kind == schemas.ScheduleKinds.job:
            scheduled_object_copy = copy.deepcopy(scheduled_object)
            return (
                Scheduler.submit_run_wrapper,
                [
                    scheduled_object_copy,
                    project_name,
                    schedule_name,
                    schedule_concurrency_limit,
                ],
                {},
            )
        if scheduled_kind == schemas.ScheduleKinds.local_function:
            return scheduled_object, [], {}

        # sanity
        message = "Scheduled object kind missing implementation"
        logger.warn(message, scheduled_object_kind=scheduled_kind)
        raise NotImplementedError(message)

    def _resolve_job_id(self, project, name) -> str:
        """
        :return: returns the identifier that will be used inside the APScheduler
        """
        return self._job_id_separator.join([project, name])

    @staticmethod
    async def submit_run_wrapper(scheduled_object, project_name, schedule_name,
                                 schedule_concurrency_limit):
        # import here to avoid circular imports
        from mlrun.api.api.utils import submit_run

        # remove the schedule from the body, otherwise when the scheduler submits this task
        # it will enter an endless scheduling loop
        scheduled_object.pop("schedule", None)

        # remove the uid from the task metadata so that a new uid is generated for every run;
        # otherwise all runs would share the same uid
        scheduled_object.get("task", {}).get("metadata", {}).pop("uid", None)

        if "task" in scheduled_object and "metadata" in scheduled_object["task"]:
            scheduled_object["task"]["metadata"].setdefault("labels", {})
            scheduled_object["task"]["metadata"]["labels"][
                schemas.constants.LabelNames.schedule_name] = schedule_name

        db_session = create_session()

        active_runs = get_db().list_runs(
            db_session,
            state=RunStates.non_terminal_states(),
            project=project_name,
            labels=f"{schemas.constants.LabelNames.schedule_name}={schedule_name}",
        )
        if len(active_runs) >= schedule_concurrency_limit:
            logger.warn(
                "Schedule exceeded concurrency limit, skipping this run",
                project=project_name,
                schedule_name=schedule_name,
                schedule_concurrency_limit=schedule_concurrency_limit,
                active_runs=len(active_runs),
            )
            return

        response = await submit_run(db_session, scheduled_object)

        run_metadata = response["data"]["metadata"]
        run_uri = RunObject.create_uri(run_metadata["project"],
                                       run_metadata["uid"],
                                       run_metadata["iteration"])
        get_db().update_schedule(
            db_session,
            run_metadata["project"],
            schedule_name,
            last_run_uri=run_uri,
        )

        close_session(db_session)

        return response

    @staticmethod
    def transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
        cron_trigger: schemas.ScheduleCronTrigger, ):
        return APSchedulerCronTrigger(
            cron_trigger.year,
            cron_trigger.month,
            cron_trigger.day,
            cron_trigger.week,
            cron_trigger.day_of_week,
            cron_trigger.hour,
            cron_trigger.minute,
            cron_trigger.second,
            cron_trigger.start_date,
            cron_trigger.end_date,
            cron_trigger.timezone,
            cron_trigger.jitter,
        )
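
The frequency floor enforced by _validate_cron_trigger generalizes to any
APScheduler CronTrigger: sample consecutive fire times and reject the trigger
when two of them land closer together than the minimum interval. Here is a
condensed standalone sketch of the same idea (validate_min_interval and the
600-second floor in the usage lines are illustrative assumptions, not mlrun's
configuration):

from datetime import datetime, timedelta

from apscheduler.triggers.cron import CronTrigger

def validate_min_interval(trigger: CronTrigger, min_seconds: int, checks: int = 60):
    # walk up to `checks` consecutive fire times, failing on the first pair
    # that violates the minimum interval
    now = datetime.now(trigger.timezone)
    next_time = trigger.get_next_fire_time(None, now)
    for _ in range(checks):
        if next_time is None:
            return  # no more fire times, nothing left to enforce
        following = trigger.get_next_fire_time(next_time, next_time)
        if following is None:
            return
        if following - next_time < timedelta(seconds=min_seconds):
            raise ValueError(
                f"Cron trigger too frequent, minimum interval is {min_seconds}s")
        next_time = following

validate_min_interval(CronTrigger(hour="*"), min_seconds=600)      # passes
validate_min_interval(CronTrigger(minute="*/2"), min_seconds=600)  # raises
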
Example #17
0
class Scheduler(Starlette):

    days = {
        "0": "sun",
        "1": "mon",
        "2": "tue",
        "3": "wed",
        "4": "thu",
        "5": "fri",
        "6": "sat",
        "7": "sun",
        "*": "*",
    }

    seconds = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

    def __init__(self):
        super().__init__()
        with open(Path.cwd().parent / "setup" / "scheduler.json", "r") as file:
            self.settings = load(file)
        dictConfig(self.settings["logging"])
        self.configure_scheduler()
        self.register_routes()

    @staticmethod
    def aps_date(date):
        if not date:
            return
        date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
        return datetime.strftime(date, "%Y-%m-%d %H:%M:%S")

    def configure_scheduler(self):
        self.scheduler = AsyncIOScheduler(self.settings["config"])
        self.scheduler.start()

    def register_routes(self):
        @self.route("/job", methods=["DELETE"])
        async def delete(request):
            job_id = await request.json()
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
            return JSONResponse(True)

        @self.route("/next_runtime/{task_id}")
        async def next_runtime(request):
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                return JSONResponse(job.next_run_time.strftime("%Y-%m-%d %H:%M:%S"))
            return JSONResponse("Not Scheduled")

        @self.route("/schedule", methods=["POST"])
        async def schedule(request):
            data = await request.json()
            if data["mode"] in ("resume", "schedule"):
                result = self.schedule_task(data["task"])
                if not result:
                    return JSONResponse({"alert": "Cannot schedule in the past."})
                else:
                    return JSONResponse({"response": "Task resumed.", "active": True})
            else:
                try:
                    self.scheduler.pause_job(data["task"]["id"])
                    return JSONResponse({"response": "Task paused."})
                except JobLookupError:
                    return JSONResponse({"alert": "There is no such job scheduled."})

        @self.route("/time_left/{task_id}")
        async def time_left(request):
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                delta = job.next_run_time.replace(tzinfo=None) - datetime.now()
                hours, remainder = divmod(delta.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                days = f"{delta.days} days, " if delta.days else ""
                return JSONResponse(f"{days}{hours}h:{minutes}m:{seconds}s")
            return JSONResponse("Not Scheduled")

    @staticmethod
    def run_service(task_id):
        auth = HTTPBasicAuth(environ.get("ENMS_USER"), environ.get("ENMS_PASSWORD"))
        post(f"{environ.get('ENMS_ADDR')}/rest/run_task", json=task_id, auth=auth)

    def schedule_task(self, task):
        if task["scheduling_mode"] == "cron":
            crontab = task["crontab_expression"].split()
            crontab[-1] = ",".join(self.days[day] for day in crontab[-1].split(","))
            trigger = {"trigger": CronTrigger.from_crontab(" ".join(crontab))}
        elif task["frequency"]:
            trigger = {
                "trigger": "interval",
                "start_date": self.aps_date(task["start_date"]),
                "end_date": self.aps_date(task["end_date"]),
                "seconds": int(task["frequency"])
                * self.seconds[task["frequency_unit"]],
            }
        else:
            trigger = {"trigger": "date", "run_date": self.aps_date(task["start_date"])}
        if not self.scheduler.get_job(task["id"]):
            job = self.scheduler.add_job(
                id=str(task["id"]),
                replace_existing=True,
                func=self.run_service,
                args=[task["id"]],
                **trigger,
            )
        else:
            job = self.scheduler.reschedule_job(str(task["id"]), **trigger)
        return job.next_run_time > datetime.now(job.next_run_time.tzinfo)
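
The days mapping in Example #17 works around a real cron/APScheduler mismatch:
standard cron numbers weekdays from Sunday (0 or 7 = sun) while APScheduler
numbers them from Monday (0 = mon), so numeric day-of-week fields passed
straight to CronTrigger.from_crontab would be shifted by one day. Converting
digits to weekday names sidesteps the ambiguity. A minimal sketch of just
that conversion (cron_trigger is an illustrative helper name):

from apscheduler.triggers.cron import CronTrigger

DAYS = {"0": "sun", "1": "mon", "2": "tue", "3": "wed",
        "4": "thu", "5": "fri", "6": "sat", "7": "sun", "*": "*"}

def cron_trigger(expression: str) -> CronTrigger:
    # rewrite the day-of-week field (last of the five) using weekday names,
    # which mean the same thing in both cron and APScheduler
    fields = expression.split()
    fields[-1] = ",".join(DAYS[day] for day in fields[-1].split(","))
    return CronTrigger.from_crontab(" ".join(fields))

# "0 2 * * 1,5" now fires at 02:00 on Monday and Friday, as cron intends
print(cron_trigger("0 2 * * 1,5"))
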
Example #18
0
if __name__ == '__main__':
    loop_time = celery.conf["MAESTRO_LOOP_TIME"]

    # misfire_grace_time is a per-job default, not a scheduler-level option,
    # so it must be passed through job_defaults to take effect
    scheduler = AsyncIOScheduler(timezone=utc,
                                 jobstores=jobstores,
                                 job_defaults={"misfire_grace_time": 15})
    Jobber = Jobs()

    def warmup():
        Jobber.tick()
        jobs = Jobber.get_jobs()
        SpawnJobs().spawn(jobs, scheduler)

    scheduler.start()

    warmjob = scheduler.get_job("warmup")
    if not warmjob:
        scheduler.add_job(warmup,
                          trigger='interval',
                          seconds=loop_time,
                          id="warmup")
    else:
        scheduler.reschedule_job(trigger='interval',
                                 seconds=loop_time,
                                 job_id="warmup")

    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
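
When the job's trigger is reset on every startup anyway, the get_job /
add_job / reschedule_job branching in Example #18 can be collapsed into a
single call, since add_job accepts replace_existing=True to overwrite a
persisted job with the same id. A minimal standalone sketch (warmup and the
30-second interval are illustrative stand-ins for the example's names):

import asyncio

from apscheduler.schedulers.asyncio import AsyncIOScheduler

def warmup():
    print("tick")

scheduler = AsyncIOScheduler()
scheduler.start()
# replace_existing=True overwrites any job already stored under this id,
# so no lookup-then-reschedule branching is needed after a restart
scheduler.add_job(warmup,
                  trigger='interval',
                  seconds=30,
                  id="warmup",
                  replace_existing=True)

try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    pass
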