Example 1
    def main():

        scheduler = AsyncIOScheduler()
        scheduler.configure({
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '1000'
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '1000'
            },
            'apscheduler.timezone': 'UTC',
        })

        #lg.basicConfig()
        #lg.getLogger('apscheduler').setLevel(lg.DEBUG)

        #running monitor first
        scheduler.add_job(run_spawn_monitor, None, [scheduler])

        scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

        gclog.write('[HISTORICAL SCHEDULER ALIVE]', 'DEBUG')

        # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
        try:
            asyncio.get_event_loop().run_forever()
        except (KeyboardInterrupt, SystemExit):
            pass
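
The dict passed to configure() above uses APScheduler's prefixed-key format ('apscheduler.*'). A minimal equivalent sketch that passes explicit executor objects instead of the prefixed dict (not part of the original snippet):

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.asyncio import AsyncIOScheduler

scheduler = AsyncIOScheduler()
scheduler.configure(
    executors={
        'default': ThreadPoolExecutor(1000),        # same pool sizes as the dict above
        'processpool': ProcessPoolExecutor(1000),
    },
    timezone='UTC',
)
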
Example 2
def scheduler(client):
    global afterhours_start_hour
    global afterhours_end_hour
    hour_start = os.getenv('AFTERHOURS_START_HOUR', '23')
    hour_end = os.getenv('AFTERHOURS_END', '6')
    afterhours_start_hour = int(hour_start)
    afterhours_end_hour = int(hour_end)

    print("Starting Job Scheduler")
    scheduler = AsyncIOScheduler()
    scheduler.configure(timezone=tz)
    scheduler.add_job(open_channel,
                      trigger='cron',
                      args=(client, ),
                      hour=hour_start,
                      id='id_open_channel',
                      misfire_grace_time=300,
                      coalesce=True)
    scheduler.add_job(close_channel,
                      trigger='cron',
                      args=(client, ),
                      hour=hour_end,
                      id='id_close_channel',
                      misfire_grace_time=300,
                      coalesce=True)
    scheduler.start()
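
The function above assumes open_channel, close_channel, a tz timezone object and a client are defined elsewhere; the cron jobs only fire once the surrounding asyncio event loop (for example, the Discord client's) is running. A hedged sketch of that missing scaffolding, with illustrative names:

import asyncio
import os
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pytz import utc as tz  # assumed timezone object used by scheduler.configure(timezone=tz)

async def open_channel(client):   # placeholder job: open the after-hours channel
    print("opening channel")

async def close_channel(client):  # placeholder job: close the after-hours channel
    print("closing channel")

# scheduler(client) would then be called from the bot's startup hook
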
Example 3
class Bot(commands.Bot):
    __slots__ = ("extensions", "scheduler")

    def __init__(self) -> None:
        self.extensions = [p.stem for p in Path(".").glob("./modmail/bot/extensions/*.py")]
        self.scheduler = AsyncIOScheduler()
        self.scheduler.configure(timezone=utc)

        super().__init__(
            command_prefix=Config.PREFIX,
            status=discord.Status.online,
            intents=discord.Intents.all(),
        )

    def __call__(self) -> None:
        self.run()

    def setup(self) -> None:
        print("Running setup...")
        for ext in self.extensions:
            self.load_extension(f"modmail.bot.extensions.{ext}")
            print(f" `{ext}` extension loaded.")

    def run(self) -> None:
        self.setup()
        print("Running bot...")
        super().run(Config.TOKEN, reconnect=True)

    async def close(self) -> None:
        print("Shutting down...")
        if stdout := self.get_cog("Hub").stdout:
            await stdout.send(f"Modmail v{modmail.__version__} is shutting down.")
        await super().close()
        print(" Bot shut down.")
Example 4
 async def start_giveaway(self,
                          end_time,
                          prize,
                          channel,
                          winners,
                          donator_note=None,
                          donator=None,
                          giveaway_id=None):
     # starts a giveaway
     # creates the embed and sends it
     giveaway_embed = discord.Embed(
         color=discord.Color.blue(),
         title=f"{GIVEAWAY_EMOTE} GIVEAWAY! {GIVEAWAY_EMOTE}",
         description=f"**Prize:** {prize}\n"
         f"**Possible winners:** {winners} {'member' if winners == 1 else 'members'}\n"
         f"Click on the {GIVEAWAY_EMOTE} reaction to enter!")
     giveaway_embed.timestamp = end_time
     giveaway_embed.set_footer(text="Ends at")
     if donator_note is not None:
         donator = discord.utils.get(channel.guild.members, id=donator)
         if donator is not None: donator = donator.name
         giveaway_embed.add_field(name=f"A message by {donator}:",
                                  value=donator_note)
     msg = await channel.send(embed=giveaway_embed)
     await msg.add_reaction(f"{GIVEAWAY_EMOTE}")
     # sets up the scheduler
     scheduler = AsyncIOScheduler()
     scheduler.configure(timezone=end_time.tzinfo)  # pass the tzinfo object; tzname() only gives an abbreviation string
     scheduler.add_job(func=self.giveaway_embed,
                       trigger="date",
                       run_date=end_time,
                       args=(prize, channel, msg.id, winners, end_time,
                             giveaway_id))
     scheduler.start()
     return True
Example 5
async def initialize_scheduler(app, loop):
    scheduler = AsyncIOScheduler(event_loop=loop)  # pass as a keyword; unprefixed dict keys are dropped by configure()
    scheduler.add_job(fetch,
                      'interval',
                      hours=1,
                      next_run_time=datetime.datetime.now(),
                      timezone=utc,
                      args=[app])
    scheduler.configure(timezone=utc)
    scheduler.start()
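
The (app, loop) signature suggests this coroutine is registered as a server start-up listener (e.g. in Sanic), so the scheduler is bound to the server's event loop. A hedged sketch of how it might be wired up, assuming Sanic:

from sanic import Sanic

app = Sanic("example-app")  # illustrative application name

# assumption: run the initializer before the server starts serving
app.register_listener(initialize_scheduler, "before_server_start")
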
Example 6
class TimerJob:
    _scheduler = None

    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        self._scheduler.configure(timezone=utc)  # use UTC as the scheduler's timezone

    def init_timer(self):
        print("启动调度器...")
        self._scheduler.start()

    def close_timer(self):
        self._scheduler.shutdown(wait=True)
        print("关闭调度器...")

    def new_job(self, j_id: str, func: object, args: tuple, cron: str):
        """添加定时任务"""
        return self._scheduler.add_job(id=j_id,
                                       func=func,
                                       args=args,
                                       trigger=CronTrigger.from_crontab(cron))

    def delete_job(self, j_id: str):
        """删除定时任务"""
        return self._scheduler.remove_job(job_id=j_id)

    def stop_job(self, j_id: str):
        """暂停任务"""
        return self._scheduler.pause_job(job_id=j_id)

    def replay_job(self, j_id: str):
        """恢复任务"""
        return self._scheduler.resume_job(job_id=j_id)

    def modify_job(self, j_id: str, func: object, args: tuple, cron: str):
        """更新任务"""
        return self._scheduler.modify_job(
            job_id=j_id,
            func=func,
            args=args,
            trigger=CronTrigger.from_crontab(cron))

    def get_job(self, j_id: str):
        """获取定时任务信息"""
        return self._scheduler.get_job(job_id=j_id)

    def get_all(self):
        """Return all jobs."""
        return self._scheduler.get_jobs()
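
A brief usage sketch for the class above (the job coroutine and cron expression are illustrative):

import asyncio

async def report():  # hypothetical job function
    print("tick")

timer = TimerJob()
timer.new_job(j_id="report", func=report, args=(), cron="*/5 * * * *")  # every 5 minutes
timer.init_timer()
asyncio.get_event_loop().run_forever()  # jobs fire while the loop runs
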
Example 7
from asyncio import CancelledError as CancelError
from asyncio import TimeoutError as AsyncTimeoutError
from sqlite3 import OperationalError
from http.client import RemoteDisconnected
from urllib.error import URLError
from concurrent.futures._base import TimeoutError
from redis.exceptions import ResponseError

persistent_vars = {}
module_dir = __path__[0]
working_dir = getcwd()
config = None
help_messages = {}
scheduler = AsyncIOScheduler()
if not scheduler.running:
    scheduler.configure(timezone="Asia/ShangHai")
    scheduler.start()
version = 0.1
logs = getLogger(__name__)
logging_format = "%(levelname)s [%(asctime)s] [%(name)s] %(message)s"
logging_handler = StreamHandler()
logging_handler.setFormatter(ColoredFormatter(logging_format))
root_logger = getLogger()
root_logger.setLevel(ERROR)
root_logger.addHandler(logging_handler)
basicConfig(level=INFO)
logs.setLevel(INFO)

try:
    config = load(open(r"config.yml"), Loader=FullLoader)
except FileNotFoundError:
Example 8
        str(config['schedule']),
        str(config['restart']),
        str(config['restart_after']),
        str(config['container_uid'])
    ])


if __name__ == '__main__':

    scheduler = AsyncIOScheduler()
    scheduler.configure({
        'apscheduler.executors.default': {
            'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
            'max_workers': '1000'
        },
        'apscheduler.executors.processpool': {
            'type': 'processpool',
            'max_workers': '1000'
        },
        'apscheduler.timezone': 'UTC',
    })

    #running monitor first
    scheduler.add_job(run_monitor, None, [scheduler])

    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example 9
    def main():

        scheduler = AsyncIOScheduler()
        scheduler.configure({
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '1000'
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '1000'
            },
            'apscheduler.timezone': 'UTC',
        })

        #lg.basicConfig()
        #lg.getLogger('apscheduler').setLevel(lg.DEBUG)

        #running monitor first
        scheduler.add_job(run_monitor, None, [scheduler])

        #pushgateway monitor
        scheduler.add_job(pushgateway_monitor, None, [])

        config_path = os.path.join(os.path.dirname(__file__), './config.json')

        with open(config_path) as f:
            config = json.load(f)

        #spawning X copies of the same process
        default_agent_copies = config['default_agent_copies']
        jobs = []
        for item in config['jobs']:
            i = 1
            if 'default_agent_copies' in item:
                copies_to_spawn = item['default_agent_copies']
            else:
                copies_to_spawn = default_agent_copies
            while i <= copies_to_spawn:
                item['container_name'] = uuid.uuid4(
                ).hex  #unique container name
                item['copy'] = i  #number of agent copy
                jobs.append(item.copy())
                i = i + 1

        #get list of redis-running agents
        r_agents_keys = []
        agents = redis.get_state('JOB_*')
        for agent_key, agent_state in agents.items():
            r_agents_keys.append(agent_key.decode("utf-8"))

        #get list of agents to be loaded
        c_agents_keys = []
        for job in jobs:
            job_key = redis.generate_key(job)
            c_agents_keys.append(job_key)

        # drop "zomby" (zombie) keys left from a previous monitor run
        for r_agent_key in r_agents_keys:
            if r_agent_key not in c_agents_keys:
                zomby_container = redis.get_agent_container(r_agent_key)
                gclog.write(
                    'Zomby-Agent detected in Redis, agent: {r_agent_key}, container: {zomby_container}'
                    .format(r_agent_key=r_agent_key,
                            zomby_container=zomby_container), 'DEBUG')
                scheduler.add_job(stop_container, None, [zomby_container])
                drop_redis_key(r_agent_key)

        #executing jobs
        for job in jobs:
            if job['schedule'] == 'cron':
                #parse crontab expression with seconds parameter (6 params required)
                cron = job['cron'].split('_')
                scheduler.add_job(start_agent,
                                  'cron',
                                  second=cron[0],
                                  minute=cron[1],
                                  hour=cron[2],
                                  day=cron[3],
                                  month=cron[4],
                                  year=cron[5],
                                  args=[job])
            else:
                scheduler.add_job(start_agent, None, [job])

        scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

        gclog.write('[SCHEDULER ALIVE]', 'DEBUG')

        # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
        try:
            asyncio.get_event_loop().run_forever()
        except (KeyboardInterrupt, SystemExit):
            pass
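
The underscore-separated cron string parsed above maps positionally to second, minute, hour, day, month and year. An illustrative (hypothetical) value:

cron = "0_30_2_*_*_*".split("_")
# second='0', minute='30', hour='2', day='*', month='*', year='*'
# i.e. the agent would be started every day at 02:30:00 UTC
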
Example 10
class UQCSBot(object):
    user_token: Optional[str] = underscored_getter("user_token")
    bot_token: Optional[str] = underscored_getter("bot_token")
    bot_client: Optional[slack.WebClient] = underscored_getter("bot_client")
    user_client: Optional[slack.WebClient] = underscored_getter("user_client")
    rtm_client: Optional[slack.RTMClient] = underscored_getter("rtm_client")
    verification_token: Optional[str] = underscored_getter(
        "verification_token")
    executor: concurrent.futures.ThreadPoolExecutor = underscored_getter(
        "executor")

    def __init__(self, logger=None):
        self._user_token = None
        self._user_client = None
        self._bot_token = None
        self._bot_client = None
        self._verification_token = None
        self._executor = concurrent.futures.ThreadPoolExecutor()
        self.logger = logger or logging.getLogger("uqcsbot")
        self._handlers: DefaultDict[str, list] = collections.defaultdict(list)
        self._command_registry: DefaultDict[
            str, list] = collections.defaultdict(list)
        self._scheduler = AsyncIOScheduler()

        self.register_handler('message', self._handle_command)
        self.register_handler('hello', self._handle_hello)
        self.register_handler('goodbye', self._handle_goodbye)

        self.channels = ChannelWrapper(self)
        self.users = UsersWrapper(self)

    async def _handle_hello(self, evt):
        if evt != {"type": "hello"}:
            self.logger.debug(f"Hello event has unexpected extras: {evt}")
        self.logger.info(f"Successfully connected to server")

    async def _handle_goodbye(self, evt):
        if evt != {"type": "goodbye"}:
            self.logger.debug(f"Goodbye event has unexpected extras: {evt}")
        self.logger.info(f"Server is about to disconnect")

    def on_command(self, command_name: str):
        def decorator(command_fn):
            """
            Decorator function which returns a wrapper function that catches any
            UsageSyntaxExceptions and sends the wrapped command's helper doc to the calling channel.
            Also adds the function as a handler for the given command name.
            """

            if asyncio.iscoroutinefunction(command_fn):
                raise TypeError(
                    "Commands currently don't support async functions")

            @wraps(command_fn)
            def wrapper(command: Command):
                try:
                    return command_fn(command)
                except UsageSyntaxException:
                    helper_doc = get_helper_doc(command.name)
                    self.post_message(command.channel_id,
                                      f'usage: {helper_doc}')

            self._command_registry[command_name].append(wrapper)
            return wrapper

        return decorator

    def on(self, message_type: Optional[str], fn: Optional[Callable] = None):
        if fn is None:
            return partial(self.register_handler, message_type)
        return self.register_handler(message_type, fn)

    def on_schedule(self, *args, **kwargs):
        return lambda f: self._scheduler.add_job(f, *args, **kwargs)

    def register_handler(self, message_type: Optional[str],
                         handler_fn: Callable):
        if message_type is None:
            message_type = ""
        if not callable(handler_fn):
            raise TypeError(f"Handler function {handler_fn} must be callable")
        self._handlers[message_type].append(handler_fn)
        return handler_fn

    def api_call(self, method, **kwargs):
        return getattr(self.api, method)(**kwargs)

    api_call.__doc__ = slack.WebClient.api_call.__doc__

    @property
    def api(self):
        """
        See uqcsbot.api.APIWrapper for usage information.
        """
        return APIWrapper(self.user_client, self.bot_client)

    def post_message(self, channel: Union[Channel, str], text: str, **kwargs):
        channel_id = channel if isinstance(channel, str) else channel.id
        return self.api.chat.postMessage(channel=channel_id,
                                         text=text,
                                         **kwargs)

    def get_event_loop(self) -> asyncio.AbstractEventLoop:
        """
        Provides an AbstractEventLoop that works in the current command context.

        This is the extent of our asyncio support.
        """
        policy = asyncio.get_event_loop_policy()
        if policy._local._loop is None:  # type: ignore
            policy.set_event_loop(policy.new_event_loop())
        return policy.get_event_loop()

    @contextmanager
    def _execution_context(self):
        """
        Starts the scheduler for timed tasks, and on error does cleanup

        Also configures the event loop
        """
        self._loop = self.get_event_loop()
        current = threading.current_thread()
        original_run_until_complete = self._loop.run_until_complete

        def wait_until_run_complete(fut):
            fut = asyncio.ensure_future(fut, loop=self._loop)
            if threading.current_thread() == current:
                return fut
            else:
                evt = threading.Event()
                self._loop.call_soon_threadsafe(fut.add_done_callback,
                                                lambda *a: evt.set())
                evt.wait()
                return fut.result()

        self._loop.run_until_complete = wait_until_run_complete

        self._user_client = slack.WebClient(token=self.user_token,
                                            loop=self._loop)
        self._bot_client = slack.WebClient(token=self.bot_token,
                                           loop=self._loop)

        self._scheduler.configure(event_loop=self._loop)
        self._scheduler.start()
        try:
            yield original_run_until_complete
        except Exception:
            self.logger.exception("An error occurred, exiting")
            raise
        finally:
            self._scheduler.shutdown()
            self._executor.shutdown()
            self._loop.close()

    def _execute_catching_error(self, handler, evt):
        """
        Wraps handler execution so that any errors that occur in a handler are
        logged and ignored.
        """
        try:
            return handler(evt)
        except Exception:
            self.logger.exception(f'Error in handler while processing {evt}')
            return None

    async def _handle_command(self, message: dict) -> None:
        """
        Run handlers for commands, wrapping messages in a `Command` object
        before passing them to the handler. Handlers are executed by a
        ThreadPoolExecutor.
        """
        command = Command.from_message(message)
        if command is None:
            return
        futures = [
            asyncio.ensure_future(self._loop.run_in_executor(
                self.executor,
                self._execute_catching_error,
                handler,
                command,
            ),
                                  loop=self._loop)
            for handler in self._command_registry[command.name]
        ]
        for fut in futures:
            await fut

    async def _run_handlers(self, event: dict):
        """
        Run handlers for raw messages based on message type. Handlers are
        executed by a ThreadPoolExecutor.
        """
        self.logger.debug(f"Running handlers for {event}")
        if "type" not in event:
            self.logger.error(f"No type in message: {event}")
        handlers = self._handlers[event['type']] + self._handlers['']
        futures = [
            self._loop.run_in_executor(self.executor,
                                       self._execute_catching_error, handler,
                                       event) for handler in handlers
        ]
        return [(await future) for future in futures]

    def run(self, user_token, bot_token):
        """
        Run the bot.

        api_token: Slack API token
        verification_token: Events API verification token
        """
        self._user_token = user_token
        self._bot_token = bot_token
        with self._execution_context() as run_future:
            self._rtm_client = ModifiedRTMClient(
                token=self.bot_token,
                executor=self.executor,
                handlers=self._handlers,
                loop=self._loop,
                run_async=True,
            )
            try:
                run_future(self.rtm_client.start())
            except KeyboardInterrupt:
                self.rtm_client.stop()
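
The on_schedule decorator above simply forwards its arguments to the underlying scheduler's add_job. A hedged usage sketch (handler name, schedule and channel id are illustrative):

bot = UQCSBot()

@bot.on_schedule('cron', hour=9, timezone='Australia/Brisbane')
def morning_announcement():
    # only runs once bot.run(...) has started the scheduler
    bot.post_message("C0123456", "Good morning!")  # hypothetical channel id
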
Example 11
class ApSchedulerAdapter(SchedulerAdapter):
    def __init__(self, dsn, exec_func, log_service):
        super().__init__(exec_func=exec_func, log_service=log_service)
        self.dsn = dsn

        self.scheduler = None

        self.create_scheduler()

    def create_scheduler(self):
        self.scheduler = AsyncIOScheduler()

        self.scheduler.configure(
            jobstores={
                'default': SQLAlchemyJobStore(url=self.dsn),
            },
            executors={
                'default': AsyncIOExecutor(),
            },
            job_defaults={
                'coalesce': False,
                'max_instances': 1,
                'misfire_grace_time': (60 * 60)
            },
            timezone="UTC",
        )

    async def start(self):
        self.scheduler.start()

    async def stop(self):
        self.scheduler.shutdown(wait=True)

    async def add_cron_job(
            self,
            id,
            name,
            args,
            kwargs,
            crontab_expr,
            weekdays,
            hour,
            minute,
            second,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        weekdays = weekdays if weekdays is not None else []

        trigger = None

        if crontab_expr is not None:
            trigger = \
                CronTrigger.from_crontab(
                    crontab_expr,
                    timezone=timezone,
                )
        else:
            trigger = \
                CronTrigger(
                    day_of_week=
                    ",".join(
                        [w[0:3].upper() for w in weekdays]
                    )
                    if len(weekdays) > 0
                    else None,
                    hour=hour,
                    minute=minute,
                    second=second,
                    start_date=start_date,
                    end_date=end_date,
                    timezone=timezone,
                )

        await self._add_job(
            id=id,
            name=name,
            func=self.exec_func,
            kwargs=kwargs,
            trigger=trigger,
            coalesce=coalesce,
            misfire_grace_time=misfire_grace_time,
            replace_existing=replace_existing,
        )

    async def add_date_job(
            self,
            id,
            name,
            args,
            kwargs,
            date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def add_interval_job(
            self,
            id,
            name,
            args,
            kwargs,
            interval,
            days,
            hours,
            minutes,
            seconds,
            start_date,
            end_date,
            timezone,
            coalesce=False,
            misfire_grace_time=(60 * 60),
            replace_existing=True,
    ):
        raise NotImplementedError()

    async def get_job(self, job_id):
        return self.scheduler.get_job(job_id)

    async def remove_job(self, job_id, raises=False):
        try:
            self.scheduler.remove_job(job_id)
        except JobLookupError as e:
            if "No job by the id of" in str(e):
                self.log_service.error(
                    f"Tried to remove apscheduler job with ID '{job_id}' but "
                    f"it wasn't found. Ignoring but you should look this "
                    f"up since it should never happen.")
                if raises:
                    raise JobNotFound(job_id)
            else:
                raise

    async def remove_all_jobs(self):
        """
        Removes all scheduled jobs.
        """
        for job in self.scheduler.get_jobs():
            self.scheduler.remove_job(job.id)

    # Helpers

    async def _add_job(self, **kwargs):
        """
        Convenience method for adding a job.
        """
        self.scheduler.add_job(**kwargs)
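
A hedged usage sketch for the adapter above; the DSN, executor function and log service are placeholders, and the base SchedulerAdapter is assumed to store exec_func and log_service as attributes:

import asyncio

async def run_task(**kwargs):      # placeholder exec_func called by scheduled jobs
    print("executing", kwargs)

class PrintLog:                    # placeholder log service providing the .error() used above
    def error(self, msg):
        print(msg)

async def main():
    adapter = ApSchedulerAdapter(
        dsn="sqlite:///jobs.sqlite",   # any SQLAlchemy URL for the persistent job store
        exec_func=run_task,
        log_service=PrintLog(),
    )
    await adapter.start()
    await adapter.add_cron_job(
        id="nightly-cleanup", name="nightly-cleanup", args=(), kwargs={"task": "cleanup"},
        crontab_expr="0 3 * * *",      # every day at 03:00 in the given timezone
        weekdays=None, hour=None, minute=None, second=None,
        start_date=None, end_date=None, timezone="UTC",
    )
    # keep the loop alive so the job can fire, e.g. await asyncio.Event().wait()
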
Example 12
class JobQueue:
    """This class allows you to periodically perform tasks with the bot. It is a convenience
    wrapper for the APScheduler library.

    Attributes:
        scheduler (:class:`apscheduler.schedulers.asyncio.AsyncIOScheduler`): The scheduler.

            .. versionchanged:: 20.0
                Uses :class:`~apscheduler.schedulers.asyncio.AsyncIOScheduler` instead of
                :class:`~apscheduler.schedulers.background.BackgroundScheduler`


    """

    __slots__ = ("_application", "scheduler", "_executor")
    _CRON_MAPPING = ("sun", "mon", "tue", "wed", "thu", "fri", "sat")

    def __init__(self) -> None:
        self._application: "Optional[weakref.ReferenceType[Application]]" = None
        self._executor = AsyncIOExecutor()
        self.scheduler = AsyncIOScheduler(timezone=pytz.utc, executors={"default": self._executor})

    def _tz_now(self) -> datetime.datetime:
        return datetime.datetime.now(self.scheduler.timezone)

    @overload
    def _parse_time_input(self, time: None, shift_day: bool = False) -> None:
        ...

    @overload
    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime, datetime.time],
        shift_day: bool = False,
    ) -> datetime.datetime:
        ...

    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime, datetime.time, None],
        shift_day: bool = False,
    ) -> Optional[datetime.datetime]:
        if time is None:
            return None
        if isinstance(time, (int, float)):
            return self._tz_now() + datetime.timedelta(seconds=time)
        if isinstance(time, datetime.timedelta):
            return self._tz_now() + time
        if isinstance(time, datetime.time):
            date_time = datetime.datetime.combine(
                datetime.datetime.now(tz=time.tzinfo or self.scheduler.timezone).date(), time
            )
            if date_time.tzinfo is None:
                date_time = self.scheduler.timezone.localize(date_time)
            if shift_day and date_time <= datetime.datetime.now(pytz.utc):
                date_time += datetime.timedelta(days=1)
            return date_time
        return time

    def set_application(self, application: "Application") -> None:
        """Set the application to be used by this JobQueue.

        Args:
            application (:class:`telegram.ext.Application`): The application.

        """
        self._application = weakref.ref(application)
        if isinstance(application.bot, ExtBot) and application.bot.defaults:
            self.scheduler.configure(
                timezone=application.bot.defaults.tzinfo or pytz.utc,
                executors={"default": self._executor},
            )

    @property
    def application(self) -> "Application":
        """The application this JobQueue is associated with."""
        if self._application is None:
            raise RuntimeError("No application was set for this JobQueue.")
        application = self._application()
        if application is not None:
            return application
        raise RuntimeError("The application instance is no longer alive.")

    def run_once(
        self,
        callback: JobCallback,
        when: Union[float, datetime.timedelta, datetime.datetime, datetime.time],
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` instance that runs once and adds it to the queue.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            when (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                         \
                  :obj:`datetime.datetime` | :obj:`datetime.time`):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (:attr:`datetime.datetime.tzinfo`) is
                  :obj:`None`, the default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (:attr:`datetime.time.tzinfo`) is :obj:`None`, the
                  default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.

            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)
        date_time = self._parse_time_input(when, shift_day=True)

        j = self.scheduler.add_job(
            job.run,
            name=name,
            trigger="date",
            run_date=date_time,
            args=(self.application,),
            timezone=date_time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_repeating(
        self,
        callback: JobCallback,
        interval: Union[float, datetime.timedelta],
        first: Union[float, datetime.timedelta, datetime.datetime, datetime.time] = None,
        last: Union[float, datetime.timedelta, datetime.datetime, datetime.time] = None,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` instance that runs at specified intervals and adds it to the
        queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            interval (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta`): The interval in which
                the job will run. If it is an :obj:`int` or a :obj:`float`, it will be interpreted
                as seconds.
            first (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (:attr:`datetime.datetime.tzinfo`) is
                  :obj:`None`, the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (:attr:`datetime.time.tzinfo`) is :obj:`None`, the
                  default timezone of the bot will be used, which is UTC unless
                  :attr:`telegram.ext.Defaults.tzinfo` is used.

                Defaults to :paramref:`interval`
            last (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Latest possible time for the job to run. This parameter will be interpreted
                depending on its type. See :paramref:`first` for details.

                If :paramref:`last` is :obj:`datetime.datetime` or :obj:`datetime.time` type
                and ``last.tzinfo`` is :obj:`None`, the default timezone of the bot will be
                assumed, which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.

                Defaults to :obj:`None`.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        dt_first = self._parse_time_input(first)
        dt_last = self._parse_time_input(last)

        if dt_last and dt_first and dt_last < dt_first:
            raise ValueError("'last' must not be before 'first'!")

        if isinstance(interval, datetime.timedelta):
            interval = interval.total_seconds()

        j = self.scheduler.add_job(
            job.run,
            trigger="interval",
            args=(self.application,),
            start_date=dt_first,
            end_date=dt_last,
            seconds=interval,
            name=name,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_monthly(
        self,
        callback: JobCallback,
        when: datetime.time,
        day: int,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` that runs on a monthly basis and adds it to the queue.

        .. versionchanged:: 20.0
            The ``day_is_strict`` argument was removed. Instead one can now pass ``-1`` to the
            :paramref:`day` parameter to have the job run on the last day of the month.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            when (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``when.tzinfo``) is :obj:`None`, the default timezone of the bot will be used,
                which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.
            day (:obj:`int`): Defines the day of the month whereby the job would run. It should
                be within the range of ``1`` and ``31``, inclusive. If a month has fewer days than
                this number, the job will not run in this month. Passing ``-1`` leads to the job
                running on the last day of the month.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(
            job.run,
            trigger="cron",
            args=(self.application,),
            name=name,
            day="last" if day == -1 else day,
            hour=when.hour,
            minute=when.minute,
            second=when.second,
            timezone=when.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )
        job.job = j
        return job

    def run_daily(
        self,
        callback: JobCallback,
        time: datetime.time,
        days: Tuple[int, ...] = tuple(range(7)),
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
        job_kwargs: JSONDict = None,
    ) -> "Job":
        """Creates a new :class:`Job` that runs on a daily basis and adds it to the queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            time (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (:obj:`datetime.time.tzinfo`) is :obj:`None`, the default timezone of the bot will
                be used, which is UTC unless :attr:`telegram.ext.Defaults.tzinfo` is used.
            days (Tuple[:obj:`int`], optional): Defines on which days of the week the job should
                run (where ``0-6`` correspond to sunday - saturday). By default, the job will run
                every day.

                .. versionchanged:: 20.0
                    Changed day of the week mapping of 0-6 from monday-sunday to sunday-saturday.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job()`.

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        # TODO: After v20.0, we should remove this warning.
        warn(
            "Prior to v20.0 the `days` parameter was not aligned to that of cron's weekday scheme."
            "We recommend double checking if the passed value is correct.",
            stacklevel=2,
        )
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(
            job.run,
            name=name,
            args=(self.application,),
            trigger="cron",
            day_of_week=",".join([self._CRON_MAPPING[d] for d in days]),
            hour=time.hour,
            minute=time.minute,
            second=time.second,
            timezone=time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_custom(
        self,
        callback: JobCallback,
        job_kwargs: JSONDict,
        data: object = None,
        name: str = None,
        chat_id: int = None,
        user_id: int = None,
    ) -> "Job":
        """Creates a new custom defined :class:`Job`.

        Args:
            callback (:term:`coroutine function`): The callback function that should be executed by
                the new job. Callback signature::

                    async def callback(context: CallbackContext)

            job_kwargs (:obj:`dict`): Arbitrary keyword arguments. Used as arguments for
                :meth:`apscheduler.schedulers.base.BaseScheduler.add_job`.
            data (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through :attr:`Job.data` in the callback. Defaults to
                :obj:`None`.

                .. versionchanged:: 20.0
                    Renamed the parameter ``context`` to :paramref:`data`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                :external:attr:`callback.__name__ <definition.__name__>`.
            chat_id (:obj:`int`, optional): Chat id of the chat associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.chat_data` will
                be available in the callback.

                .. versionadded:: 20.0

            user_id (:obj:`int`, optional): User id of the user associated with this job. If
                passed, the corresponding :attr:`~telegram.ext.CallbackContext.user_data` will
                be available in the callback.

                .. versionadded:: 20.0

        Returns:
            :class:`telegram.ext.Job`: The new :class:`Job` instance that has been added to the job
            queue.

        """
        name = name or callback.__name__
        job = Job(callback=callback, data=data, name=name, chat_id=chat_id, user_id=user_id)

        j = self.scheduler.add_job(job.run, args=(self.application,), name=name, **job_kwargs)

        job.job = j
        return job

    async def start(self) -> None:
        # this method is async just in case future versions need that
        """Starts the :class:`~telegram.ext.JobQueue`."""
        if not self.scheduler.running:
            self.scheduler.start()

    async def stop(self, wait: bool = True) -> None:
        """Shuts down the :class:`~telegram.ext.JobQueue`.

        Args:
            wait (:obj:`bool`, optional): Whether to wait until all currently running jobs
                have finished. Defaults to :obj:`True`.

        """
        # the interface methods of AsyncIOExecutor are currently not really asyncio-compatible
        # so we apply some small tweaks here to try and smoothen the integration into PTB
        # TODO: When APS 4.0 hits, we should be able to remove the tweaks
        if wait:
            # Unfortunately AsyncIOExecutor just cancels them all ...
            await asyncio.gather(
                *self._executor._pending_futures,  # pylint: disable=protected-access
                return_exceptions=True,
            )
        if self.scheduler.running:
            self.scheduler.shutdown(wait=wait)
            # scheduler.shutdown schedules a task in the event loop but immediately returns
            # so give it a tiny bit of time to actually shut down.
            await asyncio.sleep(0.01)

    def jobs(self) -> Tuple["Job", ...]:
        """Returns a tuple of all *scheduled* jobs that are currently in the :class:`JobQueue`."""
        return tuple(
            Job._from_aps_job(job)  # pylint: disable=protected-access
            for job in self.scheduler.get_jobs()
        )

    def get_jobs_by_name(self, name: str) -> Tuple["Job", ...]:
        """Returns a tuple of all *pending/scheduled* jobs with the given name that are currently
        in the :class:`JobQueue`.
        """
        return tuple(job for job in self.jobs() if job.name == name)
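
A hedged usage sketch for the JobQueue above, following the documented coroutine callback signature (the token and chat id are placeholders):

import datetime
from telegram.ext import Application, ContextTypes

async def remind(context: ContextTypes.DEFAULT_TYPE) -> None:
    await context.bot.send_message(chat_id=context.job.chat_id, text="Reminder!")

application = Application.builder().token("BOT_TOKEN").build()
# run once, 60 seconds from now, tied to a specific chat
application.job_queue.run_once(remind, when=60, chat_id=123456789)
# run every day at 09:00 in the bot's default timezone (UTC unless Defaults.tzinfo is set)
application.job_queue.run_daily(remind, time=datetime.time(hour=9), chat_id=123456789)
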
Example 13
class Bot(commands.Bot):
    def __init__(self) -> None:
        self._cogs: t.List[str] = [
            p.stem for p in Path(".").glob("./carberretta/bot/cogs/*.py")
        ]
        self.scheduler = AsyncIOScheduler()
        self.scheduler.configure(timezone=utc)
        self.db = Database(self)

        super().__init__(
            irc_token=Config.IRC_TOKEN,
            api_token=Config.API_TOKEN,
            client_id=Config.CLIENT_ID,
            client_secret=Config.CLIENT_SECRET,
            prefix=Config.PREFIX,
            nick=Config.NICK,
            initial_channels=Config.INITIAL_CHANNELS,
        )

    def setup(self) -> None:
        print("Running setup...")

        for cog in self._cogs:
            self.load_module(f"carberretta.bot.cogs.{cog}")
            print(f" Loaded `{cog}` cog.")

        print("Setup complete.")

    def run(self) -> None:
        self.setup()

        print("Running bot...")
        super().run()

    async def event_ready(self) -> None:
        await self.db.connect()
        print(" Connected to database.")

        self.scheduler.start()
        print(
            f" Scheduler started ({len(self.scheduler.get_jobs()):,} job(s)).")

        self.channel: twitchio.Channel = self.get_channel(
            next(iter(self.initial_channels)))
        await self.channel.send(f"{Config.NICK} is now online!")
        print("Bot ready. Do NOT use CTRL+C to shut the bot down!")

    @commands.command(name="shutdown")
    async def shutdown_command(self, ctx: commands.bot.Context) -> None:
        if ctx.author.name != next(iter(self.initial_channels)):
            return await ctx.send("You cannot shut the bot down.")

        await self.channel.send(f"{Config.NICK} is now shutting down.")
        self.scheduler.shutdown()
        await self.db.close()
        sys.exit(0)

    async def on_error(self, error, data=None) -> None:
        raise

    async def event_command_error(self, ctx: commands.bot.Context,
                                  exc: twitchio.ClientError) -> None:
        if isinstance(exc, commands.CommandNotFound):
            return await ctx.send(f"That is not a registered command. "
                                  "Type {Config.PREFIX}help for a list.")

        if isinstance(exc, commands.MissingRequiredArgument):
            return await ctx.send(
                f"No `{exc.param.name}` argument was passed, "
                "despite being required.")

        if isinstance(exc, commands.BadArgument):
            return await ctx.send("One or more arguments are invalid.")

        raise

    async def handle_commands(self, message: twitchio.Message) -> None:
        elem = message.content.split(maxsplit=1)
        message.content = f"{elem[0].lower()} {' '.join(elem[1:])}"
        await super().handle_commands(message)

    async def event_message(self, message: twitchio.Message) -> None:
        if message.author.name in (Config.NICK, "restreambot"):
            return

        await self.handle_commands(message)

    def get_user(self, name: str) -> t.Optional[twitchio.User]:
        for user in self.channel.chatters:
            if name == user.name:
                return user
        return None
Example 14
class Service:
    name: str
    plugin: "Plugin"
    logger: Logger

    def __init__(
        self,
        name=None,
        *,
        use_priv=Privilege.DEFAULT,
        manage_priv=Privilege.ADMIN,
        enable_on_default=None,
        visible=None,
    ):
        self.name = name or 'base'
        self.plugin = _tmp_current_plugin
        self.logger = self.plugin.logger.getChild(self.name)

        config = _load_service_config(self.key)

        self.use_priv = config.get('use_priv', use_priv)
        self.manage_priv = config.get('manage_priv', manage_priv)
        self.enable_on_default = config.get('enable_on_default')
        if self.enable_on_default is None:
            if enable_on_default is None:
                self.enable_on_default = name is None
            else:
                self.enable_on_default = enable_on_default
        self.visible = config.get('visible')
        if self.visible is None:
            if visible is None:
                self.visible = name is not None
            else:
                self.visible = visible
        self.enable_group = set(config.get('enable_group', []))
        self.disable_group = set(config.get('disable_group', []))
        self.user_privs = dict(config.get('user_privs', []))

        # self._node_key = PredicateFunction(lambda event: self.check_priv(event), notation=self)
        # self._node = PredicateNode(self._node_key)
        self._sv_node = router.meta_plugin_is(self.plugin)
        self._terminals: Set[TerminalNode] = set()

        self._scheduler = Scheduler()

        @self.on_loaded()
        def _start_scheduler():
            if self._scheduler and not self._scheduler.running and self._scheduler.get_jobs(
            ):
                self._scheduler.configure(ajenga.config.APSCHEDULER_CONFIG)
                self._scheduler.start()

        @self.on_unload()
        def _on_unload():
            self.logger.info(
                f'Unloading... Unsubscribe all {len(self._terminals)} subscribers.'
            )
            app.engine.unsubscribe_terminals(self._terminals)

            # Stop scheduler
            if self._scheduler and self._scheduler.running:
                self._scheduler.remove_all_jobs()

        # Add to service list
        if self.key in _loaded_services:
            self.logger.warning(f"Service {self} already exists")
        _loaded_services[self.key] = self
        self.plugin.add_service(self)

    def check_priv(self,
                   event: Event,
                   required_priv: Union[int, Callable[[int], bool]] = None):
        if event.type in MessageEventTypes:
            required_priv = self.use_priv if required_priv is None else required_priv
            user_priv = self.get_user_priv(event)

            if isinstance(event, GroupMessageEvent):
                if not self.check_enabled(event.group):
                    return False
            if isinstance(required_priv, int):
                return bool(user_priv >= required_priv)
            elif isinstance(required_priv, Callable):
                return required_priv(user_priv)
            else:
                return False
        else:
            return True

    @property
    def key(self):
        if self.plugin is None:
            self.logger.error('Tried to access key while the service is still initializing!')
        else:
            return f'{self.plugin.name}.{self.name}'

    def __str__(self):
        return f'<Service: {self.key}>'

    @property
    def scheduler(self) -> Scheduler:
        return self._scheduler

    def on(self,
           graph=std.true,
           *,
           priv: Union[int, Callable[[int], bool]] = None):
        return ServiceGraphImpl(self) & graph & PredicateNode(
            PredicateFunction(
                lambda event: self.check_priv(event, required_priv=priv),
                notation=self))

    def on_message(self,
                   graph=std.true,
                   *,
                   priv: Union[int, Callable[[int], bool]] = None):
        return self.on(ajenga.router.message.is_message & graph, priv=priv)

    def on_load(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginLoad) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    def on_loaded(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginLoaded) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    def on_unload(self, arg: Any = None):
        g = (ServiceGraphImpl(self) & router.event_type_is(EventType.Meta)
             & router.meta_type_is(MetaEventType.PluginUnload) & self._sv_node)
        if isinstance(arg, Callable):
            return g(arg)
        else:
            return g

    @staticmethod
    def check_block_group(group: int):
        if group in _black_list_group and datetime.now() > _black_list_group[group]:
            del _black_list_group[group]  # blacklist entry has expired
            return False
        return bool(group in _black_list_group)

    @staticmethod
    def check_block_user(qq: int):
        if qq in _black_list_user and datetime.now() > _black_list_user[qq]:
            del _black_list_user[qq]  # blacklist entry has expired
            return False
        return bool(qq in _black_list_user)

    @staticmethod
    def get_priv_from_event(event: Event):
        if isinstance(event, GroupMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            elif event.sender.permission == GroupPermission.OWNER:
                return Privilege.OWNER
            elif event.sender.permission == GroupPermission.ADMIN:
                return Privilege.ADMIN
            elif event.sender.permission:
                return Privilege.GROUP
        elif isinstance(event, FriendMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            else:
                return Privilege.PRIVATE_FRIEND
        elif isinstance(event, TempMessageEvent):
            if event.sender.qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif Service.check_block_user(event.sender.qq):
                return Privilege.BLACK
            else:
                return Privilege.PRIVATE_GROUP
        return Privilege.DEFAULT

    async def get_user_priv_in_group(self, qq: ContactIdType,
                                     event: GroupMessageEvent, api: Api):
        priv = self.get_user_priv(qq)
        if priv == Privilege.BLACK:
            return priv
        member_info = await api.get_group_member_info(group=event.group, qq=qq)
        if member_info.ok:
            if member_info.data.permission == GroupPermission.OWNER:
                return max(priv, Privilege.OWNER)
            elif member_info.data.permission == GroupPermission.ADMIN:
                return max(priv, Privilege.ADMIN)
            else:
                return max(priv, Privilege.GROUP)
        return priv  # fall back to the base privilege when the member lookup fails

    def get_user_priv(self, qq_or_event: Union[ContactIdType,
                                               MessageEvent]) -> int:
        if isinstance(qq_or_event, ContactIdType):
            if qq_or_event in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            else:
                return self.user_privs.get(qq_or_event, Privilege.DEFAULT)
        elif isinstance(qq_or_event, MessageEvent):
            qq = qq_or_event.sender.qq
            ev_priv = self.get_priv_from_event(qq_or_event)
            sv_priv = self.user_privs.get(qq, Privilege.DEFAULT)
            if qq in ajenga.config.SUPERUSERS:
                return Privilege.SUPERUSER
            elif ev_priv == Privilege.BLACK or sv_priv == Privilege.BLACK:
                return Privilege.BLACK
            else:
                return max(ev_priv, sv_priv)
        else:
            self.logger.error(f'Unknown qq_or_event {qq_or_event}')
            return Privilege.DEFAULT

    def set_user_priv(self, qq_or_event: Union[ContactIdType, MessageEvent],
                      priv: int):
        # print(self.user_privs)
        if isinstance(qq_or_event, int):
            self.user_privs[qq_or_event] = priv
        elif isinstance(qq_or_event, MessageEvent):
            self.user_privs[qq_or_event.sender.qq] = priv
        else:
            self.logger.error(f'Unknown qq_or_event {qq_or_event}')
        _save_service_config(self)

    def set_enable(self, group: ContactIdType):
        self.enable_group.add(group)
        self.disable_group.discard(group)
        _save_service_config(self)
        self.logger.info(f'Service {self.name} is enabled in group {group}')

    def set_disable(self, group: ContactIdType):
        self.enable_group.discard(group)
        self.disable_group.add(group)
        _save_service_config(self)
        self.logger.info(f'Service {self.name} is disabled in group {group}')

    def check_enabled(self, group: ContactIdType):
        return bool(
            (group in self.enable_group)
            or (self.enable_on_default and group not in self.disable_group))

    async def get_enabled_groups(self) -> dict:
        ret = {}
        for qq, ses in app.get_sessions().items():
            group_list = await ses.api.get_group_list()
            for group in group_list.data:
                if self.check_enabled(group.id):
                    ret[group.id] = qq
        return ret

    def scheduled_job(self, *args, **kwargs) -> Callable:
        kwargs.setdefault('timezone', pytz.timezone('Asia/Shanghai'))
        kwargs.setdefault('misfire_grace_time', 60)
        kwargs.setdefault('coalesce', True)

        def deco(func: Callable) -> Callable:
            @wraps(func)
            async def wrapper():
                try:
                    self.logger.info(f'Scheduled job {func.__name__} started.')
                    await func()
                except Exception as e:
                    self.logger.exception(e)
                    self.logger.error(
                        f'{type(e).__name__} occurred while running scheduled job {func.__name__}.'
                    )

            return self.scheduler.scheduled_job(*args, **kwargs)(wrapper)

        return deco

    def scheduled(self, *args, **kwargs) -> Callable:
        def deco(func: Callable) -> Callable:
            uid = str(uuid.uuid4())

            async def _sched():
                await app.handle_event(_scheduler_source,
                                       SchedulerEvent(id=uid))

            self.scheduler.scheduled_job(*args, **kwargs)(_sched)

            return self.on(
                router.event_type_is(EventType.Scheduler)
                & router.std.if_(lambda ev: ev.id == uid))(func)

        return deco

    async def broadcast(self, *messages: Message_T, interval=0.2):
        groups = await self.get_enabled_groups()
        for group, qq in groups.items():
            try:
                for message in messages:
                    await app.get_session(qq).api.send_group_message(
                        group=group, message=message)
                    await asyncio.sleep(interval)
            except Exception as e:
                self.logger.exception(e)
                self.logger.error(f"Failed to broadcast to group {group}")
Esempio n. 15
0
class Bot(commands.Bot):
    def __init__(self, version):
        self.version = version
        self._cogs = [p.stem for p in Path(".").glob("./carberretta/bot/cogs/*.py")]
        self._dynamic = "./carberretta/data/dynamic"
        self._static = "./carberretta/data/static"

        self.scheduler = AsyncIOScheduler()
        self.session = ClientSession()
        self.db = Database(self)
        self.emoji = utils.EmojiGetter(self)
        self.loc = utils.CodeCounter()
        self.ready = utils.Ready(self)

        self.scheduler.configure(timezone=utc)
        self.loc.count()

        super().__init__(
            command_prefix=self.command_prefix,
            case_insensitive=True,
            owner_ids=Config.OWNER_IDS,
            status=discord.Status.dnd,
            intents=discord.Intents.all(),
        )

    def setup(self):
        print("running setup...")

        for cog in self._cogs:
            self.load_extension(f"carberretta.bot.cogs.{cog}")
            print(f" {cog} cog loaded")

        print("setup complete")

    def run(self):
        self.setup()

        print(f"running bot...")
        super().run(Config.TOKEN, reconnect=True)

    async def close(self):
        print("shutting down...")
        for cog in self.cogs.values():
            if hasattr(cog, "on_shutdown"):
                await cog.on_shutdown()

        self.scheduler.shutdown()
        await self.db.close()
        await self.session.close()

        hub = self.get_cog("Hub")
        await hub.stdout.send(f"Carberretta is shutting down. (Version {self.version})")
        await super().close()

    async def command_prefix(self, bot, message):
        return commands.when_mentioned_or(Config.PREFIX)(bot, message)

    async def process_commands(self, message):
        ctx = await self.get_context(message, cls=commands.Context)

        if ctx.command is None:
            return

        if not self.ready.bot:
            return await ctx.send(
                "Carberretta is not ready to receive commands. Try again in a few seconds.", delete_after=5
            )

        support = self.get_cog("Support")
        if ctx.channel in [sc.channel for sc in support.available_channels] and ctx.command.name != "reopen":
            return await ctx.message.delete()

        await self.invoke(ctx)

    async def on_error(self, err, *args, **kwargs):
        async with self.session.post("https://mystb.in/documents", data=traceback.format_exc()) as response:
            if 200 <= response.status <= 299:
                data = await response.json()
                link = f"https://mystb.in/{data['key']}"
            else:
                link = f"[No link: {response.status} status]"

        hub = self.get_cog("Hub")
        await hub.stdout.send(f"Something went wrong: <{link}>")

        if err == "on_command_error":
            await args[0].send("Something went wrong. Let Carberra or Max know.")

        raise  # Re-raises the last known exception.

    async def on_command_error(self, ctx, exc):
        if isinstance(exc, commands.CommandNotFound):
            pass

        # Custom check failure handling.
        elif hasattr(exc, "msg"):
            await ctx.send(exc.msg)

        elif isinstance(exc, commands.MissingRequiredArgument):
            await ctx.send(f"No `{exc.param.name}` argument was passed, despite being required.")

        elif isinstance(exc, commands.BadArgument):
            await ctx.send(f"One or more arguments are invalid.")

        elif isinstance(exc, commands.TooManyArguments):
            await ctx.send(f"Too many arguments have been passed.",)

        elif isinstance(exc, commands.MissingPermissions):
            mp = utils.string.list_of([str(perm.replace("_", " ")).title() for perm in exc.missing_perms], sep="or")
            await ctx.send(f"You do not have the {mp} permission(s), which are required to use this command.")

        elif isinstance(exc, commands.BotMissingPermissions):
            try:
                mp = utils.string.list_of(
                    [str(perm.replace("_", " ")).title() for perm in exc.missing_perms], sep="or"
                )
                await ctx.send(
                    f"Carberretta does not have the {mp} permission(s), which are required to use this command."
                )
            except discord.Forbidden:
                # If Carberretta does not have the Send Messages permission
                # (might redirect this to log channel once it's set up).
                pass

        elif isinstance(exc, commands.NotOwner):
            await ctx.send(f"That command can only be used by Carberretta's owner.")

        elif isinstance(exc, commands.CommandOnCooldown):
            # Hooray for discord.py str() logic.
            cooldown_texts = {
                "BucketType.user": "******",
                "BucketType.guild": "The `{}` command can not be used in this server for another {}.",
                "BucketType.channel": "The `{}` command can not be used in this channel for another {}.",
                "BucketType.member": "You can not use the `{}` command in this server for another {}.",
                "BucketType.category": "The `{}` command can not be used in this category for another {}.",
            }
            await ctx.message.delete()
            await ctx.send(
                cooldown_texts[str(exc.cooldown.type)].format(
                    ctx.command.name, utils.chron.long_delta(dt.timedelta(seconds=exc.retry_after))
                ),
                delete_after=10,
            )

        elif isinstance(exc, commands.InvalidEndOfQuotedStringError):
            await ctx.send(
                f"Carberretta expected a space after the closing quote, but found a(n) `{exc.char}` instead."
            )

        elif isinstance(exc, commands.ExpectedClosingQuoteError):
            await ctx.send(f"Carberretta expected a closing quote character, but did not find one.")

        # Base errors.
        elif isinstance(exc, commands.UserInputError):
            await ctx.send(f"There was an unhandled user input problem (probably argument passing error).")

        elif isinstance(exc, commands.CheckFailure):
            await ctx.send(f"There was an unhandled command check error (probably missing privileges).")

        # Non-command errors.
        elif (original := getattr(exc, "original", None)) is not None:
            if isinstance(original, discord.HTTPException):
                await ctx.send(f"A HTTP exception occurred ({original.status})\n```{original.text}```")
            else:
                raise original

        else:
Esempio n. 16
0
        else:
            return True


async def daily_trendings():
    trd = get_trending()
    msg = generate_msg(trd)
    await c.send_message(TRD_CHAT, msg, disable_web_page_preview=True)
    for chat in trd:
        await send_trending_msg(chat)


async def weekly_trendings():
    trd = get_trending()
    msg = generate_msg(trd)
    clear_db()
    await c.send_message(TRD_CHAT, msg, disable_web_page_preview=True)
    for chat in trd:
        await send_trending_msg(chat)


scheduler = AsyncIOScheduler()

scheduler.configure(timezone="America/Sao_Paulo")

scheduler.add_job(daily_trendings, "cron", day_of_week="tue-sun")
scheduler.add_job(weekly_trendings, "cron", day_of_week="mon")

scheduler.start()
c.run()
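For reference, cron fields less significant than the least significant explicitly given field default to their minimum values, so both jobs above fire at midnight, America/Sao_Paulo time. A more explicit but equivalent form (a sketch, not part of the original bot) would be:

scheduler.add_job(daily_trendings, "cron", day_of_week="tue-sun", hour=0, minute=0)
scheduler.add_job(weekly_trendings, "cron", day_of_week="mon", hour=0, minute=0)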
Esempio n. 17
0
async def setup(config: Configuration, wires_in: WiresIn) -> WiresOut:
    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, "interval", seconds=5)
    scheduler.configure(jobstores={"default": wires_in.redis_job_store.job_store})
    return WiresOut(scheduler=SchedulerWire(scheduler=scheduler))
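Since AsyncIOScheduler's constructor accepts the same keyword options as configure(), an equivalent sketch (reusing the Configuration, WiresIn, WiresOut, SchedulerWire and tick names from the example above, with the same imports assumed) could pass the jobstore up front:

async def setup(config: Configuration, wires_in: WiresIn) -> WiresOut:
    # attach the Redis-backed jobstore at construction time instead of afterwards
    scheduler = AsyncIOScheduler(
        jobstores={"default": wires_in.redis_job_store.job_store})
    scheduler.add_job(tick, "interval", seconds=5)
    return WiresOut(scheduler=SchedulerWire(scheduler=scheduler))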
Esempio n. 18
0
class TaskManager(AsyncRunnable):
    _scheduler: AsyncIOScheduler

    def __init__(self):
        self._scheduler = AsyncIOScheduler()

    def add_interval(self,
                     weeks=0,
                     days=0,
                     hours=0,
                     minutes=0,
                     seconds=0,
                     start_date=None,
                     end_date=None,
                     timezone=None,
                     jitter=None):
        """decorator, add a interval type task"""
        trigger = IntervalTrigger(weeks=weeks,
                                  days=days,
                                  hours=hours,
                                  minutes=minutes,
                                  seconds=seconds,
                                  start_date=start_date,
                                  end_date=end_date,
                                  timezone=timezone,
                                  jitter=jitter)
        return lambda func: self._scheduler.add_job(func, trigger)

    def add_cron(self,
                 year=None,
                 month=None,
                 day=None,
                 week=None,
                 day_of_week=None,
                 hour=None,
                 minute=None,
                 second=None,
                 start_date=None,
                 end_date=None,
                 timezone=None,
                 jitter=None):
        """decorator, add a cron type task"""
        trigger = CronTrigger(year=year,
                              month=month,
                              day=day,
                              week=week,
                              day_of_week=day_of_week,
                              hour=hour,
                              minute=minute,
                              second=second,
                              start_date=start_date,
                              end_date=end_date,
                              timezone=timezone,
                              jitter=jitter)
        return lambda func: self._scheduler.add_job(func, trigger)

    def add_date(self, run_date=None, timezone=None):
        """decorator, add a date type task"""
        trigger = DateTrigger(run_date=run_date, timezone=timezone)
        return lambda func: self._scheduler.add_job(func, trigger)

    async def start(self):
        self._scheduler.configure({'event_loop': self.loop}, '')
        self._scheduler.add_listener(lambda e: log.exception('error raised during task', exc_info=e.exception),
                                     EVENT_JOB_ERROR)
        self._scheduler.start()  # reminder: this is not blocking
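A usage sketch for the decorators above; the task names are hypothetical, and note that each decorator rebinds the decorated name to the apscheduler Job returned by add_job:

tasks = TaskManager()

@tasks.add_interval(minutes=10)
async def refresh_cache():
    ...  # scheduled every 10 minutes once start() has been awaited

@tasks.add_cron(hour=3, minute=0)
async def nightly_cleanup():
    ...  # scheduled daily at 03:00 in the local timezone unless one is passed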
Esempio n. 19
0
                                    suffix=f"{info['image'][-3:]}") as moe:
                moe.write(await resp.read())

                logger.info("Posting to Discord and Twitter...")

                # Send out the gathered information to Twitter and Discord
                await asyncio.gather(
                    utils.post_webhook(DISCORD_WEBHOOK_URL, session, info),
                    utils.tweet_image(TWITTER_CONSUMER_KEY,
                                      TWITTER_CONSUMER_SECRET,
                                      TWITTER_ACCESS_TOKEN,
                                      TWITTER_ACCESS_TOKEN_SECRET, moe.name,
                                      info))

    logger.info("Successfully completed job.")


if __name__ == "__main__":
    # Setup scheduler for periodic jobs
    scheduler = AsyncIOScheduler()
    scheduler.configure(timezone='America/New_York')
    scheduler.add_job(main, trigger='cron', hour='*')
    scheduler.start()

    try:
        asyncio.get_event_loop().run_forever()

    except (KeyboardInterrupt, SystemExit):
        print("Killing...")
        pass
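If the job should also fire once immediately at startup (a common tweak, not part of the original script), add_job accepts an explicit next_run_time:

from datetime import datetime, timezone

scheduler.add_job(main, trigger="cron", hour="*",
                  next_run_time=datetime.now(timezone.utc))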
Esempio n. 20
0
jobstores = {
    "default": SQLAlchemyJobStore(url="sqlite:///sqlite_db/app.db")
}  # TODO #1: define a better location for the default db.

executors = {"default": AsyncIOExecutor()}

job_defaults = {
    "coalesce": False,
    "max_instances": 20,
    "misfire_grace_time": 3600
}

scheduler = AsyncIOScheduler()

scheduler.configure(jobstores=jobstores,
                    executors=executors,
                    job_defaults=job_defaults,
                    timezone=utc)


def start_jobs():

    # example of running cron job at 0 and 30 second mark
    scheduler.add_job(
        id="simple cron job 1",  # must be a unique name
        func=run_stats,
        trigger="cron",
        minute="*/15",  # every fifteen minutes cron style
        second=0,  # cron on 0 second
        max_instances=1,  # number of concurrent instances
        replace_existing=True,  # remove schedule if existing
        jitter=30,  # variation of +/- 30 seconds (*:14:30 - *:15:30)