Example #1
class Agent(object):
    def __init__(self, _client, _loop, _queue, _snode=None):
        """

        :param _client:
        :param _loop:
        :param _queue:
        :param _snode: ip of current node
        """
        self._loop = _loop
        self._queue = _queue
        self._serf_client = SerfClient()
        self._snode = _snode or self._get_local_ip()
        self.network_checker = NetworkChecker(_client, _loop, _queue, self._snode)
        self.scheduler = AsyncIOScheduler()
        self._add_job(_client)


        self._list_node = []


    def _add_job(self, _client):
        # self.scheduler.add_job(tick, 'interval',
        # seconds=config.check_interval, args=[_client,])
        # self.scheduler.add_job(self._loop.call_soon_threadsafe,
        # 'interval', seconds=config.check_interval,
        #  args=(self.network_checker,))
        self.scheduler.add_job(self.network_checker, 'interval',
                               seconds=config.check_interval,
                               args=(self._get_node,))

    def _get_node(self):
        """
        Recursion, just for show =)))
        :return:
        """
        try:
            return self._list_node.pop()
        except IndexError:
            # self._list_node = self._hard_list_node[:]
            response = self._serf_client.members()
            self._list_node = [x[b'Addr'].decode()
                               for x in response.body[b'Members']]
            self._list_node.remove(self._snode)

            return self._list_node.pop()


    def _get_local_ip(self):
        """
        Get the node's `name` via the serf client's `stats` call.
        Then, get the IP via the `members(name)` function.
        :return:
        """
        name = self._serf_client.stats().body[b'agent'][b'name']
        return self._serf_client.members(name).body[b'Members'][0][b'Addr'].decode()
Example #2
File: runnable.py Project: erasaur/pnu
    def run_interval(self, func=None, update_interval=None):
        # fall back to the instance-level interval when no override is given
        interval = update_interval if update_interval is not None else self._update_interval
        if interval is None:
            raise ValueError("missing interval")

        if func is None:
            raise ValueError("missing func")

        scheduler = AsyncIOScheduler()
        scheduler.add_job(
            func,
            'interval',
            seconds=interval
        )
        scheduler.start()
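Since the listing above shows only the method, here is a minimal, self-contained sketch of the same pattern; the Runnable class and tick coroutine are illustrative stand-ins, not code from the pnu project:

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

class Runnable:
    def __init__(self, update_interval=None):
        self._update_interval = update_interval

    def run_interval(self, func=None, update_interval=None):
        interval = update_interval if update_interval is not None else self._update_interval
        if interval is None:
            raise ValueError("missing interval")
        if func is None:
            raise ValueError("missing func")
        scheduler = AsyncIOScheduler()
        scheduler.add_job(func, 'interval', seconds=interval)
        scheduler.start()

async def tick():
    print('tick')

# coroutines are fine here: AsyncIOScheduler's default executor awaits them
Runnable(update_interval=2).run_interval(func=tick)
asyncio.get_event_loop().run_forever()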
Example #3
def do_the_work():
    scheduler = AsyncIOScheduler()
    # scheduler.store = RedisJobStore(
    #     db=0,
    #     jobs_key='marvin:scheduler:jobs',
    #     run_times_key="marvin:scheduler:runtimes",
    #     args={'host': 'pub-redis-10118.us-east-1-2.4.ec2.garantiadata.com', 'port':10118,
    #     'password': "******"})

    standup_time = datetime.now() + timedelta(seconds=5)

    scheduler.add_job(some_job, 'date', next_run_time=standup_time)

    scheduler.start()
Example #4
 def restart(self):
     self._logging_daemon.info('TimerSwitch ... stopping for restart')
     self._scheduler.shutdown(0)
     self._scheduler = AsyncIOScheduler()
     self._scheduler.start()
     self.load()
     self._logging_daemon.info('TimerSwitch ... reinitialized')
Example #5
 def __init__(self, logging_daemon, db_connection, method):
     self._scheduler = AsyncIOScheduler()
     self._method = method
     self._logging_daemon = logging_daemon
     self._db_connection = db_connection
     self._scheduler.start()
     self.load()
     self._logging_daemon.info('TimerSwitch ... initialized')
Example #6
class AsyncIOJob:
    """
        Registers jobs via AsyncIOScheduler; the caller runs
        asyncio.get_event_loop().run_forever() so the jobs keep executing
        until a KeyboardInterrupt/SystemExit event arrives.
    """

    def __init__(self):
        """ 
            Create the scheduler as an AsyncIOScheduler.
        """
        self.scheduler = AsyncIOScheduler()
        self.scheduler.start()

    def add_job(self, job, typ, seconds):
        """ 
            Register a job. typ may be one of {'date', 'interval', 'cron'}.
            Note: the seconds kwarg only applies to the 'interval' trigger.
        """
        self.scheduler.add_job(job, typ, seconds=seconds)
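A hedged usage sketch for the class above, following the pattern its docstring describes (the tick job is illustrative):

import asyncio
from datetime import datetime

def tick():
    print('Tick! The time is: %s' % datetime.now())

runner = AsyncIOJob()  # the class defined above
runner.add_job(tick, 'interval', seconds=3)

# block until KeyboardInterrupt/SystemExit, as the class docstring expects
try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    pass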
Example #7
def asyncio_schedule():
    """
    python version >= 3.4.0
    :return:
    """
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    try:
        import asyncio
    except ImportError:
        import trollius as asyncio

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #8
    def __init__(self, _client, _loop, _queue, _snode=None):
        """

        :param _client:
        :param _loop:
        :param _queue:
        :param _snode: ip of current node
        """
        self._loop = _loop
        self._queue = _queue
        self._serf_client = SerfClient()
        self._snode = _snode or self._get_local_ip()
        self.network_checker = NetworkChecker(_client, _loop, _queue, self._snode)
        self.scheduler = AsyncIOScheduler()
        self._add_job(_client)


        self._list_node = []
Example #9
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config", help="Define config file location", default="../producer_config.json")

    payload = parser.parse_args()

    config = load_config(payload.config, CONSTANTS.PRODUCER)
    http_config = load_metric_collector_config(config)
    kafka_config = load_kafka_config(config)

    # initialize metric collector object
    http_metric_collectors = HttpMetricsCollector(http_config)

    # initialize kafka publisher object
    kafka_publisher = KafkaTransportPublisher(kafka_config)
    kafka_publisher.set_topic(config.get(CONSTANTS.TOPIC))

    # initialize the runner
    runner = ProducerRunner(http_metric_collectors, kafka_publisher)

    # Initialize Async Event Scheduler and add producer run function as job with supplied interval
    scheduler = AsyncIOScheduler()
    scheduler.add_job(
        runner.run,
        "interval",
        seconds=config.get(CONSTANTS.INTERVAL)
    )

    # Start the scheduler
    scheduler.start()
    logger.info('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        logger.warning("System Interrupted")
Example #10
async def main(args=None):

    opts = open_config_file()

    options_db = opts.local_storage
    options_engine = opts.health
    options_tracer = opts.tracer

    scheduler = AsyncIOScheduler(timezone=utc)
    client = MongoClient(opts.database['url'])
    db = client['chariot_service_health']

    southbound = SouthboundConnector(options_engine)
    if options_tracer['enabled'] is True:
        options_tracer['service'] = __service_name_listener__
        logging.debug(
            f'Enabling tracing for service "{__service_name_listener__}"')
        tracer = Tracer(options_tracer)
        tracer.init_tracer()
        southbound.inject_tracer(tracer)

    options_db['database'] = options_engine['database']
    southbound.set_up_local_storage(options_db)
    client_south = await create_client(opts.brokers.southbound)
    southbound.register_for_client(client_south)
    southbound.inject_db(db)
    scheduler.add_job(southbound.send_ping,
                      'interval',
                      seconds=options_engine['interval'])

    southbound.set_topics([options_engine['listen']])
    southbound.subscribe_to_topics()
    await southbound.send_ping()

    scheduler.start()
    await STOP.wait()
    await client_south.disconnect()
Example #11
def main():
    uvloop.install()

    trade_lock = asyncio.Lock()

    logger.info('Environment Variables')
    logger.info('---------------------')
    for k, v in sorted(environ.items()):
        logger.info(f'{k}={v}')
    logger.info('---------------------\n')

    async def job():
        await tick(trade_lock=trade_lock)

    scheduler = AsyncIOScheduler()
    scheduler.add_job(
        job, IntervalTrigger(seconds=86400, timezone=pytz.timezone('UTC')))
    scheduler.start()
    logger.info('Neobabix is running, press Ctrl+C to exit')

    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #12
def asyncio_schedule():
    """
    python version >= 3.4.0
    :return:
    """
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    try:
        import asyncio
    except ImportError:
        import trollius as asyncio

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #13
def update(provider, city, predict):
    ''' Update the data for a city. '''
    # Get the current formatted data for a city
    try:
        stations = eval(provider).stations(city)
    except Exception:
        return
    # Update the database if the city can be predicted
    if predict == 'Yes':
        insert.city(city, stations)
    # Save the data for the map
    geojson = tb.json_to_geojson(stations)
    tb.write_json(geojson, '{0}/{1}.geojson'.format(geojsonFolder, city))
    # Refresh the latest update time
    updates = tb.read_json('{}/updates.json'.format(informationFolder))
    updates[city] = datetime.now().isoformat()
    tb.write_json(updates, '{}/updates.json'.format(informationFolder))

if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    for provider, cities in providers.items():
        for city in cities:
            update(provider, city, predictions[city])
            scheduler.add_job(update, 'interval', seconds=refresh,
                              args=[provider, city, predictions[city]],
                              misfire_grace_time=refresh, coalesce=True)
    scheduler.start()
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #14
@pytest.fixture
def asyncio_scheduler(event_loop):
    scheduler = AsyncIOScheduler(event_loop=event_loop)
    scheduler.start(paused=True)
    yield scheduler
    scheduler.shutdown(False)
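A hedged sketch of a test consuming this fixture, assuming pytest with pytest-asyncio's event_loop fixture (the test body is illustrative, not from the original suite):

def test_scheduler_starts_paused(asyncio_scheduler):
    # the fixture starts the scheduler with paused=True, so the job is
    # registered but never fired during the test
    job = asyncio_scheduler.add_job(print, 'interval', seconds=60, args=['hi'])
    assert asyncio_scheduler.get_job(job.id) is not None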
Example #15
    # load modules
    if not modulesdir:
        logging.warning("No modules directory specified via --modules")
    else:
        __detectModules(modulesdir)

    # register shutdown hooks
    signal.signal(signal.SIGHUP, _onHupSignal)
    signal.signal(signal.SIGTERM, _onTermSignal)

    # create and install asyncio main event loop
    # http://www.tornadoweb.org/en/stable/asyncio.html#tornado.platform.asyncio.AsyncIOMainLoop
    AsyncIOMainLoop().install()

    # advanced python scheduler for recurring tasks as defined by modules
    _scheduler = AsyncIOScheduler() #TornadoScheduler()

    # reload settings periodically
    _scheduler.add_job(settings.load_settings, 'interval', minutes=5)

    # add eve user and url of your application here
    import eveapi
    if 'in_game_owner' in cfgServer:
        eveapi.set_user_agent('Host: %s; Admin-EVE-Character: %s' % ('dev-local', cfgServer['in_game_owner']))

    # init universe data from SDE
    asyncio.get_event_loop().run_until_complete(universe.initCaches())

    # only import at this point to make sure all required modules have already been loaded
    from igbtoolbox import pages
Example #16
"""
Demonstrates how to use the asyncio compatible scheduler to schedule a job that executes on 3 second intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.asyncio import AsyncIOScheduler

try:
    import asyncio
except ImportError:
    import trollius as asyncio


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #17
    def __init__(self, loop=None):
        kw = dict()
        if loop:
            kw['event_loop'] = loop

        self.scheduler = AsyncIOScheduler(**kw)
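A minimal sketch of driving this constructor pattern with an explicit loop (written as a standalone snippet; Example #23 below shows the full class this method comes from):

import asyncio
from apscheduler.schedulers.asyncio import AsyncIOScheduler

loop = asyncio.new_event_loop()
scheduler = AsyncIOScheduler(event_loop=loop)  # bind jobs to this specific loop
scheduler.add_job(lambda: print('running on the explicit loop'), 'interval', seconds=5)
scheduler.start()
loop.run_forever()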
Example #18
File: __init__.py Project: R3M099/ICE-X
class Bot(BotBase):
    def __init__(self):
        #self.PREFIX = PREFIX
        self.ready = False
        self.cogs_ready = Ready()
        #self.guild = None
        self.scheduler = AsyncIOScheduler()

        try:
            with open("./data/banlist.txt", "r") as f:
                self.banlist = [int(line.strip()) for line in f.readlines()]

        except FileNotFoundError:
            self.banlist = []
        #db.autosave(self.scheduler)

        super().__init__(command_prefix=get_prefix)

    def setup(self):
        COGS = [
            "help", "fun", "info", "mod", "Welcome", "log", "invite", "afk",
            "roles", "astronomy", "xkcd", "reactions", "misc", "meta"
        ]
        for cog in COGS:
            self.load_extension(f"lib.cogs.{cog}")
            print(f"{cog} loaded.")

        print("setup complete")

    def run(self, version):
        self.VERSION = version
        print("Running setup.")
        self.setup()
        with open("./lib/bot/token.0", "r", encoding="utf-8") as tf:
            self.TOKEN = tf.read()
        print('Running bot.')
        super().run(self.TOKEN, reconnect=True)

    async def on_connect(self):
        print('Bot connected.')

    async def on_disconnect(self):
        print('Bot disconnected.')

    async def on_command_error(self, ctx, exc):
        if isinstance(exc, CommandNotFound):
            await ctx.send('Command not found.')
        elif isinstance(exc, CommandOnCooldown):
            await ctx.send(
                f'That command is in cooldown for {exc.retry_after:,.0f} seconds'
            )

    async def on_ready(self):
        if not self.ready:
            #while not self.cogs_ready.all_ready():
            #	await sleep(0.5)
            self.ready = True
            await bot.change_presence(
                status=discord.Status.online,
                activity=Activity(type=discord.ActivityType.listening,
                                  name="the screams of souls from Hell"))
            self.scheduler.start()
            print('Bot ready.')
        else:
            print('bot reconnected.')

    async def on_guild_join(self, guild):
        with open("./lib/cogs/prefixes.json", "r") as pf:
            prefixes = json.load(pf)

        prefixes[str(guild.id)] = "."

        with open("./lib/cogs/prefixes.json", "w") as pf:
            json.dump(prefixes, pf, indent=4)

    async def on_guild_remove(self, guild):
        with open("./lib/cogs/prefixes.json", "r") as pf:
            prefixes = json.load(pf)

        prefixes.pop(str(guild.id))

        with open("./lib/cogs/prefixes.json", "w") as pf:
            json.dump(prefixes, pf, indent=4)

    async def on_message(self, message):
        t = datetime.datetime.now()
        x = re.findall(r"^ice\s*x$", message.content)
        y = re.findall(r"^ICE\s*X$", message.content)
        #mention = message.mentions
        '''for user in mention:
			if user.id in afk_list:
				await message.channel.send(f"**{user.display_name}** is AFK, don't mention him/her")'''

        if x:
            await message.channel.send(
                f"Hello, World!, I have been created by almighty **℟ʓϺ𝞱#0522**"
            )

        elif y:
            await message.channel.send(
                f"Hello, World!, I have been created by almighty **℟ʓϺ𝞱#0522**"
            )
        '''for user in mentions:
			if user.id == 537634097137188875:
				await message.channel.send(f"**DO NOT MENTION HIM WITHOUT ANY REASON** :face_with_symbols_over_mouth:")'''

        if bot.user in message.mentions:
            await message.channel.send(
                f"Hello **{message.author.display_name}**\nThe Prefix for the server is **`{get_prefix(self, message)}`**"
            )

        if not message.author.bot:
            if isinstance(message.channel, DMChannel):
                if len(message.content) < 20 or len(message.content) > 500:
                    await message.channel.send(
                        "Your message should be at least 20 characters long and no more than 500 characters long."
                    )
                else:
                    t = datetime.datetime.utcnow()
                    e = Embed(
                        description=f"Complaint from {message.author.mention}",
                        colour=0x000203)
                    e.set_thumbnail(url=message.author.avatar_url)
                    e.set_author(
                        name=
                        f"{message.author.display_name}#{message.author.discriminator}",
                        icon_url=message.author.avatar_url)
                    fields = [("Member", message.author.display_name, False),
                              ("Message", message.content, False)]
                    for name, value, inline in fields:
                        e.add_field(name=name, value=value, inline=inline)

                    e.set_footer(text=t.strftime('%b %d, %Y | %I:%M %p UTC'))
                    mod = self.get_cog("mod")
                    await self.get_channel(736790701794132029).send(embed=e)
                    await message.channel.send(
                        "Your message has been relayed to the Moderators/Admins, Thanks for using the modmail :slight_smile:"
                    )

            else:
                await self.process_commands(message)
Example #19
class TimerSwitch:
    UNIT2KEYWORD = {
        'Minuten': 'minutes',
        'Stunden': 'hours',
        'Tage': 'days',
        'Wochen': 'weeks'
    }
    WEEK2WEEK = [
        ('mon', 'weekly_monday'),
        ('tue', 'weekly_tuesday'),
        ('wed', 'weekly_wednesday'),
        ('thu', 'weekly_thursday'),
        ('fri', 'weekly_friday'),
        ('sat', 'weekly_saturday'),
        ('sun', 'weekly_sunday'),
    ]

    def __init__(self, logging_daemon, db_connection, method):
        self._scheduler = AsyncIOScheduler()
        self._method = method
        self._logging_daemon = logging_daemon
        self._db_connection = db_connection
        self._scheduler.start()
        self.load()
        self._logging_daemon.info('TimerSwitch ... initialized')

    def load(self, scheduler_id=None):
        sql = """SELECT
              schedulers.id AS scheduler_id,
              schedulers.title AS scheduler_title,
              schedulers.date_start_on,
              schedulers.date_start_off,
              schedulers.date_stop,
              schedulers.date_stop_on,
              schedulers.date_stop_off,
              schedulers.duration,
              schedulers.interval_number,
              schedulers.interval_unit,
              schedulers.weekly_monday,
              schedulers.weekly_tuesday,
              schedulers.weekly_wednesday,
              schedulers.weekly_thursday,
              schedulers.weekly_friday,
              schedulers.weekly_saturday,
              schedulers.weekly_sunday,
              switches.id AS switches_id,
              switches.title AS switches_title,
              switches.argA,
              switches.argB,
              switches.argC,
              switches.argD,
              switch_types.title AS switches_typ,
              clients.ip AS switches_ip
              FROM schedulers, switches, switch_types, clients
              WHERE schedulers.switches_id = switches.id
              AND switches.switch_types_id = switch_types.id
              AND switches.clients_id = clients.id"""
        if isinstance(scheduler_id, int):
            sql += " AND schedulers.id = %s"
            db_result = self._db_connection.query(sql, scheduler_id)
        else:
            db_result = self._db_connection.query(sql)
        results = db_result.fetchall()
        for result in results:
            self._add(result)

    def _add(self, dataset):
        scheduler_id = dataset['scheduler_id']
        title = dataset['scheduler_title']
        date_start_on = dataset['date_start_on']
        date_start_off = dataset['date_start_off']
        date_stop_on = dataset['date_stop_on']
        date_stop_off = dataset['date_stop_off']
        duration = dataset['duration']

        week = ','.join(
            abr for abr, full in self.WEEK2WEEK
            if dataset[full]
        )

        if duration == 'einmalig':
            scheduler_type = 'date'
            args_on = dict(run_date=date_start_on)
            args_off = dict(run_date=date_start_off)
            date_stop_off = date_start_off
        elif duration == 'intervall':
            scheduler_type = 'interval'
            interval_argument = {self.UNIT2KEYWORD[dataset['interval_unit']]: dataset['interval_number']}
            args_on = dict(interval_argument, start_date=date_start_on, end_date=date_stop_on)
            args_off = dict(interval_argument, start_date=date_start_off, end_date=date_stop_off)
        elif duration == 'wochentag':
            scheduler_type = 'cron'
            args_on = dict(
                day_of_week=week, hour=date_start_on.hour, minute=date_start_on.minute,
                start_date=date_start_on, end_date=date_stop_on)
            args_off = dict(
                day_of_week=week, hour=date_start_off.hour, minute=date_start_off.minute,
                start_date=date_start_off, end_date=date_stop_off)
        else:
            # guard: an unknown duration value would otherwise leave args_on
            # and args_off unbound in the add_job calls below
            self._logging_daemon.info('Timerswitch ... unknown duration "%s"' % duration)
            return

        self._scheduler.add_job(self._method, scheduler_type,
                                args=[scheduler_id, dataset['switches_id'], dataset['switches_ip'], True, None],
                                id='%son' % scheduler_id, **args_on)
        self._scheduler.add_job(self._method, scheduler_type,
                                args=[scheduler_id, dataset['switches_id'], dataset['switches_ip'], False,
                                      date_stop_off], id='%soff' % scheduler_id, **args_off)

        self._logging_daemon.info('Timerswitch ... add_job "%s" (id = %s)' % (title, scheduler_id))
        self._logging_daemon.debug(
            'Timerswitch ... self._scheduler.add_job(%s, %s, args=[%s, %s, True, None], id=%son, %s' % (
                self._method, scheduler_type, title, scheduler_id, scheduler_id, args_on))

    def reload(self, scheduler_id):
        self.delete_job(scheduler_id)
        self.load(int(scheduler_id))
        self._logging_daemon.info('Timerswitch ... Reload       ID = %s' % scheduler_id)

    def delete_job(self, scheduler_id):
        self._scheduler.remove_job(str(scheduler_id) + 'on')
        self._scheduler.remove_job(str(scheduler_id) + 'off')
        self._logging_daemon.info('Timerswitch ... Delete Job   ID = %s' % scheduler_id)

    def delete_db(self, scheduler_id):
        if isinstance(scheduler_id, int):
            scheduler_id = int(scheduler_id)
            self._db_connection.query("DELETE FROM schedulers WHERE id = %s", scheduler_id)
            self._logging_daemon.info('Timerswitch ... Delete DB    ID = %s' % scheduler_id)

    def restart(self):
        self._logging_daemon.info('TimerSwitch ... stopping for restart')
        self._scheduler.shutdown(0)
        self._scheduler = AsyncIOScheduler()
        self._scheduler.start()
        self.load()
        self._logging_daemon.info('TimerSwitch ... reinitialized')
Example #20
 def __init__(self):
     """ 
          Create the scheduler as an AsyncIOScheduler.
     """
     self.scheduler = AsyncIOScheduler()
     self.scheduler.start()
Example #21
 def __init__(self, _client):
     self.network_checker = NetworkChecker()
     self.scheduler = AsyncIOScheduler()
     self._add_job(_client)
Example #22
        h1, m1, h2, m2 = eval(udB["NIGHT_TIME"])
    for chat in chats:
        try:
            await ultroid_bot(
                EditChatDefaultBannedRightsRequest(
                    chat,
                    banned_rights=ChatBannedRights(
                        until_date=None,
                        send_messages=True,
                    ),
                )
            )
            await ultroid_bot.send_message(
                chat, f"**NightMode : Group Closed**\n\nGroup Will Open At `{h2}:{m2}`"
            )
        except Exception as er:
            LOGS.info(er)


if night_grps():
    try:
        h1, m1, h2, m2 = 0, 0, 7, 0
        if udB.get("NIGHT_TIME"):
            h1, m1, h2, m2 = eval(udB["NIGHT_TIME"])
        sch = AsyncIOScheduler()
        sch.add_job(close_grp, trigger="cron", hour=h1, minute=m1)
        sch.add_job(open_grp, trigger="cron", hour=h2, minute=m2)
        sch.start()
    except Exception as er:
        LOGS.info(er)
Example #23
class AlamoScheduler(object):
    message_queue = None
    loop = handler = None

    def __init__(self, loop=None):
        kw = dict()
        if loop:
            kw['event_loop'] = loop

        self.scheduler = AsyncIOScheduler(**kw)

    def setup(self, loop=None):
        if loop is None:
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
        self.loop = loop
        self.message_queue = ZeroMQQueue(
            settings.ZERO_MQ_HOST,
            settings.ZERO_MQ_PORT
        )
        self.message_queue.connect()
        self.scheduler.add_listener(
            self.event_listener,
            EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES
        )

    @aiostats.increment()
    def _schedule_check(self, check):
        """Schedule check."""
        logger.info(
            'Check `%s:%s` scheduled!', check['uuid'], check['name']
        )

        check['scheduled_time'] = datetime.now(tz=pytz_utc).isoformat()
        self.message_queue.send(check)

    def remove_job(self, job_id):
        """Remove job."""
        try:
            logger.info('Removing job for check id=`%s`', job_id)
            self.scheduler.remove_job(str(job_id))
        except JobLookupError:
            pass

    def schedule_check(self, check):
        """Schedule check with proper interval based on `frequency`.

        :param dict check: Check definition
        """
        try:
            frequency = check['fields']['frequency'] = int(
                check['fields']['frequency']
            )
            logger.info(
                'Scheduling check `%s` with id `%s` and interval `%s`',
                check['name'], check['id'], frequency
            )
            jitter = random.randint(0, frequency)
            first_run = datetime.now() + timedelta(seconds=jitter)
            kw = dict(
                seconds=frequency,
                id=str(check['uuid']),
                next_run_time=first_run,
                args=(check,)
            )
            self.schedule_job(self._schedule_check, **kw)

        except KeyError as e:
            logger.exception('Failed to schedule check: %s. Exception: %s',
                             check, e)

    def schedule_job(self, method, **kwargs):
        """Add new job to scheduler.

        :param method: reference to method that should be scheduled
        :param kwargs: additional kwargs passed to `add_job` method
        """
        try:
            self.scheduler.add_job(
                method, 'interval',
                misfire_grace_time=settings.JOBS_MISFIRE_GRACE_TIME,
                max_instances=settings.JOBS_MAX_INSTANCES,
                coalesce=settings.JOBS_COALESCE,
                **kwargs
            )
        except ConflictingIdError as e:
            logger.error(e)

    def event_listener(self, event):
        """React on events from scheduler.

        :param apscheduler.events.JobExecutionEvent event: job execution event
        """
        if event.code == EVENT_JOB_MISSED:
            aiostats.increment.incr('job.missed')
            logger.warning("Job %s scheduler for %s missed.", event.job_id,
                           event.scheduled_run_time)
        elif event.code == EVENT_JOB_ERROR:
            aiostats.increment.incr('job.error')
            logger.error("Job %s scheduled for %s failed. Exc: %s",
                         event.job_id,
                         event.scheduled_run_time,
                         event.exception)
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            aiostats.increment.incr('job.max_instances')
            logger.warning(
                'Job `%s` could not be submitted. '
                'Maximum number of running instances was reached.',
                event.job_id
            )

    @aiostats.increment()
    def get_jobs(self):
        return [job.id for job in self.scheduler.get_jobs()]

    async def checks(self, request=None):

        uuid = request.match_info.get('uuid', None)
        if uuid is None:
            jobs = self.get_jobs()
            return json_response(data=dict(count=len(jobs), results=jobs))
        job = self.scheduler.get_job(uuid)
        if job is None:
            return json_response(
                data={'detail': 'Check does not exist.'}, status=404
            )

        check, = job.args
        return json_response(data=check)

    @aiostats.timer()
    async def update(self, request=None):
        check = await request.json()
        check_uuid = check.get('uuid')
        check_id = check.get('id')

        message = dict(status='ok')

        if not check_id or not check_uuid:
            return json_response(status=400)

        if check_id % settings.SCHEDULER_COUNT != settings.SCHEDULER_NR:
            return json_response(data=message, status=202)

        job = self.scheduler.get_job(str(check_uuid))

        if job:
            scheduled_check, = job.args
            timestamp = scheduled_check.get('timestamp', 0)

            if timestamp > check['timestamp']:
                return json_response(data=message, status=202)
            message = dict(status='deleted')
            self.remove_job(check_uuid)

        if any([trigger['enabled'] for trigger in check['triggers']]):
            self.schedule_check(check)
            message = dict(status='scheduled')

        return json_response(data=message, status=202)

    def wait_and_kill(self, sig):
        logger.warning('Got `%s` signal. Preparing scheduler to exit ...', sig)
        self.scheduler.shutdown()
        self.loop.stop()

    def register_exit_signals(self):
        for sig in ['SIGQUIT', 'SIGINT', 'SIGTERM']:
            logger.info('Registering handler for `%s` signal '
                        'in current event loop ...', sig)
            self.loop.add_signal_handler(
                getattr(signal, sig),
                self.wait_and_kill, sig
            )

    def start(self, loop=None):
        """Start scheduler."""
        self.setup(loop=loop)
        self.register_exit_signals()
        self.scheduler.start()

        logger.info(
            'Press Ctrl+%s to exit.', 'Break' if os.name == 'nt' else 'C'
        )
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Scheduler was stopped!')
Example #24
async def start_scheduler():
    try:
        scheduler = AsyncIOScheduler()
        scheduler.add_job(
            services_helper.print_where_food_notification,
            CronTrigger(day_of_week=weekday,
                        hour=where_time.split(":")[0],
                        minute=where_time.split(":")[1]))
        scheduler.add_job(
            services_helper.print_what_food_notification,
            CronTrigger(day_of_week=weekday,
                        hour=what_time.split(":")[0],
                        minute=what_time.split(":")[1]))
        scheduler.add_job(
            services_helper.get_users_and_channels_info,
            CronTrigger(day_of_week=weekday, hour="03", minute="00"))
        # scheduler.add_job(food_bot.update_restaurant_database, CronTrigger(day_of_week="wed", hour="09")) # Not needed as Takeaway scraping is broken
        food_bot.get_restaurants_from_takeaway()
        scheduler.add_job(
            admin_bot.reset_orders,
            CronTrigger(day_of_week="wed", hour="23", minute="59"))
        scheduler.start()
        logger.info('Started APScheduler!')
    except Exception as e:
        logger.exception(e)
Example #25
        else:
            print("Code is done running. switching back to background mode")
            #runCode = True
            return
        return

    else:
        print("uploaded over 7 hours ago")
        #will retry in an hour
        #runCode = True
        return


#run every hour
while True:
    scheduler = AsyncIOScheduler()
    scheduler.add_job(recentMediaInfo, 'interval', minutes=60, id='myJobID')
    scheduler.start()

    try:
        loop = asyncio.get_event_loop()
        loop.run_forever()

        # run_forever() only returns once the loop is stopped elsewhere;
        # clean up before the next iteration of the outer loop
        scheduler.remove_job('myJobID')
        loop.close()

    except (KeyboardInterrupt, SystemExit):
        pass
Example #26
def main():
    print("""==============================
              _           _
             | |         | |
  _   _  ___ | |__   ___ | |_
 | | | |/ _ \| '_ \ / _ \| __|
 | |_| | (_) | |_) | (_) | |_
  \__, |\___/|_.__/ \___/ \__|
   __/ |
  |___/
==============================""")
    print("正在初始化...")

    if os.path.exists('yobot_config.json'):
        basedir = "."
    else:
        basedir = "./yobot_data"
    if os.path.exists(os.path.join(basedir, "yobot_config.json")):
        try:
            with open(os.path.join(basedir, "yobot_config.json"),
                      "r",
                      encoding="utf-8-sig") as f:
                config = json.load(f)
        except json.JSONDecodeError:
            print('Invalid config file format; please check the config file. Closing in three seconds.')
            time.sleep(3)
            raise
        token = config.get("access_token", None)
        if token is None:
            print("Warning: access_token is not set; this directly exposes the bot API")
            print("See https://yobot.win/usage/access-token/ for details")
    else:
        token = None

    try:
        tzlocal.get_localzone()
    except Exception:
        print("Failed to detect the system timezone; please set it to the Beijing/Shanghai timezone")
        sys.exit()

    cqbot = CQHttp(access_token=token, enable_http_post=False)
    sche = AsyncIOScheduler()
    bot = yobot.Yobot(
        data_path=basedir,
        scheduler=sche,
        quart_app=cqbot.server_app,
        bot_api=cqbot._api,
    )
    host = bot.glo_setting.get("host", "0.0.0.0")
    port = bot.glo_setting.get("port", 9222)

    @cqbot.on_message
    async def handle_msg(context):
        if context["message_type"] == "group" or context[
                "message_type"] == "private":
            reply = await bot.proc_async(context)
        else:
            reply = None
        if isinstance(reply, str) and reply != "":
            return {'reply': reply, 'at_sender': False}
        else:
            return None

    async def send_it(func):
        if asyncio.iscoroutinefunction(func):
            to_sends = await func()
        else:
            to_sends = func()
        if to_sends is None:
            return
        for kwargs in to_sends:
            await asyncio.sleep(5)
            await cqbot.send_msg(**kwargs)

    jobs = bot.active_jobs()
    if jobs:
        for trigger, job in jobs:
            sche.add_job(func=send_it,
                         args=(job, ),
                         trigger=trigger,
                         coalesce=True,
                         max_instances=1,
                         misfire_grace_time=60)
        sche.start()

    print("初始化完成,启动服务...")

    cqbot.run(
        host=host,
        port=port,
        debug=False,
        use_reloader=False,
        loop=asyncio.get_event_loop(),
    )
Example #27
    m = multiprocessing.Manager()
    variable = m.Value('i', 60)

    schedfortest = BlockingScheduler()

    trigger_test = OrTrigger([CronTrigger(minute='*/1')])

    # NOTE: minute='*/2' was dropped here: trigger kwargs are silently ignored
    # when a ready-made trigger instance is passed to add_job()
    schedfortest.add_job(callbacktotal,
                         trigger_test,
                         max_instances=10)

    # BlockingScheduler.start() blocks, so nothing below this line runs until
    # it is shut down
    schedfortest.start()

    scheduler = AsyncIOScheduler()
    scheduler.add_job(day_limits,
                      'cron',
                      hour=7,
                      misfire_grace_time=3600,
                      timezone='GB')
    scheduler.add_job(night_limits,
                      'cron',
                      hour=19,
                      minute=32,
                      misfire_grace_time=3600,
                      timezone='GB')
    scheduler.start()

    scheduler.print_jobs()
Example #28
class D2info:
    version = '0.3.1'
    sched = ''
    args = []

    logging.basicConfig()
    logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    def __init__(self, **options):
        super().__init__(**options)
        self.get_args()

        self.data = D2data(self.args.production,
                           (self.args.cert, self.args.key))

    def get_args(self):
        parser = argparse.ArgumentParser()
        parser.add_argument('--port',
                            help='specify a port to listen on',
                            default='4200')
        parser.add_argument('-p',
                            '--production',
                            help='Use to launch in production mode',
                            action='store_true')
        parser.add_argument('-nm',
                            '--nomessage',
                            help='Don\'t post any messages',
                            action='store_true')
        parser.add_argument('--oauth',
                            help='Get Bungie access token',
                            action='store_true')
        parser.add_argument('-k',
                            '--key',
                            help='SSL key',
                            type=str,
                            default='')
        parser.add_argument('-c',
                            '--cert',
                            help='SSL certificate',
                            type=str,
                            default='')
        self.args = parser.parse_args()

    async def init_data(self):
        await self.data.token_update()
        await self.data.get_chars()
        await self.data.get_seasonal_eververse()
        await self.data.get_daily_rotations()
        # await self.data.get_weekly_rotations()
        await self.data.get_weekly_eververse()

    def start_up(self):
        @app.listener('before_server_start')
        async def instantiate_scheduler(app, loop):
            self.sched = AsyncIOScheduler(timezone='UTC')
            self.sched.add_job(self.init_data, misfire_grace_time=86300)
            self.sched.add_job(self.data.token_update, 'interval', hours=1)
            self.sched.add_job(self.data.get_seasonal_eververse,
                               'cron',
                               day_of_week='tue',
                               hour='17',
                               minute='1',
                               second='40',
                               misfire_grace_time=86300)
            self.sched.add_job(self.data.get_weekly_eververse,
                               'cron',
                               day_of_week='tue',
                               hour='17',
                               minute='1',
                               second='40',
                               misfire_grace_time=86300)
            # self.sched.add_job(self.data.get_weekly_rotations, 'cron', day_of_week='tue', hour='17', minute='0',
            #                    second='40', misfire_grace_time=86300)
            # self.sched.add_job(self.data.get_daily_rotations, 'cron', hour='17', minute='0', second='40',
            #                    misfire_grace_time=86300)
            self.sched.start()

        app.static('/static', './static')
        app.error_handler = CustomErrorHandler()
        # app.url_for('static', filename='style.css', name='style')
        if self.args.production:
            app.run(host='0.0.0.0',
                    port=1423,
                    workers=1,
                    debug=False,
                    access_log=False
                    )  # ssl={'cert': self.args.cert, 'key': self.args.key})
        else:
            app.run()
Example #29
File: scheduler.py Project: andymor/eNMS
class Scheduler(Starlette):

    days = {
        "0": "sun",
        "1": "mon",
        "2": "tue",
        "3": "wed",
        "4": "thu",
        "5": "fri",
        "6": "sat",
        "7": "sun",
        "*": "*",
    }

    seconds = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

    def __init__(self):
        super().__init__()
        with open(Path.cwd().parent / "setup" / "scheduler.json", "r") as file:
            self.settings = load(file)
        dictConfig(self.settings["logging"])
        self.configure_scheduler()
        self.register_routes()

    @staticmethod
    def aps_date(date):
        if not date:
            return
        date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
        return datetime.strftime(date, "%Y-%m-%d %H:%M:%S")

    def configure_scheduler(self):
        self.scheduler = AsyncIOScheduler(self.settings["config"])
        self.scheduler.start()

    def register_routes(self):
        @self.route("/job", methods=["DELETE"])
        async def delete(request):
            job_id = await request.json()
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
            return JSONResponse(True)

        @self.route("/next_runtime/{task_id}")
        async def next_runtime(request):
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                return JSONResponse(job.next_run_time.strftime("%Y-%m-%d %H:%M:%S"))
            return JSONResponse("Not Scheduled")

        @self.route("/schedule", methods=["POST"])
        async def schedule(request):
            data = await request.json()
            if data["mode"] in ("resume", "schedule"):
                result = self.schedule_task(data["task"])
                if not result:
                    return JSONResponse({"alert": "Cannot schedule in the past."})
                else:
                    return JSONResponse({"response": "Task resumed.", "active": True})
            else:
                try:
                    self.scheduler.pause_job(data["task"]["id"])
                    return JSONResponse({"response": "Task paused."})
                except JobLookupError:
                    return JSONResponse({"alert": "There is no such job scheduled."})

        @self.route("/time_left/{task_id}")
        async def time_left(request):
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                delta = job.next_run_time.replace(tzinfo=None) - datetime.now()
                hours, remainder = divmod(delta.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                days = f"{delta.days} days, " if delta.days else ""
                return JSONResponse(f"{days}{hours}h:{minutes}m:{seconds}s")
            return JSONResponse("Not Scheduled")

    @staticmethod
    def run_service(task_id):
        auth = HTTPBasicAuth(environ.get("ENMS_USER"), environ.get("ENMS_PASSWORD"))
        post(f"{environ.get('ENMS_ADDR')}/rest/run_task", json=task_id, auth=auth)

    def schedule_task(self, task):
        if task["scheduling_mode"] == "cron":
            crontab = task["crontab_expression"].split()
            crontab[-1] = ",".join(self.days[day] for day in crontab[-1].split(","))
            trigger = {"trigger": CronTrigger.from_crontab(" ".join(crontab))}
        elif task["frequency"]:
            trigger = {
                "trigger": "interval",
                "start_date": self.aps_date(task["start_date"]),
                "end_date": self.aps_date(task["end_date"]),
                "seconds": int(task["frequency"])
                * self.seconds[task["frequency_unit"]],
            }
        else:
            trigger = {"trigger": "date", "run_date": self.aps_date(task["start_date"])}
        if not self.scheduler.get_job(task["id"]):
            job = self.scheduler.add_job(
                id=str(task["id"]),
                replace_existing=True,
                func=self.run_service,
                args=[task["id"]],
                **trigger,
            )
        else:
            job = self.scheduler.reschedule_job(str(task["id"]), **trigger)
        return job.next_run_time > datetime.now(job.next_run_time.tzinfo)
Example #30
class BirthdayManager(Cog):
    def __init__(self, bot: Bot):
        self.birthdays: List[Person] = []
        self.bot = bot
        self.lock = Lock()
        self.timer: Optional[Timer] = None

        self.channel = int(environ.get("SAFETY_ANNOUNCEMENT_CHANNEL"))
        self.doc = environ.get("SAFETY_GOOGLE_DOCS_LINK")
        self.refresh_birthdays.start()

        self.scheduler = AsyncIOScheduler()
        self.scheduler.add_job(self.schedule_birthday,
                               trigger="cron",
                               hour=0,
                               minute=0,
                               second=0)
        self.scheduler.start()

    def cog_unload(self):
        self.refresh_birthdays.cancel()
        self.scheduler.remove_all_jobs()

    @tasks.loop(hours=48)
    async def refresh_birthdays(self):
        """
    Polls for changes from google sheet for birthdays
    """
        async with self.lock:
            data = sheets.spreadsheets() \
              .values() \
              .get(spreadsheetId=self.doc, range="A2:J500") \
              .execute() \
              .get("values", [])

            self.birthdays.clear()

            for person in data:
                kerberos = person[1]
                birthday = person[9] if len(person) >= 10 else person[-1]
                self.birthdays.append(
                    (kerberos, get_date(birthday, date_formats)))

            self.birthdays.sort(key=cmp_to_key(compare_people))

    async def schedule_birthday(self):
        """
    Reviews birthday metadata and sends birthday notice in SAFETY_ANNOUNCEMENT_CHANNEL
    if someone's birthday is today (server time)
    """
        try:
            async with self.lock:
                tz = get_localzone()

                now = datetime.now(tz)
                tomorrow = (now + timedelta(1)) \
                  .replace(hour=0, minute=0, second=0, microsecond=0)

                people: List[Tuple[str, datetime]] = []

                for person in self.birthdays:
                    if person[1] is None:
                        continue

                    current_birthday = localtime(
                        person[1].replace(year=now.year), tz)

                    if tomorrow - timedelta(1) <= current_birthday < tomorrow:
                        people.append(person)

                if len(people) == 0:
                    return

                names = [person[0] for person in people]
                names_str = ", ".join(names)
                message = f"Happy birthday to {names_str}!\n"

                for person in people:
                    difference = tomorrow - localtime(person[1], tz)
                    age = round(difference.total_seconds() / seconds_in_year)
                    message += f"{person[0]} is {age} years old\n"

                target_channel = self.bot.get_channel(self.channel)

                while target_channel is None:
                    await sleep(5)
                    target_channel = self.bot.get_channel(self.channel)

                await target_channel.send(message)
        except Exception as e:
            print(e)
Example #31
        msg = "What the f**k is this? Such city doesn't exist!!!"
        await bot.send_message(message.chat.id,
                               msg,
                               reply_to_message_id=message.message_id)


# Define the function that sends weather to the chat on a schedule
@dp.message_handler()
async def sched(msg=None):
    for id, arr in arr_dict.items():
        msg = get_weather(arr)
        await bot.send_message(chat_id=id, text=msg)


# Create scheduler with interval 1 day
scheduler = AsyncIOScheduler()
scheduler.add_job(sched, 'cron', day_of_week='mon-sun', hour=2, minute=0)
scheduler.start()


# Create the function to startup my bot
async def on_startup(dp):
    await bot.set_webhook(WEBHOOK_URL)


# Create the function to shutdown my bot
async def on_shutdown(dp):
    await bot.close()


# Main script
Example #32
def sche():
    scheduler = AsyncIOScheduler()
    # TODO: adapt this for other timezones; as written it assumes a zero-offset (UTC) clock
    scheduler.add_job(send_early_msg, 'cron', hour="3", minute="0")
    scheduler.add_job(send_new_day_msg, 'cron', hour="0", minute="0")
    scheduler.start()
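Addressing the TODO above, a hedged sketch of pinning the cron schedule to explicit timezones instead of assuming a zero-offset host clock (the two senders are stubs standing in for the real functions):

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pytz import timezone

def send_early_msg():
    print('early message')    # stub for the real sender

def send_new_day_msg():
    print('new day message')  # stub for the real sender

def sche():
    # scheduler-wide default timezone
    scheduler = AsyncIOScheduler(timezone=timezone('UTC'))
    scheduler.add_job(send_early_msg, 'cron', hour=3, minute=0)
    # cron triggers also accept a per-job timezone override
    scheduler.add_job(send_new_day_msg, 'cron', hour=0, minute=0,
                      timezone=timezone('Asia/Shanghai'))
    scheduler.start()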
Example #33
import asyncio
import datetime

from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from utils.db_util import mdb, es, INDEX_TEXT, INDEX_MAPPING, create_es_doc
from elasticsearch import helpers

DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"

executors = {
    "default": ThreadPoolExecutor(10),
    "processpool": ProcessPoolExecutor(2)
}
job_defaults = {"coalesce": False, "max_instances": 10}

loop = asyncio.get_event_loop()
scheduler = AsyncIOScheduler({"event_loop": loop},
                             executors=executors,
                             job_defaults=job_defaults)
ALL_CATEGORY = ["news"]


# minute
# second
# @scheduler.scheduled_job(trigger='cron', second='*/20', id='sync_to_es')
def sync_to_es():
    collections = mdb.collection_names()
    now = datetime.datetime.now().strftime(DATE_FORMAT)

    # reg = '.*' + now + '.*'
    # reg = '.*2018-02-23.*'
    # for collection in collections:
    #     now_date_news = mdb[collection].find({'insert_time': {'$regex': reg}}, projection={'_id': False})
Example #34
    subprocess.run([
        'scheduler/start-container.sh', config['module'].lower(),
        str(config['exchange']),
        str(config['url']),
        str(config['market']),
        str(config['module']),
        str(config['schedule']),
        str(config['restart']),
        str(config['restart_after']),
        str(config['container_uid'])
    ])


if __name__ == '__main__':

    scheduler = AsyncIOScheduler()
    scheduler.configure({
        'apscheduler.executors.default': {
            'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
            'max_workers': '1000'
        },
        'apscheduler.executors.processpool': {
            'type': 'processpool',
            'max_workers': '1000'
        },
        'apscheduler.timezone': 'UTC',
    })

    #running monitor first
    scheduler.add_job(run_monitor, None, [scheduler])
Example #35
class Game_Channel_Bot:
    server_id = 356464812768886784
    category_id = 489221318978437120

    logging.basicConfig()
    logging.getLogger('apscheduler').setLevel(logging.DEBUG)
    sched = AsyncIOScheduler()
    client = discord.Client()

    # Get games on script startup
    date = datetime.datetime.today()
    parameter = date.strftime('%Y%m%d')
    parameter = str(
        20180928
    )  # This line is for testing purposes. Date is set to first day of preseason
    games = []
    format = '%H:%M %p'
    with urlopen('http://data.nba.com/prod/v2/' + parameter +
                 '/scoreboard.json') as url:
        j = json.loads(url.read().decode())
        for game in j["games"]:
            gameDetails = {}
            gameDetails["time"] = datetime.datetime.strptime(
                game["startTimeEastern"].replace(" ET", ""), format).time()
            gameDetails["home"] = game["hTeam"]["triCode"]
            gameDetails["away"] = game["vTeam"]["triCode"]
            gameDetails['created_channel'] = False
            #print(gameDetails)
            games.append(gameDetails)

    # Method to update games daily
    async def update_games_daily(self):
        for game in self.games:
            try:
                await game['channel'].delete()
            except Exception as e:
                print(e)
                pass
        date = datetime.datetime.today()
        parameter = date.strftime('%Y%m%d')
        parameter = str(
            20180928
        )  # This line is for testing purposes. Date is set to first day of preseason
        self.games = []
        format = '%H:%M %p'
        with urlopen('http://data.nba.com/prod/v2/' + parameter +
                     '/scoreboard.json') as url:
            j = json.loads(url.read().decode())
            for game in j["games"]:
                gameDetails = {}
                gameDetails["time"] = datetime.datetime.strptime(
                    game["startTimeEastern"].replace(" ET", ""),
                    format).time()
                gameDetails["home"] = game["hTeam"]["triCode"]
                gameDetails["away"] = game["vTeam"]["triCode"]
                gameDetails['created_channel'] = False
                #print(gameDetails)
                self.games.append(gameDetails)

    # Creates game thread channels
    async def create_game_threads(self):
        for game in self.games:
            # If it's one hour before the game and we haven't created a channel yet
            if (datetime.datetime.now() + datetime.timedelta(hours=1)
                ).time() > game['time'] and game['created_channel'] == False:
                game['created_channel'] = True
                guild = self.client.get_guild(self.server_id)
                category = self.client.get_channel(self.category_id)
                # category is a keyword-only argument in discord.py
                channel = await guild.create_text_channel(
                    game['away'] + '-at-' + game['home'], category=category)
                game['channel'] = channel

    # Init - Adds jobs to scheduler
    def __init__(self):
        self.sched.add_job(self.update_games_daily, 'cron', hour=1)
        self.sched.add_job(self.create_game_threads, 'interval', minutes=1)
        self.sched.start()
        self.client.run('')
Example #36
                "`12:00 Am, Group Is Closing Till 6 Am. Night Mode Started !` \n**Powered By @AuraXRobot**",
            )
            await tbot(
                functions.messages.EditChatDefaultBannedRightsRequest(
                    peer=int(warner.chat_id), banned_rights=hehes))
            if CLEAN_GROUPS:
                async for user in tbot.iter_participants(int(warner.chat_id)):
                    if user.deleted:
                        await tbot.edit_permissions(int(warner.chat_id),
                                                    user.id,
                                                    view_messages=False)
        except Exception as e:
            logger.info(f"Unable To Close Group {warner} - {e}")


scheduler = AsyncIOScheduler(timezone="Asia/Kolkata")
scheduler.add_job(job_close, trigger="cron", hour=23, minute=55)
scheduler.start()


async def job_open():
    ws_chats = get_all_chat_id()
    if len(ws_chats) == 0:
        return
    for warner in ws_chats:
        try:
            await tbot.send_message(
                int(warner.chat_id),
                "`06:00 Am, Group Is Opening.`\n**Powered By @AuraXRobot**",
            )
            await tbot(
Example #37
class TaskController:
    """Handle the "create timed task" commands."""
    def __init__(self):
        """attributes:
            scheduler: job scheduler
            reply: reply to user command"""
        self.reply = ''  # during handling, help or reply messages may be needed
        self.scheduler = AsyncIOScheduler()
        self.id_count = 0
        self.scheduler.start()

    def handle_msg(self, msg, conversation: Union[Contact, Room], to_bot):
        """handle commands
        params:
            msg: str, messages from talker
            sayer: the place to say the words
            to_bot: bool, whether talking to the bot
            """
        # clear reply
        self.reply = ''
        # check if saying to the bot
        if not to_bot:
            return False
        if self.handle_create_task(msg, conversation):
            return True
        return False

    def get_reply(self):
        """return reply"""
        return self.reply

    def handle_create_task(self, msg: str, conversation: Union[Contact, Room]):
        """handle create task command"""
        if self.date_type_task(msg, conversation):
            return True
        if self.cron_type_task(msg, conversation):
            return True
        return False

    def date_type_task(self, msg: str, conversation: Union[Contact, Room]):
        """parse date type timed task
        pattern is like: timed message#y-m-d h:min:sec-msg"""
        pattern = re.compile(r'^\s*' + KEY_TIMED_TASK + r'\s*' + KEY_SPLIT +
                             r'\s*(\d+-\d+-\d+\s+\d+:\d+:.*?)-(.*?)$')
        msg = re.sub(r'\s+', ' ', msg)
        msg = re.sub('：', ':', msg)  # normalize full-width colons to ASCII
        res = pattern.match(msg)
        if res is None:
            return False
        self.id_count += 1
        self.scheduler.add_job(conversation.say,
                               'date',
                               run_date=res.group(1),
                               args=[res.group(2)],
                               id=str(self.id_count))
        self.reply = reply.set_date_timed_task_success(res.group(1),
                                                       res.group(2))
        return True

    def cron_type_task(self, msg: str, conversation: Union[Contact, Room]):
        """parse cron type timed task
        pattern is like: timed message#month-day-day_of_week-hour-minute-msg"""
        pattern = re.compile(r'^\s*' + KEY_TIMED_TASK + r'\s*' + KEY_SPLIT +
                             r'\s*' + '(' +
                             '-'.join([r'(?:\d+|\*)' for _ in range(5)]) +
                             ')' + '-' + r'(.*?)$')
        res = pattern.match(msg)
        if res is None:
            return False
        try:
            params = parse_cron_str_to_dict(res.group(1), '-')
            self.id_count += 1
            self.scheduler.add_job(conversation.say,
                                   'cron',
                                   month=params['month'],
                                   day=params['day'],
                                   day_of_week=params['week day'],
                                   hour=params['hour'],
                                   minute=params['minute'],
                                   args=[res.group(2)],
                                   id=str(self.id_count))
            self.reply = reply.set_cron_timed_task_success(
                params, res.group(2))
        except ValueError:
            self.reply = reply.parse_datetime_error()
        return True
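
For reference, a minimal sketch of a matching date-type command, assuming KEY_TIMED_TASK is 'timed message' and KEY_SPLIT is '#' (hypothetical values; the real constants live elsewhere in this project):

import re

KEY_TIMED_TASK = 'timed message'  # assumed value
KEY_SPLIT = '#'                   # assumed value

pattern = re.compile(r'^\s*' + KEY_TIMED_TASK + r'\s*' + KEY_SPLIT +
                     r'\s*(\d+-\d+-\d+\s+\d+:\d+:.*?)-(.*?)$')
res = pattern.match('timed message#2024-1-1 08:30:00-Happy new year!')
print(res.group(1))  # '2024-1-1 08:30:00' -> becomes the job's run_date
print(res.group(2))  # 'Happy new year!'   -> the text the bot will say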
示例#38
0
# You should have received a copy of the GNU Affero General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from apscheduler.executors.asyncio import AsyncIOExecutor
from apscheduler.jobstores.redis import RedisJobStore
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from pytz import utc

from utah.config import get_str_key, get_int_key
from utah.utils.logger import log

DEFAULT = "default"

jobstores = {
    DEFAULT:
    RedisJobStore(host=get_str_key("REDIS_URI"),
                  port=get_int_key("REDIS_PORT"),
                  password=get_str_key("REDIS_PASS"))
}
executors = {DEFAULT: AsyncIOExecutor()}
job_defaults = {"coalesce": False, "max_instances": 3}

scheduler = AsyncIOScheduler(jobstores=jobstores,
                             executors=executors,
                             job_defaults=job_defaults,
                             timezone=utc)

log.info("Starting apscheduler...")
scheduler.start()
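
Because the job store is Redis-backed, jobs added to this scheduler survive process restarts. A hypothetical usage sketch (the job function, trigger, and id are illustrative, not part of utah):

async def nightly_cleanup():
    log.info("Running nightly cleanup")

# A stable id plus replace_existing=True makes a restart replace the stored
# job instead of duplicating it; the callable must live at module level so
# the Redis job store can serialize a reference to it.
scheduler.add_job(nightly_cleanup, "cron", hour=3, id="nightly_cleanup",
                  replace_existing=True)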
示例#39
0
"""
It's not intended to be part of the supported code base but just an example on how to schedule the commands of
mobilizon-reshare.
"""
import asyncio
import os
from functools import partial

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

from mobilizon_reshare.cli import _safe_execution
from mobilizon_reshare.cli.commands.recap.main import recap
from mobilizon_reshare.cli.commands.start.main import start

sched = AsyncIOScheduler()

# Runs "start" every 15 minutes between 10:00 and 18:59, Monday to Friday
sched.add_job(
    partial(_safe_execution, start),
    CronTrigger.from_crontab(
        os.environ.get("MOBILIZON_RESHARE_INTERVAL", "*/15 10-18 * * 0-4")),
)
# Runs "recap" once a week
sched.add_job(
    partial(_safe_execution, recap),
    CronTrigger.from_crontab(
        os.environ.get("MOBILIZON_RESHARE_RECAP_INTERVAL", "5 11 * * 0")),
)
sched.start()
try:
示例#40
0
from requests.exceptions import ChunkedEncodingError
from requests.exceptions import ConnectionError as ConnectedError
from asyncio import CancelledError as CancelError
from asyncio import TimeoutError as AsyncTimeoutError
from sqlite3 import OperationalError
from http.client import RemoteDisconnected
from urllib.error import URLError
from concurrent.futures._base import TimeoutError
from redis.exceptions import ResponseError

persistent_vars = {}
module_dir = __path__[0]
working_dir = getcwd()
config = None
help_messages = {}
scheduler = AsyncIOScheduler()
if not scheduler.running:
    scheduler.configure(timezone="Asia/Shanghai")
    scheduler.start()
version = 0.1
logs = getLogger(__name__)
logging_format = "%(levelname)s [%(asctime)s] [%(name)s] %(message)s"
logging_handler = StreamHandler()
logging_handler.setFormatter(ColoredFormatter(logging_format))
root_logger = getLogger()
root_logger.setLevel(ERROR)
root_logger.addHandler(logging_handler)
basicConfig(level=INFO)
logs.setLevel(INFO)

try:
示例#41
0
predictions = tb.read_json('{}/predictions.json'.format(informationFolder))


def learn(city, station):
    # Get data from the past 30 days
    threshold = datetime.datetime.now() - datetime.timedelta(days=timespan)
    try:
        dataframe = query.station(city, station, threshold)
    except Exception:
        # no usable data for this station; skip it
        return
    # Prepare the dataframe for learning
    dataframe = munging.prepare(dataframe)
    # Apply the regressor that is chosen in the settings
    method.fit(dataframe, 'bikes', city, station)
    method.fit(dataframe, 'spaces', city, station)

if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    for city in stationsFile.keys():
        if predictions[city] == 'Yes':
            for station in stationsFile[city]:
                learn(city, station)
                scheduler.add_job(learn, 'interval', days=refresh,
                                  args=[city, station], coalesce=True,
                                  # grace window of one full refresh cycle (in seconds)
                                  misfire_grace_time=60*60*24*refresh)
    scheduler.start()
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#42
0
class Reminder:
    """Reminder class

    Stores events and creates the reminders for them.
    """
    def __init__(self, secret):
        # Connect to the database
        self.db = DB(secret)

        # Start the async scheduler
        self.sched = AsyncIOScheduler()
        self.sched.start()

    @property
    def action(self):
        return self._action

    @action.setter
    def action(self, value):
        if not callable(value):
            raise ValueError("The value must be a function")
        self._action = value

    @property
    def reminders(self):
        return self._reminders

    @reminders.setter
    def reminders(self, value):
        if not isinstance(value, list):
            raise ValueError("The value must be a list")
        self._reminders = value

    # Public functions

    async def add(self, date, time, time_zone, channel, text, author):
        """Adds a new event and creates its reminders"""

        try:
            date_time = datetime.fromisoformat(f"{date}T{time}{time_zone}")
            date_time_now = datetime.utcnow().replace(tzinfo=timezone.utc)

            # If the event date is already in the past, bail out
            if date_time < date_time_now:
                return []

            event = self._generate_event(date_time, channel, text, author)
            jobs_id = self._create_jobs(event)

            # Save the event to the database
            data = {
                "author": event['author'],
                "text": event['text'],
                "time": self.db.q.time(event['time'].isoformat()),
                "channel": event['channel'],
                "jobs": jobs_id
            }

            # Create the record
            return self.db.create("Events", data)
        except ValueError:
            # The date format was invalid
            return None

    async def load(self):
        """Loads the events from the database

        Used to load the events stored in the database when the
        program starts.

        Reads the events from the database, loads them into the scheduler,
        and updates the database with the new job ids
        """
        docs = self.db.get_all("all_events")
        new_docs = []
        for doc in docs['data']:
            event = {
                "text":
                doc['data']['text'],
                "time":
                datetime.fromisoformat(
                    f"{doc['data']['time'].value[:-1]}+00:00"),
                "channel":
                doc['data']['channel'],
                "reminders":
                self.reminders
            }

            # Create the jobs
            jobs_id = self._create_jobs(event)
            new_docs.append((doc['ref'].id(), {"jobs": jobs_id}))

        # Update the database with the new job ids
        return self.db.update_all_jobs("Events", new_docs)

    async def list(self):
        """Lists all scheduled events"""
        # events = self.db.get_by_author("events_by_author", author)
        events = self.db.get_all("all_events")
        return events['data']

    async def remove(self, id_, author):
        """Deletes a scheduled event"""
        return self._remove_by_id_and_author(id_, author)

    # Private functions

    def _remove_by_id(self, id_: str):
        try:
            doc = self.db.delete("Events", id_)
            log.info("Deleted event: %s", doc)
            for job in doc['data']['jobs']:
                self.sched.remove_job(job)
            return doc
        except Exception:
            return []

    def _remove_by_id_and_author(self, id_: str, author: str):
        try:
            doc = self.db.delete_by_id_and_author("Events",
                                                  "event_by_id_and_author",
                                                  id_, author)
            for job in doc['data']['jobs']:
                self.sched.remove_job(job)
            return doc
        except Exception:
            return []

    async def _remove_old_event(self):
        self.db.delete_by_expired_time("all_events_by_time")

    def _create_jobs(self, event):
        dt_event = event['time']
        dt_now = datetime.utcnow().replace(tzinfo=timezone.utc)

        jobs_id = []
        for reminder in event['reminders']:
            if dt_event > dt_now + reminder['delta']:
                log.info("Added event")
                job = self.sched.add_job(
                    self.action,
                    'date',
                    run_date=(dt_event - reminder['delta']),
                    args=[
                        reminder['message'], event['text'], event['channel']
                    ])
                jobs_id.append(job.id)

        # Job to remove the record from the database once the event has passed
        job = self.sched.add_job(self._remove_old_event,
                                 'date',
                                 run_date=(dt_event),
                                 args=[])
        jobs_id.append(job.id)

        return jobs_id

    def _generate_event(self, date_time, channel, text, author):
        return {
            "author": f"{author}",
            "text": text,
            "time": date_time,
            "channel": int(channel),
            "reminders": self.reminders
        }
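
A hypothetical wiring sketch (the DB secret, the notify_channel coroutine, and the delta values are all illustrative): the action must be a callable taking (message, text, channel), and each reminders entry needs 'delta' and 'message' keys, matching how _create_jobs builds the date jobs.

from datetime import timedelta

reminder = Reminder(secret)          # secret: database credentials (assumed)
reminder.action = notify_channel     # any callable(message, text, channel)
reminder.reminders = [
    {"delta": timedelta(days=1), "message": "One day to go:"},
    {"delta": timedelta(hours=1), "message": "Starting in an hour:"},
]
# inside an async context:
await reminder.add("2024-06-01", "18:00", "+00:00", "123456", "Team demo", "ana")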
示例#43
0
async def website_check():
    for item in website_list:
        url = item["url"]
        resp = None
        message = ""
        try:
            resp = requests.get(url)
        except Exception as e:
            message = f"{url} request error: \n{e}\n"

        if getattr(resp, "status_code", 0) != 200:
            content = getattr(resp, "content", None)
            message += f"{url} content error: \n{content}\n"

        if message:
            api = f"https://api.telegram.org/bot{bot_token}/sendMessage?chat_id=260260121&text={message}"
            requests.get(api).json()
            logging.error(message)
        else:
            logging.info("%s OK: %s bytes.", url, len(resp.content))


if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    scheduler.add_job(send_health_check, 'interval', seconds=300)
    scheduler.add_job(website_check, 'interval', seconds=60)
    scheduler.start()
    client.start()
    client.run_until_disconnected()
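
One caveat with this example: requests.get is a blocking call, so every check stalls the event loop that AsyncIOScheduler shares with the Telegram client. A non-blocking variant of the fetch, sketched with aiohttp (an assumption, not what the original uses):

import aiohttp

async def fetch_status(url):
    # Returns (status_code, body) without blocking the event loop
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as resp:
            return resp.status, await resp.read()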
示例#44
0
文件: scheduler.py 项目: andymor/eNMS
def configure_scheduler(self):
    self.scheduler = AsyncIOScheduler(self.settings["config"])
    self.scheduler.start()
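
Here the positional argument is APScheduler's gconfig mapping, whose keys carry the 'apscheduler.' prefix. A minimal sketch of what such a settings entry might hold (illustrative values, not eNMS's actual config):

settings = {
    "config": {
        "apscheduler.timezone": "UTC",
        "apscheduler.job_defaults.max_instances": "2",
    }
}
scheduler = AsyncIOScheduler(settings["config"])
scheduler.start()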
示例#45
0
class Scheduler:
    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        # this should be something that would never make sense inside a project name or a job name
        self._job_id_separator = "-_-"
        # we don't allow a job to be scheduled to run more than once per X
        # NOTE this cannot be less than one minute - see _validate_cron_trigger
        self._min_allowed_interval = config.httpdb.scheduling.min_allowed_interval

    async def start(self, db_session: Session):
        logger.info("Starting scheduler")
        self._scheduler.start()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep make them work
        await asyncio.sleep(0)

        # don't fail the start on re-scheduling failure
        try:
            self._reload_schedules(db_session)
        except Exception as exc:
            logger.warning("Failed reloading schedules", exc=exc)

    async def stop(self):
        logger.info("Stopping scheduler")
        self._scheduler.shutdown()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep make them work
        await asyncio.sleep(0)

    def create_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Union[Dict, Callable],
        cron_trigger: Union[str, schemas.ScheduleCronTrigger],
        labels: Dict = None,
        concurrency_limit: int = config.httpdb.scheduling.default_concurrency_limit,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Creating schedule",
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_project_member().ensure_project(db_session, project)
        get_db().create_schedule(
            db_session,
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            labels,
        )
        self._create_schedule_in_scheduler(
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
        )

    def update_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        scheduled_object: Union[Dict, Callable] = None,
        cron_trigger: Union[str, schemas.ScheduleCronTrigger] = None,
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        if cron_trigger is not None:
            self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Updating schedule",
            project=project,
            name=name,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_db().update_schedule(
            db_session,
            project,
            name,
            scheduled_object,
            cron_trigger,
            labels,
            concurrency_limit,
        )
        db_schedule = get_db().get_schedule(db_session, project, name)
        updated_schedule = self._transform_and_enrich_db_schedule(
            db_session, db_schedule)

        self._update_schedule_in_scheduler(
            project,
            name,
            updated_schedule.kind,
            updated_schedule.scheduled_object,
            updated_schedule.cron_trigger,
            updated_schedule.concurrency_limit,
        )

    def list_schedules(
        self,
        db_session: Session,
        project: str = None,
        name: str = None,
        kind: str = None,
        labels: str = None,
        include_last_run: bool = False,
    ) -> schemas.SchedulesOutput:
        logger.debug("Getting schedules",
                     project=project,
                     name=name,
                     labels=labels,
                     kind=kind)
        db_schedules = get_db().list_schedules(db_session, project, name,
                                               labels, kind)
        schedules = []
        for db_schedule in db_schedules:
            schedule = self._transform_and_enrich_db_schedule(
                db_session, db_schedule, include_last_run)
            schedules.append(schedule)
        return schemas.SchedulesOutput(schedules=schedules)

    def get_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        logger.debug("Getting schedule", project=project, name=name)
        db_schedule = get_db().get_schedule(db_session, project, name)
        return self._transform_and_enrich_db_schedule(db_session, db_schedule,
                                                      include_last_run)

    def delete_schedule(self, db_session: Session, project: str, name: str):
        logger.debug("Deleting schedule", project=project, name=name)
        job_id = self._resolve_job_id(project, name)
        # don't fail on delete if job doesn't exist
        job = self._scheduler.get_job(job_id)
        if job:
            self._scheduler.remove_job(job_id)
        get_db().delete_schedule(db_session, project, name)

    async def invoke_schedule(self, db_session: Session, project: str,
                              name: str):
        logger.debug("Invoking schedule", project=project, name=name)
        db_schedule = await fastapi.concurrency.run_in_threadpool(
            get_db().get_schedule, db_session, project, name)
        function, args, kwargs = self._resolve_job_function(
            db_schedule.kind,
            db_schedule.scheduled_object,
            project,
            name,
            db_schedule.concurrency_limit,
        )
        return await function(*args, **kwargs)

    def _validate_cron_trigger(
        self,
        cron_trigger: schemas.ScheduleCronTrigger,
        # accepting now from outside for testing purposes
        now: datetime = None,
    ):
        """
        Enforce no more than one job per min_allowed_interval
        """
        logger.debug("Validating cron trigger")
        apscheduler_cron_trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = now or datetime.now(apscheduler_cron_trigger.timezone)
        next_run_time = None
        second_next_run_time = now

        # doing 60 checks to allow one-minute precision; if the _min_allowed_interval is less than one minute, validation
        # won't fail in certain scenarios where it should. See test_validate_cron_trigger_multi_checks for a detailed
        # explanation
        for index in range(60):
            next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                None, second_next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=1999
            if next_run_time is None:
                return
            second_next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                next_run_time, next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=2050
            if second_next_run_time is None:
                return
            min_allowed_interval_seconds = humanfriendly.parse_timespan(
                self._min_allowed_interval)
            if second_next_run_time < next_run_time + timedelta(
                    seconds=min_allowed_interval_seconds):
                logger.warn(
                    "Cron trigger too frequent. Rejecting",
                    cron_trigger=cron_trigger,
                    next_run_time=next_run_time,
                    second_next_run_time=second_next_run_time,
                    delta=second_next_run_time - next_run_time,
                )
                raise ValueError(
                    f"Cron trigger too frequent. No more than one job "
                    f"per {self._min_allowed_interval} is allowed")

    def _create_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Adding schedule to scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )

        # we use max_instances as well as our logic in the run wrapper for concurrent jobs
        # in order to allow concurrency for triggering the jobs (max_instances), and concurrency
        # of the jobs themselves (our logic in the run wrapper).
        self._scheduler.add_job(
            function,
            self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
                cron_trigger),
            args,
            kwargs,
            job_id,
            max_instances=concurrency_limit,
        )

    def _update_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        job_id = self._resolve_job_id(project, name)
        logger.debug("Updating schedule in scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )
        trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = datetime.now(self._scheduler.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self._scheduler.modify_job(
            job_id,
            func=function,
            args=args,
            kwargs=kwargs,
            trigger=trigger,
            next_run_time=next_run_time,
        )

    def _reload_schedules(self, db_session: Session):
        logger.info("Reloading schedules")
        db_schedules = get_db().list_schedules(db_session)
        for db_schedule in db_schedules:
            # don't let one failure fail the rest
            try:
                self._create_schedule_in_scheduler(
                    db_schedule.project,
                    db_schedule.name,
                    db_schedule.kind,
                    db_schedule.scheduled_object,
                    db_schedule.cron_trigger,
                    db_schedule.concurrency_limit,
                )
            except Exception as exc:
                logger.warn(
                    "Failed rescheduling job. Continuing",
                    exc=str(exc),
                    db_schedule=db_schedule,
                )

    def _transform_and_enrich_db_schedule(
        self,
        db_session: Session,
        schedule_record: schemas.ScheduleRecord,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        schedule_dict = schedule_record.dict()
        schedule_dict["labels"] = {
            label["name"]: label["value"]
            for label in schedule_dict["labels"]
        }
        schedule = schemas.ScheduleOutput(**schedule_dict)

        job_id = self._resolve_job_id(schedule_record.project,
                                      schedule_record.name)
        job = self._scheduler.get_job(job_id)
        if job:
            schedule.next_run_time = job.next_run_time

        if include_last_run:
            schedule = self._enrich_schedule_with_last_run(
                db_session, schedule)

        return schedule

    @staticmethod
    def _enrich_schedule_with_last_run(
            db_session: Session, schedule_output: schemas.ScheduleOutput):
        if schedule_output.last_run_uri:
            run_project, run_uid, iteration, _ = RunObject.parse_uri(
                schedule_output.last_run_uri)
            run_data = get_db().read_run(db_session, run_uid, run_project,
                                         iteration)
            schedule_output.last_run = run_data
        return schedule_output

    def _resolve_job_function(
        self,
        scheduled_kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        project_name: str,
        schedule_name: str,
        schedule_concurrency_limit: int,
    ) -> Tuple[Callable, Optional[Union[List, Tuple]], Optional[Dict]]:
        """
        :return: a tuple (function, args, kwargs) to be used with the APScheduler.add_job
        """

        if scheduled_kind == schemas.ScheduleKinds.job:
            scheduled_object_copy = copy.deepcopy(scheduled_object)
            return (
                Scheduler.submit_run_wrapper,
                [
                    scheduled_object_copy,
                    project_name,
                    schedule_name,
                    schedule_concurrency_limit,
                ],
                {},
            )
        if scheduled_kind == schemas.ScheduleKinds.local_function:
            return scheduled_object, [], {}

        # sanity
        message = "Scheduled object kind missing implementation"
        logger.warn(message, scheduled_object_kind=scheduled_kind)
        raise NotImplementedError(message)

    def _resolve_job_id(self, project, name) -> str:
        """
        :return: returns the identifier that will be used inside the APScheduler
        """
        return self._job_id_separator.join([project, name])

    @staticmethod
    async def submit_run_wrapper(scheduled_object, project_name, schedule_name,
                                 schedule_concurrency_limit):
        # import here to avoid circular imports
        from mlrun.api.api.utils import submit_run

        # removing the schedule from the body otherwise when the scheduler will submit this task it will go to an
        # endless scheduling loop
        scheduled_object.pop("schedule", None)

        # removing the uid from the task metadata so that a new uid will be generated for every run
        # otherwise all runs will have the same uid
        scheduled_object.get("task", {}).get("metadata", {}).pop("uid", None)

        if "task" in scheduled_object and "metadata" in scheduled_object[
                "task"]:
            scheduled_object["task"]["metadata"].setdefault("labels", {})
            scheduled_object["task"]["metadata"]["labels"][
                schemas.constants.LabelNames.schedule_name] = schedule_name

        db_session = create_session()

        active_runs = get_db().list_runs(
            db_session,
            state=RunStates.non_terminal_states(),
            project=project_name,
            labels=
            f"{schemas.constants.LabelNames.schedule_name}={schedule_name}",
        )
        if len(active_runs) >= schedule_concurrency_limit:
            logger.warn(
                "Schedule exceeded concurrency limit, skipping this run",
                project=project_name,
                schedule_name=schedule_name,
                schedule_concurrency_limit=schedule_concurrency_limit,
                active_runs=len(active_runs),
            )
            return

        response = await submit_run(db_session, scheduled_object)

        run_metadata = response["data"]["metadata"]
        run_uri = RunObject.create_uri(run_metadata["project"],
                                       run_metadata["uid"],
                                       run_metadata["iteration"])
        get_db().update_schedule(
            db_session,
            run_metadata["project"],
            schedule_name,
            last_run_uri=run_uri,
        )

        close_session(db_session)

        return response

    @staticmethod
    def transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
        cron_trigger: schemas.ScheduleCronTrigger, ):
        return APSchedulerCronTrigger(
            cron_trigger.year,
            cron_trigger.month,
            cron_trigger.day,
            cron_trigger.week,
            cron_trigger.day_of_week,
            cron_trigger.hour,
            cron_trigger.minute,
            cron_trigger.second,
            cron_trigger.start_date,
            cron_trigger.end_date,
            cron_trigger.timezone,
            cron_trigger.jitter,
        )
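
To see why two consecutive fire times are enough to measure a trigger's interval, here is a standalone sketch of the same check using APScheduler's CronTrigger directly (ignoring DST edge cases):

from datetime import datetime, timedelta
from apscheduler.triggers.cron import CronTrigger

trigger = CronTrigger.from_crontab("*/5 * * * *")   # fires every 5 minutes
now = datetime.now(trigger.timezone)
first = trigger.get_next_fire_time(None, now)
second = trigger.get_next_fire_time(first, first)
assert second - first == timedelta(minutes=5)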
示例#46
0
import traceback
from io import StringIO
from typing import List, Dict, Tuple
from urllib.parse import urlencode

DEFAULT_SCRIPT_DIR = 'default-scripts'
DEBUG_CHANNEL = os.getenv('DEBUG_CHANNEL', 'alphabot')

WEB_PORT = int(os.getenv('WEB_PORT', 8000))
WEB_NO_SSL = os.getenv('WEB_NO_SSL', '') != ''
WEB_PORT_SSL = int(os.getenv('WEB_PORT_SSL', 8443))

log = logging.getLogger(__name__)
log_level = logging.getLevelName(os.getenv('LOG_LEVEL', 'INFO'))
log.setLevel(log_level)
scheduler = AsyncIOScheduler()
scheduler.start()


class AlphaBotException(Exception):
    """Top of hierarchy for all alphabot failures."""


class CoreException(AlphaBotException):
    """Used to signify a failure in the robot's core."""


class InvalidOptions(AlphaBotException):
    """Robot failed because input options were somehow broken."""

示例#47
0
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log',
                              encoding='utf-8',
                              mode='w')
handler.setFormatter(
    logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)

# Set to remember if the bot is already running, since on_ready may be called
# more than once on reconnects
this = sys.modules[__name__]
this.running = False

# Scheduler that will be used to manage events
sched = AsyncIOScheduler()

###############################################################################


def main():
    # Initialize the client
    print("Starting up...")
    client = discord.Client()

    # Define event handlers for the client
    # on_ready may be called multiple times in the event of a reconnect,
    # hence the running flag

    @client.event
    async def on_ready():
示例#48
0
    spider()
    work_schedule()


def execution_listener(event):
    if event.exception:
        _logger.error('The job crashed')
    else:
        # check that the executed job is the first job
        job = scheduler.get_job(event.job_id)
        if getattr(job, 'name', '') == 'spider':
            scheduler.add_job(work_schedule, name='work_schedule')


if __name__ == '__main__':
    scheduler = AsyncIOScheduler()
    scheduler.add_job(spider,
                      trigger='interval',
                      hours=1,
                      name='spider',
                      next_run_time=datetime.now() + timedelta(seconds=4))
    scheduler.add_listener(callback=execution_listener,
                           mask=EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#49
-1
class Agent(object):
    def __init__(self, _client):
        self.network_checker = NetworkChecker()
        self.scheduler = AsyncIOScheduler()
        self._add_job(_client)

    def _add_job(self, _client):
        self.scheduler.add_job(tick, 'interval', seconds=config.check_interval, args=[_client,])