示例#1
0
class Agent(object):
    """Schedules periodic network checks against the other serf nodes."""

    def __init__(self, _client, _loop, _queue, _snode=None):
        """
        :param _client: client handed to the network checker
        :param _loop: event loop
        :param _queue: work queue shared with the checker
        :param _snode: ip of current node (auto-detected via serf when omitted)
        """
        self._loop = _loop
        self._queue = _queue
        self._serf_client = SerfClient()
        if _snode:
            self._snode = _snode
        else:
            self._snode = self._get_local_ip()
        self.network_checker = NetworkChecker(_client, _loop, _queue, self._snode)
        self.scheduler = AsyncIOScheduler()
        self._add_job(_client)
        self._list_node = []

    def _add_job(self, _client):
        # Run the network checker on a fixed interval, feeding it the
        # next-node provider so each run can target a different peer.
        self.scheduler.add_job(self.network_checker, 'interval',
                               seconds=config.check_interval,
                               args=(self._get_node,))

    def _get_node(self):
        """Return the next peer ip, refilling from serf membership when the
        local list is exhausted (the current node is excluded)."""
        try:
            return self._list_node.pop()
        except IndexError:
            members = self._serf_client.members().body[b'Members']
            addresses = [member[b'Addr'].decode() for member in members]
            addresses.remove(self._snode)
            self._list_node = addresses
            return self._list_node.pop()

    def _get_local_ip(self):
        """Resolve this node's ip: fetch its name via serf ``stats``, then
        look the name up with ``members(name)``."""
        own_name = self._serf_client.stats().body[b'agent'][b'name']
        members = self._serf_client.members(own_name).body[b'Members']
        return members[0][b'Addr'].decode()
示例#2
0
def do_the_work():
    """Register ``some_job`` to fire once, five seconds from now."""
    scheduler = AsyncIOScheduler()
    # A persistent RedisJobStore could be attached here so jobs survive
    # restarts; the in-memory default store is used instead.

    fire_at = datetime.now() + timedelta(seconds=5)
    scheduler.add_job(some_job, 'date', next_run_time=fire_at)

    scheduler.start()
示例#3
0
文件: runnable.py 项目: erasaur/pnu
    def run_interval(self, func=None, update_interval=None):
        """Start an AsyncIOScheduler that calls *func* every interval.

        :param func: callable invoked on each tick (required).
        :param update_interval: seconds between runs; falls back to
            ``self._update_interval`` when omitted.
        :raises ValueError: if neither interval is available, or func is
            missing.
        """
        if update_interval is None and self._update_interval is None:
            raise ValueError("missing interval")

        if func is None:
            raise ValueError("missing func")

        # BUG FIX: the explicit ``update_interval`` argument was validated
        # above but then ignored — ``self._update_interval`` was always used.
        interval = (update_interval if update_interval is not None
                    else self._update_interval)

        scheduler = AsyncIOScheduler()
        scheduler.add_job(
            func,
            'interval',
            seconds=interval
        )
        scheduler.start()
示例#4
0
class AsyncIOJob:
    """Thin wrapper around AsyncIOScheduler.

    Jobs are registered through :meth:`add_job`; the caller is expected to
    run ``asyncio.get_event_loop().run_forever()`` until a
    KeyboardInterrupt/SystemExit event arrives.
    """

    def __init__(self):
        """Create the underlying AsyncIOScheduler and start it right away."""
        self.scheduler = AsyncIOScheduler()
        self.scheduler.start()

    def add_job(self, job, typ, seconds):
        """Register *job*; *typ* may be one of {'date', 'interval', 'cron'}."""
        self.scheduler.add_job(job, typ, seconds=seconds)
示例#5
0
async def init(loop):
    """Build and start the aiohttp application.

    Wires up a shared HTTP session, the background scheduler jobs, the
    MongoDB connection and the routes, then returns the listening server.

    :param loop: asyncio event loop used to create the server.
    """
    # Shared connection pool: very high global limit, 10 sockets per host.
    conn = aiohttp.TCPConnector(limit=10000, limit_per_host=10)  #, ssl=False)
    session = aiohttp.ClientSession(connector=conn)
    # In-process cache shared by the scheduled jobs and request handlers.
    cache = {'height': 0, 'fast': [], 'rpc': [], 'log': []}
    # Collapse missed runs and never overlap instances of the same job.
    scheduler = AsyncIOScheduler(job_defaults={
        'coalesce': True,
        'max_instances': 1,
        'misfire_grace_time': 3
    })
    # Full node scan every 30s; height refresh every 4s.
    scheduler.add_job(scan,
                      'interval',
                      seconds=30,
                      args=[session, cache],
                      id='super_node')
    scheduler.add_job(update_height,
                      'interval',
                      seconds=4,
                      args=[session, cache],
                      id='update_height')
    scheduler.start()
    # NOTE(review): passing ``loop`` to web.Application is a legacy aiohttp
    # pattern — confirm the installed aiohttp version still accepts it.
    app = web.Application(loop=loop,
                          middlewares=[logger_factory, response_factory])
    listen_ip = get_listen_ip()
    listen_port = get_listen_port()
    # Expose shared resources to handlers through the app registry.
    app['session'] = session
    app['cache'] = cache
    app['scheduler'] = scheduler
    mongo_uri = get_mongo_uri()
    mongo_db = get_mongo_db()
    client = motor.motor_asyncio.AsyncIOMotorClient(mongo_uri)
    app['db'] = client[mongo_db]
    add_routes(app, 'handlers')
    srv = await loop.create_server(app.make_handler(), listen_ip, listen_port)
    logging.info('server started at http://%s:%s...' %
                 (listen_ip, listen_port))
    return srv
示例#6
0
def asyncio_schedule():
    """Run a 3-second interval job on an AsyncIOScheduler (python >= 3.4.0)."""
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    try:
        import asyncio
    except ImportError:
        # Fall back to the trollius backport on ancient interpreters.
        import trollius as asyncio

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    sched = AsyncIOScheduler()
    sched.add_job(tick, 'interval', seconds=3)
    sched.start()
    hotkey = 'Break' if os.name == 'nt' else 'C'
    print('Press Ctrl+{0} to exit'.format(hotkey))

    # Block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#7
0
def asyncio_schedule():
    """Print a timestamp every three seconds via AsyncIOScheduler (py >= 3.4)."""
    from apscheduler.schedulers.asyncio import AsyncIOScheduler
    try:
        import asyncio
    except ImportError:
        # trollius provides the asyncio API on pre-3.4 interpreters.
        import trollius as asyncio

    def tick():
        print('Tick! The time is: %s' % datetime.now())

    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # run_forever() only returns when interrupted; swallow the exit signals.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#8
0
class JobManager:
    """Facade over a lazily re-created AsyncIOScheduler."""

    @inject
    def __init__(self):
        self.scheduler = AsyncIOScheduler()

    def add_interval_job(self, callback, interval: float):
        """Run *callback* every *interval* seconds."""
        self.scheduler.add_job(callback, 'interval', seconds=interval)

    def modify_interval_job(self, callback, interval: float):
        """Change an existing interval job's period.

        BUG FIX: ``reschedule_job``'s second positional argument is the job
        *store*, not the trigger — passing ``'interval'`` positionally made
        APScheduler look for a job store named "interval". The trigger must
        be passed by keyword.
        """
        self.scheduler.reschedule_job(callback, trigger='interval',
                                      seconds=interval)

    def run_once(self, callback, args: list):
        """Schedule *callback* to run a single time, as soon as possible."""
        job = self.scheduler.add_job(callback, 'date', args=args)
        return job

    def start(self):
        """Start the scheduler, recreating it if :meth:`close` dropped it."""
        if self.scheduler is None:
            self.scheduler = AsyncIOScheduler()
        self.scheduler.start()

    def close(self):
        """Shut the scheduler down and release the reference."""
        if self.scheduler:
            self.scheduler.shutdown()
            self.scheduler = None
示例#9
0
def run():
    """Kick off ``launcher`` on an hourly cron (per-minute when DEBUG)."""
    duration = dict(hour='*', minute='*/1') if DEBUG else dict(hour='*/1')

    try:
        scheduler = AsyncIOScheduler()
        scheduler.add_job(launcher, 'cron',
                          next_run_time=datetime.now(),
                          year='*', month='*', day='*', week='*', day_of_week='*',
                          **duration)
    except ValueError:
        # Scheduling failed (e.g. local timezone issues) — retry pinned to UTC.
        scheduler = AsyncIOScheduler(timezone="UTC")
        scheduler.add_job(launcher, 'cron',
                          year='*', month='*', day='*', week='*', day_of_week='*',
                          next_run_time=datetime.now(), **duration)
    scheduler.start()
    print('Press Ctrl+C to exit')

    # Block until interrupted.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#10
0
def start():
    """Launch the cron-scheduled events (Moscow time, Mon-Sat)."""
    scheduler = AsyncIOScheduler(timezone='Europe/Moscow')
    # (callable, cron time-of-day kwargs); all jobs share day_of_week='0-5'.
    weekday_jobs = (
        (daily_send_list_tasks, dict(hour=9)),
        (daily_report_create_task, dict(hour=9, minute=30)),
        (daily_send_list_tasks_with_unset_time, dict(hour=9)),
        (daily_send_today_time_tracked_and_activity, dict(hour=20)),
    )
    for func, when in weekday_jobs:
        scheduler.add_job(func, 'cron', day_of_week='0-5', **when)
    scheduler.start()
示例#11
0
def start_scheduler(timed_updater):
    """Register the cron monitors for *timed_updater*, start them, and
    return the running scheduler."""
    scheduler = AsyncIOScheduler()
    print(Fore.LIGHTBLUE_EX + 'Started Chron Monitors')

    # Every minute, on the minute.
    scheduler.add_job(timed_updater.check_stellar_hot_wallet,
                      CronTrigger(second='00'),
                      misfire_grace_time=10, max_instances=20)
    scheduler.add_job(timed_updater.check_expired_roles,
                      CronTrigger(second='00'),
                      misfire_grace_time=10, max_instances=20)

    # Daily at 17:00.
    scheduler.add_job(timed_updater.send_marketing_messages,
                      CronTrigger(hour='17'),
                      misfire_grace_time=10, max_instances=20)

    # Mondays at 01:00:00, with a slightly tighter grace window.
    scheduler.add_job(timed_updater.send_builder_ranks,
                      CronTrigger(day_of_week='mon', hour='01', minute='00', second='00'),
                      misfire_grace_time=7, max_instances=20)

    scheduler.start()
    print(Fore.LIGHTBLUE_EX + 'Started Chron Monitors : DONE')
    return scheduler
示例#12
0
文件: run.py 项目: Negashev/gbir
async def connect_scheduler():
    """Start the UTC interval jobs; periods come from the environment."""
    scheduler = AsyncIOScheduler(timezone="UTC")
    # (callable, env var for the period, default seconds, max instances)
    job_specs = (
        (get_projects, 'GBIR_SECONDS_PROJECTS', 300, 1),
        (get_registry, 'GBIR_SECONDS_REGISTRY', 13, 10),
        (get_tags, 'GBIR_SECONDS_TAGS', 11, 10),
        (delete_tags, 'GBIR_SECONDS_DELETE_TAGS', 60, 1),
    )
    for func, env_var, default, instances in job_specs:
        scheduler.add_job(func,
                          'interval',
                          seconds=int(os.getenv(env_var, default)),
                          max_instances=instances)

    scheduler.start()
示例#13
0
async def connect_scheduler():
    """Schedule the GC jobs (UTC): a cron path refresh plus two interval loops."""
    scheduler = AsyncIOScheduler(timezone="UTC")
    # Registry path refresh, weekly by default (Sunday midnight crontab).
    crontab = os.getenv('DRS3GC_CRON_PATH', '0 0 * * 0')
    scheduler.add_job(add_registry_path,
                      CronTrigger.from_crontab(crontab),
                      max_instances=1)
    # Tag cleanup and bucket scan run continuously on short intervals.
    scheduler.add_job(cleanup_tag,
                      'interval',
                      seconds=int(os.getenv('DRS3GC_SECONDS_CLEANUP', 5)),
                      max_instances=10)
    scheduler.add_job(scan_bucket,
                      'interval',
                      seconds=int(os.getenv('DRS3GC_SECONDS_SCAN', 1)),
                      max_instances=10)
    scheduler.start()
示例#14
0
class Scheduler(abstract.Scheduler):
    """Feeding scheduler backed by an immediately-started AsyncIOScheduler."""

    apscheduler: AsyncIOScheduler

    def __init__(self) -> None:
        super().__init__()
        self.apscheduler = AsyncIOScheduler()
        self.apscheduler.start()
        logger.info("Started scheduler")

    def add_scheduled_feeding(self, feeding_schedule: abstract.Schedule,
                              feeding_callback: Callable) -> time:
        """Register a cron feeding job and return its next run time."""
        job_kwargs = {
            "trigger": "cron",
            "id": feeding_schedule.get_id(),
            "name": "Scheduled Feeding",
            # A missed feeding is still worth running for up to an hour,
            # but multiple missed runs collapse into one.
            "misfire_grace_time": 3600,
            "coalesce": True,
            "max_instances": 1,
        }
        job_kwargs.update(feeding_schedule.get_cron_args())
        job = self.apscheduler.add_job(feeding_callback, **job_kwargs)
        logger.info("Added scheduled feeding: {}", job)
        return job.next_run_time

    def remove_scheduled_feeding(
            self, feeding_schedule: abstract.Schedule) -> Optional[time]:
        """Remove the schedule's job; return its next run time if it existed."""
        job = self.apscheduler.get_job(feeding_schedule.get_id())
        logger.info("Removing scheduled job: {}", job)
        if not job:
            return None
        next_run = job.next_run_time
        job.remove()
        return next_run

    def list_scheduled_feedings(self) -> List[Tuple[str, time]]:
        """All (job id, next run time) pairs, soonest first."""
        feedings = [(job.id, job.next_run_time)
                    for job in self.apscheduler.get_jobs()]
        return sorted(feedings, key=lambda pair: pair[1])
示例#15
0
    def start(self):
        """Schedule every enabled sensor, drive the status LEDs, and block
        running the event loop until interrupted; cleans up on exit."""
        scheduler = AsyncIOScheduler()
        self._status = "Running"
        # One interval job per configured sensor; disabled sensors are
        # only logged, never scheduled.
        for sensor_id in (self.cfg['sensors']).keys():
            sensor_class = self.cfg['sensors'][sensor_id]['class']
            sensor_interval = self.cfg['sensors'][sensor_id]['interval']
            sensor_gpio = self.cfg['sensors'][sensor_id].get('gpio', None)
            sensor_identifier = self.cfg['sensors'][sensor_id].get('identifier', None)
            sensor_keys = self.cfg['sensors'][sensor_id]['keys']
            sensor_enabled = self.cfg['sensors'][sensor_id]['enabled']
            if (sensor_enabled):
                self._log.info('Scheduling: %s (%s)' % (sensor_id, sensor_interval))
                # max_instances=3 tolerates slow samples overlapping; the
                # job id doubles as the sensor id for later removal.
                job = scheduler.add_job(self.sample, 'interval', args = [sensor_id, sensor_class, sensor_keys, sensor_gpio, sensor_identifier], seconds=sensor_interval, max_instances=3, id = sensor_id)
                self._jobs.add(sensor_id)
            else:
                self._log.debug('Sensor disabled: %s (%s)' % (sensor_id, sensor_interval))
        #scheduler.print_jobs()
        self._log.debug(str(self._jobs))
        scheduler.start()
        self._log.info('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

        # All three LEDs on = starting; red goes off once the loop is up.
        self._green_led.on()
        self._yellow_led.on()
        self._red_led.on()
        # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
        try:
            loop = asyncio.get_event_loop()
            self._red_led.off()
            loop.run_forever()
        except (KeyboardInterrupt, SystemExit):
            # On interrupt: stop the loop and unregister every sensor job.
            loop.stop()
            for job_id in self._jobs:
                scheduler.remove_job(job_id=job_id)
        finally:
            # NOTE(review): if get_event_loop() itself raised, ``loop`` is
            # unbound here and close() would NameError — confirm intended.
            loop.close()
            self._green_led.off()
            self._yellow_led.off()
            self._red_led.off()
            self._log.info("Lituyamon shut down cleanly.")
示例#16
0
async def initialize_scheduler(app, loop):
    """Create DB tables, then — in the single process holding the lock file —
    start the scheduler that keeps exchange rates up to date.

    :param app: application instance (unused here; kept for the startup hook
        signature).
    :param loop: event loop (unused here; kept for the startup hook signature).
    """
    # Check that tables exist
    await db.gino.create_all()

    # Schedule exchangerate updates
    try:
        # Advisory lock so only one worker process runs the scheduler;
        # a second process hits BlockingIOError below and skips setup.
        # NOTE(review): the file handle is only referenced by a local —
        # confirm it stays alive (and the lock held) after this coroutine
        # returns.
        _ = open('scheduler.lock', 'w')
        fcntl.lockf(_.fileno(), fcntl.LOCK_EX | fcntl.LOCK_NB)

        scheduler = AsyncIOScheduler()
        scheduler.start()

        # Updates latest 90 days data
        scheduler.add_job(update_rates, 'interval', hours=1)

        # Fill up database with rates: full historic backfill when empty,
        # otherwise a single immediate refresh.
        count = await db.func.count(ExchangeRates.date).gino.scalar()
        if count == 0:
            scheduler.add_job(update_rates, kwargs={'historic': True})
        else:
            scheduler.add_job(update_rates)
    except BlockingIOError:
        # Another process already owns the lock; nothing to do here.
        pass
示例#17
0
class TimerSwitch:
    """Drives switches from DB-defined schedules via an AsyncIOScheduler.

    Every schedule row produces two jobs calling the injected ``method``:
    ``'<id>on'`` (turn the switch on) and ``'<id>off'`` (turn it off).
    """

    # German interval-unit labels stored in the DB, mapped to the
    # corresponding APScheduler interval-trigger keyword.
    UNIT2KEYWORD = {
        'Minuten': 'minutes',
        'Stunden': 'hours',
        'Tage': 'days',
        'Wochen': 'weeks'
    }
    # Cron day-of-week abbreviation paired with its DB column name.
    WEEK2WEEK = [
        ('mon', 'weekly_monday'),
        ('tue', 'weekly_tuesday'),
        ('wed', 'weekly_wednesday'),
        ('thu', 'weekly_thursday'),
        ('fri', 'weekly_friday'),
        ('sat', 'weekly_saturday'),
        ('sun', 'weekly_sunday'),
    ]

    def __init__(self, logging_daemon, db_connection, method):
        """
        :param logging_daemon: logger exposing ``info``/``debug``
        :param db_connection: DB handle exposing ``query(sql, *params)``
        :param method: callable invoked by every scheduled on/off job
        """
        self._scheduler = AsyncIOScheduler()
        self._method = method
        self._logging_daemon = logging_daemon
        self._db_connection = db_connection
        self._scheduler.start()
        self.load()
        self._logging_daemon.info('TimerSwitch ... initialisiert')

    def load(self, scheduler_id=None):
        """Load schedule rows — all of them, or a single one when an int
        ``scheduler_id`` is given — and register their jobs via ``_add``."""
        sql = """SELECT
              schedulers.id AS scheduler_id,
              schedulers.title AS scheduler_title,
              schedulers.date_start_on,
              schedulers.date_start_off,
              schedulers.date_stop,
              schedulers.date_stop_on,
              schedulers.date_stop_off,
              schedulers.duration,
              schedulers.interval_number,
              schedulers.interval_unit,
              schedulers.weekly_monday,
              schedulers.weekly_tuesday,
              schedulers.weekly_wednesday,
              schedulers.weekly_thursday,
              schedulers.weekly_friday,
              schedulers.weekly_saturday,
              schedulers.weekly_sunday,
              switches.id AS switches_id,
              switches.title AS switches_title,
              switches.argA,
              switches.argB,
              switches.argC,
              switches.argD,
              switch_types.title AS switches_typ,
              clients.ip AS switches_ip
              FROM schedulers, switches, switch_types, clients
              WHERE schedulers.switches_id = switches.id
              AND switches.switch_types_id = switch_types.id
              AND switches.clients_id = clients.id"""
        if isinstance(scheduler_id, int):
            sql += " AND schedulers.id = %s"
            db_result = self._db_connection.query(sql, scheduler_id)
        else:
            db_result = self._db_connection.query(sql)
        results = db_result.fetchall()
        for result in results:
            self._add(result)

    def _add(self, dataset):
        """Translate one DB row into an on-job and an off-job.

        ``duration`` selects the trigger type: 'einmalig' (one-shot date),
        'intervall' (interval), or 'wochentag' (weekly cron).
        """
        scheduler_id = dataset['scheduler_id']
        title = dataset['scheduler_title']
        date_start_on = dataset['date_start_on']
        date_start_off = dataset['date_start_off']
        date_stop_on = dataset['date_stop_on']
        date_stop_off = dataset['date_stop_off']
        duration = dataset['duration']

        # Comma-joined cron day_of_week string from the boolean columns.
        week = ','.join(
            abr for abr, full in self.WEEK2WEEK
            if dataset[full]
        )

        # NOTE(review): an unrecognised ``duration`` value would leave
        # scheduler_type/args_on/args_off unbound and raise NameError below —
        # confirm the DB constrains this column to the three values.
        if duration == 'einmalig':
            scheduler_type = 'date'
            args_on = dict(run_date=date_start_on)
            args_off = dict(run_date=date_start_off)
            date_stop_off = date_start_off
        elif duration == 'intervall':
            scheduler_type = 'interval'
            interval_argument = {self.UNIT2KEYWORD[dataset['interval_unit']]: dataset['interval_number']}
            args_on = dict(interval_argument, start_date=date_start_on, end_date=date_stop_on)
            args_off = dict(interval_argument, start_date=date_start_off, end_date=date_stop_off)
        elif duration == 'wochentag':
            scheduler_type = 'cron'
            args_on = dict(
                day_of_week=week, hour=date_start_on.hour, minute=date_start_on.minute,
                start_date=date_start_on, end_date=date_stop_on)
            args_off = dict(
                day_of_week=week, hour=date_start_off.hour, minute=date_start_off.minute,
                start_date=date_start_off, end_date=date_stop_off)

        # Job args: (schedule id, switch id, switch ip, new state, stop time).
        self._scheduler.add_job(self._method, scheduler_type,
                                args=[scheduler_id, dataset['switches_id'], dataset['switches_ip'], True, None],
                                id='%son' % scheduler_id, **args_on)
        self._scheduler.add_job(self._method, scheduler_type,
                                args=[scheduler_id, dataset['switches_id'], dataset['switches_ip'], False,
                                      date_stop_off], id='%soff' % scheduler_id, **args_off)

        self._logging_daemon.info('Timerswitch ... add_job "%s" (id = %s)' % (title, scheduler_id))
        # NOTE(review): this debug line labels the id "%soff" but formats
        # args_on — log-only inconsistency, behavior unaffected.
        self._logging_daemon.debug(
            'Timerswitch ... self._scheduler.add_job(%s, %s, args=[%s, %s, True, None], id=%soff, %s' % (
                self._method, scheduler_type, title, scheduler_id, scheduler_id, args_on))

    def reload(self, scheduler_id):
        """Drop and re-register both jobs for one schedule id."""
        self.delete_job(scheduler_id)
        self.load(int(scheduler_id))
        self._logging_daemon.info('Timerswitch ... Reload       ID = %s' % scheduler_id)

    def delete_job(self, scheduler_id):
        """Remove the '<id>on' and '<id>off' jobs from the scheduler."""
        self._scheduler.remove_job(str(scheduler_id) + 'on')
        self._scheduler.remove_job(str(scheduler_id) + 'off')
        self._logging_daemon.info('Timerswitch ... Delete Job   ID = %s' % scheduler_id)

    def delete_db(self, scheduler_id):
        """Delete the schedule row itself (only for int ids)."""
        if isinstance(scheduler_id, int):
            scheduler_id = int(scheduler_id)
            self._db_connection.query("DELETE FROM schedulers WHERE id = %s", scheduler_id)
            self._logging_daemon.info('Timerswitch ... Delete DB    ID = %s' % scheduler_id)

    def restart(self):
        """Shut the scheduler down, build a fresh one, and reload all jobs."""
        self._logging_daemon.info('TimerSwitch ... stopping for restart')
        self._scheduler.shutdown(0)
        self._scheduler = AsyncIOScheduler()
        self._scheduler.start()
        self.load()
        self._logging_daemon.info('TimerSwitch ... neu initialisiert')
示例#18
0
class AlamoScheduler(object):
    """Schedules health checks and publishes due ones to a ZeroMQ queue,
    exposing a small HTTP API (``checks``/``update``) to inspect and
    modify the scheduled set."""

    # Shared defaults; instances assign real values in __init__/setup.
    message_queue = None
    loop = handler = None

    def __init__(self, loop=None):
        """Create the APScheduler, optionally bound to an explicit *loop*."""
        kw = dict()
        if loop:
            kw['event_loop'] = loop

        self.scheduler = AsyncIOScheduler(**kw)

    def setup(self, loop=None):
        """Bind the event loop, connect the ZeroMQ queue and subscribe to
        scheduler error/missed/max-instances events."""
        if loop is None:
            loop = asyncio.get_event_loop()
            asyncio.set_event_loop(loop)
        self.loop = loop
        self.message_queue = ZeroMQQueue(
            settings.ZERO_MQ_HOST,
            settings.ZERO_MQ_PORT
        )
        self.message_queue.connect()
        self.scheduler.add_listener(
            self.event_listener,
            EVENT_JOB_ERROR | EVENT_JOB_MISSED | EVENT_JOB_MAX_INSTANCES
        )

    @aiostats.increment()
    def _schedule_check(self, check):
        """Schedule check: stamp it with the current UTC time and push it
        onto the message queue. Runs as the scheduled job body."""
        logger.info(
            'Check `%s:%s` scheduled!', check['uuid'], check['name']
        )

        check['scheduled_time'] = datetime.now(tz=pytz_utc).isoformat()
        self.message_queue.send(check)

    def remove_job(self, job_id):
        """Remove job; a missing job id is silently ignored."""
        try:
            logger.info('Removing job for check id=`%s`', job_id)
            self.scheduler.remove_job(str(job_id))
        except JobLookupError:
            pass

    def schedule_check(self, check):
        """Schedule check with proper interval based on `frequency`.

        The first run is jittered by a random 0..frequency offset so checks
        created together do not all fire at once.

        :param dict check: Check definition
        """
        try:
            frequency = check['fields']['frequency'] = int(
                check['fields']['frequency']
            )
            logger.info(
                'Scheduling check `%s` with id `%s` and interval `%s`',
                check['name'], check['id'], frequency
            )
            jitter = random.randint(0, frequency)
            first_run = datetime.now() + timedelta(seconds=jitter)
            kw = dict(
                seconds=frequency,
                id=str(check['uuid']),
                next_run_time=first_run,
                args=(check,)
            )
            self.schedule_job(self._schedule_check, **kw)

        except KeyError as e:
            # Malformed check payloads are logged, never fatal.
            logger.exception('Failed to schedule check: %s. Exception: %s',
                             check, e)

    def schedule_job(self, method, **kwargs):
        """Add new job to scheduler.

        :param method: reference to method that should be scheduled
        :param kwargs: additional kwargs passed to `add_job` method
        """
        try:
            self.scheduler.add_job(
                method, 'interval',
                misfire_grace_time=settings.JOBS_MISFIRE_GRACE_TIME,
                max_instances=settings.JOBS_MAX_INSTANCES,
                coalesce=settings.JOBS_COALESCE,
                **kwargs
            )
        except ConflictingIdError as e:
            # Duplicate job id: keep the existing job, just log it.
            logger.error(e)

    def event_listener(self, event):
        """React on events from scheduler.

        Each branch bumps a metric counter and logs at a matching level.

        :param apscheduler.events.JobExecutionEvent event: job execution event
        """
        if event.code == EVENT_JOB_MISSED:
            aiostats.increment.incr('job.missed')
            logger.warning("Job %s scheduler for %s missed.", event.job_id,
                           event.scheduled_run_time)
        elif event.code == EVENT_JOB_ERROR:
            aiostats.increment.incr('job.error')
            logger.error("Job %s scheduled for %s failed. Exc: %s",
                         event.job_id,
                         event.scheduled_run_time,
                         event.exception)
        elif event.code == EVENT_JOB_MAX_INSTANCES:
            aiostats.increment.incr('job.max_instances')
            logger.warning(
                'Job `%s` could not be submitted. '
                'Maximum number of running instances was reached.',
                event.job_id
            )

    @aiostats.increment()
    def get_jobs(self):
        """Return the ids of all currently scheduled jobs."""
        return [job.id for job in self.scheduler.get_jobs()]

    async def checks(self, request=None):
        """HTTP GET handler: list all job ids, or return the check payload
        for a single `uuid` (404 when unknown)."""

        uuid = request.match_info.get('uuid', None)
        if uuid is None:
            jobs = self.get_jobs()
            return json_response(data=dict(count=len(jobs), results=jobs))
        job = self.scheduler.get_job(uuid)
        if job is None:
            return json_response(
                data={'detail': 'Check does not exists.'}, status=404
            )

        # Each job carries exactly one positional arg: the check dict.
        check, = job.args
        return json_response(data=check)

    @aiostats.timer()
    async def update(self, request=None):
        """HTTP handler: upsert a check.

        Skips checks not owned by this scheduler shard, drops stale updates
        (older timestamp than the scheduled copy), then re-schedules when at
        least one trigger is enabled.
        """
        check = await request.json()
        check_uuid = check.get('uuid')
        check_id = check.get('id')

        message = dict(status='ok')

        if not check_id or not check_uuid:
            return json_response(status=400)

        # Shard ownership: only this scheduler's slice of check ids.
        if check_id % settings.SCHEDULER_COUNT != settings.SCHEDULER_NR:
            return json_response(data=message, status=202)

        job = self.scheduler.get_job(str(check_uuid))

        if job:
            scheduled_check, = job.args
            timestamp = scheduled_check.get('timestamp', 0)

            # Ignore updates older than what is already scheduled.
            if timestamp > check['timestamp']:
                return json_response(data=message, status=202)
            message = dict(status='deleted')
            self.remove_job(check_uuid)

        if any([trigger['enabled'] for trigger in check['triggers']]):
            self.schedule_check(check)
            message = dict(status='scheduled')

        return json_response(data=message, status=202)

    def wait_and_kill(self, sig):
        """Signal handler: shut the scheduler down and stop the loop."""
        logger.warning('Got `%s` signal. Preparing scheduler to exit ...', sig)
        self.scheduler.shutdown()
        self.loop.stop()

    def register_exit_signals(self):
        """Install wait_and_kill for SIGQUIT/SIGINT/SIGTERM on the loop."""
        for sig in ['SIGQUIT', 'SIGINT', 'SIGTERM']:
            logger.info('Registering handler for `%s` signal '
                        'in current event loop ...', sig)
            self.loop.add_signal_handler(
                getattr(signal, sig),
                self.wait_and_kill, sig
            )

    def start(self, loop=None):
        """Start scheduler and block running the event loop until exit."""
        self.setup(loop=loop)
        self.register_exit_signals()
        self.scheduler.start()

        logger.info(
            'Press Ctrl+%s to exit.', 'Break' if os.name == 'nt' else 'C'
        )
        try:
            self.loop.run_forever()
        except KeyboardInterrupt:
            pass
        logger.info('Scheduler was stopped!')
示例#19
0
        str(config['restart_after']),
        str(config['container_uid'])
    ])


if __name__ == '__main__':

    # Large thread/process pools so many container jobs can run in parallel.
    scheduler = AsyncIOScheduler()
    scheduler.configure({
        'apscheduler.executors.default': {
            'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
            'max_workers': '1000'
        },
        'apscheduler.executors.processpool': {
            'type': 'processpool',
            'max_workers': '1000'
        },
        'apscheduler.timezone': 'UTC',
    })

    # Run the monitor immediately (trigger=None => one-shot, right away);
    # the scheduler itself is passed so the monitor can add further jobs.
    scheduler.add_job(run_monitor, None, [scheduler])

    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#20
0
    schedfortest = BlockingScheduler()

    trigger_test = OrTrigger([CronTrigger(minute='*/1')])

    schedfortest.add_job(callbacktotal,
                         trigger_test,
                         minute='*/2',
                         max_instances=10)

    schedfortest.start()

    scheduler = AsyncIOScheduler()
    scheduler.add_job(day_limits,
                      'cron',
                      hour=7,
                      misfire_grace_time=3600,
                      timezone='GB')
    scheduler.add_job(night_limits,
                      'cron',
                      hour=19,
                      minute=32,
                      misfire_grace_time=3600,
                      timezone='GB')
    scheduler.start()

    scheduler.print_jobs()

    executor = ProcessPoolExecutor(1)
    loop = asyncio.get_event_loop()
    baa = asyncio. async (loop.run_in_executor(
示例#21
0
            print("Code is done running. switching back to background mode")
            #runCode = True
            return
        return

    else:
        print("uploaded over 7 hours ago")
        #will retry in an hour
        #runCode = True
        return


# Run recentMediaInfo every hour via a restarting scheduler loop.
while True:
    scheduler = AsyncIOScheduler()
    scheduler.add_job(recentMediaInfo, 'interval', minutes=60, id='myJobID')
    scheduler.start()

    try:
        # Blocks until the loop is stopped or interrupted.
        asyncio.get_event_loop().run_forever()
        #loop = asyncio.get_event_loop()

        #loop = asyncio.ensure_future()
        #loop.run_until_complete(recentMediaInfo())

        # NOTE(review): ``loop`` is never assigned (the assignment above is
        # commented out) — if run_forever() ever returns normally, this line
        # raises NameError. Confirm whether this path is reachable.
        loop.close()
        scheduler.remove_job('myJobID')

    except (KeyboardInterrupt, SystemExit):
        pass
示例#22
0
async def website_check():
    """Probe each configured site; report failures to Telegram and the log.

    NOTE: uses blocking ``requests`` calls inside a coroutine, mirroring the
    original behavior.
    """
    for site in website_list:
        url = site["url"]
        resp = None
        message = ""
        try:
            resp = requests.get(url)
        except Exception as e:
            message = f"{url} request error: \n{e}\n"

        if getattr(resp, "status_code", 0) != 200:
            content = getattr(resp, "content", None)
            message += f"{url} content error: \n{content}\n"

        if not message:
            logging.info("%s OK: %s bytes.", url, len(resp.content))
            continue

        # Any problem: notify via the Telegram bot API and log it.
        api = f"https://api.telegram.org/bot{bot_token}/sendMessage?chat_id=260260121&text={message}"
        requests.get(api).json()
        logging.error(message)


if __name__ == '__main__':
    # Background health/site monitors, then hand control to the bot client.
    scheduler = AsyncIOScheduler()
    scheduler.add_job(send_health_check, 'interval', seconds=300)
    scheduler.add_job(website_check, 'interval', seconds=60)
    scheduler.start()
    client.start()
    client.run_until_disconnected()
示例#23
0
class Scheduler:
    """Facade over APScheduler's AsyncIOScheduler that keeps schedules
    persisted in the project DB and mirrored into the in-memory scheduler.

    Job identifiers inside APScheduler are derived from (project, name),
    joined by ``_job_id_separator``.
    """

    def __init__(self):
        self._scheduler = AsyncIOScheduler()
        # this should be something that does not make any sense to be inside project name or job name
        self._job_id_separator = "-_-"
        # we don't allow to schedule a job to run more then one time per X
        # NOTE this cannot be less then one minute - see _validate_cron_trigger
        self._min_allowed_interval = config.httpdb.scheduling.min_allowed_interval

    async def start(self, db_session: Session):
        """Start the scheduler and re-create jobs for all persisted schedules."""
        logger.info("Starting scheduler")
        self._scheduler.start()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep make them work
        await asyncio.sleep(0)

        # don't fail the start on re-scheduling failure
        try:
            self._reload_schedules(db_session)
        except Exception as exc:
            logger.warning("Failed reloading schedules", exc=exc)

    async def stop(self):
        """Shut the underlying APScheduler down."""
        logger.info("Stopping scheduler")
        self._scheduler.shutdown()
        # the scheduler shutdown and start operation are not fully async compatible yet -
        # https://github.com/agronholm/apscheduler/issues/360 - this sleep make them work
        await asyncio.sleep(0)

    def create_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Union[Dict, Callable],
        cron_trigger: Union[str, schemas.ScheduleCronTrigger],
        labels: Dict = None,
        concurrency_limit: int = config.httpdb.scheduling.
        default_concurrency_limit,
    ):
        """Validate, persist and register a new schedule.

        Persists first (DB is the source of truth), then adds the job to the
        in-memory scheduler. A crontab string is accepted for cron_trigger.
        """
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        # rejects triggers firing more often than _min_allowed_interval
        self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Creating schedule",
            project=project,
            name=name,
            kind=kind,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_project_member().ensure_project(db_session, project)
        get_db().create_schedule(
            db_session,
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
            labels,
        )
        self._create_schedule_in_scheduler(
            project,
            name,
            kind,
            scheduled_object,
            cron_trigger,
            concurrency_limit,
        )

    def update_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        scheduled_object: Union[Dict, Callable] = None,
        cron_trigger: Union[str, schemas.ScheduleCronTrigger] = None,
        labels: Dict = None,
        concurrency_limit: int = None,
    ):
        """Update a persisted schedule and re-sync the in-memory job.

        None-valued parameters are treated as "leave unchanged"; the job is
        rebuilt from the enriched DB record after the update.
        """
        if isinstance(cron_trigger, str):
            cron_trigger = schemas.ScheduleCronTrigger.from_crontab(
                cron_trigger)

        if cron_trigger is not None:
            self._validate_cron_trigger(cron_trigger)

        logger.debug(
            "Updating schedule",
            project=project,
            name=name,
            scheduled_object=scheduled_object,
            cron_trigger=cron_trigger,
            labels=labels,
            concurrency_limit=concurrency_limit,
        )
        get_db().update_schedule(
            db_session,
            project,
            name,
            scheduled_object,
            cron_trigger,
            labels,
            concurrency_limit,
        )
        # re-read so the scheduler job reflects exactly what was persisted
        db_schedule = get_db().get_schedule(db_session, project, name)
        updated_schedule = self._transform_and_enrich_db_schedule(
            db_session, db_schedule)

        self._update_schedule_in_scheduler(
            project,
            name,
            updated_schedule.kind,
            updated_schedule.scheduled_object,
            updated_schedule.cron_trigger,
            updated_schedule.concurrency_limit,
        )

    def list_schedules(
        self,
        db_session: Session,
        project: str = None,
        name: str = None,
        kind: str = None,
        labels: str = None,
        include_last_run: bool = False,
    ) -> schemas.SchedulesOutput:
        """Return all matching schedules, enriched with next (and optionally
        last) run information."""
        logger.debug("Getting schedules",
                     project=project,
                     name=name,
                     labels=labels,
                     kind=kind)
        db_schedules = get_db().list_schedules(db_session, project, name,
                                               labels, kind)
        schedules = []
        for db_schedule in db_schedules:
            schedule = self._transform_and_enrich_db_schedule(
                db_session, db_schedule, include_last_run)
            schedules.append(schedule)
        return schemas.SchedulesOutput(schedules=schedules)

    def get_schedule(
        self,
        db_session: Session,
        project: str,
        name: str,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        """Return a single enriched schedule by project and name."""
        logger.debug("Getting schedule", project=project, name=name)
        db_schedule = get_db().get_schedule(db_session, project, name)
        return self._transform_and_enrich_db_schedule(db_session, db_schedule,
                                                      include_last_run)

    def delete_schedule(self, db_session: Session, project: str, name: str):
        """Remove a schedule from both the in-memory scheduler and the DB."""
        logger.debug("Deleting schedule", project=project, name=name)
        job_id = self._resolve_job_id(project, name)
        # don't fail on delete if job doesn't exist
        job = self._scheduler.get_job(job_id)
        if job:
            self._scheduler.remove_job(job_id)
        get_db().delete_schedule(db_session, project, name)

    async def invoke_schedule(self, db_session: Session, project: str,
                              name: str):
        """Run a schedule's function immediately (outside its trigger)."""
        logger.debug("Invoking schedule", project=project, name=name)
        # DB access is sync; run it in a threadpool to keep the loop free
        db_schedule = await fastapi.concurrency.run_in_threadpool(
            get_db().get_schedule, db_session, project, name)
        function, args, kwargs = self._resolve_job_function(
            db_schedule.kind,
            db_schedule.scheduled_object,
            project,
            name,
            db_schedule.concurrency_limit,
        )
        return await function(*args, **kwargs)

    def _validate_cron_trigger(
        self,
        cron_trigger: schemas.ScheduleCronTrigger,
        # accepting now from outside for testing purposes
        now: datetime = None,
    ):
        """
        Enforce no more then one job per min_allowed_interval
        """
        logger.debug("Validating cron trigger")
        apscheduler_cron_trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = now or datetime.now(apscheduler_cron_trigger.timezone)
        next_run_time = None
        second_next_run_time = now

        # doing 60 checks to allow one minute precision, if the _min_allowed_interval is less then one minute validation
        # won't fail in certain scenarios that it should. See test_validate_cron_trigger_multi_checks for detailed
        # explanation
        for index in range(60):
            next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                None, second_next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=1999
            if next_run_time is None:
                return
            second_next_run_time = apscheduler_cron_trigger.get_next_fire_time(
                next_run_time, next_run_time)
            # will be none if we got a schedule that has no next fire time - for example schedule with year=2050
            if second_next_run_time is None:
                return
            min_allowed_interval_seconds = humanfriendly.parse_timespan(
                self._min_allowed_interval)
            if second_next_run_time < next_run_time + timedelta(
                    seconds=min_allowed_interval_seconds):
                logger.warn(
                    "Cron trigger too frequent. Rejecting",
                    cron_trigger=cron_trigger,
                    next_run_time=next_run_time,
                    second_next_run_time=second_next_run_time,
                    delta=second_next_run_time - next_run_time,
                )
                raise ValueError(
                    f"Cron trigger too frequent. no more then one job "
                    f"per {self._min_allowed_interval} is allowed")

    def _create_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        """Register the schedule as an APScheduler job (in-memory only)."""
        job_id = self._resolve_job_id(project, name)
        logger.debug("Adding schedule to scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )

        # we use max_instances as well as our logic in the run wrapper for concurrent jobs
        # in order to allow concurrency for triggering the jobs (max_instances), and concurrency
        # of the jobs themselves (our logic in the run wrapper).
        self._scheduler.add_job(
            function,
            self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
                cron_trigger),
            args,
            kwargs,
            job_id,
            max_instances=concurrency_limit,
        )

    def _update_schedule_in_scheduler(
        self,
        project: str,
        name: str,
        kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        cron_trigger: schemas.ScheduleCronTrigger,
        concurrency_limit: int,
    ):
        """Modify the existing APScheduler job in place, recomputing its
        next fire time from the (possibly new) trigger."""
        job_id = self._resolve_job_id(project, name)
        logger.debug("Updating schedule in scheduler", job_id=job_id)
        function, args, kwargs = self._resolve_job_function(
            kind,
            scheduled_object,
            project,
            name,
            concurrency_limit,
        )
        trigger = self.transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
            cron_trigger)
        now = datetime.now(self._scheduler.timezone)
        next_run_time = trigger.get_next_fire_time(None, now)
        self._scheduler.modify_job(
            job_id,
            func=function,
            args=args,
            kwargs=kwargs,
            trigger=trigger,
            next_run_time=next_run_time,
        )

    def _reload_schedules(self, db_session: Session):
        """Re-register every persisted schedule (used on process start)."""
        logger.info("Reloading schedules")
        db_schedules = get_db().list_schedules(db_session)
        for db_schedule in db_schedules:
            # don't let one failure fail the rest
            try:
                self._create_schedule_in_scheduler(
                    db_schedule.project,
                    db_schedule.name,
                    db_schedule.kind,
                    db_schedule.scheduled_object,
                    db_schedule.cron_trigger,
                    db_schedule.concurrency_limit,
                )
            except Exception as exc:
                logger.warn(
                    "Failed rescheduling job. Continuing",
                    exc=str(exc),
                    db_schedule=db_schedule,
                )

    def _transform_and_enrich_db_schedule(
        self,
        db_session: Session,
        schedule_record: schemas.ScheduleRecord,
        include_last_run: bool = False,
    ) -> schemas.ScheduleOutput:
        """Convert a DB record to the output schema, flattening labels and
        attaching the live next_run_time (and optionally last-run data)."""
        schedule_dict = schedule_record.dict()
        # labels are stored as a list of {name, value} records - flatten to a dict
        schedule_dict["labels"] = {
            label["name"]: label["value"]
            for label in schedule_dict["labels"]
        }
        schedule = schemas.ScheduleOutput(**schedule_dict)

        job_id = self._resolve_job_id(schedule_record.project,
                                      schedule_record.name)
        job = self._scheduler.get_job(job_id)
        if job:
            schedule.next_run_time = job.next_run_time

        if include_last_run:
            schedule = self._enrich_schedule_with_last_run(
                db_session, schedule)

        return schedule

    @staticmethod
    def _enrich_schedule_with_last_run(
            db_session: Session, schedule_output: schemas.ScheduleOutput):
        """Attach the run record referenced by last_run_uri, if any."""
        if schedule_output.last_run_uri:
            run_project, run_uid, iteration, _ = RunObject.parse_uri(
                schedule_output.last_run_uri)
            run_data = get_db().read_run(db_session, run_uid, run_project,
                                         iteration)
            schedule_output.last_run = run_data
        return schedule_output

    def _resolve_job_function(
        self,
        scheduled_kind: schemas.ScheduleKinds,
        scheduled_object: Any,
        project_name: str,
        schedule_name: str,
        schedule_concurrency_limit: int,
    ) -> Tuple[Callable, Optional[Union[List, Tuple]], Optional[Dict]]:
        """
        :return: a tuple (function, args, kwargs) to be used with the APScheduler.add_job
        """

        if scheduled_kind == schemas.ScheduleKinds.job:
            # deep copy so per-run mutations in the wrapper don't leak back
            scheduled_object_copy = copy.deepcopy(scheduled_object)
            return (
                Scheduler.submit_run_wrapper,
                [
                    scheduled_object_copy,
                    project_name,
                    schedule_name,
                    schedule_concurrency_limit,
                ],
                {},
            )
        if scheduled_kind == schemas.ScheduleKinds.local_function:
            return scheduled_object, [], {}

        # sanity
        message = "Scheduled object kind missing implementation"
        logger.warn(message, scheduled_object_kind=scheduled_kind)
        raise NotImplementedError(message)

    def _resolve_job_id(self, project, name) -> str:
        """
        :return: returns the identifier that will be used inside the APScheduler
        """
        return self._job_id_separator.join([project, name])

    @staticmethod
    async def submit_run_wrapper(scheduled_object, project_name, schedule_name,
                                 schedule_concurrency_limit):
        """Submit one scheduled run, honouring the concurrency limit.

        Skips the run (with a warning) when the number of non-terminal runs
        labelled with this schedule already meets the limit.
        """
        # import here to avoid circular imports
        from mlrun.api.api.utils import submit_run

        # removing the schedule from the body otherwise when the scheduler will submit this task it will go to an
        # endless scheduling loop
        scheduled_object.pop("schedule", None)

        # removing the uid from the task metadata so that a new uid will be generated for every run
        # otherwise all runs will have the same uid
        scheduled_object.get("task", {}).get("metadata", {}).pop("uid", None)

        if "task" in scheduled_object and "metadata" in scheduled_object[
                "task"]:
            # label the run so active runs of this schedule can be counted below
            scheduled_object["task"]["metadata"].setdefault("labels", {})
            scheduled_object["task"]["metadata"]["labels"][
                schemas.constants.LabelNames.schedule_name] = schedule_name

        db_session = create_session()

        active_runs = get_db().list_runs(
            db_session,
            state=RunStates.non_terminal_states(),
            project=project_name,
            labels=
            f"{schemas.constants.LabelNames.schedule_name}={schedule_name}",
        )
        if len(active_runs) >= schedule_concurrency_limit:
            logger.warn(
                "Schedule exceeded concurrency limit, skipping this run",
                project=project_name,
                schedule_name=schedule_name,
                schedule_concurrency_limit=schedule_concurrency_limit,
                active_runs=len(active_runs),
            )
            return

        response = await submit_run(db_session, scheduled_object)

        # record the submitted run's uri as the schedule's last run
        run_metadata = response["data"]["metadata"]
        run_uri = RunObject.create_uri(run_metadata["project"],
                                       run_metadata["uid"],
                                       run_metadata["iteration"])
        get_db().update_schedule(
            db_session,
            run_metadata["project"],
            schedule_name,
            last_run_uri=run_uri,
        )

        close_session(db_session)

        return response

    @staticmethod
    def transform_schemas_cron_trigger_to_apscheduler_cron_trigger(
        cron_trigger: schemas.ScheduleCronTrigger, ):
        """Map the project's cron-trigger schema onto APScheduler's
        CronTrigger, field by field."""
        return APSchedulerCronTrigger(
            cron_trigger.year,
            cron_trigger.month,
            cron_trigger.day,
            cron_trigger.week,
            cron_trigger.day_of_week,
            cron_trigger.hour,
            cron_trigger.minute,
            cron_trigger.second,
            cron_trigger.start_date,
            cron_trigger.end_date,
            cron_trigger.timezone,
            cron_trigger.jitter,
        )
示例#24
0
    ''' Update the data for a city. '''
    # Get the current formatted data for a city
    try:
        stations = eval(provider).stations(city)
    except:
        return
    # Update the database if the city can be predicted
    if predict == 'Yes':
        insert.city(city, stations)
    # Save the data for the map
    geojson = tb.json_to_geojson(stations)
    tb.write_json(geojson, '{0}/{1}.geojson'.format(geojsonFolder, city))
    # Refresh the latest update time
    updates = tb.read_json('{}/updates.json'.format(informationFolder))
    updates[city] = datetime.now().isoformat()
    tb.write_json(updates, '{}/updates.json'.format(informationFolder))

if __name__ == '__main__':
    # Prime every city once, then keep its data fresh on a fixed interval.
    scheduler = AsyncIOScheduler()
    for provider, cities in providers.items():
        for city in cities:
            job_args = [provider, city, predictions[city]]
            update(*job_args)
            scheduler.add_job(update, 'interval', seconds=refresh,
                              args=job_args, coalesce=True,
                              misfire_grace_time=refresh)
    scheduler.start()
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#25
0
# Per-city flags loaded from disk; presumably "Yes"/"No" per city (the main
# block below checks `predictions[city] == 'Yes'`) — confirm against the file.
predictions = tb.read_json('{}/predictions.json'.format(informationFolder))


def learn(city, station):
    """Fit availability regressors for a single station.

    Pulls the last ``timespan`` days of observations, prepares the
    dataframe, and fits one model for 'bikes' and one for 'spaces'.
    Silently skips the station if the history query fails.
    """
    # Get data from the past `timespan` days
    threshold = datetime.datetime.now() - datetime.timedelta(days=timespan)
    try:
        dataframe = query.station(city, station, threshold)
    except Exception:
        # BUG FIX: a bare `except:` also swallowed KeyboardInterrupt /
        # SystemExit, making the batch loop impossible to interrupt here.
        return
    # Prepare the dataframe for learning
    dataframe = munging.prepare(dataframe)
    # Apply the regressor that is chosen in the settings
    method.fit(dataframe, 'bikes', city, station)
    method.fit(dataframe, 'spaces', city, station)

if __name__ == '__main__':
    # Train every predictable station once, then re-train every `refresh` days.
    scheduler = AsyncIOScheduler()
    for city in stationsFile:
        if predictions[city] != 'Yes':
            continue
        for station in stationsFile[city]:
            learn(city, station)
            scheduler.add_job(learn, 'interval', days=refresh,
                              args=[city, station], coalesce=True,
                              misfire_grace_time=60 * 60 * 24 * refresh)
    scheduler.start()
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#26
0
class Reminder:
    """Stores events and creates reminder jobs for them.

    Events are persisted in the DB and mirrored as date-triggered jobs on
    an AsyncIOScheduler. ``action`` (a callable) and ``reminders`` (a list
    of {'delta', 'message'} dicts) must be assigned before events are added.
    """
    def __init__(self, secret):
        # Database handle for persisting events
        self.db = DB(secret)

        # Start the async scheduler immediately
        self.sched = AsyncIOScheduler()
        self.sched.start()

    @property
    def action(self):
        # Callable invoked when a reminder fires
        return self._action

    @action.setter
    def action(self, value):
        if not callable(value):
            raise ValueError("The value must be a function")
        self._action = value

    @property
    def reminders(self):
        # List of reminder offsets/messages applied to every event
        return self._reminders

    @reminders.setter
    def reminders(self, value):
        if not isinstance(value, list):
            raise ValueError("The value must be a list")
        self._reminders = value

    # Public API

    async def add(self, date, time, time_zone, channel, text, author):
        """Add a new event and create its reminder jobs.

        Returns the created DB record, ``[]`` when the event is in the
        past, or ``None`` when the date/time cannot be parsed or the DB
        write fails.
        """
        try:
            date_time = datetime.fromisoformat(f"{date}T{time}{time_zone}")
            date_time_now = datetime.utcnow().replace(tzinfo=timezone.utc)

            # Reject events whose date is already in the past
            if date_time < date_time_now:
                return []

            event = self._generate_event(date_time, channel, text, author)
            jobs_id = self._create_jobs(event)

            # Persist the event in the database
            data = {
                "author": event['author'],
                "text": event['text'],
                "time": self.db.q.time(event['time'].isoformat()),
                "channel": event['channel'],
                "jobs": jobs_id
            }

            return self.db.create("Events", data)
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit. Invalid date formats (and DB
            # errors) still yield None, preserving the original contract.
            return None

    async def load(self):
        """Load events from the database.

        Used on program start: reads the persisted events, registers their
        jobs on the scheduler, and updates the database with the newly
        created job ids.
        """
        docs = self.db.get_all("all_events")
        new_docs = []
        for doc in docs['data']:
            event = {
                "text":
                doc['data']['text'],
                "time":
                datetime.fromisoformat(
                    f"{doc['data']['time'].value[:-1]}+00:00"),
                "channel":
                doc['data']['channel'],
                "reminders":
                self.reminders
            }

            # Create the jobs for this event
            jobs_id = self._create_jobs(event)
            new_docs.append((doc['ref'].id(), {"jobs": jobs_id}))

        # Update the database with the new job ids
        return self.db.update_all_jobs("Events", new_docs)

    async def list(self):
        """List all scheduled events."""
        # events = self.db.get_by_author("events_by_author", author)
        events = self.db.get_all("all_events")
        return events['data']

    async def remove(self, id_, author):
        """Delete a scheduled event owned by `author`."""
        return self._remove_by_id_and_author(id_, author)

    # Private helpers

    def _remove_by_id(self, id_: str):
        """Delete an event by id and cancel its jobs; [] on failure."""
        try:
            doc = self.db.delete("Events", id_)
            log.info("hola: %s", doc)
            for job in doc['data']['jobs']:
                self.sched.remove_job(job)
            return doc
        except Exception:
            # BUG FIX: narrowed from a bare `except:` (see `add`).
            return []

    def _remove_by_id_and_author(self, id_: str, author: str):
        """Delete an event by id, only if owned by `author`; [] on failure."""
        try:
            doc = self.db.delete_by_id_and_author("Events",
                                                  "event_by_id_and_author",
                                                  id_, author)
            for job in doc['data']['jobs']:
                self.sched.remove_job(job)
            return doc
        except Exception:
            # BUG FIX: narrowed from a bare `except:` (see `add`).
            return []

    async def _remove_old_event(self):
        """Purge expired events from the database."""
        self.db.delete_by_expired_time("all_events_by_time")

    def _create_jobs(self, event):
        """Register one date job per still-future reminder, plus a cleanup
        job at the event time. Returns the list of created job ids."""
        dt_event = event['time']
        dt_now = datetime.utcnow().replace(tzinfo=timezone.utc)

        jobs_id = []
        for reminder in event['reminders']:
            # Only schedule reminders that are still in the future
            if dt_event > dt_now + reminder['delta']:
                log.info("Added event")
                job = self.sched.add_job(
                    self.action,
                    'date',
                    run_date=(dt_event - reminder['delta']),
                    args=[
                        reminder['message'], event['text'], event['channel']
                    ])
                jobs_id.append(job.id)

        # Job that removes the record from the database at event time
        job = self.sched.add_job(self._remove_old_event,
                                 'date',
                                 run_date=(dt_event),
                                 args=[])
        jobs_id.append(job.id)

        return jobs_id

    def _generate_event(self, date_time, channel, text, author):
        """Build the in-memory event dict used by `_create_jobs`."""
        return {
            "author": f"{author}",
            "text": text,
            "time": date_time,
            "channel": int(channel),
            "reminders": self.reminders
        }
示例#27
0
class BirthdayManager(Cog):
    """Discord cog that polls a Google Sheet for birthdays and announces
    them daily at midnight (server local time)."""

    def __init__(self, bot: Bot):
        # (kerberos, birthday-datetime-or-None) tuples, kept sorted
        self.birthdays: List[Person] = []
        self.bot = bot
        # guards self.birthdays between the refresh task and the announcer
        self.lock = Lock()
        self.timer: Optional[Timer] = None

        # NOTE(review): environ.get returns None when the variable is unset,
        # which would make int(None) raise here — confirm env is always set.
        self.channel = int(environ.get("SAFETY_ANNOUNCEMENT_CHANNEL"))
        self.doc = environ.get("SAFETY_GOOGLE_DOCS_LINK")
        self.refresh_birthdays.start()

        # Fire the announcement check once a day at 00:00:00
        self.scheduler = AsyncIOScheduler()
        self.scheduler.add_job(self.schedule_birthday,
                               trigger="cron",
                               hour=0,
                               minute=0,
                               second=0)
        self.scheduler.start()

    def cog_unload(self):
        # Stop both the polling task and the cron job on cog teardown
        self.refresh_birthdays.cancel()
        self.scheduler.remove_all_jobs()

    @tasks.loop(hours=48)
    async def refresh_birthdays(self):
        """Poll the Google Sheet and rebuild the sorted birthday list."""
        async with self.lock:
            data = sheets.spreadsheets() \
              .values() \
              .get(spreadsheetId=self.doc, range="A2:J500") \
              .execute() \
              .get("values", [])

            self.birthdays.clear()

            for person in data:
                kerberos = person[1]
                # assumes column J (index 9) holds the birthday; rows with
                # trailing blanks fall back to the last cell — TODO confirm
                birthday = person[9] if len(person) >= 10 else person[-1]
                self.birthdays.append(
                    (kerberos, get_date(birthday, date_formats)))

            self.birthdays.sort(key=cmp_to_key(compare_people))

    async def schedule_birthday(self):
        """
        Reviews birthday metadata and sends birthday notice in SAFETY_ANNOUNCEMENT_CHANNEL
        if someone's birthday is today (server time)
        """
        try:
            async with self.lock:
                tz = get_localzone()

                now = datetime.now(tz)
                # midnight at the start of tomorrow, local time
                tomorrow = (now + timedelta(1)) \
                  .replace(hour=0, minute=0, second=0, microsecond=0)

                people: List[Tuple[str, datetime]] = []

                for person in self.birthdays:
                    # entries whose birthday string failed to parse are None
                    if person[1] is None:
                        continue

                    current_birthday = localtime(
                        person[1].replace(year=now.year), tz)

                    # birthday falls within [today 00:00, tomorrow 00:00)
                    if tomorrow - timedelta(1) <= current_birthday < tomorrow:
                        people.append(person)

                if len(people) == 0:
                    return

                names = [person[0] for person in people]
                names_str = ", ".join(names)
                message = f"Happy birthday to {names_str}!\n"

                for person in people:
                    difference = tomorrow - localtime(person[1], tz)
                    age = round(difference.total_seconds() / seconds_in_year)
                    message += f"{person[0]} is {age} years old\n"

                target_channel = self.bot.get_channel(self.channel)

                # channel cache may not be populated yet right after startup;
                # poll until the channel resolves
                while target_channel is None:
                    await sleep(5)
                    target_channel = self.bot.get_channel(self.channel)

                await target_channel.send(message)
        except Exception as e:
            print(e)
示例#28
0
class TaskController:
    """handle commands of "creating timed task"""
    def __init__(self):
        """attributes:
            scheduler: job scheduler
            reply: reply to user command"""
        self.reply = ''  # during handling, help or reply messages may be needed
        self.scheduler = AsyncIOScheduler()
        # monotonically increasing counter used as the APScheduler job id
        self.id_count = 0
        self.scheduler.start()

    def handle_msg(self, msg, conversation: Union[Contact, Room], to_bot):
        """handle commands
        params:
            msg: str, messages from talker
            conversation: the contact/room to say the reply in
            to_bot: bool, whether talking to the bot
        returns True when the message was consumed as a timed-task command
            """
        # clear reply
        self.reply = ''
        # check if saying to the bot
        if not to_bot:
            return False
        if self.handle_create_task(msg, conversation):
            return True
        return False

    def get_reply(self):
        """return reply"""
        return self.reply

    def handle_create_task(self, msg: str, conversation: Union[Contact, Room]):
        """handle create task command: try the one-shot (date) form first,
        then the recurring (cron) form"""
        if self.date_type_task(msg, conversation):
            return True
        if self.cron_type_task(msg, conversation):
            return True
        return False

    def date_type_task(self, msg: str, conversation: Union[Contact, Room]):
        """parse date type timed task
        pattern is like: timed message#y-m-d h:min:sec-msg"""
        pattern = re.compile(r'^\s*' + KEY_TIMED_TASK + r'\s*' + KEY_SPLIT +
                             r'\s*(\d+-\d+-\d+\s+\d+:\d+:.*?)-(.*?)$')
        # collapse runs of whitespace so the datetime regex matches
        msg = re.sub(r'\s+', ' ', msg)
        # NOTE(review): this sub looks like a no-op (same char on both sides);
        # presumably the pattern was a full-width colon '：' that got
        # normalized in transit — confirm against the original source
        msg = re.sub(':', ':', msg)
        res = pattern.match(msg)
        if res is None:
            return False
        self.id_count += 1
        self.scheduler.add_job(conversation.say,
                               'date',
                               run_date=res.group(1),
                               args=[res.group(2)],
                               id=str(self.id_count))
        self.reply = reply.set_date_timed_task_success(res.group(1),
                                                       res.group(2))
        return True

    def cron_type_task(self, msg: str, conversation: Union[Contact, Room]):
        """parse cron type timed task
        pattern is like 'timed message#y-m-d-dof-h-min-msg"""
        # five dash-separated fields, each a number or '*', then '-' + message
        pattern = re.compile(r'^\s*' + KEY_TIMED_TASK + r'\s*' + KEY_SPLIT +
                             r'\s*' + '(' +
                             '-'.join([r'(?:\d+|\*)' for _ in range(5)]) +
                             ')' + '-' + r'(.*?)$')
        res = pattern.match(msg)
        if res is None:
            return False
        try:
            params = parse_cron_str_to_dict(res.group(1), '-')
            self.id_count += 1
            self.scheduler.add_job(conversation.say,
                                   'cron',
                                   month=params['month'],
                                   day=params['day'],
                                   day_of_week=params['week day'],
                                   hour=params['hour'],
                                   minute=params['minute'],
                                   args=[res.group(2)],
                                   id=str(self.id_count))
            self.reply = reply.set_cron_timed_task_success(
                params, res.group(2))
        except ValueError:
            # bad field values (e.g. out-of-range minute) -> error reply
            self.reply = reply.parse_datetime_error()
        return True
示例#29
0
            )
            await tbot(
                functions.messages.EditChatDefaultBannedRightsRequest(
                    peer=int(warner.chat_id), banned_rights=hehes))
            if CLEAN_GROUPS:
                async for user in tbot.iter_participants(int(warner.chat_id)):
                    if user.deleted:
                        await tbot.edit_permissions(int(warner.chat_id),
                                                    user.id,
                                                    view_messages=False)
        except Exception as e:
            logger.info(f"Unable To Close Group {warner} - {e}")


# Module-level scheduler: run job_close daily at 23:55 IST (Asia/Kolkata).
scheduler = AsyncIOScheduler(timezone="Asia/Kolkata")
scheduler.add_job(job_close, trigger="cron", hour=23, minute=55)
scheduler.start()


async def job_open():
    ws_chats = get_all_chat_id()
    if len(ws_chats) == 0:
        return
    for warner in ws_chats:
        try:
            await tbot.send_message(
                int(warner.chat_id),
                "`06:00 Am, Group Is Opening.`\n**Powered By @AuraXRobot**",
            )
            await tbot(
                functions.messages.EditChatDefaultBannedRightsRequest(
示例#30
0
    spider()
    work_schedule()


def execution_listener(event):
    """APScheduler job listener: chain ``work_schedule`` after ``spider``.

    A crashed job is only logged.  On success, the finished job is looked
    up and, when it is the 'spider' job, the follow-up ``work_schedule``
    task is queued for immediate execution.
    """
    if event.exception:
        _logger.error('The job crashed')
        return
    # Only a successful run of the 'spider' job triggers the follow-up.
    finished_job = scheduler.get_job(event.job_id)
    if getattr(finished_job, 'name', '') == 'spider':
        scheduler.add_job(work_schedule, name='work_schedule')


if __name__ == '__main__':
    # Hourly spider job; the first run fires ~4 seconds after start-up
    # so the pipeline begins immediately instead of waiting an hour.
    scheduler = AsyncIOScheduler()
    scheduler.add_job(spider,
                      trigger='interval',
                      hours=1,
                      name='spider',
                      next_run_time=datetime.now() + timedelta(seconds=4))
    # Listen to both success and error events so execution_listener can
    # chain work_schedule after each successful spider run.
    scheduler.add_listener(callback=execution_listener,
                           mask=EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#31
0
class D2info:
    """Sanic-based service that keeps Destiny 2 data refreshed.

    Parses CLI flags, builds a D2data client, and wires an AsyncIOScheduler
    into the app's ``before_server_start`` hook to refresh tokens and
    Eververse/rotation data on a schedule.
    """
    version = '0.3.1'
    # Placeholder; replaced with an AsyncIOScheduler instance in start_up().
    sched = ''
    # Placeholder; replaced by an argparse.Namespace in get_args().
    args = []

    # Executed once at class-definition time: enable APScheduler debug logs.
    logging.basicConfig()
    logging.getLogger('apscheduler').setLevel(logging.DEBUG)

    def __init__(self, **options):
        # NOTE(review): D2info declares no explicit base class, so
        # object.__init__ will reject non-empty **options — confirm the
        # intended superclass.
        super().__init__(**options)
        self.get_args()

        # D2data receives the production flag and the SSL cert/key pair.
        self.data = D2data(self.args.production,
                           (self.args.cert, self.args.key))

    def get_args(self):
        """Parse command-line flags into ``self.args``."""
        parser = argparse.ArgumentParser()
        parser.add_argument('--port',
                            help='specify a port to listen on',
                            default='4200')
        parser.add_argument('-p',
                            '--production',
                            help='Use to launch in production mode',
                            action='store_true')
        parser.add_argument('-nm',
                            '--nomessage',
                            help='Don\'t post any messages',
                            action='store_true')
        parser.add_argument('--oauth',
                            help='Get Bungie access token',
                            action='store_true')
        parser.add_argument('-k',
                            '--key',
                            help='SSL key',
                            type=str,
                            default='')
        parser.add_argument('-c',
                            '--cert',
                            help='SSL certificate',
                            type=str,
                            default='')
        self.args = parser.parse_args()

    async def init_data(self):
        """One-shot warm-up: refresh token, characters and rotation data."""
        await self.data.token_update()
        await self.data.get_chars()
        await self.data.get_seasonal_eververse()
        await self.data.get_daily_rotations()
        # await self.data.get_weekly_rotations()
        await self.data.get_weekly_eververse()

    def start_up(self):
        """Register scheduler jobs and run the Sanic app."""
        @app.listener('before_server_start')
        async def instantiate_scheduler(app, loop):
            # Scheduler must be created inside the server's event loop.
            self.sched = AsyncIOScheduler(timezone='UTC')
            # Large misfire_grace_time (~24h) so missed runs still execute.
            self.sched.add_job(self.init_data, misfire_grace_time=86300)
            self.sched.add_job(self.data.token_update, 'interval', hours=1)
            # Weekly reset refreshes: Tuesday 17:01:40 UTC.
            self.sched.add_job(self.data.get_seasonal_eververse,
                               'cron',
                               day_of_week='tue',
                               hour='17',
                               minute='1',
                               second='40',
                               misfire_grace_time=86300)
            self.sched.add_job(self.data.get_weekly_eververse,
                               'cron',
                               day_of_week='tue',
                               hour='17',
                               minute='1',
                               second='40',
                               misfire_grace_time=86300)
            # self.sched.add_job(self.data.get_weekly_rotations, 'cron', day_of_week='tue', hour='17', minute='0',
            #                    second='40', misfire_grace_time=86300)
            # self.sched.add_job(self.data.get_daily_rotations, 'cron', hour='17', minute='0', second='40',
            #                    misfire_grace_time=86300)
            self.sched.start()

        app.static('/static', './static')
        app.error_handler = CustomErrorHandler()
        # app.url_for('static', filename='style.css', name='style')
        # NOTE(review): production ignores self.args.port and binds 1423 —
        # confirm whether --port is meant to be used here.
        if self.args.production:
            app.run(host='0.0.0.0',
                    port=1423,
                    workers=1,
                    debug=False,
                    access_log=False
                    )  # ssl={'cert': self.args.cert, 'key': self.args.key})
        else:
            app.run()
示例#32
0
        h1, m1, h2, m2 = eval(udB["NIGHT_TIME"])
    for chat in chats:
        try:
            await ultroid_bot(
                EditChatDefaultBannedRightsRequest(
                    chat,
                    banned_rights=ChatBannedRights(
                        until_date=None,
                        send_messages=True,
                    ),
                )
            )
            await ultroid_bot.send_message(
                chat, f"**NightMode : Group Closed**\n\nGroup Will Open At `{h2}:{m2}`"
            )
        except Exception as er:
            LOGS.info(er)


# Only arm the night-mode cron jobs when at least one group is configured.
if night_grps():
    try:
        # Defaults: close groups at 00:00, reopen at 07:00.
        h1, m1, h2, m2 = 0, 0, 7, 0
        if udB.get("NIGHT_TIME"):
            # SECURITY NOTE(review): eval() on a database-stored string runs
            # arbitrary code if the DB value can be tampered with — consider
            # ast.literal_eval for a plain "(h1, m1, h2, m2)" tuple.
            h1, m1, h2, m2 = eval(udB["NIGHT_TIME"])
        sch = AsyncIOScheduler()
        sch.add_job(close_grp, trigger="cron", hour=h1, minute=m1)
        sch.add_job(open_grp, trigger="cron", hour=h2, minute=m2)
        sch.start()
    except Exception as er:
        LOGS.info(er)
示例#33
0
        if lifetime <= 3600:
            message = f"My age: {math.floor(lifetime)} seconds"
        elif 3600 <= lifetime and lifetime <= 86400:
            message = f"My age: {math.floor(lifetime / 3600)} hours"
        else:
            message = f"My age: {math.floor(lifetime / 86400)} days"
        await app.send_message(subscriber_id,
                               randomInspiration() + f"\n{message}")
        peer = subscriber_telegram_id if subscriber_telegram_id else subscriber_id
        await app.send_message('djnotes', f'Inspiration sent to {peer}')
        #TODO: Save message and sending time in database
        # await app.send_message(id, f"My age: {time.time() - start_time} seconds")


# Interval job: send an inspiration every 3 seconds.  The scheduler is
# created here but only started from the /start message handler below.
scheduler = AsyncIOScheduler()
scheduler.add_job(job, "interval", seconds=3)


@app.on_message(filters.private)
async def handle(client, message):
    """Handle private messages: ``/start`` subscribes the sender.

    Records the sender as the current subscriber and starts the interval
    scheduler.  The start is guarded because AsyncIOScheduler.start()
    raises SchedulerAlreadyRunningError when called a second time — the
    original crashed on every /start after the first.
    """
    global scheduler
    global subscriber_id
    global subscriber_telegram_id
    if (message.text == '/start'):
        subscriber_id = message.from_user.id
        # Fix: only start the scheduler if it is not already running.
        if not scheduler.running:
            scheduler.start()
        await client.send_message('djnotes',
                                  f'Inspiration sent to {subscriber_id}')
        #TODO: Get user's Telegram ID and save it in receiver_telegram_id
        #TODO: Save the user in database
示例#34
0
class Polls(commands.Cog, name='polls'):
    """
    Polls commands: reaction polls, anonymous polls with timed endings,
    and vote inspection.  Timed endings are scheduled on an
    AsyncIOScheduler using "message_id + channel_id + guild_id" as job id.
    """
    def __init__(self, client):
        self.client = client
        self.sql = SqlClass()

        # Regional-indicator emoji used as reactions for poll options
        # (index N is option N; at most 20 options are allowed).
        self.pollsigns = [
            "🇦", "🇧", "🇨", "🇩", "🇪", "🇫", "🇬", "🇭", "🇮", "🇯", "🇰", "🇱", "🇲",
            "🇳", "🇴", "🇵", "🇶", "🇷", "🇸", "🇹", "🇺", "🇻", "🇼", "🇽", "🇾", "🇿"
        ]
        # Matches the poll syntax "{title} [option] [option] ...".
        self.reg = re.compile(r'({.+})\ *(\[[^\n\r\[\]]+\] *)+')

        # starts up the scheduler and all the tasks for all commands on timer
        self.sched = AsyncIOScheduler()
        self.sched.start()

        client.loop.create_task(self._async_init())

    async def _async_init(self) -> None:
        """Queues up all in progress polls
        :return:
        """
        await self.client.wait_until_ready()
        self._update_guild()
        polls = self.sql.get_polls()
        now = datetime.datetime.now()

        for poll in polls:
            if not poll[0]:
                pass  # no end time: untimed poll, nothing to schedule
            else:
                time = datetime.datetime.strptime(poll[0],
                                                  '%Y-%m-%d %H:%M:%S.%f')
                if time > now:
                    # Re-schedule the end; job id is the concatenated
                    # message/channel/guild ids (same scheme as anonpoll).
                    self.sched.add_job(self._end_poll,
                                       "date",
                                       run_date=time,
                                       id=str(poll[1]) + str(poll[2]) +
                                       str(poll[3]),
                                       args=(poll[1], poll[2], poll[3]))
                else:
                    # End time passed while the bot was offline: finish now.
                    await self._end_poll(poll[1], poll[2], poll[3])

    def _update_guild(self) -> None:
        """Updates Guilds in the database
        :return:
        """
        guilds = self.client.guilds
        guilds = [guild.id for guild in guilds]

        db_guilds = self.sql.get_guilds()
        # NOTE(review): the loop variable shadows the list it iterates;
        # works (extracts column 0 of each row) but is confusing.
        db_guilds = [db_guilds[0] for db_guilds in db_guilds]

        # Add guilds the bot joined since the last sync.
        lst = []
        for guild in guilds:
            if guild not in db_guilds:
                lst.append(guild)

        self.sql.add_guilds(lst)

        # Remove guilds the bot has left.
        lst = []
        for db_guild in db_guilds:
            if db_guild not in guilds:
                lst.append(db_guild)

        self.sql.remove_guilds(lst)

    def _delete_poll(self, message_id: int, channel_id: int,
                     guild_id: int) -> None:
        """Deletes the poll and cancels its scheduled end job, if any.
        :param message_id:
        :param channel_id:
        :param guild_id:
        :return:
        """
        poll = self.sql.get_poll_time(message_id, channel_id, guild_id)
        if poll:
            self.sql.remove_poll(message_id, channel_id, guild_id)
            # Only timed polls (poll[0][0] is the end time) have a job.
            if poll[0][0]:
                self.sched.remove_job(
                    str(poll[0][1]) + str(poll[0][2]) + str(poll[0][3]))

    async def _end_poll(self, message_id: int, channel_id: int,
                        guild_id: int) -> None:
        """End function for when timed polls finish. counts up votes and sends into channel
        :param message_id: message id of the poll
        :param channel_id: channel id of the poll
        :param guild_id: guild id of the poll
        :return:
        """
        self._update_guild()
        embed = self._count_poll(message_id, channel_id, guild_id)
        channel = self.client.get_channel(channel_id)

        await channel.send(embed=embed)
        self.sql.remove_poll(message_id, channel_id, guild_id)

    def _count_poll(self, message_id: int, channel_id: int,
                    guild_id: int) -> object:
        """Counts up the votes for a poll and returns the embed
        :param message_id: message id of the poll
        :param channel_id: channel id of the poll
        :param guild_id: guild of the poll
        :return: discord.Embed
        """
        poll_info = self.sql.get_poll(message_id, channel_id, guild_id)

        # emote -> [tally, option label]
        votes = {}
        for poll in poll_info:
            votes[poll[2]] = [0, poll[1]]

        user_votes = self.sql.get_votes(message_id, channel_id, guild_id)

        for vote in user_votes:
            votes[vote[0]][0] += 1

        description = ''
        for emote, value in votes.items():
            description += f'{emote} {value[1]}: {value[0]}\n'

        embed = discord.Embed(title=poll_info[0][0],
                              color=discord.Color.gold(),
                              description=description)
        return embed

    @commands.Cog.listener()
    async def on_raw_reaction_add(self, payload) -> None:
        """Toggles user vote
        :param payload: info about the user who voted
        :return:
        """
        # Ignore the bot's own seed reactions.
        if payload.member == self.client.user: return

        if self.sql.get_poll(payload.message_id, payload.channel_id,
                             payload.guild_id):
            # Second reaction with the same emote retracts the vote.
            if self.sql.check_vote(payload.user_id, payload.emoji.name,
                                   payload.message_id, payload.channel_id,
                                   payload.guild_id):
                self.sql.remove_vote(payload.user_id, payload.emoji.name,
                                     payload.message_id, payload.channel_id,
                                     payload.guild_id)
            else:
                self.sql.add_user(payload.user_id, payload.guild_id)
                self.sql.add_vote(payload.user_id, payload.emoji.name,
                                  payload.message_id, payload.channel_id,
                                  payload.guild_id)

            # deletes reaction if it found the poll
            channel = self.client.get_channel(payload.channel_id)
            message = await channel.fetch_message(payload.message_id)
            await message.remove_reaction(payload.emoji, payload.member)

    @commands.Cog.listener()
    async def on_raw_message_delete(self, payload) -> None:
        """Checks whether a user deleted one of gregories messages
        then checks whether it was one of the polls and deletes it
        :param payload:
        :return:
        """
        if payload.cached_message is not None:
            if payload.cached_message.author == self.client.user:
                self._delete_poll(payload.message_id, payload.channel_id,
                                  payload.guild_id)

    @commands.command(aliases=['poll2'])
    async def anonpoll(self, ctx, *, args) -> None:
        """Creates anonymous poll with optional timed ending. limit of 20 options
        :param ctx:
        :param args: 1d2h3m4s {title}[arg][arg]
        :return: Creates a poll with timed output
        """
        time = None
        # checks message against regex to see if it matches
        if not self.reg.match(args):
            # check if it has time at start of command
            # splits arguments and datetime
            index = args.find(' ')
            time = args[:index]
            args = args[index:].lstrip()

            # converts to datetime: parse "1d2h3m4s"-style duration tokens
            time_dict = {}
            check = False
            for time_letter in ['d', 'h', 'm', 's']:
                found = time.find(time_letter)
                if found != -1:
                    check = True
                    split_arg = time.split(time_letter)
                    time_dict[time_letter] = float(split_arg[0])
                    time = split_arg[1]
                else:
                    time_dict[time_letter] = 0

            if not check:
                raise discord.errors.DiscordException
            else:
                time = datetime.datetime.now() + datetime.timedelta(
                    days=time_dict['d'],
                    hours=time_dict['h'],
                    minutes=time_dict['m'],
                    seconds=time_dict['s'])

            # checks if the args are formatted correctly
            if not self.reg.match(args):
                raise discord.errors.DiscordException

        # have args and possible datetime
        # formatting of arguments in message
        args = args.split('[')
        name = args.pop(0)[1:]
        name = name[:name.find('}')]
        args = [arg[:arg.find(']')]
                for arg in args]  # thanks ritz for this line

        # filtering out
        if len(args) > 20:
            return await ctx.send(
                f"bad {ctx.author.name}! thats too much polling >:(")
        elif len(args) == 0:
            return await ctx.send(
                f"bad {ctx.author.name}! thats too little polling >:(")
        elif name == '' or '' in args:
            return await ctx.send(
                f"bad {ctx.author.name}! thats too simplistic polling >:(")

        # creating embed for poll
        # main body
        description = ''
        for count in range(len(args)):
            description += f'{self.pollsigns[count]} {args[count]}\n\n'

        footer = 'endpoll <id> (true for dms)'

        embed = discord.Embed(title=name,
                              color=discord.Color.gold(),
                              description=description)
        embed.set_footer(text=footer)
        msg = await ctx.send(embed=embed)
        # adds a message id to the end of the poll
        footer += f'\nid: {msg.id}'
        embed.set_footer(text=footer)
        await msg.edit(embed=embed)

        # SQL Setup
        self._update_guild()
        self.sql.add_poll(msg.id, msg.channel.id, msg.author.guild.id, name,
                          time)
        self.sql.add_options(msg.id, msg.channel.id, msg.author.guild.id,
                             self.pollsigns, args)
        # Background task
        if time:
            self.sched.add_job(self._end_poll,
                               "date",
                               run_date=time,
                               id=str(msg.id) + str(msg.channel.id) +
                               str(msg.author.guild.id),
                               args=(msg.id, msg.channel.id,
                                     msg.author.guild.id))

        # adding reactions
        for count in range(len(args)):
            await msg.add_reaction(self.pollsigns[count])

    @anonpoll.error
    async def pollanon_error(self, ctx, error) -> None:
        """Error output for poll
        :param ctx:
        :param error: The type of error
        :return:
        """
        if isinstance(error,
                      commands.errors.MissingRequiredArgument) or isinstance(
                          error, discord.errors.DiscordException):
            await ctx.send(
                '`ERROR Missing Required Argument: make sure it is .anonpoll <time 1d2h3m4s> {title} [args]`'
            )
        else:
            print(error)

    @commands.command(aliases=['checkvote'])
    async def checkvotes(self, ctx) -> None:
        """Checks what polls you have voted on in this server
        :param ctx:
        :return: embed of votes sent to user's dms
        """
        await ctx.message.delete()

        votes = self.sql.check_votes(ctx.author.id, ctx.author.guild.id)
        # [(809138215331168317, 809080848057106432, 798298345177088022, '🇦', 'arg1', 'name of poll'), ...
        # count unique occurrences of (message_id, channel_id, guild_id) in (votes[0],votes[1],votes[2])
        polls = [(vote[0], vote[1], vote[2])
                 for vote in votes]  # removes unnessary code from list
        polls = list(dict.fromkeys(
            polls))  # removes item if it appears in the list poll
        # [(811247486101487646, 798309035945754694, 798298345177088022), ...

        if len(polls) == 0:
            # sends output if user hasnt voted on anything
            msg = await ctx.send('You havent voted on any active polls')
            await sleep(10)
            await msg.delete()
        else:
            # generates the embed
            embed = discord.Embed(title="You have voted",
                                  colour=discord.Color.gold())
            for poll in polls:
                # generates the field
                title = ''
                description = ''
                for vote in votes:
                    if (vote[0], vote[1], vote[2]
                        ) == poll:  # goes though each poll once by one
                        # TODO: Rewrite code to automatically do this using list ordering rather than this
                        title = vote[-1]
                        description += f"\n{vote[3]} {vote[4]}\n"

                embed.add_field(name=title,
                                value=description[:-1],
                                inline=True)
            try:
                await ctx.author.send(embed=embed)
            except discord.errors.Forbidden:
                # if user has dms disabled
                msg = await ctx.send(
                    'I cant send you a DM! please check your discord settings')
                await sleep(10)
                await msg.delete()

    @commands.command(
        aliases=['stoppoll', 'stopoll', 'deletepoll', 'yeetpoll', "polln't"])
    async def endpoll(self, ctx, message_id: int, dm: bool) -> None:
        """Ends the poll and outputs the result
        :param ctx:
        :param message_id: the message id of the poll
        :param dm: Whether the bot should send the results to dms
        :return:
        """
        embed = self._count_poll(message_id, ctx.channel.id,
                                 ctx.author.guild.id)
        if not dm:
            await ctx.send(embed=embed)
            self.sql.remove_poll(message_id, ctx.channel.id,
                                 ctx.author.guild.id)
        else:
            try:
                await ctx.author.send(embed=embed)
                self.sql.remove_poll(message_id, ctx.channel.id,
                                     ctx.author.guild.id)
            except discord.errors.Forbidden:
                # if user has dms disabled
                msg = await ctx.send(
                    'I cant send you a DM! please check your discord settings')
                await sleep(10)
                await msg.delete()

    @endpoll.error
    async def endpoll_error(self, ctx, error):
        """Error handling for the end poll function
        :param ctx:
        :param error:
        :return:
        """
        if isinstance(error,
                      commands.errors.MissingRequiredArgument) or isinstance(
                          error, discord.errors.DiscordException):
            await ctx.send(
                '`ERROR Missing Required Argument: make sure it is .endpoll <message id> <send to dms True/False>`'
            )
        else:
            print(error)

    @commands.command()
    async def poll(self, ctx, *, args: str = ' ') -> None:
        """
        Normal poll. Does {title} [option] [option] and "Foo Bar"
        :param ctx:
        :param args:
        :return:
        """
        # No {title}[option] syntax: treat the message as a yes/no/shrug poll.
        if not self.reg.match(args):
            await ctx.message.add_reaction('👍')
            await ctx.message.add_reaction('👎')
            await ctx.message.add_reaction('🤷‍♀️')
            return

        args = args.split('[')
        name = args.pop(0)[1:]
        name = name[:name.find('}')]

        args = [arg[:arg.find(']')]
                for arg in args]  # thanks ritz for this line

        if len(args) > 20:
            await ctx.send(f"bad {ctx.author.name}! thats too much polling >:("
                           )
            return
        elif len(args) == 0:
            await ctx.send(
                f"bad {ctx.author.name}! thats too little polling >:(")
            return
        elif name == '' or '' in args:
            await ctx.send(
                f"bad {ctx.author.name}! thats too simplistic polling >:(")
            return

        description = ''
        for count in range(len(args)):
            description += f'{self.pollsigns[count]} {args[count]}\n\n'

        embed = discord.Embed(title=name,
                              color=discord.Color.gold(),
                              description=description)
        msg = await ctx.send(embed=embed)

        # add reactions
        for count in range(len(args)):
            await msg.add_reaction(self.pollsigns[count])

    @commands.command(aliases=['rp'])
    async def raidpoll(self, ctx, *, title='Raid Times'):
        """
        creates a poll for raiding: two embeds (AM and PM) with one
        reaction per hour 1:00-12:00
        """
        emotes = ["🇦", "🇧", "🇨", "🇩", "🇪", "🇫", "🇬", "🇭", "🇮", "🇯", "🇰", "🇱"]
        description = """
        🇦 1:00\n
        🇧 2:00\n
        🇨 3:00\n
        🇩 4:00\n
        🇪 5:00\n
        🇫 6:00\n
        🇬 7:00\n
        🇭 8:00\n
        🇮 9:00\n
        🇯 10:00\n
        🇰 11:00\n
        🇱 12:00\n
        """
        embed = discord.Embed(title=f'{title} AM',
                              color=discord.Color.gold(),
                              description=description)
        msg = await ctx.send(embed=embed)
        embed = discord.Embed(title=f'{title} PM',
                              color=discord.Color.gold(),
                              description=description)
        msg2 = await ctx.send(embed=embed)

        for emote in emotes:
            await msg.add_reaction(emote)
            await msg2.add_reaction(emote)
示例#35
0
def main():
    """Entry point: load config, build the CQHTTP bot + scheduler, run yobot."""
    print("""==============================
              _           _
             | |         | |
  _   _  ___ | |__   ___ | |_
 | | | |/ _ \| '_ \ / _ \| __|
 | |_| | (_) | |_) | (_) | |_
  \__, |\___/|_.__/ \___/ \__|
   __/ |
  |___/
==============================""")
    print("正在初始化...")

    # Prefer a config file in the working directory; otherwise use ./yobot_data.
    if os.path.exists('yobot_config.json'):
        basedir = "."
    else:
        basedir = "./yobot_data"
    if os.path.exists(os.path.join(basedir, "yobot_config.json")):
        try:
            # utf-8-sig tolerates a BOM written by Windows editors.
            with open(os.path.join(basedir, "yobot_config.json"),
                      "r",
                      encoding="utf-8-sig") as f:
                config = json.load(f)
        except json.JSONDecodeError as e:
            print('配置文件格式错误,请检查配置文件。三秒后关闭')
            time.sleep(3)
            # NOTE(review): "raise e from e" chains the exception to itself;
            # a bare "raise" was likely intended.
            raise e from e
        token = config.get("access_token", None)
        if token is None:
            print("警告:没有设置access_token,这会直接暴露机器人接口")
            print("详见https://yobot.win/usage/access-token/")
    else:
        token = None

    # Sanity-check that a local timezone is resolvable before starting.
    try:
        tzlocal.get_localzone()
    except:  # NOTE(review): bare except also swallows KeyboardInterrupt
        print("无法获取系统时区,请将系统时区设置为北京/上海时区")
        sys.exit()

    cqbot = CQHttp(access_token=token, enable_http_post=False)
    sche = AsyncIOScheduler()
    bot = yobot.Yobot(
        data_path=basedir,
        scheduler=sche,
        quart_app=cqbot.server_app,
        bot_api=cqbot._api,
    )
    host = bot.glo_setting.get("host", "0.0.0.0")
    port = bot.glo_setting.get("port", 9222)

    @cqbot.on_message
    async def handle_msg(context):
        """Route group/private messages to yobot; reply only when non-empty."""
        if context["message_type"] == "group" or context[
                "message_type"] == "private":
            reply = await bot.proc_async(context)
        else:
            reply = None
        if isinstance(reply, str) and reply != "":
            return {'reply': reply, 'at_sender': False}
        else:
            return None

    async def send_it(func):
        """Run a job callable (sync or async) and send each produced message,
        pausing 5s between sends to avoid flooding."""
        if asyncio.iscoroutinefunction(func):
            to_sends = await func()
        else:
            to_sends = func()
        if to_sends is None:
            return
        for kwargs in to_sends:
            await asyncio.sleep(5)
            await cqbot.send_msg(**kwargs)

    # Register every active yobot job with the scheduler before starting it.
    jobs = bot.active_jobs()
    if jobs:
        for trigger, job in jobs:
            sche.add_job(func=send_it,
                         args=(job, ),
                         trigger=trigger,
                         coalesce=True,
                         max_instances=1,
                         misfire_grace_time=60)
        sche.start()

    print("初始化完成,启动服务...")

    cqbot.run(
        host=host,
        port=port,
        debug=False,
        use_reloader=False,
        loop=asyncio.get_event_loop(),
    )
示例#36
0
"""
Demonstrates how to use the Tornado compatible scheduler to schedule a job that executes on 3 second intervals.
"""

from datetime import datetime
import os

from apscheduler.schedulers.asyncio import AsyncIOScheduler

try:
    import asyncio
except ImportError:
    import trollius as asyncio


def tick():
    """Print the current wall-clock time (demo job body)."""
    now = datetime.now()
    print('Tick! The time is: %s' % now)


if __name__ == '__main__':
    # Demo: print a tick every 3 seconds on the asyncio event loop.
    scheduler = AsyncIOScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    # Execution will block here until Ctrl+C (Ctrl+Break on Windows) is pressed.
    try:
        asyncio.get_event_loop().run_forever()
    except (KeyboardInterrupt, SystemExit):
        pass
示例#37
0
def sche():
    """Create and start the daily-message scheduler.

    Registers two cron jobs: the early message at 03:00 and the new-day
    message at 00:00.
    TODO: fit for all environments with different timezone, this is for
    0 timezone.
    """
    daily = AsyncIOScheduler()
    for task, at_hour in ((send_early_msg, "3"), (send_new_day_msg, "0")):
        daily.add_job(task, 'cron', hour=at_hour, minute="0")
    daily.start()
示例#38
0
        await bot.send_message(message.chat.id,
                               msg,
                               reply_to_message_id=message.message_id)


# Define the function that sends weather to the chat on a schedule
@dp.message_handler()
async def sched(msg=None):
    """Send the current weather to every registered chat.

    Iterates the chat-id -> location mapping and posts one weather message
    per chat.  The ``msg`` parameter is unused; it exists to satisfy the
    aiogram handler signature.
    """
    # Renamed locals: the original used `id` (shadowing the builtin) and
    # reassigned the `msg` parameter inside the loop.
    for chat_id, location in arr_dict.items():
        text = get_weather(location)
        await bot.send_message(chat_id=chat_id, text=text)


# Create scheduler with interval 1 day
# Cron job: run sched() every day of the week at 02:00.
scheduler = AsyncIOScheduler()
scheduler.add_job(sched, 'cron', day_of_week='mon-sun', hour=2, minute=00)
scheduler.start()


# Create the function to startup my bot
async def on_startup(dp):
    # Register the webhook URL with Telegram when the bot boots.
    await bot.set_webhook(WEBHOOK_URL)


# Create the function to shutdown my bot
async def on_shutdown(dp):
    # Close the bot session cleanly on shutdown.
    await bot.close()


# Main script
if __name__ == '__main__':
示例#39
0
文件: scheduler.py 项目: andymor/eNMS
class Scheduler(Starlette):
    """Starlette app exposing a small HTTP API around an AsyncIOScheduler.

    Routes allow deleting, scheduling/pausing and inspecting tasks; the
    actual work is delegated back to the eNMS REST API via run_service.
    """

    # Maps crontab day-of-week digits to APScheduler day names.
    # NOTE(review): assumes the incoming crontab uses 0/7 = Sunday — confirm
    # against the producer of crontab_expression.
    days = {
        "0": "sun",
        "1": "mon",
        "2": "tue",
        "3": "wed",
        "4": "thu",
        "5": "fri",
        "6": "sat",
        "7": "sun",
        "*": "*",
    }

    # Multipliers converting a frequency unit into seconds.
    seconds = {"seconds": 1, "minutes": 60, "hours": 3600, "days": 86400}

    def __init__(self):
        super().__init__()
        # Settings file lives beside the app: <cwd parent>/setup/scheduler.json
        with open(Path.cwd().parent / "setup" / "scheduler.json", "r") as file:
            self.settings = load(file)
        dictConfig(self.settings["logging"])
        self.configure_scheduler()
        self.register_routes()

    @staticmethod
    def aps_date(date):
        """Convert 'dd/mm/YYYY HH:MM:SS' to APScheduler's 'YYYY-mm-dd HH:MM:SS'.

        Returns None for an empty/missing date.
        """
        if not date:
            return
        date = datetime.strptime(date, "%d/%m/%Y %H:%M:%S")
        return datetime.strftime(date, "%Y-%m-%d %H:%M:%S")

    def configure_scheduler(self):
        """Create and start the scheduler from the settings' "config" section."""
        self.scheduler = AsyncIOScheduler(self.settings["config"])
        self.scheduler.start()

    def register_routes(self):
        """Attach the HTTP routes that drive the scheduler."""
        @self.route("/job", methods=["DELETE"])
        async def delete(request):
            # Body is the bare job id (JSON string).
            job_id = await request.json()
            if self.scheduler.get_job(job_id):
                self.scheduler.remove_job(job_id)
            return JSONResponse(True)

        @self.route("/next_runtime/{task_id}")
        async def next_runtime(request):
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                return JSONResponse(job.next_run_time.strftime("%Y-%m-%d %H:%M:%S"))
            return JSONResponse("Not Scheduled")

        @self.route("/schedule", methods=["POST"])
        async def schedule(request):
            data = await request.json()
            if data["mode"] in ("resume", "schedule"):
                result = self.schedule_task(data["task"])
                if not result:
                    return JSONResponse({"alert": "Cannot schedule in the past."})
                else:
                    return JSONResponse({"response": "Task resumed.", "active": True})
            else:
                try:
                    self.scheduler.pause_job(data["task"]["id"])
                    return JSONResponse({"response": "Task paused."})
                except JobLookupError:
                    return JSONResponse({"alert": "There is no such job scheduled."})

        @self.route("/time_left/{task_id}")
        async def time_left(request):
            # Human-readable countdown to the job's next run.
            job = self.scheduler.get_job(request.path_params["task_id"])
            if job and job.next_run_time:
                delta = job.next_run_time.replace(tzinfo=None) - datetime.now()
                hours, remainder = divmod(delta.seconds, 3600)
                minutes, seconds = divmod(remainder, 60)
                days = f"{delta.days} days, " if delta.days else ""
                return JSONResponse(f"{days}{hours}h:{minutes}m:{seconds}s")
            return JSONResponse("Not Scheduled")

    @staticmethod
    def run_service(task_id):
        """Job body: POST the task id back to the eNMS REST API with basic auth."""
        auth = HTTPBasicAuth(environ.get("ENMS_USER"), environ.get("ENMS_PASSWORD"))
        post(f"{environ.get('ENMS_ADDR')}/rest/run_task", json=task_id, auth=auth)

    def schedule_task(self, task):
        """Add or reschedule a job for `task`; return True if it runs in the future.

        Builds a cron, interval or one-shot date trigger depending on the
        task's scheduling mode.
        """
        if task["scheduling_mode"] == "cron":
            crontab = task["crontab_expression"].split()
            # Translate the numeric day-of-week field into APScheduler names.
            crontab[-1] = ",".join(self.days[day] for day in crontab[-1].split(","))
            trigger = {"trigger": CronTrigger.from_crontab(" ".join(crontab))}
        elif task["frequency"]:
            trigger = {
                "trigger": "interval",
                "start_date": self.aps_date(task["start_date"]),
                "end_date": self.aps_date(task["end_date"]),
                "seconds": int(task["frequency"])
                * self.seconds[task["frequency_unit"]],
            }
        else:
            trigger = {"trigger": "date", "run_date": self.aps_date(task["start_date"])}
        if not self.scheduler.get_job(task["id"]):
            job = self.scheduler.add_job(
                id=str(task["id"]),
                replace_existing=True,
                func=self.run_service,
                args=[task["id"]],
                **trigger,
            )
        else:
            job = self.scheduler.reschedule_job(str(task["id"]), **trigger)
        # False means the computed next run is already in the past.
        return job.next_run_time > datetime.now(job.next_run_time.tzinfo)
import asyncio
import os
from functools import partial

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger

from mobilizon_reshare.cli import _safe_execution
from mobilizon_reshare.cli.commands.recap.main import recap
from mobilizon_reshare.cli.commands.start.main import start

sched = AsyncIOScheduler()

# (env var, default crontab, command) triples; schedules can be overridden
# via the environment. Note: APScheduler's CronTrigger uses 0 = Monday in
# the day-of-week field, so "0-4" means Monday through Friday.
_JOBS = (
    # Runs "start" every 15 minutes, Monday to Friday, 10:00-18:59.
    ("MOBILIZON_RESHARE_INTERVAL", "*/15 10-18 * * 0-4", start),
    # Runs "recap" once a week.
    ("MOBILIZON_RESHARE_RECAP_INTERVAL", "5 11 * * 0", recap),
)

for env_var, default_crontab, command in _JOBS:
    sched.add_job(
        partial(_safe_execution, command),
        CronTrigger.from_crontab(os.environ.get(env_var, default_crontab)),
    )

sched.start()
try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    pass
示例#41
0
    else:
        __detectModules(modulesdir)

    # register shutdown hooks
    signal.signal(signal.SIGHUP, _onHupSignal)
    signal.signal(signal.SIGTERM, _onTermSignal)

    # create and install asyncio main event loop
    # http://www.tornadoweb.org/en/stable/asyncio.html#tornado.platform.asyncio.AsyncIOMainLoop
    AsyncIOMainLoop().install()

    # advanced python scheduler for reoccuring tasks as defined by modules
    _scheduler = AsyncIOScheduler() #TornadoScheduler()

    # reload settings periodically
    _scheduler.add_job(settings.load_settings, 'interval', minutes=5)

    # add eve user and url of your application here
    import eveapi
    if 'in_game_owner' in cfgServer:
        eveapi.set_user_agent('Host: %s; Admin-EVE-Character: %s' % ('dev-local', cfgServer['in_game_owner']))

    # init universe data from SDE
    asyncio.get_event_loop().run_until_complete(universe.initCaches())

    # only import at this point to make sure all required modules have already been loaded
    from igbtoolbox import pages

    # default routes
    urls = []
示例#42
-1
class Agent(object):
    """Wires a NetworkChecker to an AsyncIOScheduler so a periodic job
    runs against the supplied client."""

    def __init__(self, _client):
        # Checker instance; not passed to the scheduled job here — the job
        # below invokes `tick` with the client instead.
        self.network_checker = NetworkChecker()
        self.scheduler = AsyncIOScheduler()
        self._add_job(_client)

    def _add_job(self, _client):
        # Schedule `tick` every config.check_interval seconds with the
        # client as its only argument.
        # NOTE(review): `tick` and `config` are not defined in this view —
        # presumably module-level; confirm they exist where this class lives.
        # self.scheduler.add_job(tick, 'interval', seconds=config.check_interval, args=[_client,])
        self.scheduler.add_job(tick, 'interval', seconds=config.check_interval, args=[_client,])