Example #1
    def create_scheduler(self):
        self.jobstores = {
            'mongo': MongoDBJobStore(collection='job1',
                                     database='saasjob',
                                     client=_mongoclient),
            'default': MemoryJobStore()
        }
        self.executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        self.job_defaults = {
            'coalesce': False,
            'misfire_grace_time': 1,
            'max_instances': 1
        }
        self._sched = BackgroundScheduler(jobstores=self.jobstores,
                                          executors=self.executors,
                                          job_defaults=self.job_defaults)
        # Listen for job-submitted events
        self._sched.add_listener(self.when_job_submitted, EVENT_JOB_SUBMITTED)
        # # Listen for job-executed events
        # self._sched.add_listener(self.when_job_executed, EVENT_JOB_EXECUTED)
        # Listen for job completion or failure
        self._sched.add_listener(self.when_job_crashed,
                                 EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
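
A minimal sketch of the listener callbacks that Example #1 registers but does not show; the method names come from the snippet above, the bodies are illustrative assumptions.

    def when_job_submitted(self, event):
        # APScheduler passes a JobSubmissionEvent; job_id identifies the job
        print('job %s submitted' % event.job_id)

    def when_job_crashed(self, event):
        # For EVENT_JOB_EXECUTED | EVENT_JOB_ERROR this is a JobExecutionEvent;
        # .exception is set only when the job raised
        if event.exception:
            print('job %s raised %r' % (event.job_id, event.exception))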
Example #2
def run():
    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(20))
    scheduler.add_jobstore(MemoryJobStore())
    scheduler.add_job(
        process_insights, "interval", minutes=2, max_instances=1, jitter=20
    )
    scheduler.add_job(mark_insights, "interval", minutes=2, max_instances=1, jitter=20)
    scheduler.add_job(save_facet_metrics, "cron", day="*", hour=1, max_instances=1)
    scheduler.add_job(
        download_product_dataset, "cron", day="*", hour="3", max_instances=1
    )
    scheduler.add_job(
        functools.partial(refresh_insights, with_deletion=True),
        "cron",
        day="*",
        hour="4",
        max_instances=1,
    )
    scheduler.add_job(
        generate_insights, "cron", day="*", hour="4", minute=15, max_instances=1
    )
    scheduler.add_job(
        generate_quality_facets,
        "cron",
        day="*",
        hour="5",
        minute=25,
        max_instances=1,
    )
    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)
    scheduler.start()
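
Example #2 registers exception_listener for EVENT_JOB_ERROR without defining it; a minimal sketch of such a listener, assuming a module-level logger (the body is an assumption, only the name comes from the snippet).

import logging

logger = logging.getLogger(__name__)

def exception_listener(event):
    # JobExecutionEvent carries the failed job's id and the raised exception
    logger.error('job %s raised %r', event.job_id, event.exception)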
Example #3
    def __init__(self, pyAAS):
        """Reads the configuration and (re-)starts the scheduler.

        This function reads the XML-based configuration file and
        initializes the scheduler.

        """
        self.pyAAS = pyAAS
        self.f_modules = {}

        # set the defaults for the scheduler, which cannot be changed
        # by configuration files
        # db_path = os.path.join(data_dir, 'jobs.sqlite')
        # db_url = ''.join(['sqlite:///', db_path])
        jobstores = {
            # 'default': SQLAlchemyJobStore(url=db_url)
            'default': MemoryJobStore()
        }
        executors = {
            'default': ThreadPoolExecutor(20),
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}

        # initialize the scheduler
        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults)
        self.triggers = {}
Example #4
    def test_add_jobstore_already_exists(self, scheduler):
        jobstore = MemoryJobStore()
        scheduler.add_jobstore(jobstore)
        exc = pytest.raises(ValueError, scheduler.add_jobstore, jobstore)
        assert str(exc.value) == \
            'This scheduler already has a job store by the alias of "default"'
Example #5
    def __init__(self):

        if CronJobManager.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            self.update_cron_start_time()
            self.db_manager = Database.get_instance()
            self.dmm_ripper = DMMRipper.get_instance(
                CronJobManager.webdriver_config)
            jobstores = {
                # 'alchemy': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite'),
                'default': MemoryJobStore()
            }
            executors = {
                'default': {
                    'type': 'threadpool',
                    'max_workers': 20
                },
                'processpool': ProcessPoolExecutor(max_workers=5)
            }
            job_defaults = {'coalesce': False, 'max_instances': 3}
            self.scheduler = BackgroundScheduler()
            self.scheduler.configure(jobstores=jobstores,
                                     executors=executors,
                                     job_defaults=job_defaults,
                                     timezone=CronJobManager.time_zone,
                                     daemon=False)
            self.scheduler.start()
            CronJobManager.__instance = self
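
The guard in Example #5's __init__ implies a companion accessor that creates the singleton on first use; a plausible sketch (the real accessor is not shown in the snippet and may differ).

    @staticmethod
    def get_instance():
        # Create the singleton on first use; later direct __init__ calls raise
        if CronJobManager.__instance is None:
            CronJobManager()
        return CronJobManager.__instance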
Example #6
    def run(self):
        jobstores = {
            # 'mongo': MongoDBJobStore(),
            # 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
            "memory": MemoryJobStore(),
        }
        executors = {
            'default': ThreadPoolExecutor(5),
            'processpool': ProcessPoolExecutor(2)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}
        scheduler = BackgroundScheduler(jobstores=jobstores,
                                        executors=executors,
                                        job_defaults=job_defaults,
                                        timezone=china_tz)
        scheduler.add_listener(self.listener,
                               EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        #scheduler.add_job(weather.weather_alarm, 'interval', seconds=10*60, id='sign_push_report')
        scheduler.add_job(weather.weather_alarm,
                          'interval',
                          seconds=2,
                          id='sign_weather_alarm')
        scheduler.start()
        return scheduler
Example #7
    def test_remove_jobstore(self, scheduler, scheduler_events):
        scheduler.add_jobstore(MemoryJobStore(), 'foo')
        scheduler.remove_jobstore('foo')

        assert len(scheduler_events) == 2
        assert scheduler_events[1].code == EVENT_JOBSTORE_REMOVED
        assert scheduler_events[1].alias == 'foo'
Example #8
class ProdSettings(BaseSettings):
    # top level
    BASEPATH = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

    # user configurable
    _sqlalchemy_database_uri = 'sqlite:///' + os.path.join(
        BASEPATH, 'elastichq.db')
    _scheduler_api_enabled = True
    _sqlalchemy_track_modifications = False

    # static
    HQ_SITE_URL = 'http://elastichq.org'
    HQ_GH_URL = 'https://github.com/ElasticHQ/elasticsearch-HQ'
    API_VERSION = '3.3.0'
    SERVER_NAME = None

    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        }
    }

    SCHEDULER_JOB_DEFAULTS = {'coalesce': False, 'max_instances': 3}

    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOBSTORES = {'default': MemoryJobStore()}
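
These SCHEDULER_* keys follow the Flask-APScheduler convention; a minimal usage sketch, assuming Flask-APScheduler is installed (the wiring below is not part of the original snippet).

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object(ProdSettings)
scheduler = APScheduler()
scheduler.init_app(app)  # reads the SCHEDULER_* keys from app.config
scheduler.start()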
Example #9
    def test_jobstore_events_are_emitted_to_clients(self, mock_run, mock_emit):
        ui = SchedulerUI(self.scheduler)
        ui.start()

        # Job store addition.
        self.scheduler.add_jobstore(MemoryJobStore(), alias='in_memory')

        mock_emit.assert_called_once()
        self.assertEqual('jobstore_event', mock_emit.call_args[0][0])

        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_added', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)

        # Job store removal.
        mock_emit.reset_mock()
        self.scheduler.remove_jobstore('in_memory')

        mock_emit.assert_called_once()
        emitted_event = mock_emit.call_args[0][1]
        self.assertEqual('in_memory', emitted_event['jobstore_name'])
        self.assertEqual('jobstore_removed', emitted_event['event_name'])
        self.assertIsInstance(
            datetime.strptime(emitted_event['event_ts'],
                              '%Y-%m-%d %H:%M:%S.%f'), datetime)
Example #10
def dealDataScheduleJob(mon_conn):
    # The job queue must be cleared each time the program restarts
    mon_conn.kctest.dealDataJob.remove({})
    jobstores = {
        'mongo':
        MongoDBJobStore(collection='dealDataJob',
                        database='kctest',
                        client=mon_conn),
        'default':
        MemoryJobStore()
    }
    executors = {
        'default': ThreadPoolExecutor(4),
        'processpool': ProcessPoolExecutor(1)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 3  # avoid blocking
    }

    scheduler = BlockingScheduler(jobstores=jobstores,
                                  executors=executors,
                                  job_defaults=job_defaults)
    scheduler.add_job(readWriteBusRunInfo,
                      'interval',
                      seconds=15,
                      jobstore="mongo")
    #scheduler.add_job(distinctBusInfo,'interval',seconds=30, jobstore='mongo')
    # Cron-style scheduled jobs
    #scheduler.add_job(statistics,"cron",hour=23,minute=59,jobstore='mongo')
    #scheduler.add_job(statistics,"interval",seconds=30,jobstore='default')
    scheduler.start()
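
Because the 'mongo' store in Example #10 persists jobs, any callable scheduled into it must be an importable top-level function (APScheduler stores a reference to it rather than the function object); a minimal stand-in for the job used above, which the snippet does not show.

def readWriteBusRunInfo():
    # placeholder body; the real implementation reads and writes bus run info
    print('reading and writing bus run info...')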
Example #11
def run():
    scheduler = BlockingScheduler()
    scheduler.add_executor(ThreadPoolExecutor(20))
    scheduler.add_jobstore(MemoryJobStore())
    scheduler.add_job(process_insights,
                      'interval',
                      minutes=2,
                      max_instances=1,
                      jitter=20)
    scheduler.add_job(mark_insights,
                      'interval',
                      minutes=2,
                      max_instances=1,
                      jitter=20)
    scheduler.add_job(download_product_dataset,
                      'cron',
                      day='*',
                      hour='3',
                      max_instances=1)
    scheduler.add_job(refresh_insights,
                      'cron',
                      day='*',
                      hour='4',
                      max_instances=1)
    scheduler.add_job(generate_insights,
                      'cron',
                      day='*',
                      hour='4',
                      minute=15,
                      max_instances=1)
    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)
    scheduler.start()
Example #12
    def test_print_jobs(self, scheduler, start_scheduler, jobstore):
        scheduler.add_jobstore(MemoryJobStore(), 'other')
        if start_scheduler:
            scheduler.start(paused=True)

        scheduler.add_job(lambda: None, 'date', run_date='2099-09-09', id='job1',
                          name='test job 1')
        scheduler.add_job(lambda: None, 'date', run_date='2099-08-08', id='job2',
                          name='test job 2', jobstore='other')

        outfile = StringIO()
        scheduler.print_jobs(jobstore, outfile)

        if jobstore and not start_scheduler:
            assert outfile.getvalue() == """\
Pending jobs:
    test job 2 (trigger: date[2099-08-08 00:00:00 CET], pending)
"""
        elif jobstore and start_scheduler:
            assert outfile.getvalue() == """\
Jobstore other:
    test job 2 (trigger: date[2099-08-08 00:00:00 CET], next run at: 2099-08-08 00:00:00 CET)
"""
        elif not jobstore and not start_scheduler:
            assert outfile.getvalue() == """\
Pending jobs:
    test job 1 (trigger: date[2099-09-09 00:00:00 CET], pending)
    test job 2 (trigger: date[2099-08-08 00:00:00 CET], pending)
"""
        else:
            assert outfile.getvalue() == """\
Jobstore default:
    test job 1 (trigger: date[2099-09-09 00:00:00 CET], next run at: 2099-09-09 00:00:00 CET)
Jobstore other:
    test job 2 (trigger: date[2099-08-08 00:00:00 CET], next run at: 2099-08-08 00:00:00 CET)
"""
Example #13
    def _build_scheduler(self, default_max_workers):
        jobstores = {
            'memory': MemoryJobStore(),
        }
        jobstores['default'] = jobstores['memory']
        try:
            jobstores['sqlalchemy'] = SQLAlchemyJobStore(
                url=self.config.scheduler.db_uri)
        except AttributeError:
            pass
        executors = {}
        try:
            executors['default'] = ThreadPoolExecutor(
                max_workers=self.config.scheduler.max_workers)
        except AttributeError:
            executors['default'] = ThreadPoolExecutor(
                max_workers=default_max_workers)
        sched = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    timezone=pytz.timezone(self.config.tz))
        sched.add_listener(functools.partial(_done_listener, sched),
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
        sched.add_listener(functools.partial(_submitted_listener, sched),
                           events.EVENT_JOB_SUBMITTED)
        sched.add_listener(functools.partial(_modified_listener, sched),
                           events.EVENT_JOB_MODIFIED)
        return sched
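
Example #13 binds the scheduler into each listener with functools.partial, so the listeners receive the scheduler as their first argument and the event as the second; a minimal sketch of that signature (the bodies of the real listeners are not shown).

def _done_listener(scheduler, event):
    # Called for EVENT_JOB_EXECUTED | EVENT_JOB_ERROR; event.exception is
    # None unless the job raised
    if event.exception:
        print('job %s failed' % event.job_id)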
Example #14
class FlaskConfigScheduler:
    # Scheduled job configuration
    JOBS = [
        # Automatic robot cleanup task
        {
            'id': 'robot_clear_task',  # job id; must be unique
            'func': RobotService.clearRobot,
            'args': None,
            'trigger': 'interval',
            'seconds': 3600  # in seconds; this task runs once per hour
        },
        # Knowledge-base update task
        {
            'id': 'knowledge_update_task',
            'func': call_knowledge_update_task,
            'args': None,
            'trigger': 'interval',
            'seconds': 5 * 60  # in seconds; this task runs every 5 minutes
        }
        # TODO robot model update task
        # , {
        #     'id': 'knowledge_update_task',
        #     'func': call_knowledge_update_task,
        #     'args': None,
        #     'trigger': 'interval',
        #     'seconds': 300  # in seconds; runs every 5 minutes
        # },
        # Custom tasks can be added below
        # ,{
        #     'id': 'scheduler_dev_queueing',
        #     'func': task2,
        #     'args': None,
        #     'trigger': {  # this task runs every Monday at 05:59:40
        #         'type': 'cron',  # trigger type
        #         'day_of_week': "0",  # which days of the week to run
        #         'hour': '5',  # hour
        #         'minute': '59',
        #         'second': '40'  # "*/3" fires every 3 seconds; a bare number fires at that second of each minute
        #     }
        # }
    ]
    # Job store configuration
    SCHEDULER_JOBSTORES = {
        # Jobs are kept in memory by default; a database-backed store can be configured instead
        'default': MemoryJobStore()
    }
    # Thread pool configuration
    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        }
    }
    # Timezone
    SCHEDULER_TIMEZONE = 'Asia/Shanghai'
    SCHEDULER_JOB_DEFAULTS = {'coalesce': False, 'max_instances': 3}
    # Scheduler API switch
    SCHEDULER_API_ENABLED = True
    SCHEDULER_API_PREFIX = '/schedule'
Example #15
    def __init__(self):
        self.jobstores = {
            'mongo': MongoDBJobStore(collection='job1',
                                     database='saasjob',
                                     client=_mongoclient),
            'default': MemoryJobStore()
        }
        self.executors = {
            'default': ThreadPoolExecutor(1),
            'processpool': ProcessPoolExecutor(1)
        }
        self.job_defaults = {
            'coalesce': False,
            'misfire_grace_time': 1,
            'max_instances': 1
        }
        self._sched = BackgroundScheduler(jobstores=self.jobstores,
                                          executors=self.executors,
                                          job_defaults=self.job_defaults)
        # Listen for job-submitted events
        self._sched.add_listener(self.when_job_submitted, EVENT_JOB_SUBMITTED)
        # Listen for job-executed events
        self._sched.add_listener(self.when_job_executed, EVENT_JOB_EXECUTED)
        # Listen for job-error events
        self._sched.add_listener(self.when_job_crashed, EVENT_JOB_ERROR)
        self._jobs = {}
        self._jobhandlers = {}  # key: job id, value: jobhandler
        self._jobs_key = ["name", "func", "args", "kwargs"]
        self.start()
Example #16
def init_db(app):
    print("init db")

    weidancing = get_db_uri(
        app.config['DB_WEIDANCING_HOST'],
        app.config['DB_WEIDANCING_PORT'],
        app.config['DB_WEIDANCING_USER'],
        app.config['DB_WEIDANCING_PWD'],
        app.config['DB_WEIDANCING_DB'],
    )
    print("init db, weidancing, " + weidancing)

    app.config['SQLALCHEMY_DATABASE_URI'] = weidancing
    app.config['SQLALCHEMY_BINDS'] = {
        'weidancing': weidancing,
    }

    app.config['SCHEDULER_JOBSTORES'] = {'default': MemoryJobStore()}

    if 'DB_SD_BASKETBALL_SOURCE_POOL_SIZE' in app.config:
        app.config['SQLALCHEMY_POOL_SIZE'] = app.config[
            'DB_SD_BASKETBALL_SOURCE_POOL_SIZE']
    else:
        app.config['SQLALCHEMY_POOL_SIZE'] = 300
        app.config['SQLALCHEMY_MAX_OVERFLOW'] = 600
    app.config['SQLALCHEMY_POOL_RECYCLE'] = 10
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    db.init_app(app)
Example #17
    def init_scheduler(self):
        store = FreeNASJobStore()
        self.scheduler = BackgroundScheduler(
            jobstores={'default': store, 'temp': MemoryJobStore()},
            timezone=pytz.utc)
        self.scheduler.start()
Example #18
    def test_adding_a_jobstore_adds_all_jobs_in_it(self, mock_notify_jobstore_event, mock_notify_job_event, _):
        watcher = SchedulerWatcher(self.scheduler)

        jobstore = MemoryJobStore()

        jobstore.add_job(Job(scheduler=self.scheduler, id='job_1', next_run_time=datetime.now() + timedelta(days=1)))
        jobstore.add_job(Job(scheduler=self.scheduler, id='job_2', next_run_time=datetime.now() + timedelta(days=2)))

        self.assertEqual(0, len(watcher.jobs))

        self.scheduler.add_jobstore(jobstore, alias='in_memory_2')

        self.assertIn('in_memory_2', watcher.jobstores, 'Watcher should have the new jobstore tracked')
        self.assertEqual(2, len(watcher.jobs), 'Watcher should add all jobs in the newly added jobstore')
        self.assertTrue(all([job_id in watcher.jobs for job_id in ['job_1', 'job_2']]))
        self.assertEqual(2, mock_notify_job_event.call_count)
        mock_notify_jobstore_event.assert_called_once()
Example #19
    def test_add_jobstore_already_exists(self, scheduler):
        """
        Test that ValueError is raised when a job store is added with an alias that already exists.

        """
        jobstore = MemoryJobStore()
        scheduler.add_jobstore(jobstore)
        exc = pytest.raises(ValueError, scheduler.add_jobstore, jobstore)
        assert str(exc.value) == 'This scheduler already has a job store by the alias of "default"'
Example #20
    def init_schedule(self):
        # schedule
        jobstores = {'default': MemoryJobStore()}
        executors = {'default': ThreadPoolExecutor(20)}
        job_defaults = {'coalesce': False, 'max_instances': 1}

        self.scheduler = BackgroundScheduler(jobstores=jobstores,
                                             executors=executors,
                                             job_defaults=job_defaults)
Example #21
def make_default_scheduler():
    if config.scheduler_job_store == 'redis':
        jobstore = RedisJobStore(host=config.redis_host, port=config.redis_port)
    elif config.scheduler_job_store == 'memory':
        jobstore = MemoryJobStore()
    else:
        raise ValueError("unknown or unsupported job store type '{}'".format(config.scheduler_job_store))
    return BackgroundScheduler(executors={'default': ThreadPoolExecutor(config.worker_pool_size)},
                               jobstores={'default': jobstore},
                               job_defaults={'misfire_grace_time': config.update_frequency})
Example #22
class Scheduler:
    jobstores = {
        'default': MemoryJobStore(),
        'default_test': MemoryJobStore(),
    }
    executors = {
        'default': ThreadPoolExecutor(200),
        'processpool': ProcessPoolExecutor(10),
    }
    job_defaults = {
        'coalesce': True,
        'max_instances': 1,
        'misfire_grace_time': 60,
    }

    @staticmethod
    def get_sched():
        # Wire the class-level configuration into the scheduler instance
        scheduler = BackgroundScheduler(jobstores=Scheduler.jobstores,
                                        executors=Scheduler.executors,
                                        job_defaults=Scheduler.job_defaults)
        return scheduler
Example #23
    def test_add_jobstore(self, scheduler, stopped):
        scheduler._stopped = stopped
        jobstore = MemoryJobStore()
        jobstore.start = MagicMock()
        scheduler._real_add_job = MagicMock()
        scheduler._dispatch_event = MagicMock()
        scheduler.wakeup = MagicMock()
        scheduler.add_jobstore(jobstore)

        assert scheduler._dispatch_event.call_count == 1
        event = scheduler._dispatch_event.call_args[0][0]
        assert event.code == EVENT_JOBSTORE_ADDED
        assert event.alias == "default"
        if stopped:
            assert jobstore.start.call_count == 0
            assert scheduler.wakeup.call_count == 0
        else:
            scheduler.wakeup.assert_called_once_with()
            jobstore.start.assert_called_once_with(scheduler, "default")
Example #24
    def test_remove_jobstore(self, scheduler):
        scheduler.add_jobstore(MemoryJobStore(), 'foo')
        scheduler._dispatch_event = MagicMock()
        scheduler.remove_jobstore('foo')

        assert scheduler._jobstores == {}
        assert scheduler._dispatch_event.call_count == 1
        event = scheduler._dispatch_event.call_args[0][0]
        assert event.code == EVENT_JOBSTORE_REMOVED
        assert event.alias == 'foo'
Example #25
    def test_add_jobstore(self, scheduler, stopped):
        scheduler._stopped = stopped
        jobstore = MemoryJobStore()
        jobstore.start = MagicMock()
        scheduler._real_add_job = MagicMock()
        scheduler._dispatch_event = MagicMock()
        scheduler.wakeup = MagicMock()
        scheduler.add_jobstore(jobstore)

        assert scheduler._dispatch_event.call_count == 1
        event = scheduler._dispatch_event.call_args[0][0]
        assert event.code == EVENT_JOBSTORE_ADDED
        assert event.alias == 'default'
        if stopped:
            assert jobstore.start.call_count == 0
            assert scheduler.wakeup.call_count == 0
        else:
            scheduler.wakeup.assert_called_once_with()
            jobstore.start.assert_called_once_with(scheduler, 'default')
Example #26
    def __init__(self, bot: CustomBot, database: SchedulerDatabase):
        self.bot = bot
        self.database = database
        self.scheduler = BlockingScheduler()
        self.scheduler.add_jobstore(MemoryJobStore(), alias='scheduled')
        self.logger = getLogger(__name__)
        self.actions = [
            ['send_and_delete_message', self.send_and_delete_message],
            ['open_link_with_delay', self.open_link_with_delay]
        ]
Example #27
class Config(object):
    JOBS = [{
        'id': 'periodic_speedtest',
        'func': run_speedtest,
        'trigger': 'interval',
        'seconds': TEST_INTERVAL
    }]

    SCHEDULER_JOBSTORES = {'default': MemoryJobStore()}

    SCHEDULER_API_ENABLED = False
Example #28
def main():
    args = parse_args()

    print(args)

    logfile = path.expanduser(args.logfile)
    if not path.exists(path.dirname(logfile)):
        os.makedirs(path.dirname(logfile))

    root_logger = logging.getLogger()
    formatter = logging.Formatter(
        "%(asctime)s:%(levelname)s:%(name)s:%(message)s")
    handler = logging.handlers.RotatingFileHandler(logfile,
                                                   maxBytes=args.logsize,
                                                   backupCount=args.logcount)
    handler.setFormatter(formatter)
    root_logger.addHandler(handler)
    root_logger.setLevel(args.loglevel)

    jobstores = {'memory': MemoryJobStore()}
    executors = {
        'default': ProcessPoolExecutor(args.processes),
        'threadpool': ThreadPoolExecutor(args.threads)
    }
    job_defaults = {'max_instances': 10000}
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    job_defaults=job_defaults)
    # Add jobs here
    for x in range(1, 10000):
        interval = random.randint(30, 120)
        scheduler.add_job(handle_job,
                          'interval',
                          seconds=interval,
                          kwargs={
                              'id': str(x),
                              'interval': str(interval)
                          })

    print("\nStarting Scheduler...")

    scheduler.start()

    while True:
        time.sleep(1)

    print("Scheduleder started")

    print("Shutting down... please wait!")

    scheduler.shutdown()
    logging.shutdown()
Example #29
def create_scheduler():
    new_scheduler = AsyncIOScheduler(
        jobstores={"default": MemoryJobStore()},
        executors={"default": AsyncIOExecutor()},
        job_defaults={
            "coalesce": False,
            "max_instances": 3
        },
        timezone=utc,
    )
    return new_scheduler
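
AsyncIOScheduler only dispatches jobs while an asyncio event loop is running, so the factory's caller still has to start one; a minimal usage sketch in the style of the APScheduler asyncio examples (the surrounding program is an assumption).

import asyncio

scheduler = create_scheduler()
scheduler.start()
try:
    asyncio.get_event_loop().run_forever()
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()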
Example #30
    def __init__(self):
        if self.is_api_caller(caller=inspect.currentframe()):
            sqlalchemy_url = cnaas_nms.db.session.get_sqlalchemy_conn_str()
            jobstores = {'default': SQLAlchemyJobStore(url=sqlalchemy_url)}
        else:
            jobstores = {'default': MemoryJobStore()}
        self._scheduler = BackgroundScheduler(
            executors={'default': ThreadPoolExecutor(10)},
            jobstores=jobstores,
            job_defaults={},
            timezone=utc)
Example #31
    def __init__(self, guild_id, config, bot):
        self.guild_id = guild_id
        self.config = config
        self.bot = bot

        self.jobstores = {
            "maintenance": MemoryJobStore(),
            "pings": MemoryJobStore()
        }

        if self.config['doc_key']:
            self._init_sheet(self.config['doc_key'])
            bot.ping_scheduler.setup_guild(self)
        else:
            self.sheet_handler = None
            self.players = None
            self.week_schedule = None
            self.valid_activities = None

        self.scanning = False