Example #1
    def __init__(self):
        '''
        Important

        If you schedule jobs in a persistent job store during your application's
        initialization, you MUST define an explicit ID for the job and use 
        replace_existing=True or you will get a new copy of the job every time your 
        application restarts!
        '''

        print('IOThermostat: Starting Heater scheduler..')

        self.jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
        }

        self.job_defaults = {
            'coalesce': True,
            'max_instances': 1,
            'misfire_grace_time': 15 * 60
        }

        self.scheduler = BackgroundScheduler(jobstores=self.jobstores,
                                             job_defaults=self.job_defaults)
        self.scheduler.start(paused=True)

        print('IOThermostat: Heater scheduler active.')
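As the docstring above warns, jobs persisted in a SQLAlchemy job store survive restarts, so re-adding them naively creates duplicates. A minimal sketch of the idempotent pattern it describes, assuming a hypothetical heater_on callable and an illustrative cron schedule:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore

def heater_on():
    # Hypothetical job callable, for illustration only.
    print('Heater on')

scheduler = BackgroundScheduler(
    jobstores={'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')})
# A stable, explicit id plus replace_existing=True makes the call idempotent:
# on restart the stored copy is overwritten instead of duplicated.
scheduler.add_job(heater_on, 'cron', hour=6, minute=30,
                  id='morning_heater', replace_existing=True)
scheduler.start()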
Example #2
class Config(object):
    PROJECT_ROOT = os.environ.get('PROJECT_ROOT') or os.getcwd()
    SECRET_KEY = os.environ.get('SECRET_KEY')
    MAX_CONTENT_LENGTH = int(os.environ.get('MAX_CONTENT_LENGTH') or 32 * 1024 * 1024)
    LOGFILE = os.environ.get('LOGFILE') or '/var/log/janitor.log'
    LOG_LEVEL = os.environ.get('LOG_LEVEL') or 'INFO'
    CHECK_INTERVAL = int(os.environ.get('CHECK_INTERVAL') or 600)
    POSTS_PER_PAGE = int(os.environ.get('POSTS_PER_PAGE') or 20)
    SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \
        'sqlite:///' + os.path.join(basedir, 'app.db')
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SCHEDULER_API_ENABLED = True
    SCHEDULER_TIMEZONE = 'UTC'
    TZ_PREFIX = os.environ.get('TZ_PREFIX')
    MAIL_USERNAME = os.environ.get('MAIL_USERNAME')
    MAILBOX = os.environ.get('MAILBOX') or 'INBOX'
    SLACK_WEBHOOK_URL = os.environ.get('SLACK_WEBHOOK_URL')
    SLACK_CHANNEL = os.environ.get('SLACK_CHANNEL')
    MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD')
    MAIL_SERVER = os.environ.get('MAIL_SERVER')
    MAIL_CLIENT = os.environ.get('MAIL_CLIENT')
    PROMETHEUS_DIR = os.environ.get('prometheus_multiproc_dir') or os.environ.get('PROMETHEUS_DIR')
    # Uploads
    UPLOADS_DEFAULT_DEST = os.environ.get('UPLOADS_DEFAULT_DEST') or PROJECT_ROOT + '/app/static/circuits/'
    UPLOADED_DOCUMENTS_DEST = os.environ.get('UPLOADED_DOCUMENTS_DEST') or PROJECT_ROOT + '/app/static/circuits/'
    UPLOADED_DOCUMENTS_ALLOW = ('pdf', 'zip', 'gzip', 'tar', 'bz')
    JANITOR_URL = os.environ.get('JANITOR_URL')
Example #3
def setup_scheduler(manager):
    """Configure and start apscheduler"""
    global scheduler
    if logging.getLogger().getEffectiveLevel() > logging.DEBUG:
        logging.getLogger('apscheduler').setLevel(logging.WARNING)
    jobstores = {
        'default':
        SQLAlchemyJobStore(engine=manager.engine, metadata=Base.metadata)
    }
    # If job was meant to run within last day while daemon was shutdown, run it once when continuing
    job_defaults = {'coalesce': True, 'misfire_grace_time': 60 * 60 * 24}
    try:
        timezone = tzlocal.get_localzone()
        if timezone.zone == 'local':
            timezone = None
    except pytz.UnknownTimeZoneError:
        timezone = None
    except struct.error as e:
        # Hiding exception that may occur in tzfile.py seen in entware
        log.warning('Hiding exception from tzlocal: %s', e)
        timezone = None
    if not timezone:
        # The default sqlalchemy jobstore does not work when there isn't a name for the local timezone.
        # Just fall back to utc in this case
        # FlexGet #2741, upstream ticket https://bitbucket.org/agronholm/apscheduler/issue/59
        log.info(
            'Local timezone name could not be determined. Scheduler will display times in UTC for any log '
            'messages. To resolve this, set up /etc/timezone with the correct time zone name.'
        )
        timezone = pytz.utc
    scheduler = BackgroundScheduler(jobstores=jobstores,
                                    job_defaults=job_defaults,
                                    timezone=timezone)
    setup_jobs(manager)
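To make the job_defaults above concrete, here is a hedged illustration; clean_cache is made up, and scheduler is the instance built by setup_scheduler:

def clean_cache():
    # Hypothetical daily job, purely to illustrate coalesce and misfire_grace_time.
    print('cleaning cache')

# Suppose the daemon was down across several 03:00 firings. On startup APScheduler
# finds the missed run times: coalesce=True folds them into a single run, and
# misfire_grace_time=60*60*24 allows that run only while the missed time is less
# than a day old; anything older is skipped.
scheduler.add_job(clean_cache, 'cron', hour=3, id='clean_cache',
                  replace_existing=True)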
Example #4
    def __init__(self):
        self.settings = {
            'jobstore': {
                # Keep the schedule information in a local SQLite store.
                'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
            },

            # TODO: Avoid hardcoding ThreadPoolExecutor and ProcessPoolExecutor
            'executors': {
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },

            'job_defaults': {
                # TODO: Investigate if we need to coalesce missed jobs
                'coalesce': False,
                'max_instances': 1
            },

            'timezone': "Europe/Zurich"
        }

        print("[Schedule Manager] Initializing APScheduler")
        APScheduler.scheduler = BackgroundScheduler(jobstores=self.settings['jobstore'],
                                                    executors=self.settings['executors'],
                                                    job_defaults=self.settings['job_defaults'],
                                                    timezone=self.settings['timezone'])

        print("[Schedule Manager] Listening to all Transfer events")
        APScheduler.scheduler.add_listener(self.event_listener, events.EVENT_ALL)
Example #5
class Config(object):
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url='sqlite:///jobs.db')
    }

    SCHEDULER_API_ENABLED = True
    SCHEDULER_TIMEZONE = 'Europe/London'
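The SCHEDULER_* keys follow the Flask-APScheduler naming convention, so a config object like this is normally handed to the extension rather than read directly. A minimal usage sketch, assuming the flask_apscheduler package is installed:

from flask import Flask
from flask_apscheduler import APScheduler

app = Flask(__name__)
app.config.from_object(Config)  # picks up SCHEDULER_JOBSTORES and friends

scheduler = APScheduler()
scheduler.init_app(app)  # reads the SCHEDULER_* keys from app.config
scheduler.start()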
Example #6
    def __init__(self, db_url, datastore_dir, thread_max, timezone):
        global _scheduler_

        self.timezone = timezone

        url = db_url if db_url else 'sqlite:///{}/scheduler.sqlite'.format(datastore_dir)

        lock = threading.Lock()
        with lock:
            if not _scheduler_:
                jobstores = {
                    'default': SQLAlchemyJobStore(url=url),
                }
                executors = {
                    'default': ThreadPoolExecutor(thread_max),
                }
                job_defaults = {
                    'coalesce': False,
                    'max_instances': 1
                }
                _scheduler_ = BackgroundScheduler(
                    jobstores=jobstores, executors=executors,
                    job_defaults=job_defaults, timezone=timezone
                )
                _scheduler_.start()
Example #7
    def __init__(self, flask_app, gconfig={}, **options):
        if not isinstance(flask_app, Flask):
            raise TypeError('flask_app MUST be an instance of Flask!')
        self.flask_app = flask_app
        options['jobstores'] = {'default': SQLAlchemyJobStore(url=flask_app.config['SQLALCHEMY_DATABASE_URI'])}
        super().__init__(gconfig=gconfig, **options)
        atexit.register(lambda: self.shutdown())
Example #8
    def init_app(self, app):
        app.logger.info("Initialize Advanced Python Scheduler.")

        user_timezone = timezone(config.TIMEZONE)

        # Persistence settings using SQLAlchemy and PostgreSQL.
        jobstores = {
            'default': SQLAlchemyJobStore(url=config.SQLALCHEMY_DATABASE_URI)
        }

        executors = {
            'default': ThreadPoolExecutor(10)
        }

        job_defaults = {'coalesce': False, 'max_instances': 5}

        # Use the BackgroundScheduler.
        scheduler = BackgroundScheduler(jobstores=jobstores,
                                        executors=executors,
                                        job_defaults=job_defaults,
                                        timezone=user_timezone)

        # Start the scheduler.
        scheduler.start()
        self._scheduler = scheduler
Example #9
class Config(object):
    ADMIN = '*****@*****.**'
    BLOGGING_SITENAME = os.environ.get('SITENAME') or 'LibrePatron'
    BLOGGING_SITEURL = os.environ.get('SITEURL') or 'https://example.com'
    BLOGGING_URL_PREFIX = '/updates'
    BLOGGING_BRANDURL = os.environ.get('BRANDURL')
    BLOGGING_TWITTER_USERNAME = os.environ.get('TWITTER')
    BLOGGING_DISQUS_SITENAME = os.environ.get('DISQUS')
    BLOGGING_GOOGLE_ANALYTICS = os.environ.get('GOOGLE_ANALYTICS')
    BLOGGING_PERMISSIONS = True
    BLOGGING_PERMISSIONNAME = 'admin'
    BLOGGING_PLUGINS = None
    BLOGGING_ALLOW_FILE_UPLOAD = True
    BLOGGING_ESCAPE_MARKDOWN = False
    ISSO_CONFIG_PATH = f'/tmp/{os.urandom(16).hex()}'
    COMMENTS_DB_PATH = os.environ.get(
        'COMMENTS_DB_PATH_TEST') or '/var/lib/db/comments.db'
    PREFERRED_URL_SCHEME = 'https'
    SCHEDULER_BASE = datetime.now() + timedelta(minutes=1)
    SCHEDULER_HOUR = SCHEDULER_BASE.hour
    SCHEDULER_MINUTE = SCHEDULER_BASE.minute
    SECRET_KEY = 'a-very-secret-key'
    SECRET_KEY_LOCATION = f'/tmp/{os.urandom(16).hex()}'
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + join(basedir, 'app_test.db')
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    THEME = 'spacelab'
    SERVER_NAME = 'librepatron.com'
    BCRYPT_LOG_ROUNDS = 4
    TESTING = True
    WTF_CSRF_ENABLED = False
Example #10
def start_scheduler(settings):
    assert settings['scheduler.store'] in ('redis', 'sqlalchemy'),\
        'Unknown job store, must be one of redis or sqlalchemy'

    if settings['scheduler.store'] == 'redis':
        jobstores = {
            'default': RedisJobStore(db=settings['scheduler.db'])
        }
    else:
        jobstores = {
            'default': SQLAlchemyJobStore(url=settings['scheduler.url'])
        }
        
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
Example #11
def start_sched():
    sched.configure(jobstores={
        'default': SQLAlchemyJobStore(url=settings.SCHEDULER_DB_URL)
    })
    log.info("Starting scheduler")
    sched.start()
    return sched
Example #12
class EmailScheduler():
    jobstores = {
        'default':
        SQLAlchemyJobStore(
            f'postgresql://192.168.5.172/billtrak?user=dj&password={os.getenv("dbpw")}'
        )
    }
    executors = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        },
        'processpool': ProcessPoolExecutor(max_workers=5)
    }
    job_defaults = {'coalesce': False, 'max_instances': 3}
    scheduler = BackgroundScheduler()
    # Configure at class-definition time, alongside the other class-level statements.
    scheduler.configure(jobstores=jobstores,
                        executors=executors,
                        job_defaults=job_defaults,
                        timezone=utc)

    def getcron(self):
        return cron.CronTrigger()

    def getscheduler(self):
        return self.scheduler
Example #13
    def _build_scheduler(self, default_max_workers):
        jobstores = {
            'memory': MemoryJobStore(),
        }
        jobstores['default'] = jobstores['memory']
        try:
            jobstores['sqlalchemy'] = SQLAlchemyJobStore(
                url=self.config.scheduler.db_uri)
        except AttributeError:
            pass
        executors = {}
        try:
            executors['default'] = ThreadPoolExecutor(
                max_workers=self.config.scheduler.max_workers)
        except AttributeError:
            executors['default'] = ThreadPoolExecutor(
                max_workers=default_max_workers)
        sched = BackgroundScheduler(jobstores=jobstores,
                                    executors=executors,
                                    timezone=pytz.timezone(self.config.tz))
        sched.add_listener(functools.partial(_done_listener, sched),
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
        sched.add_listener(functools.partial(_submitted_listener, sched),
                           events.EVENT_JOB_SUBMITTED)
        sched.add_listener(functools.partial(_modified_listener, sched),
                           events.EVENT_JOB_MODIFIED)
        return sched
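_done_listener, _submitted_listener and _modified_listener are defined elsewhere in that project; a hedged sketch of the shape such a partial-based listener can take (the body is illustrative, not the original's):

def _done_listener(scheduler, event):
    # Bound via functools.partial above, so the scheduler arrives first and the
    # JobExecutionEvent second. event.exception is set for EVENT_JOB_ERROR.
    if event.exception:
        print('job %s raised %r' % (event.job_id, event.exception))
    else:
        print('job %s finished' % event.job_id)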
Example #14
    def run(self):
        self._l.debug('Starting run loop for Nido daemon')
        self.controller = Controller()
        config = Config().get_config()
        poll_interval = config['schedule']['poll_interval']
        db_path = config['schedule']['db']
        rpc_port = config['schedule']['rpc_port']

        self.scheduler = BackgroundScheduler()
        jobstores = {
            'default': {'type': 'memory'},
            'schedule': SQLAlchemyJobStore(
                url='sqlite:///{}'.format(db_path)
            )
        }
        job_defaults = {'coalesce': True, 'misfire_grace_time': 10}
        self.scheduler.configure(jobstores=jobstores,
                                 job_defaults=job_defaults)
        self.scheduler.add_job(
            NidoSchedulerService.wakeup, trigger='interval',
            seconds=poll_interval, name='Poll'
        )
        self.scheduler.add_job(NidoSchedulerService.wakeup, name='Poll')
        self.scheduler.start()

        RPCserver = ThreadedServer(
            NidoSchedulerService(self.scheduler),
            port=rpc_port,
            protocol_config={
                'allow_public_attrs': True,
                'allow_pickle': True
            }
        )
        RPCserver.start()
Example #15
class Config():

    DEBUG = os.environ.get('FLASK_DEBUG')
    SECRET_KEY = os.environ.get('SECRET_KEY')
    HOST_NAME = os.environ.get('MYSQL_HOSTNAME')
    MYSQL_PORT = os.environ.get('MYSQL_PORT')
    MYSQL_DATABASE = os.environ.get('MYSQL_DATABASE')
    MYSQL_USERNAME = os.environ.get('MYSQL_USERNAME')
    MYSQL_PASSWORD = os.environ.get('MYSQL_PASSWORD')
    SQLALCHEMY_DATABASE_URI = f'mysql://{MYSQL_USERNAME}:{MYSQL_PASSWORD}@{HOST_NAME}:{MYSQL_PORT}/{MYSQL_DATABASE}'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_POOL_SIZE = 10000
    SCHEDULER_API_ENABLED = True
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 10
        }
    }
    SCHEDULER_TIMEZONE = 'Asia/Shanghai'
    INQUIRY_EMAIL = ''
    INQUIRY_EMAIL_PW = ''
    INQUIRY_EMAIL_LIST = []
    # Whether to log to the console or to a log file
    LOG_TO_STDOUT = os.environ.get('LOG_TO_STDOUT',
                                   'false').lower() in ['true', 'on', '1']
Example #16
    def __init__(
        self,
        wechaty: Wechaty,
        endpoint: EndPoint,
        scheduler_options: Optional[Union[AsyncIOScheduler,
                                          WechatySchedulerOptions]] = None):
        self._plugins: Dict[str, WechatyPlugin] = OrderedDict()
        self._wechaty: Wechaty = wechaty
        self._plugin_status: Dict[str, PluginStatus] = {}

        self.app: Quart = cors(Quart('Wechaty Server', static_folder=None))

        self.endpoint: Tuple[str, int] = endpoint

        if scheduler_options is None:
            scheduler_options = WechatySchedulerOptions()

        if isinstance(scheduler_options, WechatySchedulerOptions):
            scheduler = AsyncIOScheduler()

            if isinstance(scheduler_options.job_store, str):
                scheduler_options.job_store = SQLAlchemyJobStore(
                    scheduler_options.job_store)

            scheduler.add_jobstore(scheduler_options.job_store,
                                   scheduler_options.job_store_alias)
        else:
            # scheduler_options is already an AsyncIOScheduler instance.
            scheduler = scheduler_options
        self.scheduler: AsyncIOScheduler = scheduler
Example #17
class Config:
    SECRET_KEY = os.environ.get(
        'SECRET_KEY') or 'afafd6a5f65a6f5a65df6a5f6af65daf84df23sfa6d5fa'
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        'DATABASE_URL') or 'mysql+pymysql://root@localhost/issa-challenge'
    SPARKPOST_KEY = os.environ.get('SPARKPOST_KEY')
    SPARKPOST_NOTIFICATION_EMAIL = os.environ.get(
        'SPARKPOST_NOTIFICATION_EMAIL')
    SPARKPOST_CONTACT_EMAIL = os.environ.get('SPARKPOST_CONTACT_EMAIL')

    SCHEDULER_JOBSTORES = {
        'default':
        SQLAlchemyJobStore(
            url=os.environ.get('DATABASE_URL') or
            'postgres://*****:*****@ec2-54-228-229-10.eu-west-1.compute.amazonaws.com:5432/d1mngqi2ppvuv9'
        )
    }

    SCHEDULER_EXECUTORS = {
        'default': {
            'type': 'threadpool',
            'max_workers': 20
        }
    }

    SCHEDULER_API_ENABLED = True

    @staticmethod
    def init_app(app):
        pass
Example #18
class ProdConfig(Config):
    """
    Set Flask configuration vars for production.
    """
    SQLALCHEMY_DATABASE_URI = db_url
    # Apscheduler Config
    SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=db_url)}
Example #19
    def __init__(self, clist_user_name, clist_api_key, mount_point, bot, fallback):
        self.clist_user_name = clist_user_name
        self.clist_api_key = clist_api_key
        self.bot = bot
        self.ong = None
        self.upc = None
        self.mount_point = mount_point
        self.utility = ContestUtility(mount_point)
        self.jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///' + mount_point + 'coders1.db')
        }
        self.schedule = BackgroundScheduler(jobstores=self.jobstores)
        self.schedule.start()
        self.conv_handler = ConversationHandler(
            entry_points=[CommandHandler('upcoming', self.upcoming)],
            allow_reentry=True,
            states={
                SCHED: [CallbackQueryHandler(self.remind, pattern=r"^[0-9]*$")]
            },
            fallbacks=[fallback]
        )
        self.conv_handler1 = ConversationHandler(
            entry_points=[CommandHandler('dontRemindMe', self.removeRemind)],
            allow_reentry=True,
            states={
                REMNOTI: [CallbackQueryHandler(self.remnoti, pattern=r'^.*notiplz.*$')]
            },
            fallbacks=[fallback]
        )
Example #20
class Config(object):
    DEBUG = False
    TESTING = False
    SECRET_KEY = 'operation_yp'
    # Database connection
    SQLALCHEMY_DATABASE_URI = JsonConfig['SQLALCHEMY_DATABASE_URI']
    SQLALCHEMY_BINDS = JsonConfig['SQLALCHEMY_BINDS']
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SQLALCHEMY_ECHO = False

    CACHE_LRU = pylru.lrucache(200)
    CACHE_TIMEOUT = 7200000

    JOBS = []
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=JsonConfig['SQLALCHEMY_DATABASE_URI'])
    }
    SCHEDULER_EXECUTORS = {
        'default': {'type': 'threadpool', 'max_workers': 20}
    }
    SCHEDULER_JOB_DEFAULTS = {
        'coalesce': False,
        'max_instances': 3
    }
    SCHEDULER_API_ENABLED = True

    @staticmethod
    def init_app(app):
        pass
Example #21
def get_scheduler(store_path=None, log_file=None):
    if store_path is None:
        store_path = r'jobstore.sqlite'
    if log_file is None:
        log_file = r'logger.log'
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///{0}'.format(store_path))
    }
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }
    # Build the scheduler in a single step so the Asia/Shanghai timezone is not
    # reset by a later configure() call and job_defaults actually take effect.
    scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors,
                                    job_defaults=job_defaults,
                                    timezone='Asia/Shanghai')
    # Event logging
    scheduler.add_listener(
        lambda event: event_listener(event, scheduler),
        EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_REMOVED
    )
    # Customise the logger
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
Example #22
	def __init__(self):
		self.state = "ASLEEP"
		jobstore = {'default' : SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')}
		self.scheduler = BackgroundScheduler(jobstores=jobstore)
		
		self.scheduler.start()
		self.schedule_brew(datetime.now()+timedelta(minutes=11))
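schedule_brew is not shown in this excerpt; a hedged guess at its shape, using a one-shot 'date' trigger (the brew callable is hypothetical):

	def schedule_brew(self, when):
		# 'date' triggers fire exactly once, at run_date.
		self.scheduler.add_job(self.brew, trigger='date', run_date=when,
		                       id='brew', replace_existing=True)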
Example #23
class Config(object):
    JOBS = []
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url='sqlite:///shebei.db')
    }
    SCHEDULER_EXECUTORS = {'processpool': ProcessPoolExecutor(4)}
    SCHEDULER_API_ENABLED = True
class ProductionConfig(Config):
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'  # '123456' is the password, 'api_test' is the database name
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
    SQLALCHEMY_TRACK_MODIFICATIONS = True
    SQLALCHEMY_POOL_TIMEOUT = 20
Example #25
def beginJobService(rHelper):
    logger.info("Starting up")

    if rHelper.debug:
        dbName = 'jobs.sqlite'
    else:
        dbName = os.path.join(prodconf.DIR_PATH, "jobs.sqlite")

    logger.debug("DBPATH: %s" % dbName)

    jobstores = {'default': SQLAlchemyJobStore(url='sqlite:///%s' % dbName)}

    firstTrigger = CronTrigger(hour=22, minute=5)
    secondTrigger = CronTrigger(hour=10, minute=5)

    sched = BlockingScheduler(jobstores=jobstores)
    sched.add_job(rHelper.postDaily, trigger=firstTrigger)
    sched.add_job(rHelper.postDaily, trigger=secondTrigger)

    for w in weeklies:
        sched.add_job(rHelper.postWeekly, trigger=w.trigger, args=[w])

    try:
        logger.info("Starting blocking scheduler")
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        logger.info("Interrupted, shutting down")
    finally:
        logger.info("Removing all jobs")
        sched.remove_all_jobs()
Example #26
def init_scheduler():
    """初始化调度器"""
    job_sqlite_path = os.path.join(CommonConf.SQLITE_DIR, 'jobs.sqlite')
    # Delete the job database on every startup
    if os.path.exists(job_sqlite_path):
        os.remove(job_sqlite_path)
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///' + job_sqlite_path)  # SQLAlchemyJobStore takes the storage URL
    }
    executors = {
        'default': {'type': 'threadpool', 'max_workers': 10},  # at most 10 worker threads
        'processpool': ProcessPoolExecutor(max_workers=1)  # a single worker process
    }

    scheduler.configure(jobstores=jobstores, executors=executors)

    # scheduler.add_job(ThirdEtcApi.my_job1, trigger='cron', minute="*/2", max_instances=2)
    # scheduler.add_job(ThirdEtcApi.my_job2, trigger='cron', minute="*/5")
    # scheduler.add_job(ThirdEtcApi.download_blacklist_base, trigger='cron', hour='1')
    # scheduler.add_job(ThirdEtcApi.download_blacklist_incre, trigger='cron', hour='*/1')
    scheduler.add_job(ThirdEtcApi.reupload_etc_deduct_from_db, trigger='cron', hour='*/1')
    scheduler.add_job(RsuStatus.monitor_rsu_heartbeat, trigger='cron', second='*/30',
                      kwargs={'callback': ThirdEtcApi.tianxian_heartbeat}, max_instances=2)
    scheduler.add_job(TimingOperateRsu.turn_off_rsu, trigger='cron', hour='0')
    scheduler.add_job(TimingOperateRsu.turn_on_rsu, trigger='cron', hour='5')
    logger.info("启动调度器...")

    scheduler.start()
Example #27
    def __init__(self, main):
        """
        initialise Scheduler with basic configuration

        Parameters
        ----------
        main
            type: bool
            whether the initiated scheduler is the nephos's scheduler or not
        """
        self.main = main
        job_stores = {
            'default': SQLAlchemyJobStore(url='sqlite:///' + PATH_JOB_DB)
        }

        if self.main:
            LOG.debug("Storing scheduler jobs in %s", job_stores["default"])

        executors = {'default': ThreadPoolExecutor(MAX_CONCURRENT_JOBS)}

        if self.main:
            LOG.info("Initialising scheduler with timezone %s", TMZ)
        try:
            self._scheduler = BackgroundScheduler(jobstores=job_stores,
                                                  executors=executors,
                                                  timezone=TMZ)
        # catch if the timezone is not recognised by the scheduler
        except UnknownTimeZoneError:
            LOG.warning("Unknown timezone %s, resetting timezone to 'utc'",
                        TMZ)
            self._scheduler = BackgroundScheduler(jobstores=job_stores,
                                                  executors=executors,
                                                  timezone='utc')
        if self.main:
            LOG.info("Scheduler initialised with database at %s", PATH_JOB_DB)
Example #28
    def __get_apscheduler_settings(self):
        try:
            jobstore_url = "oracle+cx_oracle://{username}:{password}${host}:{port}/{dbname}".format(
                username=self.__config.db_user,
                password=self.__config.db_pwd,
                host=self.__config.db_host,
                port=self.__config.db_port,
                dbname=self.__config.db_name,
            )

            jobstores = {
                "default":
                SQLAlchemyJobStore(url=jobstore_url,
                                   tablename=self.__config.tablename)
            }

            executors = {
                "default": ThreadPoolExecutor(self.__config.max_workers),
                "processpool": ProcessPoolExecutor(2)
            }

            job_defaults = {
                "coalesce": True,
                "max_instances": 10,
                "misfire_grace_time": 30
            }

            timezone = self.__config.timezone

            return jobstores, executors, job_defaults, timezone

        except Exception:
            raise
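A hedged sketch of how the returned tuple would typically be consumed; the calling code is an assumption, not part of the original class:

# Hypothetical caller inside the same class: unpack the settings and build the scheduler.
jobstores, executors, job_defaults, timezone = self.__get_apscheduler_settings()
scheduler = BackgroundScheduler(jobstores=jobstores,
                                executors=executors,
                                job_defaults=job_defaults,
                                timezone=timezone)
scheduler.start()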
Example #29
class DevelopmentConfig(Config):
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        'DEV_DATABASE_URL') or 'sqlite:///' + os.path.join(
            basedir, 'data.sqlite')
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }
Example #30
class DevelopmentConfig(Config):
    # SQLALCHEMY_DATABASE_URI = os.environ.get('DEV_DATABASE_URL') or 'sqlite:///' + os.path.join(basedir, 'data.sqlite')
    SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'
    #SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://root:[email protected]:3306/api_test'
    SCHEDULER_JOBSTORES = {
        'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)
    }