Exemplo n.º 1
0
def InitConfig():
    """Start the Aliyun worker, run InitModel once a minute, and block on the IOLoop."""
    aliyun = AliyunClient()
    aliyun.worker.start()
    # Refresh the model every 60 seconds; fixed id allows later lookup/removal.
    scheduler = TornadoScheduler()
    scheduler.add_job(InitModel, 'interval', seconds=60, id="1")
    scheduler.start()
    # Hand control to Tornado; this call does not return.
    ioloop.IOLoop.instance().start()
Exemplo n.º 2
0
    def __init__(self):
        """Record today's date/weekday and schedule the daily refresh plus
        one start job per trading session.

        Replaces five copy-pasted ``add_job`` calls with a loop over the
        session indices; the scheduled jobs are identical to the originals.
        """
        # Today's date and weekday number (%w: 0 = Sunday).
        self.date = time.strftime("%Y-%m-%d", time.localtime())
        self.weekday = time.strftime("%w", time.localtime())
        # Refresh daily state at midnight.
        scheduler = TornadoScheduler()
        scheduler.add_job(self._daytask, 'cron', hour='0')

        # One cron job per session; hour/minute come from the session_time
        # table, and the session number is passed as the job's argument.
        for session_no in (1, 3, 5, 7, 9):
            scheduler.add_job(self.session_start,
                              'cron', (session_no, ),
                              hour=session_time["start"][session_no][0],
                              minute=session_time["start"][session_no][1])
        scheduler.start()
Exemplo n.º 3
0
    def __init__(self, config=None, syncobj=None):
        """Create the scheduler, task queue, and periodic maintenance callbacks.

        :param config: optional Config; a default one is built when omitted
        :param syncobj: optional cluster-sync object whose add/remove hooks
                        are wired to this instance's handlers
        """
        if config is None:
            config = Config()
        self.config = config
        # Thread pool for ordinary jobs, process pool for CPU-heavy ones.
        pools = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        self.scheduler = TornadoScheduler(executors=pools)
        self.task_queue = Queue()
        self.poll_task_queue_callback = None
        self.pool_task_queue_interval = 10
        self.ioloop = IOLoop.instance()
        # Poll the task queue every pool_task_queue_interval seconds
        # (PeriodicCallback takes milliseconds).
        self.poll_task_queue_callback = PeriodicCallback(
            self.poll_task_queue, self.pool_task_queue_interval * 1000)
        # Housekeeping: clear finished jobs each minute, reset timed-out
        # jobs every ten seconds.
        self.clear_finished_jobs_callback = PeriodicCallback(
            self.clear_finished_jobs, 60 * 1000)
        self.reset_timeout_job_callback = PeriodicCallback(
            self.reset_timeout_job, 10 * 1000)

        self.sync_obj = syncobj
        if syncobj is not None:
            # Mirror cluster-level schedule changes into this scheduler.
            self.sync_obj.set_on_remove_schedule_job(
                self.on_cluster_remove_scheduling_job)
            self.sync_obj.set_on_add_schedule_job(
                self.on_cluster_add_scheduling_job)
Exemplo n.º 4
0
class ChatConnection(sockjs.tornado.SockJSConnection):
    """Chat connection implementation"""

    # Shared across every connection (class-level state).
    participants = set()
    scheduler = TornadoScheduler()

    def on_open(self, info):
        """Register the client, push one update, and start the shared timer once."""
        self.participants.add(self)
        self.on_tick()

        # Only the first connection starts the scheduler and the 10s job.
        if not self.scheduler.running:
            self.scheduler.start()
            self.scheduler.add_job(self.on_tick, 'interval', seconds=10)

    def on_tick(self):
        """Poll the local Docker HTTP API and broadcast images/containers to everyone."""
        payload = {
            'images': requests.get("http://localhost:4243/images/json").json(),
            'containers': requests.get("http://localhost:4243/containers/json").json(),
        }
        self.broadcast(self.participants, json.dumps(payload))

    def on_close(self):
        """Drop the client from the shared participant set."""
        self.participants.remove(self)
Exemplo n.º 5
0
    def __init__(self):
        """Configure the APScheduler-backed Tornado application (Shanghai time)."""
        # Mongo-backed job store; thread pool for normal jobs, process
        # pool for heavy ones; never more than one instance per job.
        self.scheduler = TornadoScheduler(
            jobstores={'mongo': CustomStore(host='localhost', port=27017)},
            executors={
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            },
            job_defaults={'coalesce': False, 'max_instances': 1},
            timezone=pytz.timezone('Asia/Shanghai'))
        # One listener observes success, error, and missed-run events.
        self.scheduler.add_listener(
            self.schedulerListener,
            EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)

        base_dir = os.path.dirname(__file__)
        settings = dict(template_path=os.path.join(base_dir, "view"),
                        static_path=os.path.join(base_dir, "static"),
                        debug=False)

        conn = MongoClient("localhost", 27017)
        self.db = conn['orderMonitor']
        self.prepareJobResultStore()
        tornado.web.Application.__init__(self, handlers, **settings)
Exemplo n.º 6
0
def main(port, host):
    """Start the web app plus the recurring protection-maintenance jobs.

    NOTE(review): ``host`` is accepted but never used — the server always
    binds to 0.0.0.0; confirm whether that is intentional.
    """
    app = Application()
    app.listen(port, "0.0.0.0")
    if platform.system() == 'Linux':
        get_server_info()

    scheduler = TornadoScheduler()
    # Fast-cycle maintenance every 10 seconds.
    scheduler.add_job(modify_ip_protectgroup.modify_ip_protectgroup,
                      'interval', seconds=10)
    scheduler.add_job(modify_ip_protectgroup.modify_package,
                      'interval', seconds=10)
    # Daily midnight check of protection-package due times.
    scheduler.add_job(CheckProtectPackageDueTime.CheckProtectPackageDueTime,
                      'cron', hour=0, minute=0)
    # Whitelist cleanup every 20 seconds.
    scheduler.add_job(delete_ip_white_list.delete_ip_white_list,
                      'interval', seconds=20)
    # Blocklist log jobs (getblocklist.block_*_log) are intentionally
    # disabled in this build.
    scheduler.start()
    tornado.ioloop.IOLoop.instance().start()
Exemplo n.º 7
0
def get_mongo_job_stores():
    """Build (but do not start) a TornadoScheduler with two MongoDB job stores."""
    from pytz import utc
    from apscheduler.jobstores.mongodb import MongoDBJobStore, MongoClient
    from apscheduler.executors.pool import ProcessPoolExecutor
    from ops.apscheduler.db_context import get_mongo_client

    client = get_mongo_client()

    scheduler = TornadoScheduler()
    # Two stores share one Mongo client but use separate databases;
    # job times are kept in UTC.
    scheduler.configure(
        jobstores={
            'mongo': MongoDBJobStore(collection='job',
                                     database='apscheduler',
                                     client=client),
            'default': MongoDBJobStore(collection='job',
                                       database='apscheduler2',
                                       client=client),
        },
        executors={
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5),
        },
        job_defaults={'coalesce': False, 'max_instances': 3},
        timezone=utc)

    return scheduler
Exemplo n.º 8
0
    def __init__(self):
        """Load global data and schedule the nightly maintenance task at 00:30."""
        g.load_global_data()
        if not g.init_success:
            # Message kept verbatim ("failed to load global_data on init").
            raise Exception('初始化加载 global_data 失败!')

        sched = TornadoScheduler()
        # Every day of the week at 00:30.
        sched.add_job(scheduler_service.do_every_day, 'cron',
                      day_of_week='0-6', hour=0, minute=30)
        sched.start()
Exemplo n.º 9
0
 def run(self):
     """Schedule the daily ACBA sentence sync (16:43) and start the scheduler."""
     sched = TornadoScheduler()
     # The DB session factory is handed to the job as its only argument.
     sched.add_job(sync_acba_sentence, "cron",
                   hour='16', minute='43',
                   args=[self.Session])
     sched.start()
Exemplo n.º 10
0
def myjob():
    """Schedule the evening (20:00) and morning (07:00) broadcast jobs."""
    sched = TornadoScheduler()
    # Both are daily cron jobs calling zero-argument functions; cron also
    # supports day_of_week ranges ('mon-wed') and an end_date cutoff.
    sched.add_job(night, 'cron', hour=20, minute=0, second=0)
    sched.add_job(moring, 'cron', hour=7, minute=0, second=0)
    sched.start()
Exemplo n.º 11
0
def main():
    """Run the Tornado application with an hourly log_theta job."""
    sched = TornadoScheduler()
    # Log theta once per hour.
    sched.add_job(log_theta, 'interval', minutes=60)
    sched.start()
    application.listen(settings["listen.port"])
    tornado.ioloop.IOLoop.instance().start()
 def __init__(self, redis_args, channel_name, postgres_url):
     """Store Redis connection details and build a Postgres-backed scheduler.

     :param redis_args: connection arguments for Redis (kept for later use)
     :param channel_name: pub/sub channel name (kept for later use)
     :param postgres_url: SQLAlchemy URL for the job-store database
     """
     self.redis_args = redis_args
     self.channel_name = channel_name
     # Persist jobs in Postgres via SQLAlchemy so they survive restarts.
     self.scheduler = TornadoScheduler({
         'apscheduler.jobstores.default': {
             'type': 'sqlalchemy',
             'url': postgres_url
         }
     })
Exemplo n.º 13
0
def interval_event():
    """Refresh the STS token every 3000 seconds."""
    manager = StsTokenManager()
    sched = TornadoScheduler()
    sched.add_job(manager.createStsToken, 'interval', seconds=3000)
    # Template-message push jobs (task2.pushTemplateMessage at 09:30,
    # 11:30, 13:30, 17:30) are intentionally disabled here.
    sched.start()
Exemplo n.º 14
0
 def __init__(self, job_type, store_executor_alias, process_count):
     """Wire up the scheduler, Mongo job store, and process pool.

     :param job_type: label describing the kind of jobs this instance runs
     :param store_executor_alias: executor alias to associate with the store
     :param process_count: worker count for the process pool
     """
     self.sche = TornadoScheduler()
     self.host = MONGO_CONFIG.get('host')
     self.mongo_client = MongoClient(self.host)
     self.job_type = job_type
     self.mongo_job_store = MongoDBJobStore(collection='job',
                                            database=DBNAME,
                                            client=self.mongo_client)
     # Correctly spelled attribute added; the misspelled
     # `store_executor_alise` is kept for backward compatibility with
     # any existing readers.
     self.store_executor_alias = store_executor_alias
     self.store_executor_alise = store_executor_alias
     self.process_poll = ProcessPoolExecutor(process_count)
Exemplo n.º 15
0
 async def __setup_scheduler(self):
     """Schedule a daily run_checks job for each configured server, then start."""
     self.scheduler = TornadoScheduler()
     for srv in self.config.get("servers", []):
         # First run roughly 30s from now, then every 24h with up to
         # 300s of jitter to spread load.
         self.scheduler.add_job(run_checks,
                                'interval', [self.db_pool, srv],
                                name=f"check_{srv.get('name')}",
                                next_run_time=datetime_in_n_seconds(30),
                                hours=24,
                                jitter=300)
     self.scheduler.start()
Exemplo n.º 16
0
def _setup_scheduler():
    """Assemble the Beer Garden TornadoScheduler (caller starts it)."""
    # UTC keeps persisted job times unambiguous across hosts.
    return TornadoScheduler(
        jobstores={"beer_garden": BGJobStore()},
        executors={
            "default": APThreadPoolExecutor(config.scheduler.max_workers)
        },
        job_defaults=config.scheduler.job_defaults.to_dict(),
        timezone=utc,
    )
Exemplo n.º 17
0
 def __init__(self, name="QUEUE"):
     """Create an idle queue with a UTC-configured scheduler.

     :param name: human-readable queue name
     """
     super(SRQueue, self).__init__()
     self.name = name
     # Scheduler pinned to UTC so job times are timezone-independent.
     self.scheduler = TornadoScheduler({'apscheduler.timezone': 'UTC'})
     # Pending items ordered by priority; finished results flow back
     # through _result_queue.
     self.queue = PriorityQueue()
     self._result_queue = Queue()
     self._queue_items = []
     self.processing = []
     # NOTE(review): exact semantics of min_priority depend on
     # SRQueuePriorities (not visible here) — confirm before relying on it.
     self.min_priority = SRQueuePriorities.EXTREME
     self.amActive = False
     self.stop = False
Exemplo n.º 18
0
    def __init__(self, debug=False):
        """Configure logging, register all HTTP routes, and start the NRL sync job.

        :param debug: enables Tornado debug mode and console DEBUG logging
        """
        TORNADO_SETTINGS['debug'] = debug
        if debug:
            LOGGING_CONSOLE_CONFIG['level'] = logging.DEBUG
        dictConfig(LOGGING_CONFIG)

        # Route table: XML grid/import-export, XML builder (tree, attrs,
        # validation, epochs, templates), NRL lookups, wizard helpers,
        # help pages, and config/attribute endpoints.
        tornado.web.Application.__init__(self, [
            (r"/", common.HomeHandler),
            (r"/api/xml/(?P<db_id>[\d\_]+)?/*", xml_list.XmlGridHandler),
            (r"/api/xml/ie/(?P<db_id>[\d\_]+)?", xml_list.XmlImpExpHandler),
            (r"/api/xml/validate/(?P<xml_id>[\d\_]+)?/",
             xml_bldr.XmlValidationHandler),
            (r"/api/xml/tree/(?P<xml_id>[\d\_]+)/(?P<node_id>[\d\_]+)?",
             xml_bldr.XmlNodeHandler),
            (r"/api/xml/attr/(?P<db_id>[\d\_]+)?",
             xml_bldr.XmlNodeAttrHandler),
            (r"/api/xml/attr/available/(?P<node_id>[\d\_]+)/?",
             xml_bldr.XmlNodeAvailableAttrHandler),
            (r"/api/xml/attr/validate/", xml_bldr.XmlNodeAttrValidateHandler),
            (r"/api/xml/epoch/(?P<xml_id>[\d\_]+)/*",
             xml_bldr.XmlEpochHandler),
            (r"/api/xml/template/(?P<node_id>[\d\_]+)?/*",
             xml_tpl.XmlTemplateHandler),
            (r"/api/nrl/sensors/(?P<key>[^/]+)?",
             xml_nrl.XmlNrlSensorsHandler),
            (r"/api/nrl/sensor/response/", xml_nrl.XmlNrlSensorRespHandler),
            (r"/api/nrl/dataloggers/(?P<key>[^/]+)?",
             xml_nrl.XmlDataloggersHandler),
            (r"/api/nrl/datalogger/response/",
             xml_nrl.XmlDataloggerRespHandler),
            (r"/api/nrl/channel/response/preview/",
             xml_nrl.XmlChannelRespHandler),
            (r"/api/wizard/channel/(?P<station_node_id>[\d\_]+)?/*",
             wizard.CreateChannelHandler),
            (r"/api/wizard/guess/code/", wizard.CreateGuessCodeHandler),
            (r"/api/help/(?P<key>[\w\_]+)?/*", common.HelpHandler, None,
             'HelpHandler'),
            (r"/api/cfg/(?P<db_id>[\d\_]+)?/*", config.ConfigHandler),
            (r"/api/attr/(?P<node_id>[\d\_]+)?/*", config.AttributeHandler),
        ],
                                         default_handler_class=ErrorHandler,
                                         **TORNADO_SETTINGS)

        self.scheduler = TornadoScheduler()
        self.scheduler.start()
        # start sync nrl job
        # Fire once ~10s after startup, then on the NRL_CRON schedule.
        trigger = OrTrigger([
            DateTrigger(run_date=datetime.now() + timedelta(seconds=10)),
            CronTrigger(**NRL_CRON)
        ])

        self.nrl_sync_job = self.scheduler.add_job(self.sync_nrl, trigger)

        ProcessMixin.__init__(self)
Exemplo n.º 19
0
def TornadoScheduler_test():
    """Demo: run tick every 3s and tick1 every 1s until interrupted."""
    scheduler = TornadoScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.add_job(tick1, 'interval', id='1', seconds=1)
    scheduler.start()

    # Windows uses Ctrl+Break, everything else Ctrl+C.
    exit_key = 'Break' if os.name == 'nt' else 'C'
    print(f'Press Ctrl+{exit_key} to exit')

    try:
        IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 20
0
def _setup_scheduler():
    """Assemble the TornadoScheduler: Beer Garden store, Tornado executor, defaults."""
    # TODO: Look at creating a custom executor using process pools
    stores = {'beer_garden': BGJobStore()}
    workers = {'default': TornadoExecutor(config.scheduler.max_workers)}
    defaults = config.scheduler.job_defaults.to_dict()

    return TornadoScheduler(jobstores=stores,
                            executors=workers,
                            job_defaults=defaults,
                            timezone=utc)
Exemplo n.º 21
0
Arquivo: waker.py Projeto: 106-/waker
 def __new__(self):
     """Return the process-wide singleton, creating it on first call.

     Fixes from the original: the instance check is re-done inside the
     lock (the unsynchronized check-then-create was racy), and the lock
     is held via ``with`` so it is released even if creation raises
     (the original leaked the lock on any exception between acquire
     and release).
     """
     if not self._instance:
         with self._lock:
             # Another thread may have built the instance while we waited.
             if not self._instance:
                 self._instance = super().__new__(self)
                 self.scheduler = TornadoScheduler()
                 # Persist jobs in Redis so alarms survive restarts.
                 self.scheduler.add_jobstore('redis',
                                             host=REDIS_HOST,
                                             port=REDIS_PORT)
                 self.scheduler.start()
                 pygame.init()
     return self._instance
Exemplo n.º 22
0
    def excute_job(self):
        """Register one daily job per argument tuple and block on the IOLoop."""
        sched = TornadoScheduler()
        for job_args in self.args:
            # Each job first fires at self.timer, then repeats every day.
            sched.add_job(func=self.job,
                          args=job_args,
                          trigger=IntervalTrigger(start_date=self.timer,
                                                  days=1))
        sched.start()

        try:
            tornado.ioloop.IOLoop.current().start()
        except (KeyboardInterrupt, SystemExit):
            # Stop the scheduler cleanly on interrupt.
            sched.shutdown()
Exemplo n.º 23
0
def main():
    """Verify the database, run the test job every 59 seconds, and serve HTTP."""
    verifyDatabase()
    sched = TornadoScheduler()
    # Cron second-field '*/59' fires at seconds 0 and 59 of each minute.
    sched.add_job(runTests, CronTrigger(second='*/59'))
    sched.start()

    server = tornado.httpserver.HTTPServer(Application())
    server.listen(RFBGATEWAYPORT)

    exit_key = 'Break' if os.name == 'nt' else 'C'
    print(f'Press Ctrl+{exit_key} to exit')
    try:
        tornado.ioloop.IOLoop.instance().start()
    except (KeyboardInterrupt, SystemExit):
        pass
Exemplo n.º 24
0
def main():
    """Start the Mongo-backed scheduler, then the Tornado server."""
    # Running APScheduler
    scheduler = TornadoScheduler()
    scheduler.add_jobstore('mongodb', collection='example_jobs')
    # Drop any jobs persisted by a previous run before re-adding.
    scheduler.remove_all_jobs()
    # Three identical interval jobs — reproduced from the original;
    # presumably a demo of multiple concurrent job instances.
    for _ in range(3):
        scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    # Running server
    app = TornadoApplication()
    app.listen(options.port)
    tornado.ioloop.IOLoop.current().start()
Exemplo n.º 25
0
def init_scheduler(database='qascheduler', collection='jobs'):
    """Create, start, and return the module-global QAScheduler instance.

    :param database: Mongo database holding persisted jobs
    :param collection: collection name within that database
    """
    global scheduler
    mongo = pymongo.MongoClient(host=mongo_ip, port=mongo_port)
    scheduler = TornadoScheduler(jobstores={
        'default': MongoDBJobStore(database=database,
                                   collection=collection,
                                   client=mongo)
    })
    scheduler.start()
    print('[QAScheduler Init]Scheduler has been started')
    return scheduler
Exemplo n.º 26
0
 def __init__(self, with_celery, model):
     """Wire up Mongo, Redis, and a started APScheduler for the application.

     :param with_celery: flag stored for handlers to choose the task backend
     :param model: model object shared with request handlers
     """
     tornado.web.Application.__init__(self, urls, **settings)
     # Async Mongo client over a unix domain socket.
     self.mongo_client = motor.motor_tornado.MotorClient(
         '/usr/local/var/run/mongodb/mongodb-27017.sock')
     self.db = self.mongo_client.ubernow
     # NOTE(review): '/usr/loca/...' looks like a typo for '/usr/local/...'
     # — confirm against the deployment before changing this path.
     self.redis = redis.StrictRedis(
         unix_socket_path='/usr/loca/var/run/redis/redis.sock')
     # Scheduler configured from module-level constants; started immediately.
     self.scheduler = TornadoScheduler(jobstores=JOBSTORES,
                                       executors=EXECUTORS,
                                       job_defaults=JOB_DEFAULTS,
                                       timezone=utc)
     self.scheduler.start()
     self.model = model
     self.with_celery = with_celery
Exemplo n.º 27
0
def get_scheduler():
    """Return a SQLite-backed TornadoScheduler (caller starts it)."""
    # Jobs persist in magicstack.db under BASE_DIR; five worker threads;
    # up to three concurrent instances per job, no coalescing.
    store_url = 'sqlite:///{0}/magicstack.db'.format(BASE_DIR)
    return TornadoScheduler(
        jobstores={'default': SQLAlchemyJobStore(url=store_url)},
        executors={'default': ThreadPoolExecutor(5)},
        job_defaults={'coalesce': False, 'max_instances': 3})
Exemplo n.º 28
0
def start_schedule():
    """Start the global scheduler exactly once and hydrate it with jobs."""
    global schedule
    # Guard clause: do nothing if another caller already started it.
    if schedule is not None:
        logger.info("Schedule was already running")
        return

    logger.info("Launching scheduler")
    schedule = TornadoScheduler()
    schedule.start()
    logger.info("Hydrating schedule with surveys")
    load_persisted_tasks()
    logger.info(
        "Preparing maintenance jobs for updating schedule (adding and removing)"
    )
    # Keep the schedule in sync with external task definitions.
    schedule.add_job(check_for_new_tasks, 'interval', minutes=5)
    schedule.add_job(check_for_removed_tasks, 'interval', minutes=30)
Exemplo n.º 29
0
 def __init__(self):
     """Create the app: one /tasks route plus a running, Mongo-backed scheduler."""
     routes = [
         ("/tasks", TaskHandler),
     ]
     # Coalesce missed runs, allow up to 5 concurrent instances, tolerate
     # 120s of misfire, and replace existing jobs on re-registration.
     sched = TornadoScheduler(job_defaults={
         "coalesce": True,
         "max_instances": 5,
         "misfire_grace_time": 120,
         "replace_existing": True
     })
     sched.start()
     self.sdr = sched
     self.db = get_mongodb_database("thirdparty", "third")
     init_schedule_task(sched, self.db)
     web.Application.__init__(self, handlers=routes)
Exemplo n.º 30
0
def get_redis_jobstores():
    """Return a scheduler whose jobs persist in Redis DB 11 (not started)."""
    store = RedisJobStore(jobs_key='xpm_cron.jobs',
                          run_times_key='xpm_cron.run_times',
                          host='192.168.2.227',
                          port=6379,
                          password='******',
                          db=11)
    # Large thread pool for ordinary jobs; small process pool for heavy ones.
    return TornadoScheduler(
        jobstores={'default': store},
        executors={
            'default': ThreadPoolExecutor(100),
            'processpool': ProcessPoolExecutor(5)
        })