Code Example #1
File: main.py  Project: DanielNobbe/Forex
def start_scheduler():
    """
    Function for starting the scheduler. Initialises the trading job,
    and adds an error listener.
    """
    access_token = API_CONFIG[cfg['account_type']]['access_token']
    accountID = API_CONFIG[cfg['account_type']]['accountID']

    instrument = cfg["instrument"]

    predictor = Predictor.build_from_cfg(cfg)

    # Interpreter is called to handle trades. Should it be called the trader?
    Inter = Interpreter((accountID, access_token), cfg, predictor)
    sched = BackgroundScheduler()
    interval = cfg['period']
    sched.add_job(Inter.perform_trade,
                  args=(),
                  trigger='interval',
                  seconds=interval)

    error_handler = partial(handle_job_error, sched)
    sched.add_listener(error_handler, EVENT_JOB_ERROR)

    sched.start()
    # TODO: Make this more persistent, automatically re-starting jobs that were running before
    return sched
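
The listener wired up above through functools.partial, handle_job_error, is not shown in this snippet. A minimal sketch, assuming it only needs to log the failure and stop the scheduler (the logger name is illustrative), might look like:

import logging

def handle_job_error(sched, event):
    # event is an apscheduler.events.JobExecutionEvent, so it carries the
    # failing job's id, the exception and the formatted traceback.
    logging.getLogger(__name__).error(
        "Job %s raised %r", event.job_id, event.exception)
    # Stop the scheduler so no further trades are attempted after a failure.
    sched.shutdown(wait=False)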
Code Example #2
def callback(icon):
    global scheduler

    # Create Scheduler
    scheduler = BackgroundScheduler()
    scheduler.add_listener(myListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()

    try:
        # From 06:00 to 22:00
        now = dt.datetime.now()
        logger.info("# Now : {}".format(now))
        # for hour in range(6, 24, 2):
        #     hour = str(hour).zfill(2)
        #     exec_time = dt.datetime.strptime("{}-{}-{} {}:01:00".format(now.year, now.month, now.day, hour), '%Y-%m-%d %H:%M:%S')

        #     # Register only future time
        #     if now < exec_time:
        #         logger.info("# Registered : {}".format(str(exec_time)))
        #         scheduler.add_job(func, 'cron', hour=hour, minute="00", second='30', id="Every2Hour{}".format(hour))

        # Privacy-i and BlackMagic
        # scheduler.add_job(lambda: os.system(exe_file), 'interval', seconds=30, id="PrivacyI")

        # AhnLab V3 : 11:30:30
        # exec_time = dt.datetime.strptime("{}-{}-{} 11:30:30".format(now.year, now.month, now.day), '%Y-%m-%d %H:%M:%S')
        # if now < exec_time:
        #     logger.info("# Registered : {}".format(str(exec_time)))
        #     scheduler.add_job(lambda: os.system(exe_file + " v3"), 'cron', hour="11", minute="30", second='30', id="ForV3")

        # No Screen Saver
        logger.info("# Registered : No Screen Saver")
        scheduler.add_job(preventScreenSaver,
                          'interval',
                          minutes=4,
                          id="NoScreenSaver")

        # Telegram Push
        logger.info("# Registered : Telegram Shift Push")
        scheduler.add_job(lambda: os.system(exe_shift_push),
                          'cron',
                          day_of_week="0-4",
                          hour="16",
                          minute="05",
                          second='01',
                          id="TelegramShiftPush")

        # ON Calendar Sync
        logger.info("# Registered : ON Calendar Sync")
        scheduler.add_job(lambda: os.system(exe_on_cal_sync),
                          'interval',
                          hours=1,
                          id="ONCalendarSync")

        logger.info("## Adding jobs success !")
    except Exception as e:
        scheduler.shutdown()
        logger.exception(str(e))

    icon.visible = True
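
myListener is registered above for both EVENT_JOB_EXECUTED and EVENT_JOB_ERROR but defined elsewhere; a plausible sketch, assuming it only logs through the same logger, is:

def myListener(event):
    # One callback receives both event types, so branch on the exception.
    if event.exception:
        logger.error("Job %s failed: %s", event.job_id, event.exception)
    else:
        logger.info("Job %s finished without errors", event.job_id)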
Code Example #3
def run_scheduler_thread():
    log.info("Setting up scheduler.")
    aplogger = logging.getLogger('apscheduler')
    if aplogger.hasHandlers():
        aplogger.handlers.clear()

    aplogger.setLevel(logging.ERROR)

    sched = BackgroundScheduler({
        'apscheduler.jobstores.default': {
            'type': 'memory'
        },
        'apscheduler.executors.default': {
            'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
            'max_workers': 5
        },
        'apscheduler.job_defaults.coalesce': True,
        'apscheduler.job_defaults.max_instances': 1,
        'apscheduler.job_defaults.misfire_grace_time': 60 * 60 * 2,
    })

    sched.start()

    sched.add_listener(
        job_evt_listener, apscheduler.events.EVENT_JOB_EXECUTED
        | apscheduler.events.EVENT_JOB_ERROR
        | apscheduler.events.EVENT_JOB_MISSED
        | apscheduler.events.EVENT_JOB_MAX_INSTANCES)

    schedule_jobs(sched)
    aplogger.setLevel(logging.DEBUG)
    log.info("Scheduler is running!")
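
job_evt_listener and schedule_jobs are defined elsewhere in this project. A sketch of a listener that branches on the four subscribed event codes (assuming the module-level log object) could be:

import apscheduler.events

def job_evt_listener(event):
    # Dispatch on the event code so each condition gets its own log level.
    if event.code == apscheduler.events.EVENT_JOB_ERROR:
        log.error("Job %s raised: %s", event.job_id, event.exception)
    elif event.code == apscheduler.events.EVENT_JOB_MISSED:
        log.warning("Job %s missed its run time %s",
                    event.job_id, event.scheduled_run_time)
    elif event.code == apscheduler.events.EVENT_JOB_MAX_INSTANCES:
        log.warning("Job %s skipped: maximum instances already running",
                    event.job_id)
    else:
        log.info("Job %s executed", event.job_id)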
Code Example #4
File: scheduler_task.py  Project: IanVzs/WindWhisper
    def run(self):
        jobstores = {
            # 'mongo': MongoDBJobStore(),
            # 'default': SQLAlchemyJobStore(url='sqlite:///jobs.sqlite')
            "memory": MemoryJobStore(),
        }
        executors = {
            'default': ThreadPoolExecutor(5),
            'processpool': ProcessPoolExecutor(2)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}
        scheduler = BackgroundScheduler(jobstores=jobstores,
                                        executors=executors,
                                        job_defaults=job_defaults,
                                        timezone=china_tz)
        scheduler.add_listener(self.listener,
                               EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        #scheduler.add_job(weather.weather_alarm, 'interval', seconds=10*60, id='sign_push_report')
        scheduler.add_job(weather.weather_alarm,
                          'interval',
                          seconds=2,
                          id='sign_weather_alarm')
        scheduler.start()
        return scheduler
Code Example #5
File: middleware.py  Project: SaschaHeyer/moesifwsgi
    def schedule_background_job(self):
        try:
            from apscheduler.schedulers.background import BackgroundScheduler
            from apscheduler.triggers.interval import IntervalTrigger
            from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
            import atexit

            scheduler = BackgroundScheduler(daemon=True)
            scheduler.add_listener(self.moesif_event_listener,
                                   EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
            scheduler.start()
            try:
                scheduler.add_job(
                    func=lambda: self.send_async_events.batch_events(
                        self.api_client, self.moesif_events_queue, self.DEBUG,
                        self.BATCH_SIZE),
                    trigger=IntervalTrigger(seconds=1),
                    id='moesif_events_batch_job',
                    name='Schedule events batch job every 1 second',
                    replace_existing=True)

                # Exit handler when exiting the app
                atexit.register(lambda: self.send_async_events.exit_handler(
                    scheduler, self.DEBUG))
            except Exception as ex:
                if self.DEBUG:
                    print("Error while calling async function")
                    print(str(ex))
        except:
            if self.DEBUG:
                print("Error when scheduling the job")
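
The exit handler passed to atexit here lives in the Moesif helper class and is not shown; conceptually it stops the batch job and the scheduler when the application exits. A hypothetical stand-in (names and behaviour are illustrative, not the library's actual API) could be:

def exit_handler(scheduler, debug):
    # Hypothetical sketch: remove the interval job and stop the scheduler.
    try:
        scheduler.remove_job('moesif_events_batch_job')
        scheduler.shutdown()
    except Exception as ex:
        if debug:
            print("Error while shutting down the scheduler: " + str(ex))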
Code Example #6
File: server.py  Project: zixzeus/schedule-system
def get_scheduler(store_path=None, log_file=None):
    if store_path is None:
        store_path = r'jobstore.sqlite'
    if log_file is None:
        log_file = r'logger.log'
    scheduler = BackgroundScheduler({'apscheduler.timezone': 'Asia/Shanghai'})
    jobstores = {
        'default': SQLAlchemyJobStore(url='sqlite:///{0}'.format(store_path))
    }
    executors = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': 1
    }
    scheduler.configure(jobstores=jobstores, executors=executors)
    # Record job events
    scheduler.add_listener(
        lambda event: event_listener(event, scheduler),
        EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_SUBMITTED | EVENT_JOB_REMOVED
    )
    # Customize the scheduler's logger
    scheduler._logger = modify_logger(scheduler._logger, log_file=log_file)
    return scheduler
Code Example #7
File: subwaystatus.py  Project: susana/subwaystatus
def run(stdscr):
    logging.getLogger('apscheduler.executors').setLevel(logging.ERROR)
    client = Client("http://www.mta.info/service_status_json/")
    scheduler = BackgroundScheduler()
    stdscr.bkgd(' ', curses.color_pair(0))
    scheduler.add_listener(handle_missed_job, EVENT_JOB_MISSED)
    scheduler.add_job(display,
                      args=[stdscr, client],
                      trigger='interval',
                      next_run_time=get_upcoming_hour(),
                      hours=1,
                      coalesce=True)
    scheduler.start()
    display(stdscr, client)

    while True:
        c = stdscr.getch()
        if c == ord('q'):
            stdscr.clear()
            scheduler.shutdown()
            break
        elif c == ord('r'):
            display(stdscr, client)
        elif c == curses.KEY_RESIZE:
            stdscr.refresh()
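
get_upcoming_hour is not part of the snippet; since it is used as next_run_time for an hourly interval job, a reasonable sketch simply rounds the current time up to the next full hour:

import datetime

def get_upcoming_hour():
    # Align the first run with the next hour boundary; the interval trigger
    # then keeps firing every hour from that point on.
    now = datetime.datetime.now()
    return now.replace(minute=0, second=0, microsecond=0) + datetime.timedelta(hours=1)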
Code Example #8
def bg_sched(job, job_args=None, trigger=None, **trigger_args):
    """
    BackgroundScheduler: use when you’re not using any of the frameworks below,
        and want the scheduler to run in the background inside your application
    :return:
    """
    global scheduler
    global trigger_type

    scheduler = BackgroundScheduler()
    trigger_type = trigger
    job_args = tuple(job_args) if job_args is not None else ()
    if trigger == 'cron':
        scheduler.add_job(job, trigger, args=job_args)
    elif trigger == 'date':
        scheduler.add_job(job, trigger, run_date=trigger_args['date'], args=job_args)
    elif trigger == 'interval':
        start = trigger_args.get('start_date')
        end = trigger_args.get('end_date')
        if start is None:
            start = datetime.now()
        if end:
            scheduler.add_job(job, trigger, args=job_args, seconds=trigger_args['seconds'], start_date=start, end_date=end)
        else:
            scheduler.add_job(job, trigger, args=job_args, seconds=trigger_args['seconds'], start_date=start)

    scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()
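
Assuming the module-level job_listener exists, a purely illustrative call that schedules a task every 30 seconds for one hour would be:

from datetime import datetime, timedelta

def my_task():
    print("tick")

bg_sched(my_task, trigger='interval', seconds=30,
         end_date=datetime.now() + timedelta(hours=1))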
Code Example #9
File: mail.py  Project: man-group/adaero
def includeme(config):
    """Pyramid convention that allows invocation of a function prior to
    server start and is found through `config.scan` in the main function"""
    scheduler = BackgroundScheduler()
    settings = config.get_settings()

    _get_send_email_flag(settings)

    log.info("Hostname in emails will be set to %s" % get_root_url(settings))

    should_run = bool(
        get_config_value(settings, constants.RUN_EMAIL_INTERVAL_JOB_KEY))
    if not should_run:
        log.info("Setting %s is false, not running email job." %
                 constants.RUN_EMAIL_INTERVAL_JOB_KEY)
        return

    log.info("Setting up email scheduler...")
    interval_s = int(
        get_config_value(settings, constants.CHECK_AND_SEND_EMAIL_INT_KEY))

    if not interval_s:
        msg = ("Settings %s is not set! Please set and restart the "
               "application" % constants.CHECK_AND_SEND_EMAIL_INT_KEY)
        raise ValueError(constants.MISCONFIGURATION_MESSAGE.format(error=msg))

    scheduler.add_job(email_job,
                      trigger="interval",
                      args=(settings, ),
                      seconds=interval_s)
    scheduler.add_listener(email_event_handler,
                           EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()
    log.info("Email scheduling setup completed!")
Code Example #10
File: scheduler.py  Project: garrylachman/happy-tasks
class Scheduler(Singleton):
    scheduler: BackgroundScheduler
    
    def __init__(self, maxWorkers: int = 20, maxExecuters:int = 5):
        jobstores = {
            'default': MemoryJobStore()
        }
        executors = {
            'default': {'type': 'threadpool', 'max_workers': maxWorkers}
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': 3
        }
        self.scheduler = BackgroundScheduler()
        self.scheduler.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
        self.scheduler.add_listener(self.onEvent)
        
    def onEvent(self, event: JobEvent):
        print(event)
        
    def addJob(self, crontabStr: str, func: FunctionType) -> Job:
        trigger: CronTrigger = None
        if crontabStr:
            trigger = CronTrigger.from_crontab(crontabStr)
        return self.scheduler.add_job(func, trigger)

    def parseCronTab(self, crontabStr: str) -> CronTrigger:
        return CronTrigger.from_crontab(crontabStr)
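
As a usage sketch (the Singleton base class and the job function are assumed to exist), registering a function on a five-minute crontab and starting the scheduler could look like:

def heartbeat():
    print("still alive")

sched = Scheduler()
sched.addJob("*/5 * * * *", heartbeat)
# The class does not start the scheduler itself, so start it directly.
sched.scheduler.start()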
Code Example #11
File: __init__.py  Project: c2corg/v6_api
def configure_scheduler_from_config(settings):
    scheduler = BackgroundScheduler()
    scheduler.start()

    # run `purge_account` job at 0:00
    scheduler.add_job(
        purge_account,
        id='purge_account',
        name='Purge accounts which were not activated',
        trigger='cron',
        hour=0,
        minute=0
    )

    # run `purge_token` job at 0:30
    scheduler.add_job(
        purge_token,
        id='purge_token',
        name='Purge expired tokens',
        trigger='cron',
        hour=0,
        minute=30
    )

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)

    atexit.register(lambda: scheduler.shutdown())
Code Example #12
    def run_selected_jobs(self, request, queryset):
        scheduler = BackgroundScheduler()
        scheduler.add_jobstore(self._memory_jobstore)
        scheduler.add_listener(self._handle_execution_event, events.EVENT_JOB_EXECUTED)

        scheduler.start()

        num_jobs_scheduled = 0
        self._jobs_executed = []
        start_time = timezone.now()

        for item in queryset:
            django_job = self._django_jobstore.lookup_job(item.id)

            if not django_job:
                msg_dict = {"job_id": item.id}
                msg = _(
                    "Could not find job {job_id} in the database! Skipping execution..."
                )
                self.message_user(
                    request, format_html(msg, **msg_dict), messages.WARNING
                )
                continue

            scheduler.add_job(
                django_job.func_ref,
                trigger=None,  # Run immediately
                args=django_job.args,
                kwargs=django_job.kwargs,
                id=django_job.id,
                name=django_job.name,
                misfire_grace_time=django_job.misfire_grace_time,
                coalesce=django_job.coalesce,
                max_instances=django_job.max_instances,
            )

            num_jobs_scheduled += 1

        while len(self._jobs_executed) < num_jobs_scheduled:
            # Wait for selected jobs to be executed.
            if timezone.now() > start_time + timedelta(
                seconds=self._job_execution_timeout
            ):
                msg = _(
                    "Maximum runtime exceeded! Not all jobs could be completed successfully."
                )
                self.message_user(request, msg, messages.ERROR)

                scheduler.shutdown(wait=False)
                return None

            time.sleep(0.1)

        for job_id in self._jobs_executed:
            msg_dict = {"job_id": job_id}
            msg = _("Executed job '{job_id}'!")
            self.message_user(request, format_html(msg, **msg_dict))

        scheduler.shutdown()
        return None
Code Example #13
File: jobs.py  Project: cAtaman/skylight
def start_scheduled_jobs():
    timezone = tz('Africa/Lagos')
    backup_trigger = CronTrigger(hour=17, minute=5, jitter=120)
    cache_trigger = CronTrigger(hour=17, minute=2, jitter=120)
    test_trigger = IntervalTrigger(seconds=10, jitter=2)
    job_defaults = {
        'coalesce': True,
        'max_instances': 1,
        # 'misfire_grace_time': 3
    }
    _scheduler = BackgroundScheduler(timezone=timezone,
                                     job_defaults=job_defaults)
    _scheduler.add_listener(
        job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_MISSED)

    jobs = [
        # _scheduler.add_job(backup_databases, id='backup_databases', trigger=backup_trigger),
        _scheduler.add_job(clear_cache_base_dir,
                           id='clear_cache_base_dir',
                           trigger=cache_trigger),
        _scheduler.add_job(remove_old_backups,
                           id='remove_old_backups',
                           trigger=backup_trigger),
    ]
    try:
        _scheduler.start()
    except SchedulerAlreadyRunningError:
        pass

    return _scheduler, jobs
Code Example #14
File: scheduler.py  Project: dragowave/eva
def get_scheduler():
    """
    Function used to return the `APScheduler <https://apscheduler.readthedocs.io/en/latest/>`_
    instance that is used by Eva and plugins.

    .. warning::

        This function should only be used by Eva. Plugins should access the
        scheduler through Eva's singleton object::

            from eva import scheduler
            # This will fire off the job immediately.
            scheduler.add_job(func_name, id="eva_<plugin_id>_job")

    .. todo::

        Need to add listeners for all event types:
        https://apscheduler.readthedocs.io/en/latest/modules/events.html#event-codes

    :note: This function most likely needs to be revisited as it may not be
        thread-safe. Eva and plugins can modify the config singleton
        simultaneously inside and outside of jobs.

    :return: The scheduler object used by plugins to schedule long-running jobs.
    :rtype: `apscheduler.schedulers.background.BackgroundScheduler
        <https://apscheduler.readthedocs.io/en/latest/modules/schedulers/background.html>`_
    """
    client = get_mongo_client()
    db_name = conf['mongodb']['database']
    scheduler = BackgroundScheduler(jobstores={'default': MongoDBJobStore(
        database=db_name, collection='scheduler', client=client)})
    scheduler.add_listener(job_succeeded, EVENT_JOB_EXECUTED)
    scheduler.add_listener(job_failed, EVENT_JOB_ERROR)
    scheduler.start()
    return scheduler
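
job_succeeded and job_failed are registered separately above; minimal sketches of both (assuming a module-level log object) might be:

def job_succeeded(event):
    # EVENT_JOB_EXECUTED: event.retval holds whatever the job returned.
    log.debug("Job %s finished, returned %r", event.job_id, event.retval)

def job_failed(event):
    # EVENT_JOB_ERROR: the exception and formatted traceback are available.
    log.error("Job %s failed: %s\n%s",
              event.job_id, event.exception, event.traceback)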
Code Example #15
def run_processes_through_apscheduler():
    #config = {'apscheduler.misfire_grace_time': 45}
    #add listener to scheduler
    #sched.add_listener(my_listener, EVENT_JOB_ERROR)

    try:
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }
        #executors = {'default': ProcessPoolExecutor(2)}
        sched = BackgroundScheduler(executors=executors)
        sched.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        #sched.add_job(run_processes, trigger='cron', hour='14', minute='06')
        sched.add_job(run_processes)
        sched.start()
        ################# SPOT PRICE MODEL #######################
        ###### DE/AT #####
        #regular forecasts
        #sched.add_cron_job(run_processes,  hour='12', minute = '22')

        print("Import Scheduler is running")

        input("Press enter to exit.")
        #print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
        sched.shutdown()

    except Exception as exc:
        print("ERROR: " + str(exc.args))
Code Example #16
def go(managedNamespace):
    log.info("Go()")

    resetter = xascraper.status_monitor.StatusResetter()
    resetter.resetRunState()

    # statusMgr = manage.statusDbManager.StatusResource()
    managedNamespace.run = True
    managedNamespace.serverRun = True

    server_process = multiprocessing.Process(target=serverProcess,
                                             args=(managedNamespace, ))
    if "debug" in sys.argv:
        log.info("Not starting scheduler due to debug mode!")
        sched = None
    else:
        sched = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'memory'
            },
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': 5
            },
            'apscheduler.job_defaults.coalesce': True,
            'apscheduler.job_defaults.max_instances': 1,
            'apscheduler.job_defaults.misfire_grace_time': 60 * 60 * 2,
        })

        logging.getLogger('apscheduler').setLevel(logging.DEBUG)
        sched.add_listener(
            job_evt_listener, apscheduler.events.EVENT_JOB_EXECUTED
            | apscheduler.events.EVENT_JOB_ERROR
            | apscheduler.events.EVENT_JOB_MISSED
            | apscheduler.events.EVENT_JOB_MAX_INSTANCES)
        scheduleJobs(sched, managedNamespace)
        sched.start()
        log.info("Scheduler is running!")

    log.info("Launching server process")
    server_process.start()
    loopCtr = 0

    log.info("Entering idle loop.")
    while managedNamespace.run:
        time.sleep(0.1)
        # if loopCtr % 100 == 0:
        # 	for job in sched.get_jobs():
        # 		print("Job: ", job.name, job.next_run_time.timestamp())
        # 		# statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
        loopCtr += 1

    if sched:
        sched.shutdown()
    log.info("Joining on web thread.")
    server_process.join()
Code Example #17
File: main.py  Project: fake-name/xA-Scraper
def go(managedNamespace):
	log.info("Go()")


	resetter = xascraper.status_monitor.StatusResetter()
	resetter.resetRunState()

	# statusMgr = manage.statusDbManager.StatusResource()
	managedNamespace.run = True
	managedNamespace.serverRun = True

	server_process = multiprocessing.Process(target=serverProcess, args=(managedNamespace,))
	if "debug" in sys.argv:
		log.info("Not starting scheduler due to debug mode!")
		sched = None
	else:
		sched = BackgroundScheduler({
				'apscheduler.jobstores.default': {
					'type': 'memory'
				},
				'apscheduler.executors.default': {
					'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
					'max_workers'                              : 5
				},
				'apscheduler.job_defaults.coalesce'            : True,
				'apscheduler.job_defaults.max_instances'       : 1,
				'apscheduler.job_defaults.misfire_grace_time'  : 60 * 60 * 2,
			})


		logging.getLogger('apscheduler').setLevel(logging.DEBUG)
		sched.add_listener(job_evt_listener,
				apscheduler.events.EVENT_JOB_EXECUTED |
				apscheduler.events.EVENT_JOB_ERROR    |
				apscheduler.events.EVENT_JOB_MISSED   |
				apscheduler.events.EVENT_JOB_MAX_INSTANCES
			)
		scheduleJobs(sched, managedNamespace)
		sched.start()
		log.info("Scheduler is running!")

	log.info("Launching server process")
	server_process.start()
	loopCtr = 0

	log.info("Entering idle loop.")
	while managedNamespace.run:
		time.sleep(0.1)
		# if loopCtr % 100 == 0:
		# 	for job in sched.get_jobs():
		# 		print("Job: ", job.name, job.next_run_time.timestamp())
		# 		# statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
		loopCtr += 1

	if sched:
		sched.shutdown()
	log.info("Joining on web thread.")
	server_process.join()
Code Example #18
class RunnableGraph:
    def __init__(self, is_blocking=False) -> None:
        self.nodes = []

        executors = {
            'default': ThreadPoolExecutor(max_workers=THREAD_POOL_SIZE)
        }

        if is_blocking:
            self.scheduler = BlockingScheduler(executors=executors)
        else:
            self.scheduler = BackgroundScheduler(executors=executors)

        self._produce_listeners()
        self.sink_nodes = set()
        self.ready_to_shutdown = False

    def link_nodes(self, source: Node, target: Node) -> None:
        pipe = Pipe(source.output_type)

        source.append_output_pipe(pipe)
        target.append_input_pipe(pipe)

        source.append_subscriber(target)

        self.nodes.extend([source, target])

    def _produce_listeners(self):
        def node_listener(event):
            signal, obj, subscribers = event.retval

            if signal is Signal.STOP:
                self.sink_nodes = self.sink_nodes.difference({obj})
                if not self.sink_nodes:
                    self.ready_to_shutdown = True

            if not subscribers:
                return

            for node in subscribers:
                self.scheduler.add_job(func=node.do_work)

        def exception_handler(event):
            print('Users function raised exception: ', event.exception)

        self.scheduler.add_listener(node_listener, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(exception_handler, EVENT_JOB_ERROR)

    def run(self) -> None:
        sources = {node for node in self.nodes if node.is_source}
        self.sink_nodes = {node for node in self.nodes if node.is_sink}

        for node in sources:
            self.scheduler.add_job(func=node.do_work)

        self.scheduler.start()
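
node_listener above assumes every do_work call returns a (signal, node, subscribers) tuple through event.retval. A hypothetical node method compatible with that contract (Signal.CONTINUE, read_input, transform, write_output and the subscribers attribute are all invented here) could be:

def do_work(self):
    # Hypothetical sketch: consume one item, then tell the listener which
    # subscribers to schedule next via the returned tuple.
    item = self.read_input()
    if item is None:
        return Signal.STOP, self, []
    self.write_output(self.transform(item))
    return Signal.CONTINUE, self, self.subscribers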
Code Example #19
File: app.py  Project: rtran9/news-bundles
def initialize():
    scheduler = BackgroundScheduler()
    print_date_time()
    print("initializing app")
    scheduler.add_job(cache_data, 'interval', hours=2, replace_existing=True)
    scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    print("starting scheduler")
    scheduler.start()
    # Shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())
Code Example #20
File: scheduler.py  Project: asd307769162/youxiang
def job_tasks():

    scheduler = BackgroundScheduler(timezone="Asia/Shanghai")

    tb_job_tasks(scheduler)
    jd_job_task(scheduler)

    # Add a monitoring listener
    scheduler.add_listener(scheduler_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()
Code Example #21
def doSchedulejob():
    # Create the scheduler: BackgroundScheduler
    scheduler = BackgroundScheduler()
    # Add a job with a 2-second interval
    scheduler.add_job(monitorSystem, 'interval', seconds=2, id='test_job1')
    # Add a job with a 3-second interval
    scheduler.add_job(monitorNetWork, 'interval', seconds=3, id='test_job2')
    scheduler.add_listener(apschedulerListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler._logger = logging
    scheduler.start()
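
monitorSystem, monitorNetWork and apschedulerListener are defined elsewhere; one plausible implementation of the system-monitoring job, using psutil purely as an example, is:

import logging
import psutil

def monitorSystem():
    # Log CPU and memory usage each time the two-second job fires.
    cpu = psutil.cpu_percent()
    mem = psutil.virtual_memory().percent
    logging.info("CPU: %s%%, memory: %s%%", cpu, mem)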
Code Example #22
class MiningScheduler(object):
    def __init__(self, mining_server, conf):
        self.logger = logging.getLogger("root")
        self.conn = get_conn(conf.get("db", "host"), conf.getint("db", "port"), conf.get("db", "database"))
        self.mining_server = mining_server
        # self.init_seed()
        self.init_job()

    def init_job(self):
        self.scheduler = BackgroundScheduler()
        executors = {"default": {"type": "threadpool", "max_workers": 5}}
        self.scheduler.configure(logger=logging.getLogger("apscheduler"), executors=executors)
        self.scheduler.add_listener(
            self.err_handle, apscheduler.events.EVENT_JOB_ERROR | apscheduler.events.EVENT_JOB_MISSED
        )

        job_co = self.conn.mining_seed_job
        total = job_co.count()
        interval = 60 * 60
        time_delta = interval * 1.0 / total
        all_job = list(job_co.find())

        for i, job in enumerate(all_job):
            next_run_time = datetime.now() + timedelta(milliseconds=int(time_delta * i * 1000), seconds=10)
            self.scheduler.add_job(
                self.put_to_queue,
                "interval",
                args=(job,),
                seconds=interval,
                next_run_time=next_run_time,
                id=str(job.get("_id")),
                misfire_grace_time=10,
            )

    def init_seed(self):
        data_list = []
        with open("../data/url_filter.txt") as fin:
            for line in fin:
                line = line.strip()
                _, url, block = line.split("\t")
                data_list.append(dict(url=url, block=block))
        self.conn.mining_seed_job.remove({})
        self.conn.mining_seed_job.insert_many(data_list)

    def err_handle(self, ev):
        self.logger.error("apscheduler error:" + str(ev))

    def start(self):
        self.logger.info("scheduler started")
        self.scheduler.start()

    def put_to_queue(self, job):
        url = job.get("url")
        rs = self.conn.mining_seed_task.insert_one(dict(job_id=job.get("_id"), time=datetime.now()))
        self.mining_server.process_task(url, rs.inserted_id)
Code Example #23
class OwpingScheduler():
    """
    Scheduler for launching owping to all indicated addresses at a fixed frequency
    """
    def __init__(self, config_store: Config_store, callback: callable,
                 callback_fail: callable):
        self.config_store = config_store  # config store
        self.callback = callback  # callback function when job done
        self.callback_fail = callback_fail  # callback function when job failed
        self.address_list = config_store.address_list  # address list to ping
        self.schedule = config_store.schedule  # schedule of pings
        self.scheduler = BackgroundScheduler()  # scheduler
        self.owpingers = []  # list of owamp clients

    def start_owping_scheduler(self):
        """
        Start the scheduling of owpings
        """
        self.scheduler.start()
        self.scheduler.add_listener(self._scheduler_listener,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self._create_owpingers()
        for owpinger in self.owpingers:
            self.scheduler.add_job(owpinger.owping,
                                   trigger="interval",
                                   seconds=20,
                                   replace_existing=True,
                                   next_run_time=datetime.now())

    def shutdown_owping_scheduler(self):
        """
        Shutdown the scheduler
        """
        self.scheduler.shutdown()

    def _scheduler_listener(self, event):
        if event.exception:
            self.callback_fail()
        else:
            owamp_stats = event.retval[0]
            stderr = event.retval[1]
            if owamp_stats.exit_code == 0:
                self.callback(owamp_stats)

            else:
                message = "Owping for address {addr} did not work".format(
                    addr=owamp_stats.address)
                print(message)
                print("owping program error:\n", stderr)

    def _create_owpingers(self):
        for addr in self.address_list:
            owpinger = OwampClient(addr, self.config_store)

            self.owpingers.append(owpinger)
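
_scheduler_listener unpacks event.retval into (owamp_stats, stderr), so OwampClient.owping has to return that pair. A rough sketch of such a client method, with parse_owamp_output as a hypothetical helper, might be:

import subprocess

class OwampClient:
    def __init__(self, address, config_store):
        self.address = address
        self.config_store = config_store

    def owping(self):
        # Run the owping binary and return (stats, stderr) so the scheduler
        # listener can unpack event.retval; parse_owamp_output is hypothetical.
        proc = subprocess.run(["owping", self.address],
                              capture_output=True, text=True)
        stats = parse_owamp_output(proc.stdout, self.address, proc.returncode)
        return stats, proc.stderr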
Code Example #24
def main():

    # Define the event listener
    def job_exception_listener(event):

        if event.code == EVENT_SCHEDULER_STARTED:
            print("SCHEDULER_STARTED")

        elif event.code == EVENT_SCHEDULER_SHUTDOWN:
            print("SCHEDULER_SHUTDOWN")

        elif isinstance(event, JobExecutionEvent) and event.exception:
            # TODO: exception handling, alerting, etc.
            print('The job crashed :(' + event.traceback)

        elif isinstance(event, JobExecutionEvent):
            print('The job worked :)')

        else:
            print('uncheck event')

    # Define the scheduler
    # scheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)
    scheduler = BackgroundScheduler()

    # Define the job
    def job_func(job_id):
        print('job %s is run at %s' %
              (job_id, datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')))

    # Add the job
    scheduler.add_job(job_func,
                      id='job_id',
                      name='a test job',
                      args=['winjean_test_job'],
                      trigger='interval',
                      start_date='2020-08-01 09:30:00',
                      end_date='2020-10-15 11:00:00',
                      seconds=3,
                      max_instances=10,
                      jobstore='default',
                      executor='default')

    # Register the event listener
    scheduler.add_listener(
        job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR
        | EVENT_SCHEDULER_SHUTDOWN | EVENT_SCHEDULER_STARTED)

    # Start the scheduler
    scheduler.start()

    time.sleep(10)

    # Shut down the scheduler
    scheduler.shutdown()
Code Example #25
class Schedule:
    def __init__(self) -> None:
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_listener(Schedule.listener, mask=EVENT_JOB_ERROR | EVENT_JOB_ADDED | EVENT_JOB_EXECUTED)
        self.parsing_moderation_job = self.scheduler.add_job(
            func=moderation.moderate_queue,
            trigger=CronTrigger(
                start_date=utils.round_publication_date(datetime.datetime.now()),
                second=0,
                minute=0,
                hour='8,10,12,14,16,18'
            ),
            id='parsing_moderation_job',
            name='VK Parsing Moderation',
            max_instances=1
        )
        self.publication_moderation_job = self.scheduler.add_job(
            func=publication_service.process_moderation,
            trigger=IntervalTrigger(seconds=5, start_date=datetime.datetime.now()),
            id='publication_moderation_job',
            name='Publication Moderation Job',
            max_instances=1
        )
        self.publication_job = self.scheduler.add_job(
            func=publication_service.process_publication,
            trigger=IntervalTrigger(
                start_date=utils.round_publication_date(datetime.datetime.now()),
                minutes=cfg.publication_interval
            ),
            id='publications_job',
            name='Publications Job',
            max_instances=1
        )
        self.clean_job = self.scheduler.add_job(
            func=moderation.clean_old_messages,
            trigger=IntervalTrigger(
                start_date=datetime.datetime.now(),
                minutes=5,
            ),
            name='Cleanup Messages Job'
        )

    def start(self):
        self.scheduler.start()

    @staticmethod
    def listener(event):
        if event.code == EVENT_JOB_ERROR:
            logger.exception('Exception while executing job')
        elif event.code == EVENT_JOB_ADDED:
            logger.info(f'Added job with id {event.job_id}')
        elif event.code == EVENT_JOB_EXECUTED:
            logger.info(f'Job with id {event.job_id} successfully executed')
Code Example #26
File: register.py  Project: santiagoyegros/SPH
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_listener(listener_error,
                           EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.add_job(ausencias.registrar_ausencia, 'interval', minutes=0.017)
    scheduler.add_job(alertas.registrar_alerta, 'interval', minutes=15)
    scheduler.add_job(temporal.borrar_temporales, 'cron', hour=2, minute=0)
    scheduler.add_job(procesoCupos.generarCantidadCupos,
                      'cron',
                      hour=23,
                      minute=55)
    scheduler.start()
Code Example #27
def simulate(args):
    '''
    Simulate data
    '''
    #logger.setLevel(args.loglevel)
    global host
    host = args.host
    # Get simulated data files.
    files = {}
    if args.simulator.endswith('.json'):
        with open(args.simulator, 'r') as f:
            contents = f.read()
            try:
                files[args.simulator] = json.loads(contents)
            except Exception as e:
                logger.error("Error reading " + args.simulator + " -- " +
                             str(e))
    else:
        filelist = listdir(args.simulator)

        for file in filelist:
            if file.endswith('.json'):
                with open(args.simulator + "/" + file, 'r') as f:
                    contents = f.read()
                    try:
                        files[file] = json.loads(contents)
                    except Exception as e:
                        logger.error("Error reading " + str(file) + " -- " +
                                     str(e))

    if len(files) == 0:
        logger.error("No simulator files found")

    for file, values in files.items():
        simulators.append(
            SimulatedData(values['sensors'], values['sensor_auth']))

    global scheduler
    scheduler = BackgroundScheduler()
    scheduler.add_listener(processJobError, apscheduler.events.EVENT_JOB_ERROR)

    scheduler.add_job(checkSensors)  # Run initial sensor grab right away
    scheduler.add_job(checkSensors,
                      'interval',
                      seconds=1,
                      max_instances=3,
                      name="sensor_check")  # set job for future runs

    scheduler.start()
    import time
    while (True):
        time.sleep(1)
Code Example #28
def scrape():
    scheduler = BackgroundScheduler()
    scheduler.add_job(scrape_bbc_news,
                      'interval',
                      minutes=15,
                      id='cnn_scraper')
    scheduler.add_job(scrape_techcrunch_items,
                      'interval',
                      minutes=15,
                      id='techcrunch_scraper')
    scheduler.add_job(scrape_ynet, 'interval', minutes=15, id='ynet_scraper')
    scheduler.add_listener(events_listener,
                           EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()
Code Example #29
    def start_schedule(self):
        sched = BackgroundScheduler()
        job = sched.add_job(self.do_post, 'date', run_date=chun.next_sunrise)
        sched.add_listener(self.job_executed_listener, EVENT_JOB_EXECUTED)
        sched.start()
        try:
            while True:
                self.running = True
                while self.running:
                    time.sleep(10)
                job.remove()
                job = sched.add_job(self.do_post, 'date', run_date=self.next_sunrise, misfire_grace_time=120)
        except (KeyboardInterrupt, SystemExit):
            sched.shutdown()
Code Example #30
    def job_interval(self, minutes=10):
        scheduler = BackgroundScheduler()
        scheduler.add_job(self.chk_IP_job, 'interval', minutes=minutes)
        scheduler.add_listener(self.my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        scheduler.start()

        print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

        try:
            # This is here to simulate application activity (which keeps the main thread alive).
            while True:
                time.sleep(5)
        except (KeyboardInterrupt, SystemExit):
            # Not strictly necessary if daemonic mode is enabled but should be done if possible
            scheduler.shutdown()
Code Example #31
def backgroud_trigger():
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, "interval", seconds=3)
    scheduler.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()
    print("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))
    try:
        # This is here to simulate application activity (which keeps the main
        # thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done
        # if possible
        scheduler.shutdown()
Code Example #32
    def __init__(self,
                 job_store: Optional[Any] = None,
                 executor: Optional[Any] = None,
                 job_defaults: Optional[Any] = None,
                 time_zone: Optional[Any] = None,
                 scheduler_type: Optional[str] = 'default',
                 **kwargs) -> Any:
        if not executor:
            executor = {
                'default': ThreadPoolExecutor(20),
                'processpool': ProcessPoolExecutor(5)
            }
        if not job_store:
            REDIS = {
                'host': config_map.get(env).REDIS_HOST,
                'port': config_map.get(env).REDIS_PORT,  # e.g. '32360'
                'db': config_map.get(env).REDIS_DB,
                'password': CommonHelper.base64_decode(config_map.get(env).REDIS_PASSWORD)
            }
            default_redis_jobstore = RedisJobStore(**REDIS)
            job_store = {'redis': default_redis_jobstore}
        global job_stores
        job_stores = 'redis'

        if not job_defaults:
            job_defaults = {'coalesce': False, 'max_instances': 3}
        if not time_zone:
            time_zone = utc
        init_scheduler_options = {
            "job_defaults": job_defaults,
            "jobstores": job_store,
            "executors": executor,
            "timezone": time_zone
        }
        global scheduler
        if scheduler_type == 'default':
            scheduler = BackgroundScheduler(**init_scheduler_options)
        elif scheduler_type == 'async':
            scheduler = AsyncIOScheduler(**init_scheduler_options)
        elif scheduler_type == 'block':
            scheduler = BlockingScheduler(**init_scheduler_options)
        scheduler.add_listener(self.job_execute_listener, EVENT_JOB_EXECUTED)
        scheduler.start()
Code Example #33
File: check.py  Project: xmjharry/helpers
def check(_type):
    write_log(datetime.datetime.today().strftime('%Y-%m-%d_%H:%M:%S') +
              ' 执行打卡操作')
    if is_check():
        if _type == 'in':
            check_str = '上班'
            start_datetime = datetime.datetime.today().replace(
                hour=9, minute=0, second=0, microsecond=0).timestamp()
            end_datetime = datetime.datetime.today().replace(
                hour=9, minute=10, second=0, microsecond=0).timestamp()
        elif _type == 'out':
            check_str = '下班'
            start_datetime = datetime.datetime.today().replace(
                hour=19, minute=0, second=0, microsecond=0).timestamp()
            end_datetime = datetime.datetime.today().replace(
                hour=20, minute=30, second=0, microsecond=0).timestamp()
        else:
            raise ValueError('Type is error')
        run_date = datetime.datetime.fromtimestamp(
            random.randrange(start_datetime, end_datetime))
        # run_date = datetime.datetime.now() + datetime.timedelta(seconds=3)
        ifttt.send(
            'check', {
                'value2': run_date.strftime('%H:%M:%S'),
                'value1': f"{run_date.strftime('%m.%d')}预计{check_str}打卡时间:"
            })
        scheduler = BackgroundScheduler()
        scheduler.add_listener(scheduler_listener,
                               EVENT_JOB_ERROR | EVENT_JOB_MISSED)
        scheduler.add_job(
            send_request,
            'date',
            run_date=run_date,
            args=(_type, ),
            id=f"{run_date.strftime('%Y-%m-%d_%H:%M:%S')}-{_type}")
        scheduler.start()
        try:
            while len(scheduler.get_jobs()) > 0:
                time.sleep(2)
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()
            write_log('Exit the job!')
    else:
        pass
Code Example #34
class SeedScheduler(object):
    def __init__(self, scheduler, handler, conf):
        '''
        scheduler: blocking or background
        '''
        self.logger = logging.getLogger("root")
        self.handler = handler
        self.init_job(scheduler, conf)

    def init_job(self, scheduler, conf):
        base = SeedBase(conf)
        if scheduler == 'blocking':
            self.scheduler = BlockingScheduler()
        elif scheduler == 'background':
            self.scheduler = BackgroundScheduler()

        executors = {
            'default': {'type': 'threadpool', 'max_workers': 5},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        self.scheduler.configure(logger=logging.getLogger('apscheduler'), executors=executors)

        self.scheduler.add_listener(self.err_handle,
                apscheduler.events.EVENT_JOB_ERROR | apscheduler.events.EVENT_JOB_MISSED)
        for seed in base.get_all_seed():
            print(seed)
            start_date=datetime.datetime.now()
            self.scheduler.add_job(self.handler.handle, 'interval',
                    args=(seed, self.logger),
                    seconds=seed['interval'],
                    id=str(seed['jobid'])
                    )

    def err_handle(self, ev):
        self.logger.error(str(ev))

    def start(self):
        self.logger.info('scheduler started')
        self.scheduler.start()
Code Example #35
File: rpicam_sch.py  Project: istvanzk/rpicampy
		for k in message_values:
			RESTfeed.setfield(k, message_values[k])

		if status_message is not None:
			RESTfeed.setfield('status', status_message)

		RESTfeed.update()



### The APScheduler
schedRPi = BackgroundScheduler(alias='BkgScheduler')
#schedRPi = BlockingScheduler(alias='BlkScheduler')

# Add job execution event handler
schedRPi.add_listener(jobListener, EVENT_JOB_ERROR | EVENT_JOB_EXECUTED | EVENT_JOB_ADDED | EVENT_JOB_REMOVED)


### The events
eventsRPi = rpievents.rpiEventsClass(RPIJOBNAMES)
rpiLogger.info(eventsRPi)

### Instantiate the job classes
imgCam = rpicam.rpiCamClass(RPIJOBNAMES['cam'], schedRPi, eventsRPi, camConfig)
rpiLogger.info(imgCam)

imgDbx = rpimgdb.rpiImageDbxClass(RPIJOBNAMES['dbx'], schedRPi, eventsRPi, dbxConfig, imgCam.imageFIFO)
rpiLogger.info(imgDbx)

imgDir = rpimgdir.rpiImageDirClass(RPIJOBNAMES['dir'], schedRPi, eventsRPi, dirConfig, imgCam.imageFIFO, imgDbx.imageUpldFIFO)
rpiLogger.info(imgDir)
Code Example #36
File: bchecker.py  Project: d53dave/python-bazchecker
        new_results.append((result['id'], result_url))
    global cached_results
    result_diff = diff(new_results, cached_results)
    logger.info("Found %s new offers!", str(len(result_diff)))
    if(len(result_diff) > 0):
        cached_results = new_results
        send_new_results(result_diff)


def error_listener(event):
    if event.exception:
        print('The job crashed :(')
        msg = "Job crashed with exception [%s]" % event.exception
        broadcast(msg, msg)


if __name__ == '__main__':
    logger.info("Application startup.")
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', minutes=update_interval)
    scheduler.add_listener(error_listener, EVENT_JOB_ERROR)
    scheduler.start()
    bot.polling()

    try:
        while True:
            pass
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()  
        logger.info("Application shutdown.")
Code Example #37
File: controller.py  Project: fzuellich/urlmonitor
class ApplicationController(object):
    """Controller provides callback functions and logic for the ApplicationView
    and therefore also the custom Treeview widget (widget.URLMonitorWidget).
    """

    _INTERVAL = 0.1

    def __init__(self, view=None):
        """Create a new controller instance.
        
        Args:
            view (view.ApplicationView, optional): View instance to control. 
                Due to the structure the view might have to be added after the
                initialization of the view. 1. init. controller 2. init view
                3. set controller.view.
        """

        self.view = view
        self.thread_scheduler = BackgroundScheduler()
        self.url_monitor = urlmonitor.URLMonitor()
    
    def init(self):
        self._load_configuration()
        self._schedule_jobs()

    # /////////////////////////////////////////////////////////////////////////
    # Initializer
    # /////////////////////////////////////////////////////////////////////////

    def _load_configuration(self):
        urls = URLMonitorConfig.load()
        for url in urls:
            self._add_url_to_monitor(url['url'], auth=url['auth'], label=url['label'])
            self.view.monitor_widget.insert_item(url['url'], label=url['label'])

    def _schedule_jobs(self):
        self.monitoring_job = self.thread_scheduler.add_job(
            self.url_monitor.check, 'interval', minutes=self._INTERVAL, id='monitor')

        self.statusbar_job = self.thread_scheduler.add_job(
            self.update_statusbar, 'interval', seconds=1)

        # add listener to automatically sort again after refresh (only for monitor job)
        self.thread_scheduler.add_listener(self.refresh_monitor, apsevent.EVENT_JOB_EXECUTED)
        self.thread_scheduler.start()

    def _add_url_to_monitor(self, url, label=None, auth=None):
        observer = self.url_monitor.register_url(url, label=label, auth=auth)
        observer.subscribe_on_check(self.url_check_callback)
        observer.subscribe_on_result(self.url_result_callback)

    # /////////////////////////////////////////////////////////////////////////
    # Job related
    # /////////////////////////////////////////////////////////////////////////

    def refresh_monitor(self, event):
        if event.job_id == 'monitor':
            self.view.monitor_widget.refresh()

    def update_statusbar(self):
        schedule = self.monitoring_job.next_run_time
        schedule = schedule.replace(tzinfo=None)
        now = datetime.datetime.now()
        delta = schedule - now

        self.view.statusbar.set(self._INTERVAL, str(delta.seconds))

    # /////////////////////////////////////////////////////////////////////////
    # Miscellaneous callback functions
    # /////////////////////////////////////////////////////////////////////////

    def open_url_in_browser(self, event):
        """Opens the currently selected URL in the default browser."""

        url = self.view.monitor_widget.get_selected_url()
        if url is not None:
            webbrowser.open_new_tab(url)

    def quit_app(self, event=None):
        """Shutdown the thread scheduler, save the configuration and quit
        the application."""

        self.thread_scheduler.shutdown(wait=False)
        URLMonitorConfig.save(self.url_monitor._urls)
        self.view.quit()

    def url_check_callback(self, url):
        self.view.monitor_widget.update_item(url, 'Checking', '...')

    def url_result_callback(self, url, response):
        if response is not False:
            self.view.monitor_widget.update_item(url, response.status_code, response.elapsed)
        else:
            self.view.monitor_widget.update_item(url, 'ERROR', '')

    # /////////////////////////////////////////////////////////////////////////
    # URL related methods
    # /////////////////////////////////////////////////////////////////////////

    def add_url(self, event):
        """Create the dialog to add a new URL and add the result to the 
        application if desired."""

        dialog = widget.URLDialog(parent=self.view)
        result = dialog.show()

        if result is not None and len(result) > 0:
            self.view.monitor_widget.insert_item(result['url'], result['label'])
            self._add_url_to_monitor(result['url'], label=result['label'], auth=result['auth'])

    def edit_url(self, event):
        """Create the dialog to edit a URL and handle the result."""

        url = self.view.monitor_widget.get_selected_url()
        values = self.url_monitor._urls[url]
        dialog = widget.URLDialog(parent=self.view, data=values, editmode=True)
        result = dialog.show()

        if result is not None and len(result) > 0:
            self.view.monitor_widget.update_item(url, label=result['label'])
            self._add_url_to_monitor(result['url'], label=result['label'], auth=result['auth']) # will update the value

    def delete_url(self, event):
        """Delete the selected URL."""

        url = self.view.monitor_widget.get_selected_url()

        if url is not None:
            self.url_monitor.unregister_url(url)
            self.view.monitor_widget.delete_item(url)
Code Example #38
		logger.error("syncCronListener: %s" % repr(event.exception))
	else:
		logger.info("Syncing Complete w/ no errors!")
if __name__ == "__main__":
	#start
	logger.info("[+]Starting MoombahBot")
	
	#init scheduler
	sched = BackgroundScheduler()

	#init bot
	moombahBot = TwitterBot(config_file=apiConfFile, logger=logger)

	#Sync once daily
	sched.add_job(lambda: moombahBot.sync_follows(), 'interval', hours=23, replace_existing=True)
	sched.add_listener(fetchCronListener, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
	logger.debug('Account sync cronjob added')

	#Fetch at interval
	sched.add_job(lambda: fetch(moombahBot), 'interval', minutes=fetchIntervalMinutes, replace_existing=True)
	sched.add_listener(syncCronListener, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
	logger.debug('Fetch cronjob added')
	
	#start sched cron
	sched.start()

	#Running
	logger.info("[+]Moombah Bot Running")
	print("[+]CTRL+C to exit or ps aux|grep startBot.py")
	
	#Sync+fetch on start
Code Example #39
File: __init__.py  Project: zhmsg/dms
            wr.write("scheduled_run_time: %s\n" % ev.scheduled_run_time)
            print(ev.scheduled_run_time)
            wr.write("retval: %s\n" % ev.retval)
            wr.write("exception: %s\n" % ev.exception)
            wr.write("traceback: %s\n" % ev.traceback)
        elif isinstance(ev, apscheduler.events.JobEvent):
            wr.write("Job Event\n")
            wr.write("code: %s\n" % ev.code)
            wr.write("job_id: %s\n" % ev.job_id)
        elif isinstance(ev, apscheduler.events.SchedulerEvent):
            wr.write("Scheduler Event\n")
            wr.write("code: %s\n" % ev.code)
            wr.write("alias: %s\n" % ev.alias)
        wr.write("----------end----------\n")

dms_scheduler.add_listener(err_listener)


class User(UserMixin):
    user_name = ""

    def get_id(self):
        return self.user_name

login_manager = LoginManager()
login_manager.session_protection = 'strong'


@login_manager.user_loader
def load_user(user_name):
    user = User()
Code Example #40
File: routes.py  Project: EMC-Underground/dashupdater
port = int(os.getenv('VCAP_APP_PORT', 8080))

# Uncomment if you need to debug the site
# app.debug = True

# Routes
@app.route('/')
def hello_world():
  return 'Hello World!'
#
# Packet attrs: gdun
#
@app.route('/dashboard/', methods=['PUT'])
def dashboards():
  if request.method == 'PUT':
    packet = request.get_json()
    return methods.set_next_index(packet)

# Start App
if __name__ == '__main__':
  scheduler.add_job(methods.rotating, 'interval', seconds=20)
  scheduler.add_listener(methods.error_listener, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
  scheduler.start()

  try:
    app.run(host='0.0.0.0', port=port)

  except (KeyboardInterrupt, SystemExit):
    # Not strictly necessary if daemonic mode is enabled but should be done if possible
    scheduler.shutdown()
Code Example #41
File: ejenti.py  Project: Mozilla-TWQA/Hasal
class MainRunner(object):

    class FilterAllLog(logging.Filter):
        # By default we filter out the loggers apscheduler.executors.default and
        # apscheduler.scheduler; the filtered loggers can be configured in config.json.
        def filter(self, record):
            return ""

    def __init__(self, input_cmd_config_fp, input_job_config_fp, input_config_fp):

        # init value
        cmd_config_fp = os.path.abspath(input_cmd_config_fp)
        job_config_fp = os.path.abspath(input_job_config_fp)
        config_fp = os.path.abspath(input_config_fp)

        # load configuration json files
        self.cmd_config = CommonUtil.load_json_file(cmd_config_fp)
        self.job_config = CommonUtil.load_json_file(job_config_fp)
        self.config = CommonUtil.load_json_file(config_fp)

        # init schedulers
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore('sqlalchemy', url=self.config['job_store_url'])
        self.scheduler.start()

        # init variables
        mananger = Manager()
        self.sync_queue = mananger.Queue()
        self.async_queue = mananger.Queue()
        self.current_job_list = []

        # Slack Sending Queue
        # TODO: if the Slack bot is disabled, prevent the sending queue from using too much memory.
        self.slack_sending_queue = mananger.Queue(50)

        # init logger
        self.set_logging(self.config['log_level'], self.config['log_filter'])

    def set_logging(self, log_level, log_filter_list):
        default_log_format = '%(asctime)s %(levelname)s [%(name)s.%(funcName)s] %(message)s'
        default_datefmt = '%Y-%m-%d %H:%M'
        if log_level.lower() == "debug":
            logging.basicConfig(level=logging.DEBUG, format=default_log_format, datefmt=default_datefmt)
        else:
            logging.basicConfig(level=logging.INFO, format=default_log_format, datefmt=default_datefmt)

        my_filter = self.FilterAllLog()
        for target_logger in log_filter_list:
            logging.getLogger(target_logger).addFilter(my_filter)

    def scheduler_del_job(self, **kwargs):
        input_cmd_str = kwargs.get("input_cmd_str", "")
        cmd_str_list = input_cmd_str.split(" ")
        if len(cmd_str_list) == 2:
            job_id = cmd_str_list[1]
            current_job_list = self.scheduler.get_jobs()
            current_job_id_list = [j.id for j in current_job_list]
            if job_id in current_job_id_list:
                self.scheduler.remove_job(job_id)
            else:
                logging.error("Cannot find the specify job id [%s]" % job_id)
        else:
            logging.error("Incorrect cmd format! [%s]" % input_cmd_str)

    def scheduler_list_job(self, **kwargs):
        self.scheduler.print_jobs()

    def scheduler_shutdown(self, **kwargs):
        self.scheduler.shutdown()
        sys.exit(0)

    def list_all_commands(self, **kwargs):
        print "Current supported commands as below:"
        print "-" * 80
        for cmd_str in self.cmd_config['cmd-settings']:
            print '{:30s} {:50s} '.format(cmd_str, self.cmd_config['cmd-settings'][cmd_str]['desc'])
        print "-" * 80

    def scheduler_job_handler(self, input_cmd_obj, input_cmd_str):
        cmd_match_pattern = input_cmd_obj.keys()[0]
        func_point = getattr(self, input_cmd_obj[cmd_match_pattern]['func-name'])
        func_point(cmd_configs=input_cmd_obj[cmd_match_pattern]['configs'], input_cmd_str=input_cmd_str)

    def cmd_queue_composer(self, input_cmd_str):
        for cmd_pattern in self.cmd_config['cmd-settings']:
            re_compile_obj = re.compile(cmd_pattern)
            re_match_obj = re_compile_obj.search(input_cmd_str)
            if re_match_obj:
                current_command_obj = self.cmd_config['cmd-settings'][cmd_pattern]
                logging.debug("job matched [%s]" % cmd_pattern)
                target_queue_type = current_command_obj.get('queue-type', None)
                if target_queue_type == "async":
                    self.async_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                elif target_queue_type == "sync":
                    self.sync_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                else:
                    self.scheduler_job_handler({cmd_pattern: current_command_obj}, input_cmd_str)
                break

    def load_default_jobs(self, input_scheduler, input_job_config):
        current_jobs = input_scheduler.get_jobs()
        current_jobs_name = [job.name for job in current_jobs]
        for job_name in input_job_config:
            if input_job_config[job_name]['default-loaded']:
                if job_name not in current_jobs_name:
                    func_point = getattr(importlib.import_module(input_job_config[job_name]['module-path']), job_name)
                    self.scheduler.add_job(func_point, input_job_config[job_name]['trigger-type'],
                                           id=job_name,
                                           seconds=input_job_config[job_name]['interval'],
                                           max_instances=input_job_config[job_name]['max-instances'],
                                           kwargs={
                                               'async_queue': self.async_queue,
                                               'sync_queue': self.sync_queue,
                                               'slack_sending_queue': self.slack_sending_queue,
                                               'configs': input_job_config[job_name]['configs'],
                                               'cmd_config': self.cmd_config}
                                           )

    def job_exception_listener(self, event):
        if event.exception:
            logging.error("Job [%s] crashed [%s]" % (event.job_id, event.exception))
            logging.error(event.traceback)

    def add_event_listener(self):
        self.scheduler.add_listener(self.job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    def run(self):
        # load the default jobs into the scheduler if they do not exist yet
        self.load_default_jobs(self.scheduler, self.job_config)

        # add event listener into scheduler
        self.add_event_listener()

        # enter the loop to receive the interactive command
        while True:
            user_input = raw_input()
            self.cmd_queue_composer(user_input)
            time.sleep(3)
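
load_default_jobs() above calls every default job with the same keyword arguments (async_queue, sync_queue, slack_sending_queue, configs, cmd_config). A hedged sketch of a job function with a compatible signature, matching a hypothetical job_config entry with trigger-type "interval" (the name check_latest_nightly and its body are illustrative only):

import logging

def check_latest_nightly(async_queue=None, sync_queue=None,
                         slack_sending_queue=None, configs=None, cmd_config=None):
    # a default-loaded job receives the shared queues and config objects as kwargs
    logging.info("periodic job fired with configs: %s", configs)
    if async_queue is not None:
        # hand a command back to the main loop through the async queue
        async_queue.put({"cmd_obj": {}, "cmd_pattern": "noop", "input_cmd_str": "noop"})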
コード例 #42
0
def cmd_call(cmd):
    # print cmd
    # Execute the job and send an MQTT message
    commandlist = cmd.split()
    call(commandlist)
    mqttclient.publish("/bouzanet/other/bash", commandlist)

def mqtt_cmd_call(cmd):
    mqttclient.publish("/bouzanet/sprinkler/commands", cmd)

def job_listener(event):
    if event.exception: # Send MQTT message
        print('The job crashed :(')
    else:
        print('The job worked :)')

def on_connect(client, userdata, flags, rc):
    print("Connected to MQTT with result code %02X" % rc)

if __name__ == '__main__':
    scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
    scheduler.start()

    mqttclient = mqtt.Client()
    mqttclient.on_connect = on_connect
    mqttclient.connect(MQTTBROKER, MQTTPORT, MQTTKEEPALIVE)
    mqttclient.loop_start()
    while (True):
        main()
        time.sleep(SLEEPTIME)
    mqttclient.loop_stop()
コード例 #43
0
ファイル: roster_bot.py プロジェクト: jmcevoy1984/Python
def job_listener(event):
    if event.exception:
        print(date_and_time_now()+'Scheduler: Error - Job "'+event.job_id+'" scheduled to run at: '+str(event.scheduled_run_time)+' crashed.')
        print(date_and_time_now()+'Traceback: '+event.traceback)
    else:
        print(date_and_time_now()+'Scheduler: Job "'+event.job_id+'" executed successfully.')

def scheduler_listener(event):
    if event.code == EVENT_SCHEDULER_STARTED:
        print(date_and_time_now()+'Scheduler: Scheduler Started.')
    elif event.code == EVENT_SCHEDULER_SHUTDOWN:
        print(date_and_time_now()+'Scheduler: Scheduler Shutdown.')


scheduler = BackgroundScheduler()
scheduler.add_listener(job_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.add_listener(scheduler_listener, EVENT_SCHEDULER_STARTED | EVENT_SCHEDULER_SHUTDOWN)
scheduler.add_job(post_message, 'cron',  day_of_week='mon-sat', hour=8, minute=59, end_date='2017-05-30', id='post_roster_to_'+target_channel, args=['*SUPPORT ROSTER*', create_attachment(create_roster_attachment_data(get_formatted_roster()), generate_roster_footer(get_formatted_roster())), target_channel])
scheduler.start()

app = Flask(__name__)
app.run()
'''print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

try:
    # This is here to simulate application activity (which keeps the main thread alive).
    while True:
        time.sleep(2)
except (KeyboardInterrupt, SystemExit):
    # Not strictly necessary if daemonic mode is enabled but should be done if possible
    scheduler.shutdown()'''
コード例 #44
0
ファイル: starter.py プロジェクト: johnnyFiftyFive/rotator
from flask import Flask

from keys import SCHED_HOUR, SCHED_MINUTE, SCHED_JOBID
from schedule.listeners import execution_listener
from schedule.listeners import error_listener
from db_back import create_backup
import model_blueprint
import db_config

app = Flask(__name__)
app.config.from_object('app_config.config')

app.config.update(db_config.load_config())

scheduler = BackgroundScheduler()
scheduler.add_listener(execution_listener, EVENT_JOB_EXECUTED)
scheduler.add_listener(error_listener, EVENT_JOB_ERROR)
scheduler.add_job(create_backup, 'cron',
                  hour=int(app.config[SCHED_HOUR]),
                  minute=int(app.config[SCHED_MINUTE]),
                  id=app.config[SCHED_JOBID])
scheduler.start()

scheduler.get_jobs()
app.register_blueprint(model_blueprint.blueprint)

from rotatordb import db_session


@app.teardown_request
def remove_db_session(exception):
    # body truncated in the source; presumably the scoped session imported above is released here
    db_session.remove()
コード例 #45
0
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()


def job_added_event(event):
    job = scheduler.get_job(event.job_id)
    if hasattr(job.func, 'scopped'):
        kwargs = job.kwargs
        kwargs['_job_id'] = job.id
        scheduler.modify_job(event.job_id, jobstore=None, kwargs=kwargs)


scheduler.add_listener(job_added_event, EVENT_JOB_ADDED)
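
job_added_event() only rewrites the kwargs of jobs whose function carries a 'scopped' attribute. A hedged sketch of how a job function might opt in to that behaviour and receive the injected _job_id (the decorator name and the job body are assumptions; 'scopped' is spelled as in the original listener):

def scopped_job(func):
    # mark the function so job_added_event() injects its job id into kwargs
    func.scopped = True
    return func

@scopped_job
def cleanup(_job_id=None, **kwargs):
    print('running scheduled job', _job_id)

# scheduler.add_job(cleanup, 'interval', minutes=5)  # _job_id is filled in by the listener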
コード例 #46
0
class HackathonScheduler(object):
    """A helper class for apscheduler"""
    jobstore = "ohp"

    def get_scheduler(self):
        """Return the apscheduler instance in case you have to call it directly

        :return the instance of APScheduler

        .. note:: the return value might be None in flask debug mode
        """
        return self.__apscheduler

    def add_once(self, feature, method, context=None, id=None, replace_existing=True, run_date=None, **delta):
        """Add a job to APScheduler and executed only once

        Job will be executed at 'run_date' or after certain timedelta.

        :Example:
            scheduler = RequiredFeature("scheduler")

            # execute task once in 5 minutes:
            context = Context(user_id=1)
            scheduler.add_once("user_manager","get_user_by_id",context=context, minutes=5)
            # 5 minutes later, user_manager.get_user_by_id(context) will be executed

        :type feature: str|unicode
        :param feature: the feature used to look up the instance through hackathon_factory. All features are defined in __init__.py

        :type method: str|unicode
        :param method: the method name defined in the instance

        :type context: Context, see hackathon/__init__.py
        :param context: the execution context. Actually the parameters of 'method'

        :type id: str
        :param id: id for APScheduler job. Random id will be generated if not specified by caller

        :type replace_existing: bool
        :param replace_existing: if True, an existing job with the same id will be replaced; if False, an exception will be raised

        :type run_date: datetime | None
        :param run_date: job run date. If None, job run date will be datetime.now()+timedelta(delta)

        :type delta: kwargs for timedelta
        :param delta: kwargs for timedelta. For example: minutes=5. Will be ignored if run_date is not None
        """
        if not run_date:
            run_date = get_now() + timedelta(**delta)

        if self.__apscheduler:
            self.__apscheduler.add_job(scheduler_executor,
                                       trigger='date',
                                       run_date=run_date,
                                       id=id,
                                       max_instances=1,
                                       replace_existing=replace_existing,
                                       jobstore=self.jobstore,
                                       args=[feature, method, context])

    def add_interval(self, feature, method, context=None, id=None, replace_existing=True, next_run_time=undefined,
                     **interval):
        """Add an interval job to APScheduler and executed.

        Job will be executed firstly at 'next_run_time'. And then executed in interval.

        :Example:
            scheduler = RequiredFeature("scheduler")

            context = Context(user_id=1)
            scheduler.add_interval("user_manager","get_user_by_id", context=context, minutes=10)
            # user_manager.get_user_by_id(context) will be called every 10 minutes

        :type feature: str|unicode
        :param feature: the feature used to look up the instance through hackathon_factory. All features are defined in __init__.py

        :type method: str|unicode
        :param method: the method name defined in the instance

        :type context: Context, see hackathon/__init__.py
        :param context: the execution context. Actually the parameters of 'method'

        :type id: str
        :param id: id for APScheduler job. Random id will be generated if not specified by caller

        :type replace_existing: bool
        :param replace_existing: if True, an existing job with the same id will be replaced; if False, an exception will be raised

        :type next_run_time: datetime | undefined
        :param next_run_time: the first time the job will be executed. Leave undefined to delay the first run until the interval has elapsed

        :type interval: kwargs for "interval" trigger
        :param interval: kwargs for "interval" trigger. For example: minutes=5.
        """
        if self.__apscheduler:
            self.__apscheduler.add_job(scheduler_executor,
                                       trigger='interval',
                                       id=id,
                                       max_instances=1,
                                       replace_existing=replace_existing,
                                       next_run_time=next_run_time,
                                       jobstore=self.jobstore,
                                       args=[feature, method, context],
                                       **interval)

    def remove_job(self, job_id):
        """Remove job from APScheduler job store

        :type job_id: str | unicode
        :param job_id: the id of job
        """
        if self.__apscheduler:
            try:
                self.__apscheduler.remove_job(job_id, self.jobstore)
            except JobLookupError:
                log.debug("remove job failed because job %s not found" % job_id)
            except Exception as e:
                log.error(e)

    def has_job(self, job_id):
        """Check the existence of specific job """
        if self.__apscheduler:
            job = self.__apscheduler.get_job(job_id, jobstore=self.jobstore)
            return job is not None
        return False

    def __init__(self, app):
        """Initialize APScheduler

        :type app: Flask
        :param app: the Flask app
        """
        self.app = app
        self.__apscheduler = None

        # Only instantiate when not in Flask debug mode, or when running in the
        # Werkzeug reloader's child process; this avoids APScheduler being instantiated twice
        if not app.debug or os.environ.get("WERKZEUG_RUN_MAIN") == "true":
            self.__apscheduler = BackgroundScheduler(timezone=utc)

            # add MySQL job store
            job_store_type = safe_get_config("scheduler.job_store", "memory")
            if job_store_type == "mysql":
                log.debug("add apscheduler job store based on mysql")
                self.__apscheduler.add_jobstore('sqlalchemy',
                                                alias=self.jobstore,
                                                url=get_config("scheduler.job_store_url"))
            elif job_store_type == "mongodb":
                log.debug("add apscheduler job store based on mongodb")
                self.__apscheduler.add_jobstore('mongodb',
                                                alias=self.jobstore,
                                                database=safe_get_config("scheduler.database", "apscheduler"),
                                                collection=safe_get_config("scheduler.collection", "jobs"),
                                                host=safe_get_config("scheduler.host", "localhost"),
                                                port=safe_get_config("scheduler.port", 27017))

            # add event listener
            self.__apscheduler.add_listener(scheduler_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR | EVENT_JOB_ADDED)
            log.info("APScheduler loaded")
            self.__apscheduler.start()
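
Both add_once() and add_interval() above schedule the module-level function scheduler_executor with [feature, method, context] as arguments, but that function is not shown in this excerpt. A hedged sketch of what it presumably does, assuming the RequiredFeature lookup hinted at in the docstrings (the real Open Hackathon Platform implementation may differ):

def scheduler_executor(feature, method, context):
    # resolve the feature instance through the factory and call the named method
    instance = RequiredFeature(feature)
    func = getattr(instance, method)
    if context is None:
        return func()
    return func(context)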
コード例 #47
0
ファイル: tasksTrigger.py プロジェクト: Mozilla-TWQA/Hasal
class TasksTrigger(object):
    """
    MD5_HASH_FOLDER is ".md5"
    """
    ARCHIVE_ROOT_URL = 'https://archive.mozilla.org'
    ARCHIVE_LATEST_FOLDER = '/pub/firefox/nightly/latest-mozilla-central/'
    ARCHIVE_LINK_RE_STRING = r'(?<=href=").*?(?=")'

    KEY_CONFIG_PULSE_USER = '******'
    KEY_CONFIG_PULSE_PWD = 'pulse_password'
    KEY_CONFIG_JOBS = 'jobs'
    KEY_JOBS_ENABLE = 'enable'
    KEY_JOBS_AMOUNT = 'amount'
    KEY_JOBS_TOPIC = 'topic'
    KEY_JOBS_PLATFORM_BUILD = 'platform_build'
    KEY_JOBS_INTERVAL_MINUTES = 'interval_minutes'
    KEY_JOBS_CMD = 'cmd'
    KEY_JOBS_CONFIGS = 'configs'

    MD5_HASH_FOLDER = '.md5'

    # filename example: 'firefox-56.0a1.en-US.linux-x86_64.json'
    MATCH_FORMAT = '.{platform_key}.{ext}'

    PLATFORM_MAPPING = {
        'linux32': {
            'key': 'linux-i686',
            'ext': 'tar.bz2'
        },
        'linux64': {
            'key': 'linux-x86_64',
            'ext': 'tar.bz2'
        },
        'mac': {
            'key': 'mac',
            'ext': 'dmg'
        },
        'win32': {
            'key': 'win32',
            'ext': 'zip'
        },
        'win64': {
            'key': 'win64',
            'ext': 'zip'
        }
    }

    def __init__(self, config, cmd_config_obj, clean_at_begin=False):
        self.all_config = config
        self.cmd_config_obj = cmd_config_obj

        # get jobs config
        self.jobs_config = config.get(TasksTrigger.KEY_CONFIG_JOBS, {})
        self.pulse_username = config.get(TasksTrigger.KEY_CONFIG_PULSE_USER)
        self.pulse_password = config.get(TasksTrigger.KEY_CONFIG_PULSE_PWD)

        self._validate_data()

        if clean_at_begin:
            self.clean_pulse_queues()

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def _validate_data(self):
        # validate Pulse account
        if not self.pulse_username or not self.pulse_password:
            # there is no Pulse account information in "job_config.json"
            raise Exception('Cannot access Pulse because there is no Pulse account information.')

    def clean_pulse_queues(self):
        """
        Clean and re-create the enabled Pulse Queues to get rid of dead consumer clients on Pulse.
        A dead consumer client receives messages without ack()-ing them, so those messages stay on Pulse and no one else can handle them.
        """
        logging.info('Cleaning and re-creating Pulse Queues ...')
        queues_set = set()
        for job_name, job_detail in self.jobs_config.items():
            # have default config
            enable = job_detail.get(TasksTrigger.KEY_JOBS_ENABLE, False)
            topic = job_detail.get(TasksTrigger.KEY_JOBS_TOPIC, '')
            if enable and topic:
                queues_set.add(topic)
        logging.info('Enabled Pulse Queues: {}'.format(queues_set))

        for topic in queues_set:
            ret = HasalPulsePublisher.re_create_pulse_queue(username=self.pulse_username,
                                                            password=self.pulse_password,
                                                            topic=topic)
            if not ret:
                logging.error('Queue [{}] was deleted but could not be re-created successfully.'.format(topic))
        logging.info('Clean and re-create Pulse Queues done.')

    @staticmethod
    def get_all_latest_files():
        """
        Get all latest files from ARCHIVE server.
        @return: dict object {'<filename>': '<folder/path/with/filename>', ...}
        """
        latest_url = urlparse.urljoin(TasksTrigger.ARCHIVE_ROOT_URL, TasksTrigger.ARCHIVE_LATEST_FOLDER)
        ret_dict = {}
        try:
            res_obj = urllib2.urlopen(latest_url)
            if res_obj.getcode() == 200:
                for line in res_obj.readlines():
                    match = re.search(TasksTrigger.ARCHIVE_LINK_RE_STRING, line)
                    if match:
                        href_link = match.group(0)
                        name = href_link.split('/')[-1]
                        ret_dict[name] = href_link
            else:
                logging.error('Fetch builds failed. Code: {code}, Link: {link}'.format(code=res_obj.getcode(),
                                                                                       link=latest_url))
        except Exception as e:
            logging.error(e)
        return ret_dict

    @staticmethod
    def get_latest_info_json_url(platform):
        """
        Get the latest platform build's JSON file URL based on the specified platform.
        @param platform: the specified platform. Defined in PLATFORM_MAPPING[<name>]['key'].
        @return: the latest platform build's JSON file URL.
        """
        ext_json = 'json'
        match_endswith_string = TasksTrigger.MATCH_FORMAT.format(platform_key=platform, ext=ext_json)

        # get latest files
        all_files = TasksTrigger.get_all_latest_files()

        # find the matched files base on platform, e.g. "win64.json"
        matched_files = {k: v for k, v in all_files.items() if k.endswith(match_endswith_string)}

        if len(matched_files) >= 1:
            # when get matched files, then get the latest file URL folder path
            matched_filename = sorted(matched_files.keys())[-1]
            ret_url = matched_files.get(matched_filename)
            return urlparse.urljoin(TasksTrigger.ARCHIVE_ROOT_URL, ret_url)
        else:
            logging.error('There is no matched filename endswith "{}".'.format(match_endswith_string))
            return None

    @staticmethod
    def get_remote_md5(url, max_size=1 * 1024 * 1024):
        """
        Get remote resource's MD5 hash string.
        @param url: remote resource URL.
        @param max_size: max download size. default is 1*1024*1024 bytes (1 MB).
        @return: the MD5 hash string (lowercase).
        """
        remote_resource = urllib2.urlopen(url)
        md5_handler = hashlib.md5()
        counter = 0
        while True:
            data = remote_resource.read(1024)
            counter += 1024

            if not data or counter >= max_size:
                break
            md5_handler.update(data)
        return md5_handler.hexdigest()

    @staticmethod
    def get_latest_info_json_md5_hash(platform):
        """
        Get the MD5 hash string of the latest platform build's JSON file based on the specified platform.
        @param platform: the specified platform. Defined in PLATFORM_MAPPING[<name>]['key'].
        @return: the MD5 hash string of latest platform build's JSON file.
        """
        json_file_url = TasksTrigger.get_latest_info_json_url(platform)
        hash_string = TasksTrigger.get_remote_md5(json_file_url)
        return hash_string

    @staticmethod
    def check_folder(checked_folder):
        """
        Check the given folder, creating it if necessary.
        @param checked_folder: the folder path to check.
        @return: True if the folder already exists as a directory, or was (re-)created successfully.
        """
        try:
            if os.path.exists(checked_folder):
                if os.path.isfile(checked_folder):
                    os.remove(checked_folder)
                    os.makedirs(checked_folder)
                return True
            else:
                # there is no valid MD5 folder
                os.makedirs(checked_folder)
                return True
        except Exception as e:
            logging.error(e)
            return False

    @staticmethod
    def check_latest_info_json_md5_changed(job_name, platform):
        """
        @param job_name: the job name, which is used as the identifying name.
        @param platform: the platform name on the Archive server.
        @return: True if changed, False if not changed.
        """
        current_file_folder = os.path.dirname(os.path.realpath(__file__))

        md5_folder = os.path.join(current_file_folder, TasksTrigger.MD5_HASH_FOLDER)

        # prepare MD5 folder
        if not TasksTrigger.check_folder(md5_folder):
            return False

        # get new MD5 hash
        new_hash = TasksTrigger.get_latest_info_json_md5_hash(platform)

        # check MD5 file
        job_md5_file = os.path.join(md5_folder, job_name)
        if os.path.exists(job_md5_file):
            with open(job_md5_file, 'r') as f:
                origin_hash = f.readline()

            if origin_hash == new_hash:
                # no changed
                return False
            else:
                # changed
                logging.info('Job "{}" platform "{}": Latest Hash [{}], Origin Hash: [{}]'.format(job_name,
                                                                                                  platform,
                                                                                                  new_hash,
                                                                                                  origin_hash))
                with open(job_md5_file, 'w') as f:
                    f.write(new_hash)
                return True
        else:
            # found the file for the 1st time
            logging.info('Job "{}" platform "{}": Latest Hash [{}], no origin hash.'.format(job_name,
                                                                                            platform,
                                                                                            new_hash))
            with open(job_md5_file, 'w') as f:
                f.write(new_hash)
            return True

    @staticmethod
    def clean_md5_by_job_name(job_name):
        """
        clean the md5 file by job name.
        @param job_name: the job name, which is used as the identifying name.
        """
        current_file_folder = os.path.dirname(os.path.realpath(__file__))

        md5_folder = os.path.join(current_file_folder, TasksTrigger.MD5_HASH_FOLDER)

        # prepare MD5 folder
        if not TasksTrigger.check_folder(md5_folder):
            return False

        # check MD5 file
        job_md5_file = os.path.join(md5_folder, job_name)
        if os.path.exists(job_md5_file):
            if os.path.isfile(job_md5_file):
                try:
                    os.remove(job_md5_file)
                    return True
                except Exception as e:
                    logging.error(e)
                    return False
            else:
                logging.warn('The {} is not a file.'.format(job_md5_file))
                return False
        else:
            logging.debug('The {} not exists.'.format(job_md5_file))
            return True

    @staticmethod
    def _validate_job_config(job_config):
        """
        Validate the job config. Required keys: topic, platform_build, and cmd.
        @param job_config: job detail config.
        @return: True or False.
        """
        required_keys = [TasksTrigger.KEY_JOBS_TOPIC,
                         TasksTrigger.KEY_JOBS_PLATFORM_BUILD,
                         TasksTrigger.KEY_JOBS_CMD]

        for required_key in required_keys:
            if required_key not in job_config:
                logging.error('There is no required key [{}] in job config.'.format(required_key))
                return False
        return True

    @staticmethod
    def job_pushing_meta_task(username, password, command_config, job_name, topic, amount, platform_build, cmd_name, overwrite_cmd_config=None):
        """
        [JOB]
        Pushing the MetaTask if the remote build's MD5 was changed.
        @param username: Pulse username.
        @param password: Pulse password.
        @param command_config: The overall command config dict object.
        @param job_name: The job name which be defined in trigger_config.json.
        @param topic: The Topic on Pulse. Refer to `get_topic()` method of `jobs.pulse`.
        @param amount: The MetaTask amount per time.
        @param platform_build: The platform on Archive server.
        @param cmd_name: The MetaTask command name.
        @param overwrite_cmd_config: The overwrite command config.
        """
        changed = TasksTrigger.check_latest_info_json_md5_changed(job_name=job_name, platform=platform_build)
        if changed:
            # check queue
            queue_exists = HasalPulsePublisher.check_pulse_queue_exists(username=username,
                                                                        password=password,
                                                                        topic=topic)
            if not queue_exists:
                logging.error('There is no Queue for Topic [{topic}]. Messages might be ignored.'.format(topic=topic))

            # Push MetaTask to Pulse
            publisher = HasalPulsePublisher(username=username,
                                            password=password,
                                            command_config=command_config)

            now = datetime.now()
            now_string = now.strftime('%Y-%m-%d_%H:%M:%S.%f')
            uid_prefix = '{time}.{job}'.format(time=now_string, job=job_name)
            # push meta task
            logging.info('Pushing to Pulse...\n'
                         '{line}\n'
                         'UID prefix: {uid_prefix}\n'
                         'Trigger Job: {job_name}\n'
                         'Platform: {platform}\n'
                         'Topic: {topic}\n'
                         'Amount: {amount}\n'
                         'Command: {cmd}\n'
                         'cmd_config: {cmd_config}\n'
                         '{line}\n'.format(uid_prefix=uid_prefix,
                                           job_name=job_name,
                                           platform=platform_build,
                                           topic=topic,
                                           amount=amount,
                                           cmd=cmd_name,
                                           cmd_config=overwrite_cmd_config,
                                           line='-' * 10))
            for idx in range(amount):
                uid = '{prefix}.{idx}'.format(prefix=uid_prefix, idx=idx + 1)
                publisher.push_meta_task(topic=topic,
                                         command_name=cmd_name,
                                         overwrite_cmd_configs=overwrite_cmd_config,
                                         uid=uid)

    @staticmethod
    def job_listen_response_from_agent(username, password, rotating_file_path):
        """
        [JOB]
        Logging the message from Agent by Pulse "mgt" topic channel.
        @param username: Pulse username.
        @param password: Pulse password.
        @param rotating_file_path: The rotating file path.
        """
        PULSE_MGT_TOPIC = 'mgt'
        PULSE_MGT_OBJECT_KEY = 'message'

        rotating_logger = logging.getLogger("RotatingLog")
        rotating_logger.setLevel(logging.INFO)

        # create Rotating File Handler, 1 day, backup 30 times.
        rotating_handler = TimedRotatingFileHandler(rotating_file_path,
                                                    when='midnight',
                                                    interval=1,
                                                    backupCount=30)

        rotating_formatter = logging.Formatter('%(asctime)s, %(levelname)s, %(message)s')
        rotating_handler.setFormatter(rotating_formatter)
        rotating_logger.addHandler(rotating_handler)

        def got_response(body, message):
            """
            handle the message
            ack then broker will remove this message from queue
            """
            message.ack()
            data_payload = body.get('payload')
            msg_dict_obj = data_payload.get(PULSE_MGT_OBJECT_KEY)
            try:
                msg_str = json.dumps(msg_dict_obj)
                rotating_logger.info(msg_str)
            except:
                rotating_logger.info(msg_dict_obj)

        hostname = socket.gethostname()
        consumer_label = 'TRIGGER-{hostname}'.format(hostname=hostname)
        topic = PULSE_MGT_TOPIC
        c = HasalConsumer(user=username, password=password, applabel=consumer_label)
        c.configure(topic=topic, callback=got_response)

        c.listen()

    def _job_exception_listener(self, event):
        if event.exception:
            logging.error("Job [%s] crashed [%s]" % (event.job_id, event.exception))
            logging.error(event.traceback)

    def _add_event_listener(self):
        self.scheduler.add_listener(self._job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    def run(self):
        """
        Adding jobs into scheduler.
        """
        # add event listener
        self._add_event_listener()

        # create "mgt" channel listener
        logging.info('Adding Rotating Logger for listen Agent information ...')
        MGT_ID = 'trigger_mgt_listener'
        MGT_LOG_PATH = 'rotating_mgt.log'
        self.scheduler.add_job(func=TasksTrigger.job_listen_response_from_agent,
                               trigger='interval',
                               id=MGT_ID,
                               max_instances=1,
                               seconds=10,
                               args=[],
                               kwargs={'username': self.pulse_username,
                                       'password': self.pulse_password,
                                       'rotating_file_path': MGT_LOG_PATH})
        logging.info('Adding Rotating Logger done: {fp}'.format(fp=os.path.abspath(MGT_LOG_PATH)))

        # create each Trigger jobs
        for job_name, job_detail in self.jobs_config.items():
            """
            ex:
            {
                "win7_x64": {
                    "enable": true,
                    "topic": "win7",
                    "platform_build": "win64",
                    "interval_minutes": 10,
                    "cmd": "download-latest-nightly",
                    "configs": {}
                },
                ...
            }
            """
            if not TasksTrigger._validate_job_config(job_detail):
                logging.error('The job config is not valid, skipping.\n{}: {}\n'.format(job_name, job_detail))
                continue  # do not schedule jobs with missing required keys

            # have default config
            enable = job_detail.get(TasksTrigger.KEY_JOBS_ENABLE, False)
            interval_minutes = job_detail.get(TasksTrigger.KEY_JOBS_INTERVAL_MINUTES, 10)
            configs = job_detail.get(TasksTrigger.KEY_JOBS_CONFIGS, {})
            amount = job_detail.get(TasksTrigger.KEY_JOBS_AMOUNT, 1)
            # required
            topic = job_detail.get(TasksTrigger.KEY_JOBS_TOPIC)
            platform_build = job_detail.get(TasksTrigger.KEY_JOBS_PLATFORM_BUILD)
            cmd = job_detail.get(TasksTrigger.KEY_JOBS_CMD)

            if enable:
                logging.info('Job [{}] is enabled.'.format(job_name))

                # adding Job Trigger
                self.scheduler.add_job(func=TasksTrigger.job_pushing_meta_task,
                                       trigger='interval',
                                       id=job_name,
                                       max_instances=1,
                                       minutes=interval_minutes,
                                       args=[],
                                       kwargs={'username': self.pulse_username,
                                               'password': self.pulse_password,
                                               'command_config': self.cmd_config_obj,
                                               'job_name': job_name,
                                               'topic': topic,
                                               'amount': amount,
                                               'platform_build': platform_build,
                                               'cmd_name': cmd,
                                               'overwrite_cmd_config': configs})

            else:
                logging.info('Job [{}] is disabled.'.format(job_name))
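
A hedged usage sketch for the class above: load the trigger and command configurations, then let the BackgroundScheduler created in __init__ drive the jobs added by run(). The file names and the CommonUtil loader are assumptions modelled on the other Hasal examples, not the actual entry point:

import time

def main():
    config = CommonUtil.load_json_file('trigger_config.json')   # jobs plus Pulse account
    cmd_config = CommonUtil.load_json_file('cmd_config.json')   # MetaTask command definitions
    trigger = TasksTrigger(config=config, cmd_config_obj=cmd_config, clean_at_begin=True)
    trigger.run()
    # BackgroundScheduler runs in daemon threads, so keep the main thread alive
    while True:
        time.sleep(60)

if __name__ == '__main__':
    main()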
コード例 #48
0
ファイル: node.py プロジェクト: zhilinwww/jbc
  sched.add_job(mine.validate_possible_block, args=[possible_block_dict], id='validate_possible_block') #add the block again

  return jsonify(received=True)

if __name__ == '__main__':

  #args!
  parser = argparse.ArgumentParser(description='JBC Node')
  parser.add_argument('--port', '-p', default='5000',
                    help='what port we will run the node on')
  parser.add_argument('--mine', '-m', dest='mine', action='store_true')
  args = parser.parse_args()

  filename = '%sdata.txt' % (CHAINDATA_DIR)
  with open(filename, 'w') as data_file:
    data_file.write("Mined by node on port %s" % args.port)

  mine.sched = sched #to override the BlockingScheduler in the mine module
  #only mine if we want to
  if args.mine:
    #in this case, sched is the background sched
    sched.add_job(mine.mine_for_block, kwargs={'rounds':STANDARD_ROUNDS, 'start_nonce':0}, id='mining') #add the block again
    sched.add_listener(mine.mine_for_block_listener, apscheduler.events.EVENT_JOB_EXECUTED)#, args=sched)

  sched.start() #want this to start so we can validate on the schedule and not rely on Flask

  #now we know what port to use
  node.run(host='127.0.0.1', port=args.port)
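
The node above registers mine.mine_for_block_listener for EVENT_JOB_EXECUTED, but the listener is not included in this excerpt. A hedged sketch, written as it might appear inside the mine module, of a listener that chains mining batches by re-adding the job after each run; it assumes mine_for_block returns the nonce to continue from, which is a guess rather than the jbc project's actual contract:

def mine_for_block_listener(event):
    # when a mining batch finishes without an exception, queue up the next batch
    if event.job_id == 'mining' and not event.exception:
        next_nonce = event.retval or 0  # assumed: the job returns the next start nonce
        sched.add_job(mine_for_block,
                      kwargs={'rounds': STANDARD_ROUNDS, 'start_nonce': next_nonce},
                      id='mining')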

コード例 #49
0
ファイル: test_list.py プロジェクト: shenxiangq/news_crawler
import logging

logging.basicConfig(format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
                    datefmt='%a, %d %b %Y %H:%M:%S', filename='list.log', filemode='w')

n = 6000
ll = [0 for i in range(n)]

def func(i):
    ll[i] += 1

def err_lis(ev):
    logger = logging.getLogger("")
    logger.error(str(ev))

scheduler = BackgroundScheduler()
#scheduler = TwistedScheduler()
for i in range(n):
    start = datetime.datetime.now() + datetime.timedelta(seconds=i%10)
    scheduler.add_job(func, 'interval', args=(i,), start_date=start, seconds=10)

scheduler.add_listener(err_lis, apscheduler.events.EVENT_JOB_ERROR | apscheduler.events.EVENT_JOB_MISSED)
scheduler.start()
time.sleep(5)
#scheduler.shutdown()
s = 0
for i in ll:
    s += i
print s
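
With 6000 interval jobs firing every 10 seconds, the default BackgroundScheduler (a 10-worker thread pool) will miss many runs, which is exactly what the EVENT_JOB_MISSED listener above logs. A hedged sketch of how the stress test could be made more forgiving by enlarging the pool and tolerating late runs (the numbers are illustrative, not tuned values from the original test):

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    executors={'default': ThreadPoolExecutor(max_workers=100)},   # more worker threads
    job_defaults={'coalesce': True,            # collapse a backlog of missed runs into one
                  'misfire_grace_time': 30},   # allow runs to start up to 30s late
)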

コード例 #50
0
ファイル: scheduler.py プロジェクト: heroldus/alppaca
class Scheduler(object):
    """ Scheduler for refreshing credentials.

        By default it will fetch the credentials and then schedule itself to
        update them based on the expiration date. Some randomness is involved
        to avoid collisions. In case of failure to fetch credentials a back-off
        and safety behaviour is initiated.

        It is based on the apscheduler package.

    """

    def __init__(self, credentials, credentials_provider):
        self.logger = logging.getLogger(__name__)
        self.credentials = credentials

        self.credentials_provider = credentials_provider
        self.backoff = None
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_listener(self.job_executed_event_listener, EVENT_JOB_EXECUTED)
        self.scheduler.add_listener(self.job_failed_event_listener, EVENT_JOB_ERROR)
        self.scheduler.start()

    def job_executed_event_listener(self, event):
        self.logger.info("Successfully completed credentials refresh")

    def job_failed_event_listener(self, event):
        self.logger.error("Failed to refresh credentials: %s", event.exception)

    def do_backoff(self):
        """ Perform back-off and safety. """
        if self.backoff is None:
            self.logger.info("Initialize back-off and safety behaviour")
            self.backoff = backoff_refresh_generator()
        refresh_delta = six.next(self.backoff)
        self.build_trigger(refresh_delta)

    def refresh_credentials(self):
        """ Refresh credentials and schedule next refresh."""
        self.logger.info("about to fetch credentials")

        try:
            cached_credentials = self.credentials_provider.get_credentials_for_all_roles()
        except Exception:
            self.logger.exception("Error in credential provider:")
            cached_credentials = None

        if cached_credentials:
            self.update_credentials(cached_credentials)
        else:
            self.logger.info("No credentials found!")
            self.do_backoff()

    def update_credentials(self, cached_credentials):
        """ Update credentials and retrigger refresh """
        self.credentials.update(cached_credentials)
        self.logger.info("Got credentials: %s", self.credentials)
        refresh_delta = self.extract_refresh_delta()
        if refresh_delta < 0:
            self.logger.warn("Expiration date is in the past, enter backoff.")
            self.do_backoff()
        else:
            if self.backoff is not None:
                self.backoff = None
                self.logger.info("Exit backoff state.")
            refresh_delta = self.sample_new_refresh_delta(refresh_delta)
            self.build_trigger(refresh_delta)

    def extract_refresh_delta(self):
        """ Return shortest expiration time in seconds. """
        expiration = isodate.parse_datetime(extract_min_expiration(self.credentials))
        self.logger.info("Extracted expiration: %s", expiration)
        refresh_delta = total_seconds(expiration - datetime.datetime.now(tz=pytz.utc))
        return refresh_delta

    def sample_new_refresh_delta(self, refresh_delta):
        """ Sample a new refresh delta. """
        refresh_delta = int(uniform(refresh_delta * .5, refresh_delta * .9))
        return refresh_delta

    def build_trigger(self, refresh_delta):
        """ Actually add the trigger to the apscheduler. """
        self.logger.info("Setting up trigger to fire in %s seconds", refresh_delta)
        self.scheduler.add_job(func=self.refresh_credentials, trigger=DelayTrigger(refresh_delta))
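
do_backoff() above relies on backoff_refresh_generator(), which is imported from elsewhere in alppaca and not shown in this excerpt. A hedged sketch of a compatible generator that yields increasing retry delays in seconds (the actual intervals used by alppaca may differ):

def backoff_refresh_generator():
    # retry quickly at first, then level off at a fixed interval
    for delay in (1, 2, 4, 8, 16, 32):
        yield delay
    while True:
        yield 60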