Example #1
import time
from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def myjob():
	print(datetime.now())

if __name__ == '__main__':
	scheduler = BackgroundScheduler()
	scheduler.start()
	job = scheduler.add_job(myjob, 'interval', seconds=1, id='myjob')
	print(job)

	jobs = scheduler.get_jobs()
	print(jobs)

	try:
		time.sleep(5)		
		print('pause job')
		scheduler.pause_job('myjob')
		time.sleep(5)
		print('resume job')
		scheduler.resume_job('myjob')

		print('reschedule job ...')
		scheduler.reschedule_job('myjob', trigger='cron', second='*/5')
		time.sleep(10)
	except (KeyboardInterrupt, SystemExit):
		scheduler.shutdown()
Example #2
class JobMaster(Jobs):
    def __init__(self):
        time_zone = pytz.timezone('Asia/Shanghai')
        executors = {
            'default': ThreadPoolExecutor(20)
        }
        self.sche = BackgroundScheduler(timezone=time_zone, executors=executors)

    def start_jobs(self):
        self.sche.start()

    def shut_down(self):
        self.sche.shutdown()

    def is_jobs_exist(self):
        return self.sche.get_jobs() != []
    
    def get_jobs_id_list(self):
        id_list = list()
        for job in self.sche.get_jobs():
            id_list.append(job.id)
        return id_list

    def load_jobs_by_id(self, id):
        self.load_jobs()
        id_list = self.get_jobs_id_list()
        for i in id_list:
            if i not in id:
                self.sche.remove_job(i)

    def load_jobs(self):
        # self.sche.add_job(self.testjob1, 'interval', seconds=3, id='test_job1')
        self.sche.add_job(self.job_weather, 'cron', hour='9', id='job_weather')
        self.sche.add_job(self.job_new_ranking_telegram, 'cron', hour='16,21', minute='10', id='job_br_tg')
Example #3
def start():
    print "at updater start.from water schedule"
    scheduler = BackgroundScheduler()
    scheduler.add_job(program.runProgram,
                      'cron',
                      day_of_week='0,2,4',
                      hour=6,
                      minute=30)
    scheduler.start()
    print(scheduler.get_jobs())
Example #4
File: glof_main.py Project: lima36/study
class AutoReserve():
    def __init__(self):
        self.scheduler  = BackgroundScheduler(timezone="Asia/Seoul")
        self.jobid      = 1
        self.jobdb      = {}

    def addGolfJob(self, ccName, BackFunc, user_id, user_pw, schedule, target_date, target_time, day=21):
        print('autoReserve in Jayuro', schedule.hour(), schedule.minute(), schedule.second(), target_date, target_time, day)
        print(self.jobid)
        self.jobid += 1
        # skip over any id that is already registered (add_job below stores ids as strings)
        while self.scheduler.get_job(str(self.jobid)):
            self.jobid += 1
        
        result = self.scheduler.add_job(BackFunc,'cron', args=[user_id, user_pw, target_date, target_time, day], week='1-53', day_of_week='0-6', \
                                            hour=schedule.hour(), minute=schedule.minute(), second=schedule.second(), id=str(self.jobid))
        print("result:", result)
        self.jobdb[self.jobid]= ccName

        if self.scheduler.state == 1: #apscheduler.schedulers.base.STATE_RUNNING
            print('Scheduler is running')
        elif self.scheduler.state == 2:
            print('Scheduler is paused')
        elif self.scheduler.state == 0:
            print('Scheduler is stopped')
            self.scheduler.start()

        return self.jobid

    def autoStop(self):   
        print('autoStop')
        for jj in self.scheduler.get_jobs():
            print(jj)

        self.scheduler.remove_all_jobs()
        self.jobdb.clear()
        self.jobid = 1
        print(self.scheduler.get_jobs())

    def autoInfo(self):
        print('Auto information')
        print(self.scheduler.get_jobs())
        for jj in self.scheduler.get_jobs():
            print(jj)

        for key in self.jobdb:
            print(key, self.jobdb[key])

    def printlog(self, target_date, target_time):
        now = datetime.datetime.now()
        print(str(now) + str(target_date + "," + target_time))
        # print("Running main process............... : " + str(datetime.datetime.now(timezone('Asia/Seoul'))))

    def getJobInfo(self):
        return self.jobdb
Example #5
class FetchingSchedulerMain(object):
    scheduler = None

    def __init__(self):
        logging.basicConfig()
        logging.getLogger('apscheduler').setLevel(logging.DEBUG)
        self.scheduler = BackgroundScheduler()

    def job_scheduler_add(self, job, start_now=False):
        if start_now:
            self.scheduler.add_job(fetch_logs, args=[job])
        if job.ismonitored:
            self.scheduler.add_job(fetch_logs,
                                   'interval',
                                   seconds=job.fetchFrequency,
                                   args=[job],
                                   jitter=600)
            return 0
        return 1

    def job_scheduler_delete(self, job):
        for task in self.scheduler.get_jobs():
            if task.args[0].name == job.name:
                self.scheduler.remove_job(task.id)
                return 0
        return 1

    def job_scheduler_update(self, job):
        if not self.job_scheduler_check(job):
            self.job_scheduler_delete(job)
            self.job_scheduler_add(job)
        else:
            self.job_scheduler_add(job)

    def job_scheduler_check(self, job):
        for task in self.scheduler.get_jobs():
            if task.args[0].name == job.name:
                return 0
        return 1

    def scheduler_start(self):
        self.scheduler.start()
        jobs = Job.objects.filter(ismonitored=True)
        for job in jobs:
            self.scheduler.add_job(fetch_logs,
                                   'interval',
                                   seconds=job.fetchFrequency,
                                   args=[job],
                                   jitter=600)

    def scheduler_shutdown(self, wait=True):
        self.scheduler.shutdown(wait=wait)

    def get_scheduler(self):
        return self.scheduler
Example #6
class TimerTrigger(BaseTrigger):
    name = "timer"
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config={}, sched=None, connection=None):
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = "timer"
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def stop(self):
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        def toList(item):
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self, types=["timer"], timespecs=toList(trigger["time"]))

            efilters.append(f)

        return efilters

    def postConfig(self):
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error(
                            "Unable to parse time value '%s' defined in pipeline %s"
                            % (timespec, pipeline.name)
                        )
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour, minute=minute, second=second)

                    self.apsched.add_job(self._onTrigger, trigger=trigger, args=(pipeline.name, timespec))
Example #7
def test_initialize_scheduler(scheduler: BackgroundScheduler):
    # scheduler should not be running before initialization
    assert not scheduler.running
    assert scheduler.get_jobs() == []

    initialize_scheduler(scheduler)

    # ensure scheduler running in background
    assert scheduler.running

    # ensure all jobs registered
    jobs = scheduler.get_jobs()
    assert len(jobs) == 3
    for job in jobs:
        print(job.name)
        assert job.name in ["data_collection_job", "scrape_job"]
Example #8
def initialize_scheduler():
    """
    Initialize APScheduler, loading jobs database from disk if present

    Returns instance of BackgroundScheduler
    """
    if not os.path.exists(config["job_store_path"]):
        LOG.warning(
            "[!] Specified job store '%s' does not exist; creating one",
            config["job_store_path"],
        )
    jobstores = {
        "default":
        SQLAlchemyJobStore(url="sqlite:///{}".format(config["job_store_path"]))
    }
    scheduler = BackgroundScheduler(jobstores=jobstores)
    # the job store is only opened by start(), so start the scheduler
    # before listing any jobs persisted from a previous run
    scheduler.start()
    jobs = scheduler.get_jobs()
    LOG.info("[+] Current jobs (%d):", len(jobs))
    for job in jobs:
        LOG.info("ID: %s", job.id)
        LOG.info("\tName: %s", job.name)
        LOG.info("\tFunc: %s", job.func)
        LOG.info("\tWhen: %s", job.next_run_time)
    LOG.info("[+] Initialized scheduler")
    return scheduler
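Since jobs kept in a SQLAlchemy job store survive restarts, callers of initialize_scheduler() typically add jobs with a stable id and replace_existing=True so that re-running the program does not duplicate them. A minimal sketch (my_task and its schedule are illustrative assumptions, not part of the project above):

scheduler = initialize_scheduler()
scheduler.add_job(my_task, 'interval', minutes=15,
                  id='my_task',            # stable id, keyed into the job store
                  replace_existing=True)   # overwrite the persisted copy on restart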
Example #9
class Schedule:
    def __init__(self, cal_events):
        self.sched = BackgroundScheduler(daemon=True)
        self.gcalendar_events = cal_events

    def current_jobs(self):
        print(self.gcalendar_events)
        print(self.sched.get_jobs())

    def add_start_events(self, start_function):
        for i in self.gcalendar_events:
            self.sched.add_job(start_function,
                               'date',
                               run_date=i['start_time'])

    def add_end_events(self, end_function):
        print('end function here')

    def start_scheduler(self):
        self.sched.start()
Example #10
def go(managedNamespace):
	statusMgr = statusDbManager.StatusResource()
	managedNamespace.run = True
	managedNamespace.serverRun = True

	checkInitDbs()

	server_process = multiprocessing.Process(target=serverProcess, args=(managedNamespace,))

	sched = BackgroundScheduler()

	scheduleJobs(sched, managedNamespace)
	server_process.start()
	sched.start()

	loopCtr = 0
	while managedNamespace.run:
		time.sleep(0.1)

		if loopCtr % 100 == 0:
			for job in sched.get_jobs():
				statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
		loopCtr += 1

	sched.shutdown()
	server_process.join()
Example #11
def init_alarm(bot, alarm_dict):
    """
    初始化定时任务
    :param alarm_dict: 定时相关内容
    """
    # 定时任务
    scheduler = BackgroundScheduler()
    for item in alarm_dict:
        if not alarm_dict[item]['user_group']:
            print(alarm_dict[item]['name'])
            for group in [
                    bot.groups().search(v)[0] for v in alarm_dict[item]['name']
            ]:
                for v in alarm_dict[item]['alarm_timed']:
                    time_data = v.split(':')
                    scheduler.add_job(send_alarm_msg,
                                      'cron',
                                      args=[
                                          bot, group, item,
                                          alarm_dict[item]['is_tomorrow']
                                      ],
                                      hour=int(time_data[0]),
                                      minute=int(time_data[1]),
                                      id=make_md5(group.name + v),
                                      misfire_grace_time=600,
                                      jitter=alarm_dict[item]['alarm_jitter'])
    scheduler.start()
    print('Scheduled reminder sending enabled...')
    print(scheduler.get_jobs())
Example #12
class CronTask:

    def __init__(self):
        self.scheduler=BackgroundScheduler()

    def add_job(self, func, args, job_id, cron_args):
        '''
        cron_args:
            :param int|str year: 4-digit year
            :param int|str month: month (1-12)
            :param int|str day: day of the month (1-31)
            :param int|str week: ISO week (1-53)
            :param int|str day_of_week: number or name of weekday (0-6 or mon,tue,wed,thu,fri,sat,sun)
            :param int|str hour: hour (0-23)
            :param int|str minute: minute (0-59)
            :param int|str second: second (0-59)
            :param datetime|str start_date: earliest possible date/time to trigger on (inclusive)
            :param datetime|str end_date: latest possible date/time to trigger on (inclusive)
        '''
        logging.info('[cron]: add job %s, cron_args=%s' % (job_id, cron_args))
        self.scheduler.add_job(func, 'cron', args, **cron_args, id=job_id)

    def remove_job(self, job_id):
        logging.info('[cron]: remove job %s' % job_id)
        self.scheduler.remove_job(job_id)

    def __repr__(self):
        return '%s %s' % (self.scheduler.state, self.scheduler.get_jobs())
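The docstring above mirrors the keyword arguments accepted by APScheduler's cron trigger. A hypothetical usage sketch for CronTask (the callback and schedule values are illustrative assumptions):

task = CronTask()
# run print('tick') every 30 minutes between 08:00 and 18:59
task.add_job(print, ('tick',), 'demo_job', {'hour': '8-18', 'minute': '*/30'})
task.scheduler.start()  # note: CronTask itself never starts its scheduler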
Example #13
class scheduler:
    def __init__(self):
        self.scheduler_job = BackgroundScheduler()
        self.scheduler_job.add_job(stock_api_update,
                                   'interval',
                                   seconds=THREAD_INTERVAL)
        self.scheduler_job.add_job(change_status_rule,
                                   'interval',
                                   minutes=CHANGE_STATUS_RULE_THREAD_INT,
                                   max_instances=1)
        self.scheduler_job.add_job(change_threshold_rule,
                                   'interval',
                                   minutes=CHANGE_THRESHOLD_RULE_THREAD_INT,
                                   max_instances=1)
        self.scheduler_job.add_job(price_threshold_rule,
                                   'interval',
                                   minutes=PRICE_THRESHOLD_RULE_THREAD_INT,
                                   max_instances=1)
        self.scheduler_job.add_job(
            recommendation_analyst_rule,
            'interval',
            minutes=RECOMMENDATION_ANALYST_RULE_THREAD_INT,
            max_instances=1)

    def start(self):
        self.scheduler_job.start()

    def stop_all_jobs(self):
        # Removes all jobs from this store.
        self.scheduler_job.remove_all_jobs()
        # Frees any resources still bound to this job store.
        self.scheduler_job.shutdown()

    def get_running_jobs(self):
        return self.scheduler_job.get_jobs()
Example #14
class AdvancedScheduleManager:
    def __init__(self):

        self.s = BackgroundScheduler()
        self.s.start()

    def reset_venue(self, bot, venue):

        print("reset venue")

        if self.s.get_job(venue) is not None:
            print("remove venue")
            self.s.remove_job(venue)

        self.s.add_job(bot.send_non_update_venues,
                       'date',
                       run_date=datetime.now() + timedelta(seconds=3600),
                       kwargs={"venue": venue},
                       id=venue)

    def test_scheduler(self, bot):

        self.s.add_job(bot.pprint,
                       'date',
                       run_date=datetime.now() + timedelta(seconds=3),
                       kwargs={"text": "Advanced schedule執行"},
                       id="test")

    def get_queue(self):

        return len(self.s.get_jobs())
Example #15
def test3():
    """Run a job on a schedule, then pause and resume it"""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')  # run my_job every second; args are my_job's arguments; id is optional
    scheduler.start()  # start() returns immediately; the program keeps running while jobs execute in the background
    print('execution reached here 1')
    while (scheduler.state):
        if time.time() - start_time > 5:
            print('pausing jobs')
            scheduler.pause()  # pause all jobs
            break
    print('resuming jobs')
    if time.time() - start_time > 5:
        scheduler.resume()
    time.sleep(4)
    print('current job list: {}'.format(
        scheduler.get_jobs()))  # get_jobs() returns every Job instance on the scheduler
    scheduler.get_job('my_job_id')  # fetch the job whose id is 'my_job_id'

    scheduler.print_jobs()  # print a formatted list of all jobs

    print('removing jobs')
    # scheduler.remove_job('my_job_id')  # remove the job with id 'my_job_id'
    scheduler.remove_all_jobs()  # remove all jobs
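The my_job callback used by test3() above and test6() below is not shown on this page; any function accepting one positional argument fits. A minimal stand-in (an assumption, not the original):

def my_job(text):
    print('my_job:', text)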
Example #16
def test6():
    """Run a job on a schedule, pausing and resuming it via the Job instance"""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    job = scheduler.add_job(
        my_job, 'interval', args=('123', ), seconds=1,
        id='my_job_id')  # run my_job every second; args are my_job's arguments; id is optional
    print("job id: {}, job name: {}, job args: {}, job func: {}, trigger: {}".format(
        job.id, job.name, job.args, job.func, job.trigger))
    scheduler.start()  # start() returns immediately; the program keeps running while jobs execute in the background
    print('execution reached here 1')
    while (scheduler.state):
        if time.time() - start_time > 5:
            print('pausing the job')
            #scheduler.pause()  # pause the whole scheduler
            job.pause()  # pause this single job
            break
    print('resuming the job')
    if time.time() - start_time > 5:
        #scheduler.resume()  # resume the whole scheduler
        job.resume()  # resume this single job
    time.sleep(4)
    print('current job list: {}'.format(
        scheduler.get_jobs()))  # get_jobs() returns every Job instance on the scheduler
    scheduler.get_job('my_job_id')  # fetch the job whose id is 'my_job_id'

    scheduler.print_jobs()  # print a formatted list of all jobs

    print('removing the job')
    # scheduler.remove_job('my_job_id')  # remove the job with id 'my_job_id'
    # scheduler.remove_all_jobs()  # remove all jobs
    job.remove()  # remove this single job
Example #18
def start_scheduling(app):
    scheduler = BackgroundScheduler()
    cfq = app.app_config
    retention = cfq.retention
    options = {"trigger": "cron", "kwargs": {"app": app}, "day": "*", "timezone": 'UTC',
               "misfire_grace_time": 60 * 60 * 12, "coalesce": True}
    scheduler.add_job(func=suspend_users, hour=retention.cron_hour_of_day, **options)
    scheduler.add_job(func=parse_idp_metadata, hour=retention.cron_hour_of_day, **options)
    if cfq.platform_admin_notifications.enabled:
        scheduler.add_job(func=outstanding_requests, hour=cfq.platform_admin_notifications.cron_hour_of_day, **options)
    if cfq.collaboration_expiration.enabled:
        scheduler.add_job(func=expire_collaborations, hour=cfq.collaboration_expiration.cron_hour_of_day, **options)
    if cfq.collaboration_suspension.enabled:
        scheduler.add_job(func=suspend_collaborations, hour=cfq.collaboration_suspension.cron_hour_of_day, **options)
    if cfq.membership_expiration.enabled:
        scheduler.add_job(func=expire_memberships, hour=cfq.membership_expiration.cron_hour_of_day, **options)
    if cfq.user_requests_retention.enabled:
        scheduler.add_job(func=cleanup_non_open_requests, hour=cfq.user_requests_retention.cron_hour_of_day, **options)
    scheduler.start()

    logger = logging.getLogger("scheduler")
    jobs = scheduler.get_jobs()
    for job in jobs:
        logger.info(f"Running {job.name} job at {job.next_run_time}")

    if cfq.metadata.get("parse_at_startup", False):
        threading.Thread(target=parse_idp_metadata, args=(app,)).start()

    # Shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())
    app.scheduler = scheduler
    return scheduler
Example #20
class Controller:
    def __init__(self):

        #devId = os.environ["devId"]
        self.devId = "101"  # temporary
        self.custId = "101"  # temporary
        self.serPort = "/dev/ttyACM0"  # python -m serial.tools.list_ports
        self.ser = serial.Serial(self.serPort)  # open serial port

        self.status = 1  # will be updated on restart
        self.setpoint = 55  # will be updated on restart
        self.temp_interval = 1  # minutes between temperature reports (15 in production)
        self.energy_interval = 1  # minutes between energy reports (15 in production)

        self.myContainer = Container(self.ser, "IRJSON.json", self.setpoint,
                                     self.status)
        self.myRadio = Radio(self.devId, self.custId, self)

        self.scheduler = BackgroundScheduler({
            'apscheduler.timezone': 'UTC',
        })
        self.addJobs()
        self.scheduler.start()

    def addJobs(self):
        if debug: print("added jobs")
        self.tempReporter = self.scheduler.add_job(
            self.myRadio.sendTemperature, 'interval', minutes=self.temp_interval)
        self.energyReporter = self.scheduler.add_job(
            self.myRadio.sendEnergy, 'interval', minutes=self.energy_interval)

    def updateControls(self, onoff=False, radio=True):
        """ update the control settings """
        self.myContainer.sendControls(self.status, self.setpoint)
        if onoff and self.status: self.myContainer.sendIRcode("cool3", "62")
        elif onoff and not self.status: self.myContainer.sendIRcode("off", "0")
        if radio:
            self.myRadio.sendControls()

    def updateIntervals(self):
        """ update the intervals for sending temperature and energy """
        for job in self.scheduler.get_jobs():
            job.remove()
        self.addJobs()

    def buttonUpPushed(self):
        if debug: print("Up button pushed!")
        self.setpoint += 1
        self.updateControls()

    def buttonDownPushed(self):
        if debug: print("Down button pushed!")
        self.setpoint -= 1
        self.updateControls()

    def buttonOnPushed(self):
        if debug: print("On button pushed")
        self.status = abs(self.status - 1)
        self.updateControls(True)
Example #21
def create_scheduler():
    # manage = SchedulerManage()
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    date = dt.datetime.now()
    # alarm jobs
    scheduler.add_job(cal_kde_value,
                      "date",
                      run_date=date,
                      id='alarm_proj',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(his_model_update,
                      "date",
                      run_date=date,
                      id='his_model_up',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(seperate_operate_record.main,
                      "date",
                      run_date=date,
                      id='operate_parsing',
                      args=[],
                      replace_existing=True)
    scheduler.add_job(clear_database,
                      'cron',
                      hour='16',
                      minute='04',
                      id='clear_database',
                      replace_existing=True)
    # scheduler.add_job(operate_resolve, "date", run_date=date, id='alarm_proj', args=[], replace_existing=True)
    # scheduler.add_job(seperate_operate_record.main, "interval", minutes=1, id='operate_proj', args=[])
    # scheduler.add_job(time_task, "interval", seconds=5, id='mytask2', args=['mytask2',], replace_existing=True)
    scheduler.add_job(so_run,
                      "interval",
                      minutes=1,
                      id='operate_match',
                      args=[],
                      replace_existing=True)
    # try:
    #     group, int_list, scats_input = get_scats_int()
    # except Exception as e:
    #     logger.error(e)
    #     print(e)
    # else:
    #     logger.info("get scats basic inf successfully!")
    #     scheduler.add_job(thread_creat, "interval", minutes=5, id='scats_salklist', args=[group, int_list, scats_input],
    #                       replace_existing=True)
    #     scheduler.add_job(RequestDynaDataFromInt, "interval", minutes=5, id='scats_volumns', args=[int_list],
    #                       replace_existing=True)
    #     scheduler.add_job(get_operate, "interval", minutes=3, id='scats_operate', args=[],
    #                       replace_existing=True)
    scheduler.start()
    logger.info('scheduler tasks started')
    print("======================= scheduled tasks started ==========================")
    print(scheduler.get_jobs())
    print(scheduler.state)
    logger.info('task registration started; check the admin platform!')
    register_events(scheduler)
Example #22
class IPOListener(App):

    APP_ID = APP.IPO_LISTENER

    def __init__(self):
        super().__init__(app_type=APPTYPE.STREAMING)
        self.scheduler = BackgroundScheduler({'apscheduler.timezone': self.configuration['SCHEDULER_TIME_ZONE']})
        self.scheduler.start()
        self.MAIN_SITE = self.configuration['IPO_CALENDAR_MARKETWATCH']
        self.SITES = [self.MAIN_SITE,
                      self.configuration['IPO_CALENDAR_YAHOO'],
                      self.configuration['IPO_CALENDAR_NASDAQ']]

    def run(self):
        super().start()
        self.scheduler.add_job(self.search, trigger='cron', second='*/6')
        # Search once on restarting application
        self.search()

    def search(self):
        try:
            terms = IPOStringModel.get_all_not_found_strings()
            response = requests.get(self.MAIN_SITE)
            cleaned = response.content.decode('utf-8').lower()
            for term in terms:
                res = cleaned.find(term.lower())
                if res >= 0:
                    IPOStringModel.update_string_as_found(term)
                    self.log("Company IPO announced: " + term)
        except Exception as e:
            self.log("ERROR: " + (repr(e)))
            self.log("Jobs after catching exception: " + str(self.scheduler.get_jobs()))

    @staticmethod
    def add_string(string):
        IPOStringModel.insert_string_if_not_exists(string)

    @staticmethod
    def remove_string(string):
        IPOStringModel.remove_string_if_not_found(string)

    @staticmethod
    def dismiss_string(string):
        IPOStringModel.update_string_as_dismissed(string)

    def get_data(self):
        res = IPOStringModel.get_all_not_dismissed_strings()
        return {'res': res, 'sites': self.SITES}

    def execute(self, command, **kwargs):
        string = kwargs['string']
        if command == 'add':
            self.add_string(string)
        elif command == 'remove':
            self.remove_string(string)
        elif command == 'dismiss':
            self.dismiss_string(string)
        return {}
Example #23
def schedule(path_of_exe, dt):
    scheduler = BackgroundScheduler()
    print(scheduler.get_jobs())
    try:
        scheduler.add_job(runner,
                          args=[path_of_exe],
                          next_run_time=dt,
                          id='software')
        try:
            scheduler.start()
        except Exception:
            pass  # the scheduler may already be running
        print(scheduler.get_jobs())
    except Exception as e:
        print("failed to schedule:", e)
Example #24
File: __init__.py Project: cryptk/opsy
def update_monitoring_cache():
    """Update the monitoring cache."""
    from apscheduler.schedulers.background import BackgroundScheduler
    scheduler = BackgroundScheduler()
    self.register_scheduler_jobs(current_app, run_once=True)
    for args, kwargs in current_app.jobs:
        scheduler.add_job(*args, **kwargs)
    scheduler.start()
    while len(scheduler.get_jobs()) > 0:
        continue
    scheduler.shutdown(wait=True)
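The busy-wait above works because jobs added without a trigger default to a one-shot 'date' trigger and are removed from the store once they have run, so get_jobs() eventually returns an empty list. A gentler variant of the same idea, assuming the same one-shot jobs, sleeps between polls instead of spinning:

import time
while scheduler.get_jobs():   # one-shot jobs disappear after they execute
    time.sleep(0.5)           # yield the CPU instead of spinning
scheduler.shutdown(wait=True)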
Example #25
class diango_task():
    def __init__(self):
        try:
            # instantiate the scheduler
            self.scheduler = BackgroundScheduler()
            # self.scheduler = BlockingScheduler()
            # use DjangoJobStore() as the job store
            self.scheduler.add_jobstore(DjangoJobStore(), "default")
        except Exception as e:
            print(e)
            # shut down on any error
            self.scheduler.shutdown()

    def addtask(self, fun, uuid, time, user, task):
        '''Add a job'''
        return self.scheduler.add_job(fun,
                                      "cron",
                                      id=uuid,
                                      hour=time,
                                      args=[user, task],
                                      misfire_grace_time=3600)

    def starttask(self):
        '''Start the jobs'''
        register_events(self.scheduler)
        self.scheduler.start()

    def gettasks(self):
        return self.scheduler.get_jobs()

    def pausejob(self, job_name):
        '''Pause a job'''
        self.scheduler.pause_job(job_name)

    def resumejob(self, job_name):
        '''Resume a job'''
        self.scheduler.resume_job(job_name)

    def removejob(self, job_name):
        '''Remove a job'''
        self.scheduler.remove_job(job_name)
Example #26
def delete_old_and_create_new_cron_jobs_with_timezone(timezone):
    customize_logic = CustomizeLogic()
    old_timezone = customize_logic.get_timezone()
    old_scheduler = BackgroundScheduler(timezone=old_timezone)
    logger.info("delete old scheduler with old_timezone: %s" % old_timezone)
    delete_cron_jobs_for_scheduler(old_scheduler)

    new_scheduler = BackgroundScheduler(timezone=timezone)
    logger.info("create new scheduler: with new_timezone: %s" % timezone)
    create_and_start_cron_jobs_with_scheduler(new_scheduler)
    logger.info("new jobs from scheduler %s" % new_scheduler.get_jobs())
    return [old_scheduler, new_scheduler]
Example #27
class Scheduler():
	# Creates the scheduler daemon when the class is instantiated.
	def __init__(self):
		self.sched = BackgroundScheduler()
		self.sched.start()
		self.job_id=''

	# When the instance is destroyed, shuts down all jobs.
	def __del__(self):
		if self.get_jobs() > 0:
			self.shutdown()

	# Shuts down all jobs.
	def shutdown(self):
		if self.get_jobs() > 0:
			self.sched.shutdown()
		else:
			print("Empty Scheduler Job!")

	# Stops a specific job.
	def kill_scheduler(self, job_id):
		# msg = f'{"":><10} "{job_id}" kill cron schedule {"":<<10}'
		# print(msg)
		try:
			self.sched.remove_job(job_id)
		except JobLookupError as err:
			slog.error("fail to stop scheduler: %s" % err)
			return

	def baseJob(self, job_id):
		msg = "[Scheduler]: process_id[%s] - runtime %s" % (job_id, datetime.datetime.now())
		print(msg)
		# slog.info(helloMsg)

	def get_jobs(self):
		# tmpjobs = self.sched.get_jobs()
		# print(type(tmpjobs), len(tmpjobs), tmpjobs)
		return len(self.sched.get_jobs())

	# The scheduler: when it runs, a thread is spawned to execute the callback.
	# The type argument selects between cron- and interval-style triggers:
	# with 'cron', the job fires at specific dates, weekdays, hours, minutes and
	# seconds, exactly like cron; with 'interval', it fires repeatedly at a fixed interval.
	# When passing an args list, check that it matches the callback's parameters:
	# an argument mismatch raises an error and the job stops.
	def scheduler(self, job_id, cb):
		soption = {'hour':'8-23', 'second':'*/3', 'id':job_id}
		if hasattr(cb, "__call__"):
			self.sched.add_job(cb, 'cron', **soption)
		else:
			soption['args'] = [job_id]
			self.sched.add_job(self.baseJob, 'cron', **soption)
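A minimal sketch of the two trigger styles the comments above describe (the callback and timings are illustrative assumptions):

from apscheduler.schedulers.background import BackgroundScheduler

def tick():
    print('tick')

sched = BackgroundScheduler()
sched.add_job(tick, 'cron', hour='8-23', second='*/3', id='cron_tick')  # at fixed times of day, cron-style
sched.add_job(tick, 'interval', seconds=3, id='interval_tick')          # every fixed period
sched.start()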
Example #28
def create_scheduler(message_queue):
    manage = SchedulerManage()
    scheduler = BackgroundScheduler(daemonic=True)
    scheduler.add_jobstore(DjangoJobStore(), "default")
    date = dt.datetime.now()
    scheduler.add_job(main,
                      "date",
                      run_date=date,
                      id='alarm_proj',
                      args=[message_queue],
                      replace_existing=True)
    # scheduler.add_job(manage.message_accept, "date", run_date=date, id='alarm_proj', args=[message_queue], replace_existing=True)
    # scheduler.add_job(seperate_operate_record.main, "interval", minutes=1, id='operate_proj', args=[])
    # scheduler.add_job(time_task, "interval", seconds=5, id='mytask2', args=['mytask2',], replace_existing=True)
    scheduler.add_job(so_run,
                      "interval",
                      minutes=1,
                      id='operate_match',
                      args=[message_queue],
                      replace_existing=True)

    try:
        group, int_list, scats_input = get_scats_int()
    except Exception as e:
        logger.error(e)
        print(e)
    else:
        logger.info("scats基础信息获取成功")
        scheduler.add_job(thread_creat,
                          "interval",
                          minutes=5,
                          id='scats_salklist',
                          args=[group, int_list, scats_input],
                          replace_existing=True)
        scheduler.add_job(RequestDynaDataFromInt,
                          "interval",
                          minutes=5,
                          id='scats_volumns',
                          args=[int_list],
                          replace_existing=True)
        scheduler.add_job(get_operate,
                          "interval",
                          minutes=3,
                          id='scats_operate',
                          args=[],
                          replace_existing=True)

    scheduler.start()
    logger.info('scheduled tasks started')
    print("======================= scheduled tasks started ==========================")
    print(scheduler.get_jobs())
Example #29
class TaskScheduler:
    def __init__(self):
        jobs_database_name = 'jobs.sqlite'
        jobstores = {
            # 'default': SQLAlchemyJobStore(url=F'sqlite:///{jobs_database_name}')
        }
        executors = {
            'default': {
                'type': 'threadpool',
                'max_workers': 20
            },
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        job_defaults = {'coalesce': False, 'max_instances': 3}
        self.scheduler = BackgroundScheduler()
        self.scheduler.configure(jobstores=jobstores,
                                 executors=executors,
                                 job_defaults=job_defaults,
                                 timezone=utc)

    def get_task(self, task_id):
        return self.scheduler.get_job(task_id)

    def get_tasks_ids(self):
        task_ids = []
        jobs = self.scheduler.get_jobs()
        for j in jobs:
            task_ids.append(j.id)

        return task_ids

    def start_scheduler(self):
        self.scheduler.start()

    def stop_scheduler(self):
        self.scheduler.shutdown()

    def add_task(self, task_func, interval_minutes, args, task_id):
        print('Adding an interval task')
        self.scheduler.add_job(task_func,
                               IntervalTrigger(minutes=interval_minutes),
                               args=args,
                               id=str(task_id))
        print('Adding the interval task finished')

    def remove_task(self, id):
        print(F'Removing task (Id: {id})')
        self.scheduler.remove_job(id)
        print('The task removed.')
Example #30
def set_auto_backup():
    # takes no arguments;
    # read the backup interval (in days) from the database
    days = SystemConfig.objects.first().days_to_auto_backup
    job = BackgroundScheduler()
    # clear any existing jobs
    if job.get_jobs():
        job.remove_all_jobs()
    # if the interval is <= 0, do not create a new job; just return
    if days <= 0:
        return
    # create the job
    job.add_job(auto_backup, 'interval', days=days, id='auto_backup')
    # start the scheduler
    job.start()
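Because each call constructs a fresh BackgroundScheduler, the remove_all_jobs() call above only ever sees an empty in-memory store; keeping one module-level scheduler makes the clearing effective. A sketch under that assumption (imports as in the example above):

_scheduler = BackgroundScheduler()

def set_auto_backup():
    days = SystemConfig.objects.first().days_to_auto_backup
    _scheduler.remove_all_jobs()  # actually clears the previously added job
    if days <= 0:
        return
    _scheduler.add_job(auto_backup, 'interval', days=days, id='auto_backup')
    if not _scheduler.running:
        _scheduler.start()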
Example #31
class Scheduler():
    def __init__(self, patterns):
        self.patterns = patterns
        self.known_urls = dict()
        self.add_known_urls(["", None])
        self.schedule_cond = threading.Condition()
        self.ap_scheduler = BackgroundScheduler()
        self.ap_scheduler.start()

    def schedule(self, article_link, feed_url, schedule_mode):
        # print("known urls size: ", sys.getsizeof(self.known_urls), " entries: ", len(self.known_urls.keys()))
        if article_link in self.known_urls:
            self.known_urls[article_link] = time.time()  # store last access time
        else:
            self.known_urls[article_link] = time.time()  # add link as key and store last access time
            # subtract one period because the first step in the loop below adds it back
            last_scheduled_time = time.time() - self.patterns[0][1]
            with self.schedule_cond:
                if schedule_mode == 1:
                    for pattern in self.patterns:
                        for i in range(pattern[0]):
                            offset = pattern[1] + random.uniform(-5, 5)
                            run_date = datetime.fromtimestamp(last_scheduled_time + offset)

                            self.ap_scheduler.add_job(
                                func=due_job_queue.put,
                                trigger='date',
                                args=([article_link, feed_url], ),
                                run_date=run_date,
                                misfire_grace_time=20)
                            last_scheduled_time = last_scheduled_time + pattern[1]
                    logger.debug("No. of scheduled jobs: %d",
                                 len(self.ap_scheduler.get_jobs()))
                elif schedule_mode == 2:
                    run_date = datetime.fromtimestamp(time.time())
                    self.ap_scheduler.add_job(func=due_job_queue.put,
                                              trigger='date',
                                              args=([article_link,
                                                     feed_url], ),
                                              run_date=run_date,
                                              misfire_grace_time=20)

    def add_known_urls(self, urls):
        list(map(lambda url: self.known_urls.update({url: time.time()}), urls))
Example #32
class ScheduleController:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        # self.scheduler.add_job()

    def run_schedules(self):
        try:
            self.scheduler.start()
            # Shut down the scheduler when exiting the app
            atexit.register(lambda: self.scheduler.shutdown())
        except Exception as e:
            print("*** ScheduleController.run_schedules raised an exception: {}"
                  .format(e))

    def get_pending_jobs(self, job_id=None):
        """
        Get list of jobs or get job by job_id
        :param job_id: string | optional if it's not none returns job which equals job_id
        :returns: List or Dict
        """
        try:
            if job_id is not None:
                return self.scheduler.get_job(job_id)
            else:
                return self.scheduler.get_jobs()
        except Exception as e:
            pass

    def register_schedule(self, callback, trigger_timer, job_id, name=None):
        """
        Void method
        Schedule registerer hook
        :param callback: function
        :param trigger_timer: the alias name of the trigger (e.g. ``date``, ``interval`` or ``cron``)
        :param job_id: string | Schedule job id
        :param name: string | optional name of job
        """
        if callable(callback) is False:
            return False
        else:
            self.scheduler.add_job(
                func=callback,
                trigger=trigger_timer,
                id=job_id,
                name=name if name is not None else str(job_id) +
                " Schedule registered without name",
                replace_existing=True)
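A hypothetical usage of ScheduleController (the callback and job id are illustrative assumptions; a bare 'date' trigger fires once, right after the scheduler starts):

controller = ScheduleController()
controller.register_schedule(lambda: print('heartbeat'),
                             trigger_timer='date', job_id='heartbeat')
controller.run_schedules()
print(controller.get_pending_jobs())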
Example #33
class Scheduler(object):
    def __init__(self):
        self._scheduler = BackgroundScheduler(executors=executors,
                                              job_defaults=job_defaults)
        self._scheduler.add_jobstore('redis',
                                     jobs_key='crontpy.jobs',
                                     run_times_key='crontpy.run_times')

    @property
    def running(self):
        return self._scheduler.running

    def start(self):
        self._scheduler.start()

    def shutdown(self, wait=True):
        self._scheduler.shutdown(wait)

    def pause(self):
        self._scheduler.pause()

    def resume(self):
        self._scheduler.resume()

    def get_jobs(self):
        return self._scheduler.get_jobs()

    def get_job(self, jid):
        return self._scheduler.get_job(job_id=jid)

    def run_job(self, jid):
        job = self.get_job(jid)
        if not job:
            raise Exception('job id:{0} not found'.format(jid))
        job.func(*job.args, **job.kwargs)

    def resume_job(self, jid):
        self._scheduler.resume_job(job_id=jid)

    def pause_job(self, jid):
        self._scheduler.pause_job(job_id=jid)

    def modify_job(self, jid, **changes):
        return self._scheduler.modify_job(job_id=jid, **changes)

    def delete_job(self, jid):
        self._scheduler.remove_job(job_id=jid)
Example #34
def send_delayed_message(recipient_id, message_text, time_in_seconds):
    scheduler = BackgroundScheduler()
    scheduler.remove_all_jobs()
    print(scheduler.get_jobs())
    print(time_in_seconds)
    # timedelta handles second/minute/hour overflow, unlike adding to the second field
    send_time = datetime.datetime.now() + datetime.timedelta(seconds=time_in_seconds)
    # pass the callable and its args; calling send_message(...) here would run it immediately
    scheduler.add_job(send_message,
                      args=[recipient_id, message_text],
                      trigger='date',
                      run_date=send_time)
    scheduler.start()
Example #35
class Scheduler(object):
    def __init__(self):
        self.backGroundTask = BackgroundScheduler()

    def add_job(self, func, time, param=None):
        if param:
            self.backGroundTask.add_job(func,
                                        'interval',
                                        seconds=time,
                                        args=[param])
        else:
            self.backGroundTask.add_job(func, 'interval', seconds=time)

    def get_jobs(self):
        return self.backGroundTask.get_jobs()

    def start(self):
        self.backGroundTask.start()
Example #36
class CommandScheduler(object):

    def __init__(self, start=True):
        executors = {
            'default': ThreadPoolExecutor(20),
            'processpool': ProcessPoolExecutor(5)
        }

        self.scheduler = BackgroundScheduler(executors=executors)
        self.scheduler.add_jobstore('redis',
            jobs_key='shh:jobs',
            run_times_key='shh:run_times'
        )

        if start:
            self.scheduler.start()

        self.datetime_parser = parsedatetime.Calendar()

    def parse(self, at):
        return self.datetime_parser.parseDT(at)[0]

    def already_scheduled(self, what):
        # TODO(Bieber): Improve efficiency with a dict
        for scheduled_at, scheduled_what in self.get_jobs():
            if what == scheduled_what:
                return True
        return False

    def schedule(self, at, what):
        dt = self.parse(at)
        trigger = DateTrigger(dt)
        self.scheduler.add_job(execute_command,
            trigger=trigger,
            args=[what.strip()],
        )

    def get_jobs(self):
        jobs = self.scheduler.get_jobs()
        return [(job.next_run_time, job.args[0]) for job in jobs]
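A hypothetical usage of CommandScheduler (the command string is an illustrative assumption; parsedatetime turns phrases like "in 5 minutes" into a datetime):

cs = CommandScheduler()
if not cs.already_scheduled('backup --full'):
    cs.schedule('in 5 minutes', 'backup --full')
print(cs.get_jobs())  # [(next_run_time, command), ...]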
Example #38
class MainRunner(object):

    class FilterAllLog(logging.Filter):
        # By default we filter out records from apscheduler.executors.default and
        # apscheduler.scheduler; the filtered loggers can be configured in config.json.
        def filter(self, record):
            return False

    def __init__(self, input_cmd_config_fp, input_job_config_fp, input_config_fp):

        # init value
        cmd_config_fp = os.path.abspath(input_cmd_config_fp)
        job_config_fp = os.path.abspath(input_job_config_fp)
        config_fp = os.path.abspath(input_config_fp)

        # load configuration json files
        self.cmd_config = CommonUtil.load_json_file(cmd_config_fp)
        self.job_config = CommonUtil.load_json_file(job_config_fp)
        self.config = CommonUtil.load_json_file(config_fp)

        # init schedulers
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_jobstore('sqlalchemy', url=self.config['job_store_url'])
        self.scheduler.start()

        # init variables
        manager = Manager()
        self.sync_queue = manager.Queue()
        self.async_queue = manager.Queue()
        self.current_job_list = []

        # Slack Sending Queue
        # TODO: prevent the Slack bot is disable, the sending queue will use too much memory.
        self.slack_sending_queue = manager.Queue(50)

        # init logger
        self.set_logging(self.config['log_level'], self.config['log_filter'])

    def set_logging(self, log_level, log_filter_list):
        default_log_format = '%(asctime)s %(levelname)s [%(name)s.%(funcName)s] %(message)s'
        default_datefmt = '%Y-%m-%d %H:%M'
        if log_level.lower() == "debug":
            logging.basicConfig(level=logging.DEBUG, format=default_log_format, datefmt=default_datefmt)
        else:
            logging.basicConfig(level=logging.INFO, format=default_log_format, datefmt=default_datefmt)

        my_filter = self.FilterAllLog()
        for target_logger in log_filter_list:
            logging.getLogger(target_logger).addFilter(my_filter)

    def scheduler_del_job(self, **kwargs):
        input_cmd_str = kwargs.get("input_cmd_str", "")
        cmd_str_list = input_cmd_str.split(" ")
        if len(cmd_str_list) == 2:
            job_id = cmd_str_list[1]
            current_job_list = self.scheduler.get_jobs()
            current_job_id_list = [j.id for j in current_job_list]
            if job_id in current_job_id_list:
                self.scheduler.remove_job(job_id)
            else:
                logging.error("Cannot find the specify job id [%s]" % job_id)
        else:
            logging.error("Incorrect cmd format! [%s]" % input_cmd_str)

    def scheduler_list_job(self, **kwargs):
        self.scheduler.print_jobs()

    def scheduler_shutdown(self, **kwargs):
        self.scheduler.shutdown()
        sys.exit(0)

    def list_all_commands(self, **kwargs):
        print("Current supported commands as below:")
        print("-" * 80)
        for cmd_str in self.cmd_config['cmd-settings']:
            print('{:30s} {:50s} '.format(cmd_str, self.cmd_config['cmd-settings'][cmd_str]['desc']))
        print("-" * 80)

    def scheduler_job_handler(self, input_cmd_obj, input_cmd_str):
        cmd_match_pattern = list(input_cmd_obj.keys())[0]
        func_point = getattr(self, input_cmd_obj[cmd_match_pattern]['func-name'])
        func_point(cmd_configs=input_cmd_obj[cmd_match_pattern]['configs'], input_cmd_str=input_cmd_str)

    def cmd_queue_composer(self, input_cmd_str):
        for cmd_pattern in self.cmd_config['cmd-settings']:
            re_compile_obj = re.compile(cmd_pattern)
            re_match_obj = re_compile_obj.search(input_cmd_str)
            if re_match_obj:
                current_command_obj = self.cmd_config['cmd-settings'][cmd_pattern]
                logging.debug("job matched [%s]" % cmd_pattern)
                target_queue_type = current_command_obj.get('queue-type', None)
                if target_queue_type == "async":
                    self.async_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                elif target_queue_type == "sync":
                    self.sync_queue.put({"cmd_obj": current_command_obj, "cmd_pattern": cmd_pattern, "input_cmd_str": input_cmd_str})
                else:
                    self.scheduler_job_handler({cmd_pattern: current_command_obj}, input_cmd_str)
                break

    def load_default_jobs(self, input_scheduler, input_job_config):
        current_jobs = input_scheduler.get_jobs()
        current_jobs_name = [job.name for job in current_jobs]
        for job_name in input_job_config:
            if input_job_config[job_name]['default-loaded']:
                if job_name not in current_jobs_name:
                    func_point = getattr(importlib.import_module(input_job_config[job_name]['module-path']), job_name)
                    self.scheduler.add_job(func_point, input_job_config[job_name]['trigger-type'],
                                           id=job_name,
                                           seconds=input_job_config[job_name]['interval'],
                                           max_instances=input_job_config[job_name]['max-instances'],
                                           kwargs={
                                               'async_queue': self.async_queue,
                                               'sync_queue': self.sync_queue,
                                               'slack_sending_queue': self.slack_sending_queue,
                                               'configs': input_job_config[job_name]['configs'],
                                               'cmd_config': self.cmd_config}
                                           )

    def job_exception_listener(self, event):
        if event.exception:
            logging.error("Job [%s] crashed [%s]" % (event.job_id, event.exception))
            logging.error(event.traceback)

    def add_event_listener(self):
        self.scheduler.add_listener(self.job_exception_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    def run(self):
        # load default jobs into the scheduler if they do not exist yet
        self.load_default_jobs(self.scheduler, self.job_config)

        # add event listener into scheduler
        self.add_event_listener()

        # enter the loop to receive the interactive command
        while True:
            user_input = input()
            self.cmd_queue_composer(user_input)
            time.sleep(3)
Example #39
    sstalled_sd[now] = 0
    stalled_total_old = 0
    stalled_hd_old = 0

    while True:
        with open(stalled_file, "a") as f:
            now = dt.now()

            stalled_hd = sum([x[1] for x in filter(lambda value: "hd" in value[0], stalled.items())])
            stalled_total = sum(stalled.values())

            sstalled_hd[now] = stalled_hd - stalled_hd_old
            sstalled_sd[now] = (stalled_total - stalled_hd) - (stalled_total_old - stalled_hd_old)
            users[now] = args.user_count - len(scheduler.get_jobs())
            stalled_hd_old = stalled_hd
            stalled_total_old = stalled_total

            stalled_hd_rm = pd.rolling_mean(sstalled_hd.resample("1S", fill_method='bfill'), 30)[-1] * 60
            stalled_sd_rm = pd.rolling_mean(sstalled_sd.resample("1S", fill_method='bfill'), 30)[-1] * 60

            stalled_hd_value = stalled_hd_rm if not np.isnan(stalled_hd_rm) else 0
            stalled_sd_value = stalled_sd_rm if not np.isnan(stalled_sd_rm) else 0

            f.write("%lf,%d,%d,%d,%lf,%lf,%d\n" % (
                time.time() - start,
                sum(stalled.values()),
                len(stalled), args.user_count - len(scheduler.get_jobs()),
Example #40
def main():
    if '--get-twitter-credentials' in sys.argv:
        get_twitter_credentials()
        sys.exit(0)

    scheduler = BackgroundScheduler(timezone=TIMEZONE)
    # scheduler.add_listener(log_job, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

    data = yaml.load(encodings.codecs.open(TWEETS_YAML, mode='r', encoding='utf-8'),
                     Loader=yaml.SafeLoader)
    for category in data:
        for subcategory in data[category]:
            for period in data[category][subcategory]:
                if data[category][subcategory][period] is None:
                    # exclude categories with no tweets
                    continue

                # README: exclude 'strict' tweets when calculated
                # interval period for _no_ 'strict' ones
                tweets_for_period = []
                for tweet in data[category][subcategory][period]:
                    if not tweet.get('strict', False):
                        tweets_for_period.append(tweet)
                nro_tweets_for_period = len(tweets_for_period)

                for i, tweet in enumerate(data[category][subcategory][period]):
                    content = tweet['content']

                    if period == 'once':
                        args = ['date', publish]
                        kwargs = {
                            'args': [content],
                            'run_date': tweet['date']
                            # 'timezone': timezone,
                        }
                    else:
                        now = datetime.datetime.now()
                        time_period = PERIOD_ARG[period]
                        args = ['interval', publish]
                        kwargs = {}

                        starting_on = tweet.get('start_date', now)
                        start_date = starting_on + relativedelta(**{time_period: i+1})

                        if period == 'monthly':
                            multiplier = 4  # weeks
                        elif period == 'yearly':
                            # FIXME: how many weeks are in a year?
                            multiplier = 4 * 12  # weeks
                        else:
                            multiplier = 1
                        kwargs[time_period] = nro_tweets_for_period * multiplier

                        if tweet.get('strict', False):
                            # respect the period as it is
                            kwargs.update({
                                'args': [content],
                                time_period: multiplier,
                                # 'timezone': timezone,
                            })

                            for attr in ('start_date', 'end_date'):
                                if tweet.get(attr, False):
                                    kwargs[attr] = tweet[attr]
                        else:
                            kwargs.update({
                                'args': [content],
                                'start_date': start_date,
                                # 'timezone': timezone,
                            })

                    if tweet.get('media', False):
                        args[1] = publish_images
                        kwargs['args'].append(tweet['media'])
                    add_job(scheduler, *args, **kwargs)
    try:
        scheduler.start()
        jobs = scheduler.get_jobs()
        now = datetime.datetime.now(tz=TIMEZONE)
        print('\nNOW: {}'.format(now))
        print('NEXT {} ({}) TWEET to publish:'.format(SHOW_NEXT_TWEET_TO_PUBLISH, len(jobs)))

        for job in jobs[:SHOW_NEXT_TWEET_TO_PUBLISH]:
            print('{}: "{}" - {} ...'.format(
                job.next_run_time,
                job.name,
                job.args[0][:50].replace('\n', '\\n'))
            )

        print('Press Ctrl+C to exit', end='\n\n')

        while True:
            time.sleep(2)

    except (KeyboardInterrupt, SystemExit):
        print('Finishing scheduler...')
        scheduler.shutdown()
        print('Bye.')
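
The add_job(scheduler, *args, **kwargs) helper called above is not part of this
excerpt. A minimal sketch of what it might look like, assuming the first
positional argument is the trigger name and the second the callable:

# Hypothetical helper assumed by the example above; args was built as
# [trigger, func], and kwargs carries 'args', 'run_date'/'start_date', etc.
def add_job(scheduler, trigger, func, **kwargs):
    return scheduler.add_job(func, trigger, **kwargs)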
示例#41
0
class Scheduler(object):
    def __init__(self, tasks, settings):
        self.settings = settings
        self.logger = get_logger('scheduler', self.settings)

        self.intervals = self.settings['scheduler']['intervals']
        self.apscheduler = { 'apscheduler': self.settings['scheduler']['apscheduler'] }

        if not isinstance(tasks, dict):
            self.logger.error('tasks is not a dictionary')
            return

        if not isinstance(self.intervals, dict):
            self.logger.error('intervals is not a dictionary')
            return

        self.tasks = self._flatten_dict(tasks, '')

        self.logger.debug('Tasks found:')
        self.logger.debug(LINE_SPLITTER)
        for key in self.tasks:
            self.logger.debug('%45s %30s' % (key, self.tasks[key].task_type))
        self.logger.debug(LINE_SPLITTER)

        #self.logger.debug('Checking tasks paths!')
        # TODO: Check if paths are valid

    def init(self):
        """ Initializes the queue, and adds the tasks """

        self.logger.info('Initializing APScheduler...')

        apsched_kwargs = self._flatten_dict(self.apscheduler, '')
        apsched_kwargs['apscheduler.logger'] = get_logger('apscheduler', self.settings)

        self.sched = BackgroundScheduler(apsched_kwargs)

        for (id, task) in self.tasks.items():
            task_type = task.task_type

            self.logger.debug('Adding task "%s" [%s]' % (id, task_type))

            if not task_type in self.intervals:
                self.logger.info('Interval not defined for the "%s" class. Assuming it is a one-time task' % task_type)
                self.add_task(id, task)
                continue

            self.add_task(id, task, self.intervals[task_type])

        self.logger.info('APScheduler initialized!')

    def clear(self):
        """ Removes all jobs from scheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        for job in self.sched.get_jobs():
            job.remove()

    def start(self):
        """ Start the scheduler by starting the instance of APScheduler """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.error('Scheduler is not initialized')
            return

        try:
            self.sched.start()
        except SchedulerAlreadyRunningError as e:
            self.logger.warning(e)


    def stop(self, wait=True):
        """ Stop the scheduler. If wait=True, then it will be stopped after
            all jobs that are currently executed will finish """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        try:
            self.sched.shutdown(wait=wait)
        except SchedulerNotRunningError as e:
            self.logger.warning(e)

    def add_task(self, id, func, interval=None):
        """ Adds a new task into the queue. If interval is None then the task
            will be executed once. """
        if not isinstance(id, basestring):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        if not hasattr(func, '__call__'):
            self.logger.error('"func" is not callable')
            return

        try:
            if isinstance(interval, dict):
                self.sched.add_job(func, trigger='interval', id=id, **interval)
            elif interval is None: # Run once (omit trigger)
                self.sched.add_job(func, id=id)
            else:
                self.logger.error('"interval" is not an instance of [time|None]')
                return
        except ConflictingIdError as e:
            self.logger.warning(e)

    def remove_task(self, id):
        """ Remove a job from the queue """
        if not isinstance(id, basestring):
            self.logger.error('"id" argument is not an instance of basestring')
            return

        try:
            self.sched.remove_job(id)
        except JobLookupError as e:
            self.logger.warning(e)

    def force_update(self, job_id=None):
        """ Updates a job with id == job_id, or all jobs if no id is given """
        if not isinstance(self.sched, BaseScheduler):
            self.logger.warning('Scheduler is not initialized')
            return

        if not job_id:
            self.logger.info("Forcing update of all jobs")
            for job in self.sched.get_jobs():
                self.__run_job(job)
        else:
            self.logger.info("Forcing update of job %s" % job_id)
            job = self.sched.get_job(job_id)

            if not job:
                self.logger.warn("Job %s not found" % job_id)
            else:
                self.__run_job(job)

    def __run_job(self, job):
        if job.func:
            # Add the job to the scheduler and run it just once
            self.sched.add_job(job.func)

            # If we explicitly call job.func() then we block the thread and
            # apscheduler reports multiple missed executions
            # job.func()
        else:
            self.logger.warning("Job %s has a None type callable func" % job.id)

    def _flatten_dict(self, d, path):
        new_dict = { }
        for key in d:
            if isinstance(d[key], dict):
                new_path = '%s.%s' % (path, key) if path else key

                x = self._flatten_dict(d[key],new_path).copy()
                new_dict.update(x)
            else:
                new_key = '%s.%s' % (path, key)
                new_dict[new_key] = d[key]

        return new_dict
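
A usage sketch for the Scheduler class above. The shapes of tasks and settings
are assumptions inferred from the constructor: tasks is a nested dict whose
leaves are callables carrying a task_type attribute, and settings supplies the
per-type intervals plus raw apscheduler options (the module's own get_logger is
assumed available):

# Hypothetical wiring for the class above; all names are illustrative
def poll_feeds():
    print('polling')
poll_feeds.task_type = 'poller'

settings = {
    'scheduler': {
        'intervals': {'poller': {'seconds': 30}},   # interval kwargs per task type
        'apscheduler': {'job_defaults': {'coalesce': True}},
    },
}
tasks = {'feeds': {'news': poll_feeds}}  # flattened to job id 'feeds.news'

sched = Scheduler(tasks, settings)
sched.init()
sched.start()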
示例#42
0
WITH (
  OIDS=FALSE
);
''')
con.commit()
cur.close()
con.close()


tz = timezone("Europe/Stockholm")
sched = BackgroundScheduler()
now = dt.datetime.now()
print now
then = now - dt.timedelta(seconds=now.second, microseconds=now.microsecond)
then += dt.timedelta(minutes=1, seconds=1)
for i in range(int(pgruns)):
    trigger = DateTrigger(timezone=tz, run_date=then)
    sched.add_job(insert_job, trigger, [pghost, pgport])
    then += dt.timedelta(microseconds=500000)
sched.start()        # start the scheduler
while len(sched.get_jobs()) > 0: sleep(1)


con = psycopg2.connect(host=pghost, port=pgport, user='******')
cur = con.cursor()
cur.execute('SELECT pg_switch_xlog()')
con.commit()
cur.close()
con.close()
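
The busy-wait on get_jobs() above works because date-trigger jobs are removed
from the job store once they fire, so the list shrinks to empty when every
scheduled insert has run. A sketch of the same drain-then-stop pattern with an
explicit shutdown at the end (sched and sleep as above):

while sched.get_jobs():
    sleep(1)
sched.shutdown(wait=True)  # stop the scheduler thread once the store is drained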

示例#43
0
    logger.info(green("s3backups run complete"))


if __name__ == '__main__':
    # Keep this call if you want a backup to run immediately on each
    # (re)start of the script; the scheduling below handles recurring runs.

    run()

    # Comment out the rest if you want to only run once...
    sched = BackgroundScheduler(logger=logger)
    sched.start()
    days = settings.CRON['days']
    hour = settings.CRON['hour']
    minute = settings.CRON['minute']
    sched.add_job(run, 'cron', day_of_week=days, hour=hour, minute=minute)
    user_id = os.getegid()
    user = getpass.getuser()
    info_tuple = (days, hour, minute, user, user_id)
    logger.info(green(
        "s3backups started, running {} at {}:{} under user {} {}".format(*info_tuple)))
    print("Waiting until next scheduled backup.")
    print(sched.get_jobs())
    print(green("Hit Ctrl+c to exit."))

    try:
        while True:
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()
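
settings.CRON is read but not shown in this excerpt; a plausible shape, as an
assumption, mapping directly onto the cron trigger's parameters:

# Hypothetical settings module content assumed by the example above
CRON = {
    'days': 'mon-fri',  # day_of_week accepts names or 0-6
    'hour': 2,
    'minute': 30,
}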
示例#44
0
class StationControl(object):
    # GPIO Pins (BCM numbering). OSPI uses 4 pins for shift register.
    clock_pin = 4
    out_pin = 17
    data_pin = 27
    latch_pin = 22

    def __init__(self, data_handler):
        self.data_handler = data_handler
        self.station_status = {station_id: False for station_id in data_handler.settings["active_stations"]}
        self.active_stations = data_handler.settings["active_stations"]
        self.num_stations = len(self.station_status)

        self.utc_timezone_offset = self.data_handler.settings["utc_timezone_offset"]
        self.timezone_name = self.data_handler.settings["timezone_name"]
        print("Operating in {} timezone ({})".format(self.timezone_name, self.utc_timezone_offset))
        self.bg_scheduler = BackgroundScheduler(timezone=utc)
        self.set_schedule(self.data_handler.get_schedule())

        if GPIO:
            atexit.register(self.cleanup)
            # GPIO.setwarnings(False)
            self.setup_gpio()

    def set_schedule(self, settings_json):
        self.bg_scheduler.remove_all_jobs()
        stations = settings_json["schedule"]
        for station in stations:
            station_id = int(station[-1])
            if station_id not in self.active_stations:
                continue

            for day in stations[station]:
                for start_time in stations[station][day]["start_times"]:
                    time_str = day + " " + start_time["time"] + " " + self.utc_timezone_offset
                    # convert the 12-hour time string into a timezone-aware
                    # datetime (24-hour UTC) for internal use
                    utc_time = timestr_to_utc(time_str)
                    fixed_duration = int(start_time["duration"])
                    print("Station {} will start at {} UTC for {} minutes".format(station_id, utc_time, fixed_duration))
                    args = {
                        "datetime": str(utc_time).replace(" ", "T"),
                        "station": station_id,
                        "fixed_duration": fixed_duration,
                        "manual": 0,
                    }
                    self.bg_scheduler.add_job(self.water, "interval", days=7, start_date=utc_time, args=[args])

        # start the scheduler if it's not already running
        if not self.bg_scheduler.state:
            self.bg_scheduler.start()

    def pause_schedule(self):
        print("Pausing schedule...")
        jobs_paused = 0
        for job in self.bg_scheduler.get_jobs():
            job.pause()
            print("Paused job {}".format(job))
            jobs_paused += 1
        return jobs_paused

    def resume_schedule(self):
        print("Resuming schedule...")
        for job in self.bg_scheduler.get_jobs():
            job.resume()
            print("Resumed job {}".format(job))
        print("Resumed schedule")

    def manual_watering(self, watering_request):
        # pause normal schedule
        jobs_paused = self.pause_schedule()

        start, last_duration_seconds = Arrow.utcnow(), 5
        start_buffer_seconds = 5

        # for every station, schedule a job for the duration specified;
        # stations are run serially

        for station, duration in watering_request.items():
            station_id = int(station)
            job_start = start.replace(seconds=last_duration_seconds)

            dt = job_start.format("YYYY-MM-DDTHH:mm:ssZZ").replace("-00:00", "+00:00")
            args = {"datetime": dt, "station": station_id, "fixed_duration": duration, "manual": 1}
            self.bg_scheduler.add_job(self.water, "date", run_date=job_start.datetime, args=[args])

            last_duration_seconds = duration * 60

        # reschedule the original schedule after all stations have watered
        job_start = start.replace(seconds=last_duration_seconds + start_buffer_seconds)
        self.bg_scheduler.add_job(self.resume_schedule, "date", run_date=job_start.datetime)

        # check if schedule contains: paused jobs, manual watering jobs, and extra job to resume paused jobs
        if len(self.bg_scheduler.get_jobs()) == (jobs_paused + len(watering_request) + 1):
            return True

        return False

    def set_station(self, station, signal):
        """
        Sets station [0,..., 7] to True or False (On | Off) in memory.
        Use set_shift_register_values() to activate GPIO
        """
        self.station_status[station] = signal

    def set_shift_register_values(self):
        """
        Activates GPIO based on self.station_status values
        """
        if not GPIO:
            print("Error: set_shift_register_values() doesn't have GPIO module")
            return
        GPIO.output(StationControl.clock_pin, False)
        GPIO.output(StationControl.latch_pin, False)
        for station in range(0, self.num_stations):
            GPIO.output(StationControl.clock_pin, False)
            GPIO.output(StationControl.data_pin, self.station_status[self.num_stations - 1 - station])
            GPIO.output(StationControl.clock_pin, True)
        GPIO.output(StationControl.latch_pin, True)

    def toggle_shift_register_output(self, value):
        if value:
            GPIO.output(StationControl.out_pin, False)
        else:
            GPIO.output(StationControl.out_pin, True)

    def setup_gpio(self):

        if not GPIO:
            print("Error: setup_gpio() doesn't have GPIO module")
            return

        # setup GPIO pins to interface with shift register
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(StationControl.clock_pin, GPIO.OUT)
        GPIO.setup(StationControl.out_pin, GPIO.OUT)
        self.toggle_shift_register_output(False)
        GPIO.setup(StationControl.data_pin, GPIO.OUT)
        GPIO.setup(StationControl.latch_pin, GPIO.OUT)
        self.set_shift_register_values()
        self.toggle_shift_register_output(True)
        self.reset_stations()
        print("GPIO setup successfully")

    def reset_stations(self):
        # keep the same dict shape as __init__ (station_id -> bool)
        self.station_status = {station_id: False for station_id in self.active_stations}

    def cleanup(self):
        self.reset_stations()
        GPIO.cleanup()

    def optimize_duration(self, fixed_duration):
        optimized, forecasted_temp, base_temp = fixed_duration, 70, 75

        # call data handler and get historical for last 7 days, inc. today
        # if it rained today, don't water, return 0

        # else return int(forecasted_temp * (fixed_duration/avg_temp))

        return optimized, forecasted_temp, base_temp

    def water(self, args):
        """
        args parameter contains a dict with args that are necessary for watering (station, duration).
        other args are for optimizing the duration (fixed_duration)
        the return values from optimize_duration will also be inserted into the dict
        args will be passed to data_handler for insertion; all keys map directly to columns in the historical table
        :param args: dictionary with keys 'datetime', 'station', 'fixed_duration', 'manual'
        """
        station = args["station"]
        fixed_duration = args["fixed_duration"]

        optimized_duration, forecasted_temp, base_temp = self.optimize_duration(fixed_duration)

        print("Station {} watering for {} min at {}".format(station, optimized_duration, datetime.now().strftime("%c")))

        # activate solenoid
        self.set_station(station, True)
        self.set_shift_register_values()

        # water and wait
        seconds = int(optimized_duration) * 60
        while seconds > 0:
            print("Drip.... second {}".format(seconds))
            sleep(1)
            seconds -= 1

        # deactivate solenoid
        self.set_station(station, False)
        self.set_shift_register_values()

        print("Station {} finished watering".format(station))

        # add a few more k, v pairs before passing to data_handler for building a SQL insert statement
        args["forecasted_temp"] = forecasted_temp
        args["base_temp"] = base_temp
        args["optimized_duration"] = optimized_duration

        # send args to data handler for insertion to db
        self.data_handler.insert_historical_record(args)
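
The schedule JSON consumed by set_schedule() comes from the data handler and is
not shown. A hypothetical shape, inferred from how the method indexes it
(station keys end in the station id, which must be in active_stations; each day
maps to a list of start times with durations):

schedule = {
    "schedule": {
        "station1": {
            "monday": {"start_times": [{"time": "06:30 AM", "duration": "10"}]},
            "thursday": {"start_times": [{"time": "07:00 PM", "duration": "5"}]},
        }
    }
}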
示例#45
0
from db_back import create_backup
import model_blueprint
import db_config

app = Flask(__name__)
app.config.from_object('app_config.config')

app.config.update(db_config.load_config())

scheduler = BackgroundScheduler()
scheduler.add_listener(execution_listener, EVENT_JOB_EXECUTED)
scheduler.add_listener(error_listener, EVENT_JOB_ERROR)
scheduler.add_job(create_backup, 'cron',
                  hour=int(app.config[SCHED_HOUR]),
                  minute=int(app.config[SCHED_MINUTE]),
                  id=app.config[SCHED_JOBID])
scheduler.start()

scheduler.get_jobs()
app.register_blueprint(model_blueprint.blueprint)

from rotatordb import db_session


@app.teardown_request
def remove_db_session(exception):
    db_session.remove()


if __name__ == '__main__':
    app.run(host='0.0.0.0')
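
execution_listener and error_listener are referenced above but not defined in
this excerpt. A minimal sketch, assuming they only log the event:

# Hypothetical listeners assumed by the example above
def execution_listener(event):
    print('job %s executed' % event.job_id)

def error_listener(event):
    print('job %s raised %r' % (event.job_id, event.exception))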
示例#46
0
class FlaskReport(object):
    def __init__(self, db, model_map, app, blueprint=None, extra_params=None, table_label_map=None, mail=None):
        self.db = db
        self.app = app
        host = blueprint or app
        self.conf_dir = app.config.get("REPORT_DIR", "report_conf")
        self.report_dir = os.path.join(self.conf_dir, "reports")
        self.notification_dir = os.path.join(self.conf_dir, "notifications")
        self.data_set_dir = os.path.join(self.conf_dir, "data_sets")
        self.model_map = model_map  # model name -> model
        self.table_label_map = table_label_map or {}
        self.table_map = dict((model.__tablename__, model) for model in model_map.values())  # table name -> model
        if not os.path.exists(self.conf_dir):
            os.makedirs(self.conf_dir)
        if not os.path.exists(self.report_dir):
            os.makedirs(self.report_dir)
        if not os.path.exists(self.data_set_dir):
            os.makedirs(self.data_set_dir)

        host.route("/report-list/")(self.report_list)
        host.route("/new-report/", methods=['POST'])(self.new_report)
        host.route("/graphs/report/<int:id_>")(self.report_graphs)
        host.route("/report/<int:id_>", methods=['GET', 'POST'])(self.report)
        host.route("/report_csv/<int:id_>")(self.report_csv)
        host.route("/report_pdf/<int:id_>")(self.report_pdf)
        host.route("/report_txt/<int:id_>")(self.report_txt)
        host.route("/drill-down-detail/<int:report_id>/<int:col_id>")(self.drill_down_detail)

        host.route("/data-sets/")(self.data_set_list)
        host.route("/data-set/<int:id_>")(self.data_set)
        host.route("/notification-list")(self.notification_list)
        host.route("/notification/", methods=['GET', 'POST'])(self.notification)
        host.route("/notification/<int:id_>", methods=['GET', 'POST'])(self.notification)
        host.route("/push_notification/<int:id_>", methods=['POST'])(self.push_notification)
        host.route("/start_notification/<int:id_>", methods=['GET'])(self.start_notification)
        host.route("/stop_notification/<int:id_>", methods=['GET'])(self.stop_notification)
        host.route("/schedule-list")(self.get_schedules)

        from flask import Blueprint
        # register it for using the templates of data browser
        self.blueprint = Blueprint("report____", __name__,
                                   static_folder="static",
                                   template_folder="templates")
        app.register_blueprint(self.blueprint, url_prefix="/__report__")
        self.extra_params = extra_params or {'report': lambda id_: {},
                                             'report_list': lambda: {},
                                             'data_set': lambda id_: {},
                                             'data_sets': lambda: {},
                                             'notification_list': lambda: {},
                                             'notification': lambda id_: {}}

        @app.template_filter("dpprint")
        def dict_pretty_print(value):
            if not isinstance(value, list):
                value = [value]
            s = "{"
            for val in value:
                idx = 0
                for k, v in val.items():
                    idx += 1
                    s += "%s:%s" % (k, v)
                    if idx != len(val):
                        s += ","
            s += "}"
            return s

        self.mail = mail or Mail(self.app)
        self.sched = BackgroundScheduler()
        if app.config.get('FLASK_REPORT_SEND_NOTIFICATION'):
            self.sched.start()

            with app.test_request_context():
                for notification in get_all_notifications(self):
                    if notification.enabled:
                        self.start_notification(notification.id_)


    def try_view_report(self):
        pass

    def try_edit_data_set(self):
        pass

    def try_edit_notification(self):
        pass

    def report_graphs(self, id_):
        report = Report(self, id_)
        return render_template("report____/graphs.html", url=request.args.get("url"), bar_charts=report.bar_charts,
                               name=report.name, pie_charts=report.pie_charts)

    def data_set_list(self):
        self.try_edit_data_set()
        data_sets = [DataSet(self, int(dir_name)) for dir_name in os.listdir(self.data_set_dir) if
                     dir_name.isdigit() and dir_name != '0']
        params = dict(data_sets=data_sets)
        extra_params = self.extra_params.get("data_sets")
        if extra_params:
            if isinstance(extra_params, types.FunctionType):
                extra_params = extra_params()
            params.update(extra_params)
        return render_template("report____/data-sets.html", **params)

    def data_set(self, id_):
        self.try_edit_data_set()
        data_set = DataSet(self, id_)
        SQL_html = highlight(query_to_sql(data_set.query), SqlLexer(), HtmlFormatter())
        params = dict(data_set=data_set, SQL=SQL_html)
        extra_params = self.extra_params.get('data_set')
        if extra_params:
            if isinstance(extra_params, types.FunctionType):
                extra_params = extra_params(id_)
            params.update(extra_params)
        return render_template("report____/data-set.html", **params)

    def _get_report_list(self):
        return [Report(self, int(dir_name)) for dir_name in os.listdir(self.report_dir) if
                dir_name.isdigit() and dir_name != '0']

    def report_list(self):
        self.try_view_report()
        # directory 0 is reserved for special purpose
        reports = self._get_report_list()
        params = dict(reports=reports)
        extra_params = self.extra_params.get('report_list')
        if extra_params:
            if isinstance(extra_params, types.FunctionType):
                extra_params = extra_params()
            params.update(extra_params)
        return render_template('report____/report-list.html', **params)

    def report(self, id_=None):
        self.try_view_report()
        if id_ is not None:
            report = Report(self, id_)

            html_report = report.html_template.render(report=report)
            code = report.read_literal_filter_condition()

            SQL_html = highlight(query_to_sql(report.query), SqlLexer(), HtmlFormatter())
            params = dict(report=report, html_report=html_report, SQL=SQL_html)
            if code is not None:
                customized_filter_condition = highlight(code, PythonLexer(), HtmlFormatter())
                params['customized_filter_condition'] = customized_filter_condition
            extra_params = self.extra_params.get("report")
            if extra_params:
                if isinstance(extra_params, types.FunctionType):
                    extra_params = extra_params(id_)
                params.update(extra_params)
            return render_template("report____/report.html", **params)

    def _write_report(self, to_dir, **kwargs):
        import yaml

        kwargs.setdefault("name", "temp")
        kwargs.setdefault("description", "temp")
        import datetime

        kwargs["create_time"] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        file_name = os.path.join(to_dir, "meta.yaml")
        if os.path.isfile(file_name):
            new_file_name = os.path.join(to_dir, "meta.yaml~")
            if os.path.isfile(new_file_name):
                os.unlink(new_file_name)
            os.rename(file_name, new_file_name)
        with open(file_name, "w") as f:
            yaml.safe_dump(kwargs, allow_unicode=True, stream=f)

    def _get_report(self, id_, ReportClass):
        from flask.ext.report.report_templates import BaseReport

        assert issubclass(ReportClass, BaseReport)
        data = Report(self, id_)
        if not data.data:
            raise ValueError
        report = ReportClass(queryset=data.data, columns=data.columns, report_name=data.name,
                             sum_columns=data.sum_columns, avg_columns=data.avg_columns)
        return report

    def _get_report_value(self, id_, ReportClass, ReportGenerator, first_row_with_column_names=False):
        from flask.ext.report.report_templates import BaseReport

        assert issubclass(ReportClass, BaseReport)

        from geraldo.generators import base

        assert issubclass(ReportGenerator, base.ReportGenerator)
        try:
            from cStringIO import StringIO
        except ImportError:
            from StringIO import StringIO
        return_fileobj = StringIO()
        from flask.ext.report.writer import UnicodeWriter

        report = self._get_report(id_, ReportClass)

        report.generate_by(ReportGenerator, filename=return_fileobj, writer=UnicodeWriter(return_fileobj),
                           first_row_with_column_names=first_row_with_column_names)
        return return_fileobj

    def _get_report_class(self, id_, default=None):
        if default is None:
            raise ValueError
        filter_def_file = os.path.join(self.report_dir, str(id_), "report_templates.py")
        if not os.path.exists(filter_def_file):
            filter_def_file = os.path.join(self.report_dir, "0", "report_templates.py")
        if os.path.exists(filter_def_file):
            from import_file import import_file

            lib = import_file(filter_def_file)
            return getattr(lib, default.__name__, default)
        return default

    def report_csv(self, id_):
        from geraldo.generators import CSVGenerator
        from flask.ext.report.report_templates import CSVReport

        try:
            return_fileobj = self._get_report_value(id_, self._get_report_class(id_, CSVReport), CSVGenerator, True)
        except ValueError:
            return render_template("report____/error.html", error=u"Report not found", message=u"Cannot export an empty report"), 403
        from flask import Response

        response = Response(return_fileobj.getvalue(), mimetype="text/csv")
        response.headers["Content-disposition"] = "attachment; filename={}.csv".format(str(id_))
        return response

    def report_pdf(self, id_):
        from flask.ext.report.report_templates import PDFReport
        from geraldo.generators import PDFGenerator

        try:
            return_fileobj = self._get_report_value(id_, self._get_report_class(id_, PDFReport), PDFGenerator, True)
        except ValueError:
            return render_template("report____/error.html", error=u"Report not found", message=u"Cannot export an empty report"), 403
        from flask import Response

        response = Response(return_fileobj.getvalue(), mimetype="application/pdf")
        response.headers["Content-disposition"] = "attachment; filename={}.pdf".format(str(id_))
        return response

    def report_txt(self, id_):
        from flask.ext.report.report_templates import TxtReport

        from geraldo.generators import TextGenerator

        try:
            return_fileobj = self._get_report_value(id_, self._get_report_class(id_, TxtReport), TextGenerator, True)
        except ValueError:
            return render_template("report____/error.html", error=u"Report not found", message=u"Cannot export an empty report"), 403
        from flask import Response

        response = Response(return_fileobj.getvalue(), mimetype="text/plain")
        response.headers["Content-disposition"] = "attachment; filename={}.txt".format(str(id_))
        return response

    def get_model_label(self, table):
        return self.table_label_map.get(table.name) or self.table_map[table.name].__name__

    def drill_down_detail(self, report_id, col_id):
        filters = request.args
        report = Report(self, report_id)
        col = report.data_set.columns[col_id]['expr']
        col = get_column_operated(getattr(col, 'element', col))
        model_name = self.get_model_label(col.table)
        items = report.get_drill_down_detail(col_id, **filters)
        return report.get_drill_down_detail_template(col_id).render(items=items,
                                                                    key=col.key,
                                                                    model_name=model_name,
                                                                    report=report)

    def notification_list(self):
        notifications = [Notification(self, int(dir_name)) for dir_name in os.listdir(self.notification_dir) if
                         dir_name.isdigit() and dir_name != '0']
        params = dict(notification_list=notifications)
        extra_params = self.extra_params.get("notification_list")
        if extra_params:
            if isinstance(extra_params, types.FunctionType):
                extra_params = extra_params()
            params.update(extra_params)
        return render_template("report____/notification-list.html", **params)

    def notification(self, id_=None):
        self.try_edit_notification()

        def _write(form, id_):
            kwargs = dict(name=form["name"], senders=form.getlist("sender"),
                          report_ids=form.getlist("report_ids", type=int), description=form["description"],
                          subject=form["subject"], crontab=form["crontab"],
                          enabled=form.get("enabled", type=bool, default=False))
            dump_yaml(os.path.join(self.notification_dir, str(id_), 'meta.yaml'), **kwargs)

        if id_ is not None:
            notification = Notification(self, id_)

            if request.method == "POST":
                if request.form.get('action') == _('Enable'):
                    self.start_notification(id_)
                elif request.form.get("action") == _("Disable"):
                    self.stop_notification(id_)  # any change will incur disable
                else:
                    _write(request.form, id_)
                flash(_("Update Successful!"))
                return redirect(url_for(".notification", id_=id_, _method="GET"))
            else:
                params = dict(notification=notification,
                              report_list=self._get_report_list())
                extra_params = self.extra_params.get("notification")
                if extra_params:
                    if isinstance(extra_params, types.FunctionType):
                        extra_params = extra_params(id_)
                    params.update(extra_params)
                return render_template("report____/notification.html", **params)
        else:
            if request.method == "POST":
                id_ = max([int(dir_name) for dir_name in os.listdir(self.notification_dir) if
                           dir_name.isdigit() and dir_name != '0']) + 1
                new_dir = os.path.join(self.notification_dir, str(id_))
                if not os.path.exists(new_dir):
                    os.mkdir(new_dir)
                _write(request.form, id_)
                flash(_("Save Successful!"))
                return redirect(url_for(".notification", id_=id_))
            else:
                params = dict(report_list=self._get_report_list())
                extra_params = self.extra_params.get("notification")
                if extra_params:
                    if isinstance(extra_params, types.FunctionType):
                        extra_params = extra_params()
                    params.update(extra_params)
                return render_template("report____/notification.html", **params)

    def push_notification(self, id_):
        to = request.args.get('to')
        notification = Notification(self, id_)
        if not to:
            senders = notification.senders
        else:
            senders = [to]

        for sender in senders:
            if sender not in notification.senders:
                return _('notification %(id_)s is not allowed to send to %(to)s', id_=id_, to=sender), 403
        html = notification.template.render(notification=notification)
        msg = Message(subject=notification.subject,
                      html=html,
                      sender="*****@*****.**",
                      recipients=senders)
        self.mail.send(msg)
        return 'ok'

    def start_notification(self, id_):
        notification = Notification(self, id_)

        def _closure(environ):
            def _push_notification():
                with self.app.request_context(environ):
                    self.push_notification(id_)

            return _push_notification

        # BackgroundScheduler (APScheduler 3.x) has no add_cron_job(); use
        # add_job() with the 'cron' trigger instead
        job = self.sched.add_job(_closure(request.environ), 'cron',
                                 name='flask_report_notification' + str(id_),
                                 **notification.crontab._asdict())
        notification.enabled = True
        notification.dump()
        return 'ok'

    def stop_notification(self, id_):
        jobs = self.sched.get_jobs()
        for job in jobs:
            if job.name == 'flask_report_notification' + str(id_):
                notification = Notification(self, id_)
                notification.enabled = False
                notification.dump()
                # unschedule_job() is the APScheduler 2.x API; in 3.x a job
                # removes itself from its scheduler
                job.remove()
                return 'ok'
        return 'unknown notification: ' + str(id_), 404

    def get_schedules(self):
        return json.dumps([str(job) for job in self.sched.get_jobs()])

    def new_report(self):

        form = _ReportForm(self, request.form)

        if form.validate():
            def parse_filters(filters):
                result = {}
                for current in filters:
                    if current["col"] not in result:
                        result[current["col"]] = {'operator': current["op"], 'value': current["val"],
                                                  'proxy': current['proxy']}
                    else:
                        val = result[current["col"]]
                        if not isinstance(val, list):
                            val = [val]
                        val.append({'operator': current["op"], 'value': current["val"], 'proxy': current['proxy']})
                        result[current["col"]] = val
                return result

            name = form.name.data
            id = None
            if request.args.get('preview'):
                name += '(' + _('Preview') + ')'
                id = 0
            report_id = create_report(form.data_set, name=name, creator=form.creator.data,
                                      description=form.description.data, id=id, columns=form.columns.data,
                                      filters=parse_filters(json.loads(form.filters.data)))
            return jsonify({'id': report_id, 'name': form.name.data, 'url': url_for('.report', id_=report_id)})
        else:
            return jsonify({'errors': form.errors}), 403
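
start_notification() spreads notification.crontab._asdict() into the cron
trigger, so crontab is presumably a namedtuple whose fields are cron
parameters. A sketch of that assumption:

# Hypothetical crontab shape assumed by start_notification()
from collections import namedtuple

Crontab = namedtuple('Crontab', ['day_of_week', 'hour', 'minute'])
crontab = Crontab(day_of_week='mon-fri', hour=8, minute=0)
# crontab._asdict() -> {'day_of_week': 'mon-fri', 'hour': 8, 'minute': 0},
# which maps directly onto add_job(func, 'cron', **crontab._asdict())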
示例#47
0
    """
    Handle Ctrl-C

    :param signal:
    :param frame:
    :return:
    """
    print '\nCtrl-C detected, cleaning up...'
    cleanup(force_exit=True)


print('Starting Garden Pi')
signal.signal(signal.SIGINT, signal_handler)
scheduler = BackgroundScheduler()
measurement_data = MeasurementData()
#scheduler.add_job(log_measurements, args=[measurement_data], trigger='cron', minute='*/5', name='Data Logger', id='data_logger', max_instances=1, misfire_grace_time=20)
scheduler.add_job(log_measurements, args=[measurement_data], trigger='cron', minute='*', name='Data Logger', id='data_logger', max_instances=1, misfire_grace_time=20)
for zone_name in garden_pi_zones.keys():
    #scheduler.add_job(water_zone, trigger='cron', hour='7,18', minute=15, args=[zone_name], name=zone_name, max_instances=1, misfire_grace_time=20)
    scheduler.add_job(water_zone, trigger='cron', minute="*", kwargs={'zone_name': zone_name, 'force_water': False}, name=zone_name, max_instances=1, misfire_grace_time=20)

scheduler.start()
for job in scheduler.get_jobs():
    print("Job: %s, Func: %s, Next run time: %s" % (job.name, job.func_ref, job.next_run_time))
    sys.stdout.flush()

while True:
    time.sleep(1)

cleanup()
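
garden_pi_zones, water_zone and log_measurements come from elsewhere in the
project. A hypothetical zone table matching how the loop above uses it (only
the keys are iterated; the values are assumed to map zones to hardware
channels):

garden_pi_zones = {'tomatoes': 1, 'herbs': 2, 'lawn': 3}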
示例#48
0
# UTC as the scheduler’s timezone
scheduler = BackgroundScheduler(
    jobstores=job_stores,
    executors=executors,
    job_defaults=job_defaults,
    timezone=utc,
    daemon=False
)


def current_time():
    import datetime
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")


def job1():
    print 'job1 is running, Now is %s' % current_time()


def job2():
    print 'job2 is running, Now is %s' % current_time()


# Run job1 every 5 seconds; replace_existing=True guards against a duplicate
# job id aborting startup
scheduler.add_job(job1, 'interval', seconds=5, id='job1', replace_existing=True)
# Run job2 every 5 seconds via a cron trigger
scheduler.add_job(job2, 'cron', second='*/5', id='job2', replace_existing=True)
scheduler.start()
print scheduler.get_jobs()
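
job_stores, executors and job_defaults are referenced by the constructor call
above but not shown; a typical configuration, as an assumption:

# Hypothetical configuration assumed by the BackgroundScheduler call above
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor

job_stores = {'default': MemoryJobStore()}
executors = {'default': ThreadPoolExecutor(10)}
job_defaults = {'coalesce': True, 'max_instances': 1}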
示例#49
0
class Stats(RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    _CONTEXTS = {
        'dpset': DPSet,
        'wsgi': WSGIApplication,
    }

    def __init__(self, *args, **kwargs):
        super(Stats, self).__init__(*args, **kwargs)
        # logger settings
        self.logger.setLevel(logging.INFO)
        self.logger.info('Application %s initialized', __name__)

        # routing
        self.mac_to_port = {}

        # dpset instance
        self.dpset = kwargs['dpset']

        # setup wsgi
        wsgi = kwargs['wsgi']
        wsgi.register(StatsRestApi, {
            StatsRestApi.controller_instance_name: self,
            StatsRestApi.dpset_instance_name: self.dpset
        })

        # init scheduler
        self.interval = STATS_INTERVAL
        self.sched = BackgroundScheduler()
        self.sched.start()
        logging.getLogger('apscheduler.executors.default').setLevel(logging.WARNING)
        logging.getLogger('apscheduler.scheduler').propagate = False

        # init place for PNDA watchdog
        try:
            os.mkdir('out')
        except OSError:
            pass

        # stats logs to file
        try:
            os.mkdir('stats')
        except OSError:
            pass

    def set_interval(self, interval):
        self.interval = interval
        self.change_sched_interval(interval)

    def change_sched_interval(self, interval):
        self.logger.debug("Rescheduling stat request to %i seconds", interval)
        for s in self.sched.get_jobs():
            self.logger.debug('rescheduling job %s', s.id)
            it = IntervalTrigger(seconds=interval)
            self.sched.reschedule_job(s.id, trigger=it)

    def send_flow_stats_request(self, datapath):
        # https://osrg.github.io/ryu-book/en/html/traffic_monitor.html
        self.logger.debug('Sending flow stat request to sw: %016x', datapath.id)
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        req = parser.OFPFlowStatsRequest(datapath)

        datapath.send_msg(req)
        with open('stats/flow_stats_req.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' + str(ofproto.OFP_FLOW_STATS_SIZE) + ' ' + str(req) + '\n')

    def send_port_stats_request(self, datapath):
        self.logger.debug('Sending port stat request to sw: %016x', datapath.id)
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        req = parser.OFPPortStatsRequest(datapath, 0, ofproto.OFPP_ANY)
        datapath.send_msg(req)
        with open('stats/port_stats_req.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' + str(ofproto.OFP_PORT_STATS_SIZE) + ' ' + str(req) + '\n')

    @set_ev_cls(ofp_event.EventOFPFlowStatsReply, MAIN_DISPATCHER)
    def flow_stats_reply_handler(self, ev):
        body = ev.msg.body
        flow_stats = []
        for stat in sorted([flow for flow in body if flow.priority == 1],
                           key=lambda flow: (flow.match['in_port'],
                                             flow.match['eth_dst'])):
            flow_stats.append('table_id=%s '
                              'duration_sec=%d duration_nsec=%d '
                              'priority=%d '
                              'idle_timeout=%d hard_timeout=%d flags=0x%04x '
                              'cookie=%d packet_count=%d byte_count=%d '
                              'match=%s instructions=%s' %
                              (stat.table_id,
                               stat.duration_sec, stat.duration_nsec,
                               stat.priority,
                               stat.idle_timeout, stat.hard_timeout, stat.flags,
                               stat.cookie, stat.packet_count, stat.byte_count,
                               stat.match, stat.instructions))
            data = {
                "origin": "flow_stats",
                "timestamp": time.time(),
                "switch_id": ev.msg.datapath.id,
                "duration_sec": stat.duration_sec,
                "duration_nsec": stat.duration_nsec,
                "src_mac": stat.match['eth_src'],
                "dst_mac": stat.match['eth_dst'],
                "byte_count": stat.byte_count,
                "packet_count": stat.packet_count,
                "in_port": stat.match['in_port']
            }

            with open('out/flow_stats.out', 'a') as file:
                file.write(json.dumps(data) + '\n')

        self.logger.debug('FlowStats for switch %i: %s', ev.msg.datapath.id, flow_stats)
        with open('stats/flow_stats_reply.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' + str(ev.msg.msg_len) + '\n')

    @set_ev_cls(ofp_event.EventOFPPortStatsReply, MAIN_DISPATCHER)
    def port_stats_reply_handler(self, ev):
        body = ev.msg.body
        port_stats = []
        for stat in sorted(body, key=attrgetter('port_no')):
            port_stats.append("port_no=%d "
                              "rx_packets=%d "
                              "rx_bytes=%d "
                              "rx_errors=%d "
                              "tx_packets=%d "
                              "tx_bytes=%d "
                              "tx_errors=%d " %
                              (stat.port_no,
                               stat.rx_packets, stat.rx_bytes, stat.rx_errors,
                               stat.tx_packets, stat.tx_bytes, stat.tx_errors)
                              )
            data = {
                "origin": "port_stats",
                "timestamp": time.time(),
                "switch_id": ev.msg.datapath.id,
                "port_no": stat.port_no,
                "rx_packets": stat.rx_packets,
                "rx_bytes": stat.rx_bytes,
                "rx_errors": stat.rx_errors,
                "tx_packets": stat.tx_packets,
                "tx_bytes": stat.tx_bytes,
                "tx_errors": stat.tx_errors
            }

            with open('out/port_stats.out', 'a') as file:
                file.write(json.dumps(data) + '\n')
        self.logger.debug('PortStats for switch %i: %s', ev.msg.datapath.id, port_stats)
        with open('stats/port_stats_reply.txt', 'a') as file:
            file.write(str(int(time.time())) + ' ' + str(ev.msg.msg_len) + '\n')

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def switch_features_handler(self, ev):
        datapath = ev.msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        # install table-miss flow entry
        #
        # We specify NO BUFFER to max_len of the output action due to
        # OVS bug. At this moment, if we specify a lesser number, e.g.,
        # 128, OVS will send Packet-In with invalid buffer_id and
        # truncated packet data. In that case, we cannot output packets
        # correctly.  The bug has been fixed in OVS v2.1.0.
        match = parser.OFPMatch()
        actions = [parser.OFPActionOutput(ofproto.OFPP_CONTROLLER,
                                          ofproto.OFPCML_NO_BUFFER)]
        self.add_flow(datapath, 0, match, actions, 0)

        # Add job to scheduler
        self.logger.debug('Starting scheduler for dp = %i', ev.msg.datapath.id)
        ## flow stats
        self.sched.add_job(self.send_flow_stats_request, 'interval', seconds=STATS_INTERVAL, start_date=START_DATE,
                           args=[ev.msg.datapath])
        ## port stats
        self.sched.add_job(self.send_port_stats_request, 'interval', seconds=STATS_INTERVAL, start_date=START_DATE,
                           args=[ev.msg.datapath])

    def add_flow(self, datapath, priority, match, actions, idle_timeout, buffer_id=None):
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser

        inst = [parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,
                                             actions)]
        if buffer_id:
            mod = parser.OFPFlowMod(datapath=datapath, buffer_id=buffer_id,
                                    priority=priority, match=match,
                                    instructions=inst,idle_timeout=idle_timeout)
        else:
            mod = parser.OFPFlowMod(datapath=datapath, priority=priority,
                                    match=match, instructions=inst,idle_timeout=idle_timeout)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        # If you hit this you might want to increase
        # the "miss_send_length" of your switch
        if ev.msg.msg_len < ev.msg.total_len:
            self.logger.debug("packet truncated: only %s of %s bytes",
                              ev.msg.msg_len, ev.msg.total_len)
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        parser = datapath.ofproto_parser
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        if eth.ethertype == ether_types.ETH_TYPE_LLDP:
            # ignore lldp packet
            return
        dst = eth.dst
        src = eth.src

        # self.logger.info("packet in dpid = %s src = %s dst = %s in_port = %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            match = parser.OFPMatch(in_port=in_port, eth_dst=dst, eth_src=src)

            # verify if we have a valid buffer_id, if yes avoid to send both
            # flow_mod & packet_out
            if msg.buffer_id != ofproto.OFP_NO_BUFFER:
                self.add_flow(datapath, 1, match, actions, 3, msg.buffer_id)
                return
            else:
                self.add_flow(datapath, 1, match, actions, 3)
        data = None
        if msg.buffer_id == ofproto.OFP_NO_BUFFER:
            data = msg.data

        out = parser.OFPPacketOut(datapath=datapath, buffer_id=msg.buffer_id,
                                  in_port=in_port, actions=actions, data=data)
        datapath.send_msg(out)
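
STATS_INTERVAL and START_DATE are module-level constants assumed by the Stats
app above; for example:

# Hypothetical constants assumed by the example above
import datetime
STATS_INTERVAL = 10  # seconds between stat requests
START_DATE = datetime.datetime.now() + datetime.timedelta(seconds=5)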