class JobScheduler(object):

    def __init__(self, config):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(
            self.job_process,
            'interval',
            seconds=config['JOB_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.scheduler.add_job(
            self.notification_job_process,
            'interval',
            seconds=config['DELIVERY_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.config = config

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown(wait=True)

    def job_process(self):
        process_jobs(self.config)

    def notification_job_process(self):
        process_notification_job(self.config)
Example #2
def go(managedNamespace):
	statusMgr = statusDbManager.StatusResource()
	managedNamespace.run = True
	managedNamespace.serverRun = True

	checkInitDbs()

	server_process = multiprocessing.Process(target=serverProcess, args=(managedNamespace,))

	sched = BackgroundScheduler()

	scheduleJobs(sched, managedNamespace)
	server_process.start()
	sched.start()

	loopCtr = 0
	while managedNamespace.run:
		time.sleep(0.1)

		if loopCtr % 100 == 0:
			for job in sched.get_jobs():
				statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
		loopCtr += 1

	sched.shutdown()
	server_process.join()
Example #3
class MyScheduler(object):
	"""scheduler class, difine scheduler and jobs"""
	def __init__(self):
		super(MyScheduler, self).__init__()
		self.scheduler = BackgroundScheduler()
		self.wish_time = setting.WISH_DELAY
		self.book_time = setting.BOOK_LAST_TIME

	def start(self):
		self.scheduler.start()

	def shutdown(self):
		self.scheduler.shutdown()

	def add_check_wish_status_job(self, wish_id):
		# print datetime.now()
		run_date = datetime.now() + timedelta(seconds=self.wish_time)
		self.scheduler.add_job(_reset_wish_status, 'date', args=[wish_id],
			run_date=run_date)

	def add_set_book_removed_job(self, book_id):
		run_date = datetime.now() + timedelta(days=self.book_time)
		self.scheduler.add_job(_set_book_removed, 'date', args=[book_id],
			run_date=run_date)

	def add_xapian_reindex_job(self, xapian_tool):
		self.scheduler.add_job(xapian_tool.index, 'interval', minutes=15)
Example #4
class TimerTrigger(BaseTrigger):
    name = "timer"
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config={}, sched=None, connection=None):
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = "timer"
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def stop(self):
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        def toList(item):
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self, types=["timer"], timespecs=toList(trigger["time"]))

            efilters.append(f)

        return efilters

    def postConfig(self):
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error(
                            "Unable to parse time value '%s' " "defined in pipeline %s" % (timespec, pipeline.name)
                        )
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour, minute=minute, second=second)

                    self.apsched.add_job(self._onTrigger, trigger=trigger, args=(pipeline.name, timespec))
Example #5
    def post(self, action, position = ''):
        global scheduler
        self.checkStartup()
        
        if action == 'play':
            runCommand('mpc play ' + position)
            #Settings.set('radio', 'state', 'play')
            
            if scheduler is None:
                scheduler = BackgroundScheduler()
                scheduler.add_job(self.checkStatus, 'interval', seconds=30, id='checkStatus', replace_existing=True)
                scheduler.start()
        elif action == 'stop':
            runCommand('mpc stop')
            #Settings.set('radio', 'state', 'stop')
            
            if scheduler is not None:
                scheduler.remove_job('checkStatus')
                scheduler.shutdown()
                scheduler = None
            return {'playMode': 'stopped'}
        elif action =='pause':
            runCommand('mpc pause')
        elif action =='next':
            runCommand('mpc next')
        elif action =='previous':
            runCommand('mpc prev')
        else:
            return {'playMode': 'invalid'}

        (out, err) = runCommand('mpc status')
        if err:
            return {'error': err}, 500
        return {'playMode': Parser.parsePlayMode(out)}
Example #6
def main(args):
    scheduler = BackgroundScheduler(coalesce=True, misfire_grace_time=4)
    taskparser = TaskParser(args['f'])
    taskparser.parse()

    yml_handler = YmlFileEventHandler(patterns=["*.yml"])
    yml_handler.set_scheduler(scheduler)
    yml_handler.set_parser(taskparser)
    file_observer = Observer()
    file_observer.schedule(yml_handler, path=args['f'], recursive=False)
    file_observer.start()

    # Initial parsing of the task folder
    for t in taskparser.task_list:
        addJob(t, scheduler)
        # Spread tasks from each other to prevent overload/task miss
        time.sleep(1)

    scheduler.start()

    # Update jobs while running
    while True:
        try:
            time.sleep(15)
        except KeyboardInterrupt:
            break

    scheduler.shutdown()
Example #7
File: quote.py Project: xujhao/py
class RealTimeQuote(object):
    def __init__(self, cf, codelist, eventEngine_):
        self._codelist = codelist
        logger.info("codelist:%s", self._codelist)
        self._eventEngine = eventEngine_
        #self._eventEngine.register(EVENT_TIMER, self.TimerCall)
        self._sched  = BackgroundScheduler()

    def start(self):
        self._sched.add_job(self.TimerCall, 'interval',  seconds=3)
        self._sched.start()
        logger.info('RealTimeQuote start')

    def stop(self):
        logger.info('RealTimeQuote stop')
        self._sched.shutdown()

    def TimerCall(self):
        '''
        Periodically fetch the latest quotes for the codes in the code list.
        :return:
        '''
        if len(self._codelist) < 1:
            return

        rtQuote = GetRealTimeQuote(self._codelist)
        for i in range(rtQuote.shape[0]):
            itQuote = rtQuote.ix[i]
            if float(itQuote['amount']) <= 0.01:
                continue
            event = Event(type_=EVENT_MARKETDATA_CONTRACT + itQuote['code'])
            event.dict_['tick'] = itQuote
            self._eventEngine.put(event)
Example #8
def go():
	preflight()

	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*60)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*15)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*5)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=20)
	startTime = datetime.datetime.now()+datetime.timedelta(seconds=10)
	scheduleJobs(sched, startTime)
	sched.start()

	# spinwait for ctrl+c, and exit when it's received.
	loops = 0
	while runStatus.run:
		time.sleep(0.1)
		# loops += 1
		# if loops > 100:
		# 	logging_tree.printout()
		# 	loops = 0

	print("Scraper stopping scheduler")
	sched.shutdown()
	nt.dirNameProxy.stop()
Example #9
def mail2diaspora(config_pathname):

    # configure logging
    logger = logging.getLogger(__name__)
    configure_logging(logging.INFO)

    logger.info("Start mail2diaspora application")
    config.initialize(config_pathname)

    os.chdir(config.get(config.TEMP))

    # cron email fetcher
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        diaspora.mail_poll, "interval", seconds=config.getInt(config.MAIL_POLLING)
    )
    scheduler.start()

    print("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()

    logger.info("Stop mail2diaspora application")
Example #10
    def build(cube_name_list, buildType, start_time=None, end_time=None):
        run_cube_job_id = '1'
        check_cube_job_id = '2'
        scheduler = BackgroundScheduler()
        CubeWorker.job_instance_dict = {}

        for cube_name in cube_name_list:
            CubeWorker.job_instance_dict[cube_name] = None

        CubeWorker.scheduler = scheduler
        CubeWorker.run_cube_job_id = run_cube_job_id
        CubeWorker.check_cube_job_id = check_cube_job_id
        # start the run cube job immediately
        CubeWorker.run_cube_job(buildType, start_time, end_time)

        scheduler.add_job(CubeWorker.run_cube_job, 'interval', seconds=30, id=run_cube_job_id, args=[buildType, start_time, end_time])
        scheduler.add_job(CubeWorker.check_cube_job, 'interval', seconds=30, id=check_cube_job_id)
        scheduler.start()

        while True:
            if CubeWorker.all_finished():
                print "all cube jobs are finished"
                scheduler.remove_job(check_cube_job_id)
                scheduler.remove_job(run_cube_job_id)
                scheduler.shutdown()
                
                status = CubeWorker.get_status()
                print('Build exited with status %s' % status)
                return status == CubeWorkerStatus.SUCCESS

            time.sleep(settings.KYLIN_CHECK_STATUS_INTERVAL)
Example #11
def go(managedNamespace):
	log.info("Go()")


	resetter = xascraper.status_monitor.StatusResetter()
	resetter.resetRunState()

	# statusMgr = manage.statusDbManager.StatusResource()
	managedNamespace.run = True
	managedNamespace.serverRun = True

	server_process = multiprocessing.Process(target=serverProcess, args=(managedNamespace,))
	if "debug" in sys.argv:
		log.info("Not starting scheduler due to debug mode!")
		sched = None
	else:
		sched = BackgroundScheduler({
				'apscheduler.jobstores.default': {
					'type': 'memory'
				},
				'apscheduler.executors.default': {
					'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
					'max_workers'                              : 5
				},
				'apscheduler.job_defaults.coalesce'            : True,
				'apscheduler.job_defaults.max_instances'       : 1,
				'apscheduler.job_defaults.misfire_grace_time'  : 60 * 60 * 2,
			})


		logging.getLogger('apscheduler').setLevel(logging.DEBUG)
		sched.add_listener(job_evt_listener,
				apscheduler.events.EVENT_JOB_EXECUTED |
				apscheduler.events.EVENT_JOB_ERROR    |
				apscheduler.events.EVENT_JOB_MISSED   |
				apscheduler.events.EVENT_JOB_MAX_INSTANCES
			)
		scheduleJobs(sched, managedNamespace)
		sched.start()
		log.info("Scheduler is running!")

	log.info("Launching server process")
	server_process.start()
	loopCtr = 0

	log.info("Entering idle loop.")
	while managedNamespace.run:
		time.sleep(0.1)
		# if loopCtr % 100 == 0:
		# 	for job in sched.get_jobs():
		# 		print("Job: ", job.name, job.next_run_time.timestamp())
		# 		# statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
		loopCtr += 1

	if sched:
		sched.shutdown()
	log.info("Joining on web thread.")
	server_process.join()
Example #12
def schedule():
    scheduler = BackgroundScheduler()
    scheduler.add_job(process, 'interval', seconds=120)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))
    try: # This is here to simulate application activity (which keeps the main thread alive).
        while True: time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown() # Not strictly necessary if daemonic mode is enabled but should be done if possible
Example #13
def addJobToScheduler():
	timeArgv = int(sys.argv[3])
	sc = BackgroundScheduler()
	sc.add_job(backUpFile, 'interval', seconds=timeArgv)		
	sc.start()
	try:
		while True:
			time.sleep(2)
	except(KeyboardInterrupt, SystemExit):
		sc.shutdown()
Example #14
def main():
	logging.basicConfig()
	sys.setrecursionlimit(1500)

	scheduler = BackgroundScheduler()
	scheduler.add_job(url_scraper.scrapeurl, 'interval', hours=8)
	scheduler.start()
	try:
		while True:
			time.sleep(10)
	except (KeyboardInterrupt, SystemExit):
		scheduler.shutdown()
Example #15
File: __init__.py Project: cryptk/opsy
 def update_monitoring_cache():
     """Update the monitoring cache."""
     from apscheduler.schedulers.background import BackgroundScheduler
     scheduler = BackgroundScheduler()
     self.register_scheduler_jobs(current_app, run_once=True)
     for args, kwargs in current_app.jobs:
         scheduler.add_job(*args, **kwargs)
     scheduler.start()
     while len(scheduler.get_jobs()) > 0:
         continue
     scheduler.shutdown(wait=True)
Example #16
def scheduler_scan_all_article():
    print "scheduler_scan_all_article is start!"
    #定时任务启动
    scheduler = BackgroundScheduler()
    scheduler.add_job(scan_all_article, 'cron', hour='0-23', minute=50)
    scheduler.start()
    
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()  
Example #17
class Scheduler:
    def __init__(self):
        self.jobs = {}
        self.scheduler = BackgroundScheduler({
            'apscheduler.job_defaults.coalesce': 'true',
        })


    def addJob(self, policy, job_function, job_name):
        # Check that a job with the same name has not already been registered
        if job_name in self.jobs:
            raise Exception('A job named "' + job_name + '" has already been scheduled')
        # Get the period trigger to use
        trigger = self._getTrigger(policy)
        # Add the job to the scheduler
        j = self.scheduler.add_job(job_function, name=job_name, max_instances=1, trigger=trigger)
        getLogger(__name__).debug('Job "' + job_name + '" has been added to scheduler, it has the id ' + j.id + '. It is scheduled every ' + policy)
        # Store the job id so that we can update it if needed
        self.jobs[job_name] = j.id


    def rescheduleJob(self, policy, job_name):
        # Check that a job with job_name is actually registered
        if job_name not in self.jobs:
            raise Exception('Job named "' + job_name + '" cannot be rescheduled because it is not registered in the scheduler')
        # Get the period trigger to use
        trigger = self._getTrigger(policy)
        # Reschedule the job with the new trigger
        getLogger(__name__).debug('Reschedule job "' + job_name + '" having id ' + self.jobs.get(job_name))
        self.scheduler.reschedule_job(self.jobs.get(job_name), trigger=trigger)


    def start(self):
        self.scheduler.start()
        getLogger(__name__).info('Start scheduler')


    def stop(self):
        self.scheduler.shutdown()
        getLogger(__name__).info('Stop scheduler')


    def _getTrigger(self, policy):
        comp = re.compile("^([0-9]*)([smhd])?$")
        match = comp.match(policy)
        policy = match.groups()
        if (policy[0] == '' or policy[0] == None):
            raise Exception ("The periodicity of your task is not well defined.")
        else:
            period = int(policy[0])
        return IntervalTrigger(seconds=period)
Example #18
def restart_schedule():

    # After the Flask instance restarts, the original scheduler process no longer exists, so there is nothing to shut down.
    # If the application is already up and the scheduler has already been initialized, it does need to be shut down.

    scheduler = BackgroundScheduler()

    try:

        scheduler.shutdown(wait=False)

    except Exception:

        print("Scheduler is not running")
Example #19
class Schedule(object):
    def __init__(self):
        self.scheduler = BackgroundScheduler()

    def tick(self):
        pass

    def start(self):
        self.scheduler.add_job(self.tick, 'interval', seconds=3*60*60)
        self.scheduler.start()
        atexit.register(lambda: self.shutdown())

    def shutdown(self):
        self.scheduler.shutdown(wait=False)
Example #20
 def run(self):
     """Run watcher"""
     self.logger.info("Running watcher ...")
     scheduler = BackgroundScheduler()
     scheduler.add_job(self.watching, "interval", seconds=self.config["interval"])
     scheduler.start()
     try:
         # This is here to simulate application activity (which keeps the main thread alive).
         while self._running:
             time.sleep(2)
         scheduler.shutdown()
     except (KeyboardInterrupt, SystemExit):
         # Not strictly necessary if daemonic mode is enabled but should be done if possible
         scheduler.shutdown()
Example #21
class JobMonitor(telepot.helper.Monitor):
  def __init__(self, seed_tuple, server, db):
    super(JobMonitor, self).__init__(seed_tuple, capture=[{'_': lambda msg: True}])
    self.server = server 
    self.db = db
    self.logger = logging.getLogger('torrentbot')
    self.sched = BackgroundScheduler()
    self.sched.start()
    self.sched.add_job(self.torrentMonitor, 'interval', minutes=3)

    self.logger.debug('jobmonitor logger init ...')

  def on_chat_message(self, msg): 
    pass

  def on_callback_query(self, msg): 
    pass

  def on_close(self, e):
    self.logger.debug('jobmonitor will shutdown')
    self.shutdown()
   
  def shutdown(self):
    self.sched.shutdown()

  def torrentMonitor(self):
    self.logger.debug('========== DB ==========')
    fromDB = self.db.uncompleted() 
    self.logger.debug(fromDB)

    self.logger.debug('========== Server ==========')
    fromServer = self.server.completed() 
    self.logger.debug(fromServer)

    # extract complete torrents
    self.logger.debug('========== updateList ==========')
    updateList = []
    for dbt in fromDB:
      for st in fromServer:
        if dbt['id'] == st['id']:
          updateList.append({'id':dbt['id'], 'chat_id':dbt['chat_id'], 
            'title': st['title']}) 

    self.logger.debug(updateList) 

    for t in updateList:
      self.bot.sendMessage(t['chat_id'], 'downloaded\n' + t['title'])
      # self.db.completeTorrent(t['chat_id'], t['id'])
      self.server.delete(t['id'])
      self.db.deleteTorrent(t['chat_id'], t['id'])
Example #22
 def start_schedule(self):
     sched = BackgroundScheduler()
     job = sched.add_job(self.do_post, 'date', run_date=chun.next_sunrise)
     sched.add_listener(self.job_executed_listener, EVENT_JOB_EXECUTED)
     sched.start()
     try:
         while True:
             self.running = True
             while self.running:
                 time.sleep(10)
             job.remove()
             job = sched.add_job(self.do_post, 'date', run_date=self.next_sunrise, misfire_grace_time=120)
     except (KeyboardInterrupt, SystemExit):
         sched.shutdown()
Example #23
def begin():
    init()
    run_robot()
    scheduler = BackgroundScheduler()
    scheduler.add_job(run_360, 'interval', seconds=1800)
    scheduler.add_job(run_wooyun, 'interval', seconds=300)
    scheduler.add_job(run_freebuf, 'interval', seconds=3600)
    scheduler.add_job(run_time_report, 'interval', seconds=43200)
    scheduler.start()
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()  # Not strictly necessary if daemonic mode is enabled but should be done if possible
Example #24
def main():
    scheduler = BackgroundScheduler()
    scheduler.add_job(onOpen, 'cron', hour=9, minute=29,
                      misfire_grace_time=10, coalesce=True)
    scheduler.add_job(onClose, 'cron', hour=14, minute=58,
                      misfire_grace_time=10, coalesce=True)

    scheduler.start()

    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #25
    def handle(self, *args, **options):
        source_id = options['source_id']

        # start BackgroundScheduler and add an interval job that gets ran every 30 minutes
        scheduler = BackgroundScheduler()
        scheduler.start()
        scheduler.add_job(self.job, 'interval', id='scraper', minutes=30,
                          kwargs={'source_id': source_id})

        try:
            # this keeps the thread alive
            while True:
                sleep(1)
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()
Example #26
    def __init__(self, profile):
        self._logger = logging.getLogger(__name__)
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = []
        
        self._logger.debug('Initializing Notifier...')

        if 'gmail_address' in profile and 'gmail_password' in profile:
            self.notifiers.append(self.NotificationClient(
                self.handleEmailNotifications, None))
        else:
            self._logger.warning('gmail_address or gmail_password not set ' +
                                 'in profile, Gmail notifier will not be used')

        if 'ssh_auth_log' in profile:
            self.notifiers.append(self.NotificationClient(
                    self.handleSSHAuthNotifications, None))
        else:
            self._logger.warning('ssh_auth_log not set, ' +
                                 'SSH login notifier will not be used')

        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        sched = BackgroundScheduler(timezone="UTC", job_defaults=job_defaults)
        sched.start()
        sched.add_job(self.gather, 'interval', seconds=30)
        atexit.register(lambda: sched.shutdown(wait=False))
        
        # put the scheduler in Notifier object for reference
        self._sched = sched
Example #27
File: __init__.py Project: c2corg/v6_api
def configure_scheduler_from_config(settings):
    scheduler = BackgroundScheduler()
    scheduler.start()

    # run `purge_account` job at 0:00
    scheduler.add_job(
        purge_account,
        id='purge_account',
        name='Purge accounts which were not activated',
        trigger='cron',
        hour=0,
        minute=0
    )

    # run `purge_token` job at 0:30
    scheduler.add_job(
        purge_token,
        id='purge_token',
        name='Purge expired tokens',
        trigger='cron',
        hour=0,
        minute=30
    )

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)

    atexit.register(lambda: scheduler.shutdown())
Example #28
def test4():
    """Run a job on a schedule, then shut down the scheduler."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    scheduler.add_job(my_job, 'interval', args=('123',), seconds=1, id='my_job_id')
    # run my_job every second; args are the arguments passed to my_job; id is optional

    scheduler.start()  # start() does not block: execution continues here while the job keeps running in the background
    print('reached this point 1')

    # By default the scheduler waits for all running jobs to finish before closing the scheduler and job stores.
    # If you do not want to wait, set wait=False.
    time.sleep(1)
    print('shutting down the scheduler and job stores')
    #scheduler.shutdown()
    scheduler.shutdown(wait=False)
    print("formatted list of all jobs: {}".format(scheduler.print_jobs()))  # prints a formatted list of all jobs
Example #29
    def handle(self, **options):
        print("This is a command")
        print('Tick! The time is: %s' % datetime.now())

        #if __name__ == '__main__':
        scheduler = BackgroundScheduler()
        scheduler.add_job(handle, 'interval', seconds=3)
        scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

        try:
            # This is here to simulate application activity (which keeps the main thread alive).
            while True:
                time.sleep(10)
        except (KeyboardInterrupt, SystemExit):
            scheduler.shutdown()  # Not strictly necessary if daemonic mode is enabled but should be done if possible
Example #30
class Greeter(pykka.ThreadingActor):
	'''Greeter'''

	def __init__(self, greeting='Hi there!'):
		super(Greeter, self).__init__()
		self.printer = Printer()
		self.greeting = greeting
		self.sched = BackgroundScheduler()
		self.job = self.sched.add_job(self.printer.display, 'interval', minutes=0.01)
		self.sched.start()

	def on_receive(self, message):
		if (message['msg']=='Stop!'):
			self.sched.shutdown()
			self.stop()
		else:
			self.printer.set_mesg(message['msg'])
Example #31
s = Scheduler()

def output():
    for i in 'hello':
        sleep(0.4)
        print(i)


s.add_job(output, 'interval', seconds=3)
s.start()
print('started')

sleep(10)


print('trying to shutdown')
#s.shutdown(wait=False)
#s.shutdown(wait=True)
s.shutdown()
print('shutdown complete')

#sleep(5)

# Conclusion:
# shutdown() never forcibly interrupts running job threads, whether or not wait is set.
# If jobs are still executing when shutdown() is called:
# - wait=True: shutdown blocks until the running jobs finish before the rest of the program continues.
#   Like a restaurant closing up: "We're closing! Finish your meal and head out; I'll wait for you to finish, lock up, and then go pick up my kid."
# - wait=False: shutdown tells the job threads "close up behind you, I'm off to do other things."
#   Like: "Leave when you're done eating, I'm not waiting for you, I have to go pick up my kid!"
# wait defaults to True.
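A minimal, self-contained sketch (not from any of the projects above) that makes the wait semantics described in the conclusion observable; the job name and timings are illustrative only:

from time import sleep, time
from apscheduler.schedulers.background import BackgroundScheduler

def slow_job():
    # simulate a long-running task that is still executing when shutdown() is called
    sleep(3)

sched = BackgroundScheduler()
sched.add_job(slow_job, 'interval', seconds=1)
sched.start()
sleep(1.5)  # give one run of slow_job time to start

t0 = time()
sched.shutdown(wait=True)   # blocks roughly 2.5s until slow_job finishes; wait=False returns immediately
print('shutdown took %.1fs' % (time() - t0))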
Example #32
conn = conn_mysqldb()
t = 0
def insertFunc():
    global t
    cursor = conn.cursor()
    t+=1
    sql = "Insert into map_info values(%d,'TEST_%d','%d','%d')"%(0,t,t*10,t*100)
    cursor.execute(sql)
    conn.commit()
    cursor.close()
    print("Commit Complete, TEST_%d %d %d"%(t,t*10,t*100))

sched.add_job(insertFunc, "cron", minute=15)

try:
    while True:
        print("Running Main Process..")
        time.sleep(1)

except:
    sched.shutdown()

"""
이 스케쥴러는 실시간 전력 데이터를 받아와서 DB에 저장하는 스케쥴러
*  1시간마다 실시간 전력 데이터를 어딘가로부터 받아와 우리 DB 서버에 저장


다른 스케쥴러는 Tensorflow Serving 용으로 만들어야함
* Tensorflow Serving 에서 사용량을 보냄 --> 예측 --> 받아서 저장 (24시간)
"""
Example #33
class Controller(object):
    """The Controller in a MVC model"""
    def __init__(self,
                 db,
                 bottle_ip='127.0.0.1',
                 pigpio_host="",
                 pigpio_port=8888):
        """- Create the controller, its Viewer and connect to database (Model)
           - Select all the hardware (sensors/switches) for the systems under its control
           - Launch the scheduler
           - host:port is used to connect to the pigpio server running on the Raspberry Pi.
           Need to execute 'sudo pigpiod' to get that daemon running if it is not automatically started at boot time

           :param db: instance of a Ponicwatch_Db
           :param bottle_ip: IP to reach the webpages. Important to set properly for remote access.
        """
        global _simulation  # if no PGIO port as we are not running on a Raspberry Pi
        self.debug = DEBUG
        self.bottle_ip = bottle_ip
        # keep a link to the database i.e. M in MVC
        self.db = db
        self.db.allow_close = False
        # finding the Controller User entry --> currently 'hard coded' as 'ctrl'/'passwd' --> to improve later
        self.user = User(self, id=1)
        self.name = self.user[
            "name"]  # this name is used to identify the log messages posted by this controller

        # opening the LOGger with the debug level for this application run
        self.log = Ponicwatch_Log(controller=self, debug=DEBUG)

        # Create the background scheduler that will execute the actions (using the APScheduler library)
        self.scheduler = BackgroundScheduler()

        # select all the systems, sensors, switchs to monitor and the necessary hardware drivers
        self.pig = pigpio.pi(
            pigpio_host, pigpio_port) if not _simulation else pigpio_simu.pi()
        if not self.pig.connected:
            if self.debug >= 2: print("WARNING: not connected to a RasPi")
            self.pig = pigpio_simu.pi()
            _simulation = True
        # some plural are "fake" to respect the logic of: <cls name> + 's' --> dictionary of <cls> PonicWatch Object
        self.systems, self.sensors, self.switchs, self.hardwares, self.interrupts = {}, {}, {}, {}, {}
        # system_id <= 0:  inactive link --> ignore this row
        self.db.curs.execute(
            "SELECT * from tb_link where system_id > 0 order by system_id desc, order_for_creation"
        )
        self.links = self.db.curs.fetchall()
        for system_id, sensor_id, switch_id, hardware_id, order_for_creation, interrupt_id in self.links:
            # (1) create all necessary objects
            # (2) and register the system and hardware to a sensor/switch
            if system_id not in self.systems:
                self.systems[system_id] = System(self, id=system_id)
            if hardware_id and hardware_id not in self.hardwares:
                self.hardwares[hardware_id] = Hardware(
                    controller=self,
                    id=hardware_id,
                    system_name=self.systems[system_id]["name"])
            if sensor_id and sensor_id not in self.sensors:
                self.sensors[sensor_id] = Sensor(
                    controller=self,
                    id=sensor_id,
                    system_name=self.systems[system_id]["name"],
                    hardware=self.hardwares[hardware_id])
            if switch_id and switch_id not in self.switchs:
                self.switchs[switch_id] = Switch(
                    controller=self,
                    id=switch_id,
                    system_name=self.systems[system_id]["name"],
                    hardware=self.hardwares[hardware_id])

            if interrupt_id and interrupt_id not in self.interrupts:
                self.interrupts[interrupt_id] = Interrupt(
                    controller=self,
                    id=interrupt_id,
                    system_name=self.systems[system_id]["name"],
                    hardware=self.hardwares[hardware_id])
        self.db.allow_close = True
        self.db.close()

    def add_cron_job(self, callback, timer):
        """
        Add a new scheduled task
        :param callback:
        :param timer:  a string '* * * * * *' OR a JSON object as { 't': ['* * * * * *', t2, t3...]}
        :return:
        """
        if timer[0] == '{':
            # convert the JSON timer string to a python dictionary
            try:
                cron_times = json.loads(timer)
                cron_times = cron_times['t']
            except:  # json.JSONDecodeError:
                print("Alarm: timer is not a JSON string!", timer)
        else:
            cron_times = [timer]
        i = 0.0
        for cron_time in cron_times:
            # When do we need to read the sensor or activate a switch?
            # ┌───────────── sec (0 - 59)
            # | ┌───────────── min (0 - 59)
            # | │ ┌────────────── hour (0 - 23)
            # | │ │ ┌─────────────── day of month (1 - 31)
            # | │ │ │ ┌──────────────── month (1 - 12)
            # | │ │ │ │ ┌───────────────── day of week (0 - 6) (0 to 6 are Sunday to
            # | │ │ │ │ │                  Saturday, or use names; 7 is also Sunday)
            # | │ │ │ │ │
            # | │ │ │ │ │
            # * * * * * *
            _sec, _min, _hrs, _dom, _mon, _dow = cron_time.split(
            )  # like "*/5 * * * * *" --> every 5 seconds
            self.scheduler.add_job(callback,
                                   'cron',
                                   second=_sec,
                                   minute=_min,
                                   hour=_hrs,
                                   day=_dom,
                                   month=_mon,
                                   day_of_week=_dow)
            self.log.add_log(log_type='SCHEDULER',
                             system_name='@startup',
                             param={
                                 'error_code': 0,
                                 'text_value': cron_time,
                                 'float_value': i
                             })
            i += 1.0

    def run(self):
        """Starts the APScheduler task and the Bottle HTTP server"""
        self.running = True
        with open("ponicwatch.pid", "wt") as fpid:
            print(os.getpid(), file=fpid)

        def stop_handler(signum, frame):
            """ allow:   kill -10 `cat ponicwatch.pid`   """
            self.stop()
            sys.exit()

        signal.signal(signal.SIGUSR1, stop_handler)

        self.scheduler.start()
        self.log.add_info("Controller {} is now running.".format(__version__),
                          fval=1.0)
        # http_view.controller = self
        try:
            http_view.run(host=self.bottle_ip)
        except (KeyboardInterrupt, SystemExit):
            pass
        finally:
            self.stop()

    def stop(self, from_bottle=False):
        try:
            self.scheduler.shutdown(
            )  # Not strictly necessary if daemonic mode is enabled but should be done if possible
        except SchedulerNotRunningError:
            pass
        for hw in self.hardwares.values():
            hw.cleanup()
        self.log.add_info(
            "Controller {} has been stopped.".format(__version__), fval=0.0)
        if not from_bottle: bottle_stop()

    # if_expression manipulation to provide conditional execution to PWO based on their 'if' string
    def make_expression(self, submitted_by, if_expression):
        """Replace the Sensor/Switch/Hardware reference to its value
        Error will be caught by the calling function: SyntaxError, ValueError, NameError
        :param submitted_by: the pwo requesting the 'if_expression' evaluation (needed for logging in case of error)
        :param if_expression: string or tuple from the 'init' dictionary under the key 'if'
        :return result: python evaluation of the input string with all pwo references have been replaced by their value
        """
        if isinstance(if_expression, str):
            # expects a string starting by a pwo reference and then a boolean test
            # ex: "Sensor[2]>=40.0" or "Switch[1]==0"
            pwo_cls, _ = if_expression.split('[', 1)
            id, test = _.split(']', 1)
            pwo = self.get_pwo(pwo_cls, id)
            try:
                _expression = str(
                    pwo.value
                ) + test  # if the direct read is possible then use it
            except AttributeError:
                _expression = str(
                    pwo["value"]) + test  # else take the latest read value
        elif isinstance(if_expression, list):
            # expects list: format string followed by the pwo references or keyword 'now'
            # example: [ "{}>10. and {}==1", "Sensor[1]", "Switch[2]" ]
            _format, pwo_values = if_expression[0], []
            for pwo_ref in if_expression[1:]:
                if pwo_ref.lower() == "now":
                    pwo_values.append(
                        datetime.now()
                    )  # expects {:%H} for hour --> "8<={:%H}<=20".format(datetime.now())
                else:
                    pwo_cls, id = pwo_ref.split('[', 1)
                    pwo = self.get_pwo(pwo_cls, id[:-1])  # drop the last ']'
                    try:
                        pwo_values.append(pwo.value)
                    except AttributeError:
                        pwo_values.append(pwo["value"])
            _expression = _format.format(*pwo_values)
        else:
            msg = "Error! Unknown if_expression type: {} for {}".format(
                type(if_expression), submitted_by)
            self.log.add_error(msg=msg, err_code=submitted_by["id"], fval=-2.6)
            _expression = None
        return _expression

    def eval_expression(self,
                        submitted_by,
                        if_expression,
                        make_expression=None):
        """evaluate the expression found in the init_dit['if'] of a PWO"""
        try:
            _expression = make_expression or self.make_expression(
                submitted_by, if_expression)
            result = eval(_expression)
        except (SyntaxError, NameError, ValueError) as err:
            self.log.add_error(
                msg="if_expression {} cannot be evaluated: {} for {}".format(
                    if_expression, err, submitted_by),
                err_code=submitted_by["id"],
                fval=-2.7)
            result = None

        if self.debug >= 3:
            print("eval({}) == {}".format(_expression, result))
        return result

    ### helper functions
    def get_pwo(self, cls, id):
        """return a PonicWatch Object from the controller's PWO dictionary
        :param cls: either a string as class name or an PW object
        :param id: the pwo's id, must be integer as pwo dictionary key
        :return pwo: the matching pwo or None
        """
        pwo_dict_name = (cls if isinstance(cls, str) else
                         cls.__class__.__name__).lower() + 's'
        pwo_dict = getattr(self, pwo_dict_name)
        pwo = None
        try:
            pwo = pwo_dict[int(id)]
        except KeyError:
            msg = "No pwo id {} found in {}".format(id, pwo_dict_name)
            if self.debug >= 3:
                print(msg)
            self.log.add_error(msg=msg, err_code=id, fval=-1.1)
        return pwo

    def print_list(self):
        """CLI: Print the list of all created objects in the __init__ phase"""
        print("--- System ---")
        for k, v in self.systems.items():
            print(k, v)
        print("--- Hardware ---")
        for k, v in self.hardwares.items():
            print(k, v, v["init"])
        print("--- Switches ---")
        for k, v in self.switchs.items():
            print(k, v, v["init"])
        print("--- Sensors ---")
        for k, v in self.sensors.items():
            print(k, v, v["init"])
        print("--- Interruptions ---")
        for k, v in self.interrupts.items():
            print(k, v)
        print("--- Links ---")
        for system_id, sensor_id, switch_id, hardware_id, order_for_creation, interrupt_id in self.links:
            print("system_id",
                  system_id or '-',
                  "hardware_id",
                  hardware_id or '-',
                  "switch_id",
                  switch_id or '-',
                  "sensor_id",
                  sensor_id or '-',
                  "interrupt_id",
                  interrupt_id or '-',
                  sep='\t')

    def ponicwatch_notification(self):
        """
        Regular email to inform the system manager of its status
        :return:
        """
        images = []  # list of .png to attach to the email
        html = http_default()  # email's body
        objects = []  # working list of all Ponicwatch objects (pwo)
        for s in self.systems.values():
            objects.append(s)
        for s in self.switchs.values():
            objects.append(s)
        for s in self.sensors.values():
            objects.append(s)
        for pwo in objects:
            html += one_pw_object_html(pwo, only_html=True)
            if os.path.isfile(get_image_file(pwo)):
                images.append(get_image_file(pwo))
        # print(html)
        send_email("Ponicwatch Notification - %s" %
                   ",".join([s["name"] for s in self.systems.values()]),
                   from_=self.user["email"],
                   to_=[
                       "*****@*****.**",
                   ],
                   message_HTML=html,
                   images=images,
                   login=self.user["email"],
                   passwd=self.user["password"])
Example #34
class AutoExportingCounters(object):
    """
    A wrapper around collections.Counter that adds periodic backup.

    NOTE: Not to be confused with remote_datatypes. Counter which wraps a live
          redis counter. This counter is save only, and only offers weak durability.
          This is appropriate for monitoring and performance measurements, not
          for operational counters that require strict semantics.

    At the specified interval and program exit, the value in the counters will be
    sent to the provided channel.
    """

    def __init__(self,
                 name,
                 host=None,
                 export_interval_secs=None,
                 counter_type=None,
                 config=None,
                 redis=None,
                 counter_names=None,
                 timer_names=None):
        config = config or forge.get_config()
        self.channel = forge.get_metrics_sink(redis)
        self.export_interval = export_interval_secs or config.core.metrics.export_interval
        self.name = name
        self.host = host or get_random_id()
        self.type = counter_type or name

        self.counter_schema = set(counter_names)
        self.timer_schema = set(timer_names)

        self.counts = None
        self.lock = threading.Lock()
        self.scheduler = None
        self.reset()

        assert self.channel
        assert(self.export_interval > 0)

    # noinspection PyUnresolvedReferences
    def start(self):
        from apscheduler.schedulers.background import BackgroundScheduler
        import atexit

        self.scheduler = BackgroundScheduler(daemon=True)
        self.scheduler.add_job(self.export, 'interval', seconds=self.export_interval)
        self.scheduler.start()

        atexit.register(lambda: self.stop())

    def reset(self):
        with self.lock:
            old, self.counts = self.counts, Counters({key: 0 for key in self.counter_schema})
            self.counts.update({key + '.t': 0 for key in self.timer_schema})
            self.counts.update({key + '.c': 0 for key in self.timer_schema})
            self.counts['type'] = self.type
            self.counts['name'] = self.name
            self.counts['host'] = self.host

        return old

    def stop(self):
        if self.scheduler:
            self.scheduler.shutdown(wait=False)
            self.scheduler = None
        self.export()

    def export(self):
        try:
            # To avoid blocking increments on the redis operation,
            # we only hold the lock long enough to make a copy.
            thread_copy = dict(self.reset().items())
            self.channel.publish(thread_copy)
            log.debug(f"{pprint.pformat(thread_copy)}")

            return thread_copy
        except Exception:
            log.exception("Exporting counters")

    def increment(self, name, increment_by=1):
        try:
            if name not in self.counter_schema:
                raise ValueError(f"{name} is not an accepted counter for this module: f{self.counter_schema}")
            with self.lock:
                self.counts[name] += increment_by
                return increment_by
        except Exception:  # Don't let increment fail anything.
            log.exception("Incrementing counter")
            return 0

    def increment_execution_time(self, name, execution_time):
        try:
            if name not in self.timer_schema:
                raise ValueError(f"{name} is not an accepted counter for this module: f{self.timer_schema}")
            with self.lock:
                self.counts[name + ".c"] += 1
                self.counts[name + ".t"] += execution_time
                return execution_time
        except Exception:  # Don't let increment fail anything.
            log.exception("Incrementing counter")
            return 0
Example #35
class HttpServer(flask.Flask):
    """Our HTTP/API server."""

    EXECUTORS = {
        'default': ThreadPoolExecutor(20),
        'processpool': ProcessPoolExecutor(5)
    }

    def __init__(self, name, ip, port, *args, **kwargs):
        """Constructor.

        Args:
            name:  (str) name of Flask service
            ip:  (str) IP address to bind HTTP server
            port:  (int) TCP port for HTTP server to listen
        """
        super(HttpServer, self).__init__(name, *args, **kwargs)
        # Fixup the root path for Flask so it can find templates/*
        root_path = os.path.abspath(os.path.dirname(__file__))
        logging.debug('Setting root_path for Flask: %s', root_path)
        self.root_path = root_path
        self.targets = config.CollectorConfig()
        self.ip = ip
        self.port = port
        self.start_time = time.time()
        self.setup_time = 0
        self.scheduler = BackgroundScheduler(daemon=True,
                                             executors=self.EXECUTORS)
        self.collection = None
        self.add_url_rule('/', 'index', self.index_handler)
        self.add_url_rule('/status', 'status', self.status_handler)
        self.add_url_rule('/latency', 'latency', self.latency_handler)
        self.add_url_rule('/influxdata', 'influxdata', self.influxdata_handler)
        self.add_url_rule('/quitquit', 'quitquit', self.shutdown_handler)
        logging.info('Starting Llama Collector, version %s', __version__)

    def configure(self, filepath):
        """Configure the Collector from file.

        Args:
            filepath: (str) where the configuration is located
        """
        self.targets.load(filepath)

    def status_handler(self):
        return flask.Response('ok', mimetype='text/plain')

    def index_handler(self):
        return flask.render_template(
            'index.html',
            targets=self.targets.targets,
            interval=self.interval,
            start_time=self.start_time,
            setup_time=self.setup_time,
            uptime=humanfriendly.format_timespan(time.time() -
                                                 self.start_time))

    def latency_handler(self):
        data = json.dumps(self.collection.stats, indent=4)
        return flask.Response(data, mimetype='application/json')

    def influxdata_handler(self):
        data = json.dumps(self.collection.stats_influx, indent=4)
        return flask.Response(data, mimetype='application/json')

    def shutdown_handler(self):
        """Shuts down the running web server and other things."""
        logging.warn('/quitquit request, attempting to shutdown server...')
        self.scheduler.shutdown(wait=False)
        fn = flask.request.environ.get('werkzeug.server.shutdown')
        if not fn:
            raise Error('Werkzeug (Flask) server NOT running.')
        fn()
        return '<pre>Quitting...</pre>'

    def run(self,
            interval,
            count,
            use_udp=False,
            dst_port=util.DEFAULT_DST_PORT,
            timeout=util.DEFAULT_TIMEOUT,
            *args,
            **kwargs):
        """Start all the polling and run the HttpServer.

        Args:
            interval:  seconds between each poll
            count:  count of datagram to send each responder per interval
            use_udp:   utilize UDP probes for testing
            dst_port:  port to use for testing (only UDP)
            timeout:  how long to wait for probes to return
        """
        self.interval = interval
        self.scheduler.start()
        self.collection = Collection(self.targets, use_udp)
        self.scheduler.add_job(self.collection.collect,
                               'interval',
                               seconds=interval,
                               args=[count, dst_port, timeout])
        super(HttpServer, self).run(host=self.ip,
                                    port=self.port,
                                    threaded=True,
                                    *args,
                                    **kwargs)
        self.setup_time = round(time.time() - self.start_time, 0)
Example #36
class JobManager:
    """
    Central overseer that manages the measurement jobs.
    """

    _instance: Optional['JobManager'] = None

    @classmethod
    def initialize( cls, datadir: Path ) -> 'JobManager':

        if cls._instance is None:
            cls._instance = JobManager( datadir )
        return cls._instance

    @classmethod
    def get_instance( cls ) -> 'JobManager':

        if cls._instance is None:
            raise RuntimeError( "Attempted to obtain manager before initializing it." )
        return cls._instance

    @classmethod
    def run_job( cls, job: Job ) -> None:
        """
        Executes the specified job once.

        :param job: The job to execute.
        """

        _LOGGER.debug( "Running job '%s'.", job.id )

        timestamp = datetime.now( pytz.utc )
        try:
            output = speedtest.run_test( server_id = job.server_id, server_name = job.server_name )
            result = {
                'success': True,
                'time': timestamp.isoformat(),
                'result': output
            }
        except speedtest.TestError as e:
            _LOGGER.exception( "Test could not be completed." )
            result = {
                'success': False,
                'timestamp': timestamp.isoformat(),
                'error': str( e ),
                'stdout': e.stdout,
                'stderr': e.stderr,
            }

        with open( cls.get_instance().output_file( job ), 'a' ) as f:
            f.write( json.dumps( result ) )
            f.write( ',\n' ) # Line break to make it slightly more readable

        _LOGGER.debug( "Finished running job '%s'.", job.id )

    def __init__( self, datadir: Path ):
        """
        Initializes a new manager that uses the specified directory to store data.

        :param datadir: The path of the directory where data should be stored.
        """

        _LOGGER.debug( "Initializing manager." )
        try:
            _LOGGER.info( "Using %s", speedtest.get_version() ) # Also implicitly check installed
        except speedtest.TestError:
            _LOGGER.exception( "Obtaining Speedtest CLI version caused an error." )
            _LOGGER.critical( "The Speedtest CLI could not accessed. Is it installed in this system?" )
            sys.exit( 1 )

        database_path = datadir / 'jobs.db'

        self.storage = datadir / 'results'
        self.storage.mkdir( mode = 0o770, exist_ok = True )

        self.engine = create_engine( f'sqlite:///{database_path}' )
        Base.metadata.create_all( self.engine )
        self.Session = orm.sessionmaker( bind = self.engine )

        jobstores = {
            'default': SQLAlchemyJobStore( engine = self.engine )
        }
        executors = {
            'default': ThreadPoolExecutor( 1 )
        }
        job_defaults = {
            'coalesce': True,
            'max_instances': 1,
            'misfire_grace_time': 5 * 60, # Can be up to 5 minutes late
        }
        self.scheduler = BackgroundScheduler( jobstores = jobstores, executors = executors, job_defaults = job_defaults, timezone = pytz.utc )
        self.scheduler.add_listener( self.job_stopped, mask = events.EVENT_JOB_REMOVED )

        _LOGGER.debug( "Manager initialized." )

    def start( self ):
        """
        Starts processing jobs.
        """

        _LOGGER.info( "Manager starting." )
        self.scheduler.start()
        _LOGGER.debug( "Manager started." )

    def shutdown( self, wait: bool = True ):
        """
        Shuts down the manager, stopping job processing.

        :param wait: If True, waits for all currently executing jobs to finish before returning.
        """

        _LOGGER.info( "Manager stopping." )
        self.scheduler.shutdown( wait = wait )
        _LOGGER.debug( "Manager stopped." )

    def job_stopped( self, event: events.JobEvent ) -> None:

        id: str = event.job_id
        with self.transaction() as session:
            job: JobMetadata = session.query( JobMetadata ).filter_by( id = id ).first()
            job.running = False

    def output_file( self, job: Job ) -> Path:
        """
        Determines the path to the output file of the job identified by the given ID.

        :param job: The job to get the path for.
        :return: The path of the output file for the given job.
        """

        return self.storage / f'{job.id}.result' # Not really proper JSON

    def load_results( self, job: Job ) -> Sequence[JSONData]:
        """
        Loads the results obtained so far for the given job.

        :param job: The job to load results for.
        :return: The results of the given job, as a list of JSON objects.
        """

        output_file = self.output_file( job )
        if not output_file.exists():
            return []

        with open( output_file, 'r' ) as f:
            results = f.read()
        results = '[' + results[:-2] + ']' # Remove trailing comma and line break and add brackets

        return json.loads( results )

    @contextmanager
    def transaction( self ) -> orm.Session:
        """
        Provide a transactional scope around a series of operations.
        """

        session: orm.Session = self.Session()
        try:
            yield session
            session.commit()
        except:
            session.rollback()
            raise
        finally:
            session.close()

    def new_job( self, job: Job ) -> None:
        """
        Registers the given job.

        :param job: The job to register.
        :raises IDExistsError: if the ID of the given job is already in use.
        """

        _LOGGER.info( "Registering job '%s'.", job.id )
        _LOGGER.debug( "Job '%s' (%s) has target %d|'%s', starts at %s and ends at %s with interval %s.",
            job.id, job.title, job.server_id, job.server_name, job.start, job.end, job.interval
        )
        with self.transaction() as session:
            try:
                if session.query( JobMetadata ).filter_by( id = job.id ).count() > 0:
                    raise IDExistsError( "There is already metadata for the given ID." )

                new_job = JobMetadata( job )
                session.add( new_job )

                if job.interval:
                    _LOGGER.debug( "Creating an interval-triggered job." )
                    if job.end is not None and job.end < ( now := datetime.now( pytz.utc ) ):
                        raise PastEndError( now, job )
                    trigger = IntervalTrigger( 
                        seconds = int( job.interval.total_seconds() ),
                        start_date = job.start if job.start is not None else datetime.now( pytz.utc ),
                        end_date = job.end
                    )
                else:
                    _LOGGER.debug( "Creating a date-triggered job." )
                    trigger = DateTrigger(
                        run_date = job.start
                    )
Example #37
 def run(self, output_fn, **kwargs):
     '''Process the task queue stored in the database.'''
     # load the MySQL configuration
     from Functions import AppServer
     db_name = AppServer().getConfValue('Databases', 'MysqlDB')
     db_user = AppServer().getConfValue('Databases', 'MysqlUser')
     db_pass = AppServer().getConfValue('Databases', 'MysqlPass')
     db_ip = AppServer().getConfValue('Databases', 'MysqlHost')
     db_port = AppServer().getConfValue('Databases', 'MysqlPort')
     dbconn = 'mysql://%s:%s@%s:%s/%s' % (db_user, db_pass, db_ip,
                                          int(db_port), db_name)
     from MySQL import writeDb
     # try to clear the job records stored in the DB
     try:
         sql = """delete from apscheduler_jobs ;"""
         writeDb(sql, )
     except:
         True
     # dynamically import the task functions
     moduleSrc = 'TaskFunctions'
     dylib = importlib.import_module(moduleSrc)
     # reload the job queue (choose between the two scheduler types as appropriate)
     from apscheduler.schedulers.background import BackgroundScheduler
     from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
     job_defaults = {'max_instances': 1}
     executors = {
         'default': ThreadPoolExecutor(20),
         'processpool': ProcessPoolExecutor(5)
     }
     scheduler = BackgroundScheduler(timezone='Asia/Shanghai',
                                     executors=executors,
                                     job_defaults=job_defaults)
     # sqlite or mysql
     scheduler.add_jobstore('sqlalchemy', url='%s' % dbconn)
     from MySQL import readDb
     sql = """ Select id,timedesc from taskconf where status='1' """
     result = readDb(sql, )
     for taskobject in result:
         Taskid = 'TaskID_%s' % taskobject.get('id')
         FunName = 'TaskFunc_%s' % taskobject.get('id')
         function = getattr(dylib, FunName)
         cronlist = taskobject.get('timedesc').strip().split(' ')
         print(cronlist)
         if len(cronlist) == 5:
             scheduler.add_job(func=function,
                               trigger='cron',
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         elif len(cronlist) == 6:
             scheduler.add_job(func=function,
                               trigger='cron',
                               day_of_week=cronlist[5],
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         else:
             continue
     scheduler.start()
     fd = open(output_fn, 'a')
     try:
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Starting all Tasks...\n' % dtnow
         fd.write(line)
         fd.flush()
         while True:
             time.sleep(1)  # idle without burning CPU; jobs run on the scheduler's threads
     except KeyboardInterrupt:  # catch Ctrl+C; does not take effect in this script, works when run from a console
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Stopping all Tasks, please wait...\n' % dtnow
         fd.write(line)
         fd.close()
         scheduler.shutdown()
         time.sleep(1)
         os._exit(0)
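
The example above maps a space-separated spec stored as "second minute hour day month [day_of_week]" onto the cron trigger's keyword arguments by hand. For conventional five-field crontab strings (minute hour day month day_of_week), APScheduler also provides CronTrigger.from_crontab; a minimal sketch, with the expression and job function being placeholders:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger

def heartbeat():
    print('tick')  # placeholder task

scheduler = BackgroundScheduler(timezone='Asia/Shanghai')
# Equivalent to spelling out minute/hour/day/month/day_of_week individually.
scheduler.add_job(heartbeat, CronTrigger.from_crontab('*/5 * * * *'), id='heartbeat')
scheduler.start()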
Example #38
class TimerPlus(object):

    def __init__(self):
        self._sche = BackgroundScheduler()

    def add_jobs(self, job_container_, **kw):
        """
        define job store
        1. csv: filepath
        2. pgsql: db_cfg, table
        """
        self._job_container = job_container_.lower()

        container_ = [k for (k, v) in JobContainer.items()]

        assert self._job_container in container_, f'job_container_ should be in {container_}'
        for arg in JobContainer[self._job_container]:
            assert arg in kw, f'{arg} should be included.'

        if self._job_container == 'csv':
            assert os.path.exists(
                kw['filepath']), f"{kw['filepath']} does not exist."

            config_dict_ = read_yaml(
                config_name='cfg/csv.yaml', package='jt.timerplus')
            jobs = Utils.read_csv(kw['filepath'], config_dict_.get(
                'import_data'), **config_dict_.get('config'))

        elif self._job_container == 'pgsql':
            _con = PgSQLLoader(kw['db_cfg'])
            _dbtable = kw['table']

            jobs = _con.read(f'''select job_id,job_name,job_type,job_args,
                                trigger,trigger_args,is_trade_date,exchange from {_dbtable}''')

        # loop adding job
        for row in jobs.itertuples():
            assert getattr(
                row, 'job_type') in JOBTYPE, f'job type should be in {JOBTYPE}!'
            if isinstance(getattr(row, 'job_args'), str):
                t_job_args = json.loads(getattr(row, 'job_args'))
            else:
                t_job_args = None

            self._sche.add_job(func=self._distributor,
                               args=[getattr(row, 'job_type'), getattr(row, 'job_name'),
                                     t_job_args, getattr(row, 'is_trade_date'),
                                     getattr(row, 'exchange')],
                               trigger=getattr(row, 'trigger'),
                               id=getattr(row, 'job_id'),
                               **json.loads(getattr(row, 'trigger_args')))

        self._sche.add_listener(
            self.__my_listener, EVENT_JOB_ERROR | EVENT_JOB_MISSED)

    def print_jobs(self):
        self._sche.print_jobs()

    def start(self):
        self._sche.start()

    def shutdown(self):
        self._sche.shutdown()

    def _distributor(self, job_type_=None, job_name_=None, job_args_=None, is_trade_date_='n', exchange_='sz'):
        today_ = datetime2string(datetime.datetime.now(), r'%Y%m%d')

        if is_trade_date_.lower() == 'y':
            if not calendar.is_trading_date(today_, exchange_):
                return

        if job_type_ == 'matlab':
            self.__matlab_executor(job_name_, job_args_)
        elif job_type_ == 'python':
            self.__python_executor(job_name_, job_args_)
        elif job_type_ == 'sqlprocedure':
            self.__sqlprocedure_executor(job_name_, job_args_)
        elif job_type_ == 'cmd':
            pass

    def __matlab_executor(self, job_name_, job_args_):
        """
        execute the user defined matlab function/scripts
        """
        eng = matlab.engine.start_matlab()
        func = getattr(eng, job_name_)
        assert func is not None, f'job_name : {job_name_} is not found!'
        if job_args_ is None:
            func(nargout=0)
        else:
            func(*job_args_, nargout=0)

        eng.quit()

    def __python_executor(self, job_name_, job_args_):
        assert isinstance(job_name_, str), 'job_name_ should be a String!'

        module_name = job_name_[0:job_name_.rfind('.')]
        spec = importlib.util.find_spec(module_name)
        assert spec is not None, f'job_name : {job_name_} is not found!'

        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        
        func_name = job_name_[job_name_.rfind('.')+1:len(job_name_)]
        func = getattr(module, func_name)
        assert func is not None, f'job_name : {job_name_} is not found!'
        assert hasattr(func, '__call__'), f'{job_name_} is not callable!'

        if job_args_ is None:
            func()
        else:
            func(**job_args_)
            

    def __cmd_executor(self, *args, **kw):
        pass

    def __sqlprocedure_executor(self, job_name_, job_args_):
        assert 'db_env' in job_args_, 'db_env should be in job_args'
        pgl = PgSQLLoader(job_args_.get('db_env'))
        pgl.call_procedure(job_name_, job_args_.get(
            'args', []), job_args_.get('proc_has_return', 'n'))

    def __my_listener(self, event):
        """
        add listener for job error and missed
        """
        if event.exception:
            logger.exception('job %s raised an error.', event.job_id)
        else:
            logger.info('job %s missed its run time.', event.job_id)
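
A self-contained sketch of the error/missed listener pattern used by __my_listener above, relying only on the public APScheduler event API (the job function and log wiring are placeholders):

import logging
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_MISSED

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

def on_job_trouble(event):
    # event.exception is set for EVENT_JOB_ERROR; missed runs arrive without it.
    if event.exception:
        log.error('job %s raised: %s', event.job_id, event.exception)
    else:
        log.warning('job %s missed its scheduled run time', event.job_id)

scheduler = BackgroundScheduler()
scheduler.add_listener(on_job_trouble, EVENT_JOB_ERROR | EVENT_JOB_MISSED)
scheduler.start()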
Example #39
def create_app():
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config)
    app.config.from_pyfile('config.py')

    # set logging level
    logging.basicConfig(level=app.config['LOGGING_LEVEL'])

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # register the database commands
    from providentia.db import this as db
    db.init_app(app)

    # apply the blueprints to Providentia
    logging.debug('Applying blueprints to routes.')
    from providentia.views import new_job, dataset, database, benchmark, analysis, classifier, logs, queries, \
        kate, review_trends, city_sentiment, sim1, sim2, sim3

    app.register_blueprint(benchmark.bp, url_prefix='/api/benchmark')
    app.register_blueprint(dataset.bp, url_prefix='/api/dataset')
    app.register_blueprint(database.bp, url_prefix='/api/database')
    app.register_blueprint(analysis.bp, url_prefix='/api/analysis')
    app.register_blueprint(new_job.bp, url_prefix='/api/new-job')
    app.register_blueprint(classifier.bp, url_prefix='/api/classifier')
    app.register_blueprint(logs.bp, url_prefix='/api/logs')
    app.register_blueprint(queries.bp, url_prefix="/api/queries")
    app.register_blueprint(kate.bp, url_prefix="/api/result/kate")
    app.register_blueprint(review_trends.bp,
                           url_prefix="/api/result/review-trends")
    app.register_blueprint(city_sentiment.bp,
                           url_prefix="/api/result/city-sentiment")
    app.register_blueprint(sim1.bp, url_prefix="/api/result/sim1")
    app.register_blueprint(sim2.bp, url_prefix="/api/result/sim2")
    app.register_blueprint(sim3.bp, url_prefix="/api/result/sim3")

    # establish analysis database for this app
    logging.debug('Establishing database connections.')
    from providentia.db import this
    app.teardown_appcontext(this.close_db)

    # restart any incomplete jobs
    from providentia.repository.tbl_benchmark import reset_processing_jobs
    with app.app_context():
        reset_processing_jobs()

    # test connections to benchmark databases
    test_database_connections(app)

    # register CORS
    logging.debug('Registering CORS filter.')
    CORS(app, resources={r"/api/*": {"origins": app.config['CORS_ORIGINS']}})

    # enable scheduler
    logging.debug('Starting background jobs.')
    from apscheduler.schedulers.background import BackgroundScheduler
    from providentia.analysis.periodic_jobs import log_server_state, execute_waiting
    from providentia.classifier import sentiment, fake
    from datetime import datetime, timedelta

    classifier_start_train = datetime.now() + timedelta(0, 10)

    scheduler = BackgroundScheduler()
    scheduler.add_job(func=execute_waiting,
                      id='execute_waiting',
                      trigger='interval',
                      seconds=10)
    scheduler.add_job(func=log_server_state,
                      id='log_server_state',
                      trigger='interval',
                      seconds=1)
    # train the classifier model if it is enabled
    if app.config['ENABLE_SENTIMENT'] is True:
        logging.debug(
            '[SENTIMENT] Checking necessary NLTK resources are installed')
        check_nltk_deps()
        # Check if model exists else train one
        if os.path.exists(
                "./models/naivebayes.pickle") is True and os.path.exists(
                    "./models/features.pickle") is True:
            try:
                sentiment.deserialize_model()
                logging.info("Sentiment classifier ready!")
            except OSError as e:
                logging.error(
                    "Unable to deserialize Naive Bayes model! Creating a new one.",
                    e)
                scheduler.add_job(func=sentiment.train_model,
                                  id='train_sentiment',
                                  trigger='date',
                                  next_run_time=classifier_start_train,
                                  args=[app.config['SENTIMENT_DATA'], app])
        else:
            scheduler.add_job(func=sentiment.train_model,
                              id='train_sentiment',
                              trigger='date',
                              next_run_time=classifier_start_train,
                              args=[app.config['SENTIMENT_DATA'], app])
    filter_apscheduler_logs()
    scheduler.start()
    # shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())

    return app
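
A stripped-down sketch of the same pattern create_app uses above: start a BackgroundScheduler inside the Flask application factory and register its shutdown with atexit (the polled function and interval are placeholders):

import atexit
from flask import Flask
from apscheduler.schedulers.background import BackgroundScheduler

def create_minimal_app():
    app = Flask(__name__)

    def poll_waiting_jobs():  # stand-in for execute_waiting()
        app.logger.info('polling for waiting jobs...')

    scheduler = BackgroundScheduler()
    scheduler.add_job(poll_waiting_jobs, 'interval', seconds=10, id='poll')
    scheduler.start()

    # Stop the scheduler thread when the process exits.
    atexit.register(lambda: scheduler.shutdown(wait=False))
    return app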
Example #40
                        patch_cursor = cve_collection.find_one(
                            {'id': patch_id})
                        if ap.handle_Patch_Update(
                                patch_cursor, package_cursor['package_name']):
                            print("Successfully applied patch")
                        else:
                            print("Admin patch requested")
                    except Exception:
                        pass
            gv.collect_Checkable_Packages()


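# Only start the scheduler when the Werkzeug reloader is inactive or in the
# reloader's child process (WERKZEUG_RUN_MAIN == 'true'), so debug mode does
# not spawn two competing schedulers.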
if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    scheduler = BackgroundScheduler(daemon=True)
    scheduler.start()
    scheduler.add_job(func=call_database_updater,
                      trigger=IntervalTrigger(hours=24),
                      id='refreshing_database',
                      name='Database_Refresh',
                      replace_existing=True)
    atexit.register(lambda: scheduler.shutdown())

if __name__ == "__main__":

    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)

    print("\t\t\t**** Use CTRL+C then Enter key to exit ****")
    print("\t\t\t\tFlask logging mode off")
    print("\t\t\t**** Access to UI at 127.0.0.1:5000 ****\n")
    app.run(debug=True)
Example #41
class App:

    # Static variables
    folder_location = ""  # Future static var for location of database for multiple apps
    logging.basicConfig(filename='app.log', level=logging.INFO)

    def __init__(self, master):

        # Initialize auxiliaries
        pygame.mixer.init()

        # Reset/Initialize Counts for Button Text and instant reports
        self.ad_num = 0
        self.civ_num = 0
        self.ret_num = 0

        self.active_duty_numbers = 0
        self.civilian_numbers = 0
        self.retiree_numbers = 0

        # Establish connection to database
        try:
            self.db = sqlite3.connect("tracker.db")
        except sqlite3.OperationalError as oe:
            logging.error(f"Error trying to connect to DB: {oe}")
        self.cur = self.db.cursor()

        # Set Fonts
        default_font = tkinter.font.Font(family='Helvetica',
                                         size=48,
                                         weight='bold')

        # Buttons
        self.btn_adres = Button(master,
                                text="0",
                                command=lambda: self.onclick_ad(),
                                font=default_font,
                                bg='#003B74',
                                fg='white',
                                relief=FLAT,
                                bd=0,
                                highlightthickness=0,
                                highlightcolor="#003B74",
                                cursor="none",
                                activeforeground='white',
                                activebackground='#003B74',
                                height=1,
                                width=3)

        self.btn_civ = Button(master,
                              text="0",
                              command=lambda: self.onclick_civ(),
                              font=default_font,
                              bg='#328400',
                              fg='white',
                              relief=FLAT,
                              bd=0,
                              highlightthickness=0,
                              highlightcolor="#328400",
                              cursor="none",
                              activeforeground='white',
                              activebackground='#328400',
                              height=1,
                              width=3)

        self.btn_ret = Button(master,
                              text="0",
                              command=lambda: self.onclick_ret(),
                              font=default_font,
                              bg='#A00004',
                              fg='white',
                              relief=FLAT,
                              bd=0,
                              highlightthickness=0,
                              highlightcolor="#A00004",
                              cursor="none",
                              activeforeground='white',
                              activebackground='#A00004',
                              height=1,
                              width=3)

        # Background Image
        self.filename = PhotoImage(file="Assets/gym9.png")
        self.background_label = Label(master, image=self.filename)

        # Menu Bar
        self.menubar = Menu(master)
        self.filemenu = Menu(self.menubar,
                             relief=FLAT,
                             font=default_font,
                             tearoff=0)
        self.filemenu.add_command(label="Show Report", command=self.show_chart)
        self.filemenu.add_command(label="E-Mail", command=self.email)
        self.filemenu.add_command(label="Quit", command=master.destroy)
        self.menubar.add_cascade(label='Admin', menu=self.filemenu)

        # Place Widgets
        self.background_label.place(x=0, y=0)
        self.background_label.lower(self.btn_adres)

        master.config(menu=self.menubar)

        self.btn_adres.place(x=100, y=925)
        self.btn_civ.place(x=747, y=925)
        self.btn_ret.place(x=1370, y=925)

        # Start reset everyday at midnight
        self.sched = BackgroundScheduler()
        self.sched.add_job(self.reset, trigger='cron', hour=0, minute=0)
        self.sched.start()

    def onclick_ad(self):
        # Save to database
        self.save_db("Active Duty")

        # Increment ad numbers and update display
        self.ad_num += 1
        self.btn_adres.configure(text=self.ad_num)

        # Load and play sound
        pygame.mixer.music.load("Assets/Whistle-noise.mp3")
        pygame.mixer.music.play(0)

    def onclick_civ(self):
        # Save to database
        self.save_db("Civilian")

        # Increment civilian numbers and update display
        self.civ_num += 1
        self.btn_civ.configure(text=self.civ_num)

        # Load and play sound
        pygame.mixer.music.load("Assets/Retro.mp3")
        pygame.mixer.music.play(0)

    def onclick_ret(self):
        # Save to database
        self.save_db("Retired")

        # Increment retiree numbers and update display
        self.ret_num += 1
        self.btn_ret.configure(text=self.ret_num)

        # Load and play sound
        pygame.mixer.music.load("Assets/Wrong-number.mp3")
        pygame.mixer.music.play(0)

    def email(self):

        fromaddr = ''
        toaddr = ['']
        SUBJECT = "Totals for "
        TEXT = f'{self.get_report(1)}\n\n{self.get_report(7)}\n\n{self.get_report(14)}'
        msg = 'Subject: {}\n\n{}'.format(SUBJECT, TEXT)
        username = '******'
        password = ''
        server = smtplib.SMTP('smtp.gmail.com:587')

        try:
            server.starttls()
            server.login(username, password)
            server.sendmail(fromaddr, toaddr, msg)
            showinfo("SUCCESS!", "E-mail Sent")
        except Exception as e:
            logging.error(f"Server connection error: {e}")
            showinfo("FAILED!",
                     "Error Sending Email. See app.log for further details.")
        finally:
            server.quit()

    def save_db(self, category):

        # SQL Statements
        sql_create_table = 'create table if not exists count(dt_date DATE, dt_time DATE, category TEXT)'
        sql_insert = f'INSERT INTO count VALUES (DATE(\'now\', \'localtime\'), TIME(\'now\', \'localtime\'), "{category}")'

        # Execute the above statements
        try:
            self.cur.execute(sql_create_table)
            self.cur.execute(sql_insert)
            self.db.commit()
        except Exception as e:
            logging.error(f"Error trying to execute SQL: {e}")

    def get_report(self, days):

        # SQL Statements
        sql_ad = f'SELECT COUNT(*) FROM count WHERE dt_date BETWEEN DATE(\'now\', \'localtime\', \'-{days} day\'' \
                      f') AND DATE(\'now\', \'localtime\') AND category is \'Active Duty\''
        sql_civ = f'SELECT COUNT(*) FROM count WHERE dt_date BETWEEN DATE(\'now\', \'localtime\', \'-{days} day\'' \
                   f') AND DATE(\'now\', \'localtime\') AND category is \'Civilian\''
        sql_ret = f'SELECT COUNT(*) FROM count WHERE dt_date BETWEEN DATE(\'now\', \'localtime\', \'-{days} day\'' \
                   f') AND DATE(\'now\', \'localtime\') AND category is \'Retired\''

        # Set numbers based on days for reuse
        self.active_duty_numbers = self.cur.execute(sql_ad).fetchone()[0]
        self.civilian_numbers = self.cur.execute(sql_civ).fetchone()[0]
        self.retiree_numbers = self.cur.execute(sql_ret).fetchone()[0]

        return f'Last {days} days:\n' \
               f'Active Duty: {self.active_duty_numbers}\nCivilian: {self.civilian_numbers}\nRetirees: ' \
               f'{self.retiree_numbers}\nTotal: {self.active_duty_numbers + self.civilian_numbers + self.retiree_numbers}'

    def show_chart(self):

        ad_data = pd.read_sql_query(
            'SELECT dt_date as Date, category as Category, count(*) as Count FROM count WHERE category IS '
            '\'Active Duty\' GROUP BY dt_date, category ', self.db)
        civ_data = pd.read_sql_query(
            'SELECT dt_date as Date, category as Category, count(*) as Count FROM count WHERE category IS '
            '\'Civilian\' GROUP BY dt_date, category ', self.db)
        ret_data = pd.read_sql_query(
            'SELECT dt_date as Date, category as Category, count(*) as Count FROM count WHERE category IS '
            '\'Retired\' GROUP BY dt_date, category ', self.db)

        plt.plot(ad_data.Date, ad_data.Count, '-', label='Active Duty')
        plt.plot(civ_data.Date, civ_data.Count, '-', label='Civilian')
        plt.plot(ret_data.Date, ret_data.Count, '-', label='Retirees')
        plt.xlabel('Date')
        plt.ylabel('Number')
        plt.gcf().canvas.set_window_title('Gym Tracker (BETA)')
        plt.title('Patron Analysis')
        plt.suptitle("Gym Tracker")
        plt.legend()
        plt.show()

    def set_db(self):
        """
        Future implementation: Allow user to set folder location for database
        :return:
        """
        folder_location = askdirectory()

    def __del__(self):
        """
        Destructor to close database connection
        :return: None
        """
        self.db.close()
        self.sched.shutdown()

    def reset(self):

        self.ad_num = 0
        self.civ_num = 0
        self.ret_num = 0

        self.btn_adres.configure(text=self.ad_num)
        self.btn_civ.configure(text=self.civ_num)
        self.btn_ret.configure(text=self.ret_num)
Example #42
from server.k2000_prime import k2000
import atexit
from flask import render_template, request
from apscheduler.schedulers.background import BackgroundScheduler
from server import app, socketio

s = BackgroundScheduler(
    daemon=True, job_defaults={'max_instances': 5})
s.start()

# Shutdown your cron thread if the web process is stopped
atexit.register(lambda: s.shutdown(wait=False))

inst = k2000()


@app.route('/')
def hello_world():
    return render_template('index.html')


@app.route('/inst/pause', methods=['GET'])
def pause():
    if s.get_job('send_read') is not None:
        # k.write('disp:enab 1')
        s.remove_job('send_read')
    return ''
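
The /inst/pause route above removes the job outright, so re-creating it later requires re-specifying its trigger. APScheduler can also pause and resume a job in place, which preserves the trigger configuration; a hedged sketch (the route paths here are made up, only the 'send_read' job id is reused from the code above):

@app.route('/inst/pause-in-place', methods=['GET'])
def pause_in_place():
    if s.get_job('send_read') is not None:
        s.pause_job('send_read')  # job stays registered, trigger kept
    return ''

@app.route('/inst/resume', methods=['GET'])
def resume_in_place():
    if s.get_job('send_read') is not None:
        s.resume_job('send_read')
    return ''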


@app.route('/inst/start', methods=['GET'])
def start():
Example #43
            user[key] = datetime.strptime(user[key], "%Y-%m-%dT%H:%M:%S")
        # Save current state of account
        db.witness.update({'_id': user['owner']}, user, upsert=True)
        # Create our Snapshot dict
        snapshot = user.copy()
        _id = user['owner'] + '|' + now.strftime('%Y%m%d')
        snapshot.update({
          '_id': _id,
          'created': scantime
        })
        # Save Snapshot in Database
        db.witness_history.update({'_id': _id}, snapshot, upsert=True)

def run():
    update_witnesses()
    check_misses()

if __name__ == '__main__':
    # Start job immediately
    run()
    # Schedule it to run every 30 seconds
    scheduler = BackgroundScheduler()
    scheduler.add_job(run, 'interval', seconds=30, id='run')
    scheduler.start()
    # Loop
    try:
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
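
The sleep loop above exists only to keep the main thread alive for the BackgroundScheduler. When nothing else needs to run in the foreground, a BlockingScheduler expresses the same intent more directly; a minimal sketch of that alternative:

from apscheduler.schedulers.blocking import BlockingScheduler

def run():
    pass  # stand-in for update_witnesses() / check_misses()

scheduler = BlockingScheduler()
scheduler.add_job(run, 'interval', seconds=30, id='run')
try:
    scheduler.start()  # blocks the main thread until shutdown
except (KeyboardInterrupt, SystemExit):
    pass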
Example #44
def main():
    """Declare command line options"""
    parser = ArgumentParser(
        description='ouroboros',
        formatter_class=RawTextHelpFormatter,
        epilog=
        'EXAMPLE: ouroboros -d tcp://1.2.3.4:5678 -i 20 -m container1 container2 -l warn'
    )

    core_group = parser.add_argument_group(
        "Core", "Configuration of core functionality")
    core_group.add_argument('-v',
                            '--version',
                            action='version',
                            version=VERSION)

    core_group.add_argument(
        '-d',
        '--docker-sockets',
        nargs='+',
        default=Config.docker_sockets,
        dest='DOCKER_SOCKETS',
        help='Sockets for docker management\n'
        'DEFAULT: "unix://var/run/docker.sock"\n'
        'EXAMPLE: -d unix://var/run/docker.sock tcp://192.168.1.100:2376')

    core_group.add_argument('-t',
                            '--docker-tls',
                            default=Config.docker_tls,
                            dest='DOCKER_TLS',
                            action='store_true',
                            help='Enable docker TLS\n'
                            'REQUIRES: docker cert mount')

    core_group.add_argument('-T',
                            '--docker-tls-verify',
                            default=Config.docker_tls_verify,
                            dest='DOCKER_TLS_VERIFY',
                            action='store_false',
                            help='Verify the CA Certificate mounted for TLS\n'
                            'DEFAULT: True')

    core_group.add_argument(
        '-i',
        '--interval',
        type=int,
        default=Config.interval,
        dest='INTERVAL',
        help='Interval in seconds between checking for updates\n'
        'DEFAULT: 300')

    core_group.add_argument('-C',
                            '--cron',
                            default=Config.cron,
                            dest='CRON',
                            help='Cron formatted string for scheduling\n'
                            'EXAMPLE: "*/5 * * * *"')

    core_group.add_argument(
        '-l',
        '--log-level',
        choices=['debug', 'info', 'warn', 'error', 'critical'],
        dest='LOG_LEVEL',
        default=Config.log_level,
        help='Set logging level\n'
        'DEFAULT: info')

    core_group.add_argument('-u',
                            '--self-update',
                            default=Config.self_update,
                            dest='SELF_UPDATE',
                            action='store_true',
                            help='Let ouroboros update itself')

    core_group.add_argument('-S',
                            '--swarm',
                            default=Config.swarm,
                            dest='SWARM',
                            action='store_true',
                            help='Put ouroboros in swarm mode')

    core_group.add_argument('-o',
                            '--run-once',
                            default=Config.run_once,
                            action='store_true',
                            dest='RUN_ONCE',
                            help='Single run')

    core_group.add_argument(
        '-A',
        '--dry-run',
        default=Config.dry_run,
        action='store_true',
        dest='DRY_RUN',
        help='Run without making changes. Best used with run-once')

    core_group.add_argument(
        '--monitor-only',
        default=Config.monitor_only,
        action='store_true',
        dest='MONITOR_ONLY',
        help='Run without making changes. Best used with run-once')

    core_group.add_argument(
        '-N',
        '--notifiers',
        nargs='+',
        default=Config.notifiers,
        dest='NOTIFIERS',
        help='Apprise formatted notifiers\n'
        'EXAMPLE: -N discord://1234123412341234/jasdfasdfasdfasddfasdf '
        'mailto://*****:*****@gmail.com')

    docker_group = parser.add_argument_group(
        "Docker", "Configuration of docker functionality")
    docker_group.add_argument('-m',
                              '--monitor',
                              nargs='+',
                              default=Config.monitor,
                              dest='MONITOR',
                              help='Which container(s) to monitor\n'
                              'DEFAULT: All')

    docker_group.add_argument('-n',
                              '--ignore',
                              nargs='+',
                              default=Config.ignore,
                              dest='IGNORE',
                              help='Container(s) to ignore\n'
                              'EXAMPLE: -n container1 container2')

    docker_group.add_argument(
        '-k',
        '--label-enable',
        default=Config.label_enable,
        dest='LABEL_ENABLE',
        action='store_true',
        help='Enable label monitoring for ouroboros label options\n'
        'Note: labels take precedence\n'
        'DEFAULT: False')

    docker_group.add_argument(
        '-M',
        '--labels-only',
        default=Config.labels_only,
        dest='LABELS_ONLY',
        action='store_true',
        help='Only watch containers that utilize labels\n'
        'This allows stricter compliance for environments\n'
        'DEFAULT: False')

    docker_group.add_argument('-c',
                              '--cleanup',
                              default=Config.cleanup,
                              dest='CLEANUP',
                              action='store_true',
                              help='Remove old images after updating')

    docker_group.add_argument('-r',
                              '--repo-user',
                              default=Config.repo_user,
                              dest='REPO_USER',
                              help='Private docker registry username\n'
                              'EXAMPLE: [email protected]')

    docker_group.add_argument('-R',
                              '--repo-pass',
                              default=Config.repo_pass,
                              dest='REPO_PASS',
                              help='Private docker registry password\n'
                              'EXAMPLE: MyPa$$w0rd')

    data_group = parser.add_argument_group(
        'Data Export', 'Configuration of data export functionality')
    data_group.add_argument('-D',
                            '--data-export',
                            choices=['prometheus', 'influxdb'],
                            default=Config.data_export,
                            dest='DATA_EXPORT',
                            help='Enable exporting of data for chosen option')

    data_group.add_argument('-a',
                            '--prometheus-addr',
                            default=Config.prometheus_addr,
                            dest='PROMETHEUS_ADDR',
                            help='Bind address to run Prometheus exporter on\n'
                            'DEFAULT: 127.0.0.1')

    data_group.add_argument('-p',
                            '--prometheus-port',
                            type=int,
                            default=Config.prometheus_port,
                            dest='PROMETHEUS_PORT',
                            help='Port to run Prometheus exporter on\n'
                            'DEFAULT: 8000')

    data_group.add_argument('-I',
                            '--influx-url',
                            default=Config.influx_url,
                            dest='INFLUX_URL',
                            help='URL for influxdb\n'
                            'DEFAULT: 127.0.0.1')

    data_group.add_argument('-P',
                            '--influx-port',
                            type=int,
                            default=Config.influx_port,
                            dest='INFLUX_PORT',
                            help='PORT for influxdb\n'
                            'DEFAULT: 8086')

    data_group.add_argument('-U',
                            '--influx-username',
                            default=Config.influx_username,
                            dest='INFLUX_USERNAME',
                            help='Username for influxdb\n'
                            'DEFAULT: root')

    data_group.add_argument('-x',
                            '--influx-password',
                            default=Config.influx_password,
                            dest='INFLUX_PASSWORD',
                            help='Password for influxdb\n'
                            'DEFAULT: root')

    data_group.add_argument(
        '-X',
        '--influx-database',
        default=Config.influx_database,
        dest='INFLUX_DATABASE',
        help='Influx database name. Required if using influxdb')

    data_group.add_argument('-s',
                            '--influx-ssl',
                            default=Config.influx_ssl,
                            dest='INFLUX_SSL',
                            action='store_true',
                            help='Use SSL when connecting to influxdb')

    data_group.add_argument(
        '-V',
        '--influx-verify-ssl',
        default=Config.influx_verify_ssl,
        dest='INFLUX_VERIFY_SSL',
        action='store_true',
        help='Verify SSL certificate when connecting to influxdb')

    docker_group.add_argument(
        '--skip-startup-notifications',
        default=Config.skip_startup_notifications,
        dest='SKIP_STARTUP_NOTIFICATIONS',
        action='store_true',
        help='Do not send ouroboros notifications when starting')

    args = parser.parse_args()

    if environ.get('LOG_LEVEL'):
        log_level = environ.get('LOG_LEVEL')
    else:
        log_level = args.LOG_LEVEL
    ol = OuroborosLogger(level=log_level)
    ol.logger.info('Version: %s-%s', VERSION, BRANCH)
    config = Config(environment_vars=environ, cli_args=args)
    config_dict = {
        key: value
        for key, value in vars(config).items() if key.upper() in config.options
    }
    ol.logger.debug("Ouroboros configuration: %s", config_dict)

    data_manager = DataManager(config)
    notification_manager = NotificationManager(config, data_manager)
    scheduler = BackgroundScheduler()
    scheduler.start()

    for socket in config.docker_sockets:
        try:
            docker = Docker(socket, config, data_manager, notification_manager)
            if config.swarm:
                mode = Service(docker)
            else:
                mode = Container(docker)

            if config.run_once:
                scheduler.add_job(
                    mode.update,
                    name=f'Run Once container update for {socket}')
            else:
                if mode.mode == 'container':
                    scheduler.add_job(mode.self_check,
                                      name=f'Self Check for {socket}')
                if config.cron:
                    scheduler.add_job(
                        mode.update,
                        name=f'Cron container update for {socket}',
                        trigger='cron',
                        minute=config.cron[0],
                        hour=config.cron[1],
                        day=config.cron[2],
                        month=config.cron[3],
                        day_of_week=config.cron[4],
                        misfire_grace_time=15)
                else:
                    scheduler.add_job(
                        mode.update,
                        name=
                        f'Initial run interval container update for {socket}')
                    scheduler.add_job(
                        mode.update,
                        name=f'Interval container update for {socket}',
                        trigger='interval',
                        seconds=config.interval)
        except ConnectionError:
            ol.logger.error(
                "Could not connect to socket %s. Check your config", socket)

    if config.run_once:
        next_run = None
    elif config.cron:
        next_run = scheduler.get_jobs()[0].next_run_time
    else:
        now = datetime.now(timezone.utc).astimezone()
        next_run = (
            now + timedelta(0, config.interval)).strftime("%Y-%m-%d %H:%M:%S")

    if not config.skip_startup_notifications:
        notification_manager.send(kind='startup', next_run=next_run)

    while scheduler.get_jobs():
        sleep(1)

    scheduler.shutdown()
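
The cron branch above assumes config.cron has already been split into the five standard crontab fields (minute, hour, day, month, day_of_week). A minimal sketch of that split and the resulting trigger; the expression and job function are placeholders:

from apscheduler.schedulers.background import BackgroundScheduler

def update():
    print('checking for updates...')  # stand-in for mode.update

cron_fields = '*/5 * * * *'.split()  # -> ['*/5', '*', '*', '*', '*']

scheduler = BackgroundScheduler()
scheduler.add_job(update,
                  trigger='cron',
                  minute=cron_fields[0],
                  hour=cron_fields[1],
                  day=cron_fields[2],
                  month=cron_fields[3],
                  day_of_week=cron_fields[4],
                  misfire_grace_time=15)
scheduler.start()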
Example #45
class SpeakReader(object):
    _INITIALIZED = False
    SIGNAL = None
    transcribeEngine = None
    HTTP_PORT = None
    _INPUT_DEVICE = None

    ###################################################################################################
    #  Initialize SpeakReader
    ###################################################################################################
    def __init__(self, initOptions):
        if SpeakReader._INITIALIZED:
            return

        with INIT_LOCK:

            global PROG_DIR
            PROG_DIR = initOptions['prog_dir']

            global DATA_DIR
            DATA_DIR = initOptions['data_dir']

            global CONFIG
            CONFIG = initOptions['config']
            assert CONFIG is not None

            if isinstance(initOptions['http_port'], int):
                self.HTTP_PORT = initOptions['http_port']
            else:
                self.HTTP_PORT = int(CONFIG.HTTP_PORT)

            if self.HTTP_PORT < 21 or self.HTTP_PORT > 65535:
                logger.warn("HTTP_PORT out of bounds: 21 < %s < 65535", self.HTTP_PORT)
                self.HTTP_PORT = 8880

            # Check if pyOpenSSL is installed. It is required for certificate generation
            # and for CherryPy.
            if CONFIG.ENABLE_HTTPS:
                try:
                    import OpenSSL
                except ImportError:
                    logger.warn("The pyOpenSSL module is missing. Install this "
                                "module to enable HTTPS. HTTPS will be disabled.")
                    CONFIG.ENABLE_HTTPS = False

                if not CONFIG.HTTPS_CERT:
                    CONFIG.HTTPS_CERT = os.path.join(DATA_DIR, 'server.crt')
                if not CONFIG.HTTPS_KEY:
                    CONFIG.HTTPS_KEY = os.path.join(DATA_DIR, 'server.key')

                if not (os.path.exists(CONFIG.HTTPS_CERT) and os.path.exists(CONFIG.HTTPS_KEY)):
                    logger.warn("Disabled HTTPS because of missing certificate and key.")
                    CONFIG.ENABLE_HTTPS = False

            # Check if we have a jwt_secret
            if CONFIG.JWT_SECRET == '' or not CONFIG.JWT_SECRET:
                logger.debug("Generating JWT secret...")
                CONFIG.JWT_SECRET = generate_uuid()
                CONFIG.write()

            ###################################################################################################
            #  Get Version Information and check for updates
            ###################################################################################################
            self.versionInfo = Version()

            ###################################################################################################
            #  Get the Input Device
            ###################################################################################################
            self.get_input_device()

            ###################################################################################################
            #  Initialize the Transcribe Engine
            ###################################################################################################
            self.transcribeEngine = TranscribeEngine()

            if CONFIG.START_TRANSCRIBE_ON_STARTUP:
                self.startTranscribeEngine()

            ###################################################################################################
            #  Initialize the webserver
            ###################################################################################################
            logger.info('WebServer Initializing')
            webServerOptions = {
                'config': CONFIG,
                'prog_dir': PROG_DIR,
                'data_dir': DATA_DIR,
                'http_port': self.HTTP_PORT,
            }
            self.webServer = webstart.initialize(webServerOptions)
            self.webServer.root.SR = self
            cherrypy.server.start()

            # Launch the WebBrowser
            if CONFIG.LAUNCH_BROWSER and not initOptions['nolaunch']:
                launch_browser(CONFIG.HTTP_HOST, self.HTTP_PORT, CONFIG.HTTP_ROOT + 'manage')

            ###################################################################################################
            #  Run cleanup of old logs, transcripts, and recordings and start a scheduler to run every 24 hours
            ###################################################################################################
            self.cleanup_files()
            self.scheduler = BackgroundScheduler()
            self.scheduler.add_job(self.cleanup_files, 'interval', hours=24)
            self.scheduler.start()

            SpeakReader._INITIALIZED = True

    @property
    def is_initialized(self):
        return self._INITIALIZED

    ###################################################################################################
    #  Start the Transcribe Engine
    ###################################################################################################
    def startTranscribeEngine(self):
        if self.transcribeEngine.is_online:
            logger.info("Transcribe Engine already started.")
            return

        if self.get_input_device() is None:
            logger.warn("No Input Devices Available. Can't start Transcribe Engine.")
            return

        if CONFIG.SPEECH_TO_TEXT_SERVICE == 'google':
            if CONFIG.GOOGLE_CREDENTIALS_FILE == "":
                logger.warn("API Credentials not available. Can't start Transcribe Engine.")
                return
            try:
                with open(CONFIG.GOOGLE_CREDENTIALS_FILE) as f:
                    json.loads(f.read())
            except json.decoder.JSONDecodeError:
                logger.warn("API Credentials does not appear to be a valid JSON file. Can't start Transcribe Engine.")
                return

        elif CONFIG.SPEECH_TO_TEXT_SERVICE == 'IBM':
            if CONFIG.IBM_CREDENTIALS_FILE == "":
                logger.warn("API Credentials not available. Can't start Transcribe Engine.")
                return

            APIKEY = None
            URL = None
            try:
                with open(CONFIG.IBM_CREDENTIALS_FILE) as f:
                    for line in f.read().splitlines():
                        parm = line.split('=')
                        if parm[0] == 'SPEECH_TO_TEXT_APIKEY':
                            APIKEY = parm[1]
                        if parm[0] == 'SPEECH_TO_TEXT_URL':
                            URL = parm[1]
            except Exception:
                pass
            if APIKEY is None or URL is None:
                logger.warn("APIKEY or URL not found in IBM credentials file. Can't start Transcribe Engine.")
                return

        elif CONFIG.SPEECH_TO_TEXT_SERVICE == 'microsoft':
            if CONFIG.MICROSOFT_SERVICE_APIKEY == "" or CONFIG.MICROSOFT_SERVICE_REGION == "":
                logger.warn("Microsoft Azure APIKEY and Region are required. Can't start Transcribe Engine.")
                return

        else:
            return

        self.transcribeEngine.start()

    ###################################################################################################
    #  Stop the Transcribe Engine
    ###################################################################################################
    def stopTranscribeEngine(self):
        if self.transcribeEngine.is_online:
            self.transcribeEngine.stop()

    ###################################################################################################
    #  Shutdown SpeakReader
    ###################################################################################################
    def shutdown(self, restart=False, update=False, checkout=False):
        SpeakReader._INITIALIZED = False
        self.transcribeEngine.shutdown()
        self.scheduler.shutdown()
        CONFIG.write()

        if not restart and not update and not checkout:
            logger.info("Shutting Down SpeakReader")

        if update:
            logger.info("********************************")
            logger.info("*  SpeakReader is updating...  *")
            logger.info("********************************")
            try:
                self.versionInfo.update()
            except Exception as e:
                logger.warn("SpeakReader failed to update: %s. Restarting." % e)

        logger.info('WebServer Terminating')
        cherrypy.engine.exit()

        if checkout:
            logger.info("SpeakReader is switching the git branch...")
            try:
                self.versionInfo.checkout_git_branch()
            except Exception as e:
                logger.warn("SpeakReader failed to switch git branch: %s. Restarting." % e)


    ###################################################################################################
    #  Get Input Device
    ###################################################################################################
    def get_input_device(self):
        self._INPUT_DEVICE = CONFIG.INPUT_DEVICE
        try:
            p = pyaudio.PyAudio()
            defaultInputDevice = p.get_default_input_device_info()

            if self._INPUT_DEVICE not in list(d['name'] for d in self.get_input_device_list()):
                CONFIG.INPUT_DEVICE = self._INPUT_DEVICE = defaultInputDevice.get('name')
                CONFIG.write()
        except:
            self._INPUT_DEVICE = None

        return self._INPUT_DEVICE

    ###################################################################################################
    #  Get Input Device List
    ###################################################################################################
    def get_input_device_list(self):
        deviceList = []
        try:
            p = pyaudio.PyAudio()
            defaultHostAPIindex = p.get_default_host_api_info().get('index')
            numdevices = p.get_default_host_api_info().get('deviceCount')
            for i in range(0, numdevices):
                inputDevice = p.get_device_info_by_host_api_device_index(defaultHostAPIindex, i)
                if inputDevice.get('maxInputChannels') > 0:
                    device = {
                        'index':    inputDevice.get('index'),
                        'name':     inputDevice.get('name'),
                        'selected': inputDevice.get('name') == self._INPUT_DEVICE,
                    }
                    deviceList.append(device)
        except Exception:
            pass

        return deviceList


    ###################################################################################################
    #  Delete any files over the retention days
    ###################################################################################################
    def cleanup_files(self):
        logger.info("Running File Cleanup")
        def delete(path, days):
            try:
                days = int(days)
            except ValueError:
                return
            delete_date = datetime.datetime.now() - datetime.timedelta(days=days)
            with os.scandir(path=path) as files:
                for file in files:
                    file_info = file.stat()
                    if datetime.datetime.fromtimestamp(file_info.st_ctime) < delete_date:
                        filename = os.path.join(path, file.name)
                        logger.debug("Deleting: %s" % filename)
                        os.remove(filename)

        if CONFIG.LOG_RETENTION_DAYS != "":
            delete(CONFIG.LOG_DIR, CONFIG.LOG_RETENTION_DAYS)

        if CONFIG.TRANSCRIPT_RETENTION_DAYS != "":
            delete(CONFIG.TRANSCRIPTS_FOLDER, CONFIG.TRANSCRIPT_RETENTION_DAYS)

        if CONFIG.RECORDING_RETENTION_DAYS != "":
            delete(CONFIG.RECORDINGS_FOLDER, CONFIG.RECORDING_RETENTION_DAYS)
Example #46
class JenniferBrain(object):

    UNSURE_TEXT = "Sorry, I can't help with that"
    MULTIPLE_LESSONS_APPLY = 'Which one of my lessons applies here?'

    def __init__(self, allow_network_plugins=False, always_allow_plugins=None):
        self._initialize_paths()

        # Lessons + Settings
        self.allow_network_plugins = allow_network_plugins
        self.always_allow_plugins = always_allow_plugins or []
        self.responders = []
        self.notifiers = []
        self.notification_queue = Queue.PriorityQueue()
        self._load_profile_and_settings()

        # Requires self.database & self.settings
        self._load_lessons()

        # Just to save time later
        self.nltktagger = PerceptronTagger()
        self.tagset = None

        # Notifications
        self.notification_clients = []
        self._initialize_background_tasks()

    def _initialize_paths(self):
        """Create the paths needed"""
        self.base_path = os.path.join(os.path.dirname(__file__), '..')
        self.profile_file = os.path.join(self.base_path, 'profile.json')
        self.lessons_path = os.path.join(self.base_path, 'lessons')

    def _load_lessons(self):
        """
        Search the lessons/ package for lessons & store them in sorted order by priority
        :return:
        """
        pkgs = [
            n for _, n, _ in pkgutil.iter_modules(['lessons']) if n != 'base'
        ]
        for name in pkgs:
            exec 'import lessons.' + name + '.plugin'

        responders = [
            cls(self).set_profile(self.database['profile'])
            for cls in JenniferResponsePlugin.__subclasses__()
            if self._is_lesson_allowed(cls)
        ]
        self.notifiers = [
            cls(self).set_profile(self.database['profile'])
            for cls in JenniferNotificationPlugin.__subclasses__()
            if self._is_lesson_allowed(cls)
        ]

        for r in (responders + self.notifiers):
            r.set_settings(self._get_settings_for_lesson(r))

        self.responders = sorted(responders, key=lambda l: l.PRIORITY)

    def _is_lesson_allowed(self, lesson_cls):
        if lesson_cls in self.always_allow_plugins:
            return True
        if lesson_cls.REQUIRES_NETWORK and not self.allow_network_plugins:
            return False
        return True

    def _load_profile_and_settings(self):
        """
        Load the profile
        :return:
        """
        try:
            with open(self.profile_file, 'r+') as profile_file:
                data = json.loads(profile_file.read(), strict=False)
                self.database = data
                if 'profile' in self.database and 'settings' in self.database:
                    profile_file.close()
                    return
        except (IOError, ValueError):
            self.database = {}
            self._init_profile()
            self._save_profile_to_file()

    def _get_settings_for_lesson(self, lesson, lesson_name=None):
        """
        Get the settings dict for the lesson
        (Must be called from a lesson)
        :return:
        """
        if not lesson_name:
            lesson_name = unicode(lesson.settings_name)

        try:
            return self.database['settings'][lesson_name]
        except KeyError:
            if self._test_if_settings_template_exists(lesson):
                print "--------{} SETTINGS--------".format(lesson_name)
                self._add_lesson_to_settings_and_write(lesson)
                return self._get_settings_for_lesson(lesson)
            return {}

    def _settings_template_path_for_lesson(self, lesson):
        """Gets a settings_template for a given lesson"""
        lesson_settings_name = lesson.settings_name
        return os.path.join(self.lessons_path, lesson_settings_name,
                            'settings_template.json')

    def _test_if_settings_template_exists(self, lesson):
        """Returns if a settings_template for a given lesson"""
        return os.path.isfile(self._settings_template_path_for_lesson(lesson))

    def _add_lesson_to_settings_and_write(self, lesson):
        """Loads a lesson's settings_template, runs an initialization function if available, and copies into DB"""
        lesson_settings_name = lesson.settings_name
        with open(self._settings_template_path_for_lesson(lesson)) as template:
            try:
                # Try to load initial template
                settings_template_dict = json.loads(template.read(),
                                                    strict=False)
                settings_template_dict = lesson.initialize_settings(
                    settings_template_dict)

                # Push to DB & save
                self.database['settings'][
                    lesson_settings_name] = settings_template_dict
                self._save_profile_to_file()
            except ValueError:
                exit("{} has an invalid settings_template.json".format(
                    lesson_settings_name))

    def _save_profile_to_file(self):
        """Writes to profile.json"""
        with open(self.profile_file, "w+") as f:
            plain_text = json.dumps(self.database, indent=4, sort_keys=True)
            f.write(plain_text)
            f.close()

    def _init_profile(self):
        """Should be run if profile.json doesn't exist"""
        fields = [
            ('first name', 'firstName'),
            ('last name', 'lastName'),
        ]
        location_fields = [
            ('city', 'city', 'New York City'),
            ('region', 'region', 'NY'),
            ('country', 'country', 'US'),
            ('zip', 'zip'),
        ]

        if 'profile' not in self.database:
            self.database.update({'profile': {'location': {}}})
            for field in fields:
                print "What is your {}?".format(field[0])
                self.database['profile'][field[1]] = raw_input("> ")

            self.database['profile']['location'] = {}
            for field in location_fields:
                txt = "What is your {}?".format(field[0])

                if len(field) >= 3:
                    txt += " example: ({})".format(field[2])

                print txt
                self.database['profile']['location'][field[1]] = raw_input(
                    "> ")

            while True:
                print "What is your timezone? example: ({})".format(
                    random.choice(common_timezones))
                tz = raw_input('> ')
                try:
                    timezone(tz)  # raises an exception for an unknown timezone name
                    self.database['profile']['location']['timezone'] = tz
                    break
                except Exception:
                    print "Invalid timezone"

        if 'settings' not in self.database:
            self.database.update(
                {'settings': {
                    'notifications': {
                        'quiet_hours': []
                    }
                }})

    def _get_profile(self):
        """Get the user's profile"""
        return self.database['profile']

    def take_input(self, text_input, client):
        """
        Search all lessons for lessons that can respond
        :param text_input:
        :return:
        """
        text_input = text_input.lower()
        tokens = nltk.word_tokenize(text_input)
        tags = nltk.tag._pos_tag(tokens, self.tagset, self.nltktagger)

        # TODO: extract this out into a custom stopwords list
        try:
            tags.remove(
                ('please', 'NN')
            )  # It's common to say 'please' when asking Jennifer something
        except:
            pass

        # Find the lessons that can answer
        respond_to = None
        matching_lessons = [
            lesson for lesson in self.responders if lesson.can_respond(
                tags=tags, client=client, brain=self, plain_text=text_input)
        ]

        # No answer
        if len(matching_lessons) == 0:
            return self.respond_or_unsure(None, tags, client, text_input)

        # Only one module can respond
        elif len(matching_lessons) == 1:
            respond_to = matching_lessons[0]

        # Multiple lessons can respond
        else:
            priority_counts = {}
            for lesson in matching_lessons:
                key = lesson.PRIORITY
                priority_counts.setdefault(key, []).append(lesson)

            # Now we have something like {999: [TimePlugin(), LowPriorityTimePlugin()], 0: [ImportantTimePlugin()]}
            min_priority = min(priority_counts.keys())

            if len(priority_counts[min_priority]) == 1:
                respond_to = priority_counts[min_priority][0]
            else:
                client.give_output_string("brain", self.MULTIPLE_LESSONS_APPLY)
                for lesson in priority_counts[min_priority]:
                    if client.confirm("brain", lesson.VERBOSE_NAME + "?"):
                        # TODO: would be nice to remember this decision.. that's v3.0 though.
                        respond_to = lesson
                        break

        return self.respond_or_unsure(respond_to, tags, client, text_input)

    def respond_or_unsure(self, respond_to, tags, client, text_input):
        try:
            return respond_to.respond(tags=tags,
                                      client=client,
                                      brain=self,
                                      plain_text=text_input)
        except Exception as e:
            return JenniferResponse(
                self, [JenniferTextResponseSegment(self.UNSURE_TEXT)])

    def _initialize_background_tasks(self):
        self.scheduler = BackgroundScheduler(timezone="UTC", daemon=True)
        self.scheduler.start()
        self.scheduler.add_job(self._collect_notifications_from_notifiers,
                               'interval',
                               seconds=10)
        self.scheduler.add_job(self.push_notifications_to_clients,
                               'interval',
                               seconds=2)
        atexit.register(lambda: self.scheduler.shutdown(wait=False))

    def _collect_notifications_from_notifiers(self):
        for notification_provider in self.notifiers:
            while not notification_provider.queue.empty():
                self.notification_queue.put(notification_provider.queue.get())

    def register_notification_client(self, client):
        self.notification_clients.append(client)

    def push_notifications_to_clients(self):
        while not self.notification_queue.empty():
            notification = self.notification_queue.get()
            for client in self.notification_clients:
                client.give_output_string("brain", notification[1])
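
# For illustration only: a minimal sketch of a responder ("lesson") compatible with the
# dispatch logic in take_input() above. The class name and the matching rule are
# hypothetical; only the can_respond/respond signatures and the PRIORITY/VERBOSE_NAME
# attributes are taken from the code above.
class ExampleTimeLesson(object):
    PRIORITY = 100
    VERBOSE_NAME = "Tell the time"

    def can_respond(self, tags=None, client=None, brain=None, plain_text=None):
        # Respond whenever the word "time" appears in the tagged input
        return any(word == 'time' for word, _ in tags)

    def respond(self, tags=None, client=None, brain=None, plain_text=None):
        from datetime import datetime
        now = datetime.now().strftime("%H:%M")
        return JenniferResponse(brain, [JenniferTextResponseSegment("It is " + now)])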
示例#47
0
class Scheduler(object):
    def __init__(self):
        self._logger = logging.getLogger()

        config_path = os.path.join(os.path.expanduser('~'), '.shipane_sdk', 'config', 'scheduler.ini')
        self._logger.info('Config path: %s', config_path)
        self._config = configparser.RawConfigParser()
        self._config.readfp(codecs.open(config_path, encoding="utf_8_sig"))

        self._scheduler = BackgroundScheduler()
        self._client = Client(self._logger, **dict(self._config.items('ShiPanE')))

    def start(self):
        self.__add_job(self.__create_new_stock_purchase_job())
        self.__add_job(self.__create_repo_job())
        self.__add_job(self.__create_batch_job())
        self.__add_job(self.__create_join_quant_following_job())
        self.__add_job(self.__create_rice_quant_following_job())
        self.__add_job(self.__create_uqer_following_job())
        self.__add_job(self.__create_guorn_sync_job())

        self._scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

        try:
            while True:
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            self._scheduler.shutdown()

    def __add_job(self, job):
        if job.is_enabled:
            self._scheduler.add_job(job, APCronParser.parse(job.schedule), name=job.name, misfire_grace_time=None)
        else:
            self._logger.warning('{} is not enabled'.format(job.name))

    def __create_new_stock_purchase_job(self):
        section = 'NewStocks'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        return NewStockPurchaseJob(self._client, client_aliases, '{}Job'.format(section), **options)

    def __create_repo_job(self):
        section = 'Repo'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        return RepoJob(self._client, client_aliases, '{}Job'.format(section), **options)

    def __create_batch_job(self):
        section = 'Batch'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        return BatchJob(self._client, client_aliases, '{}Job'.format(section), **options)

    def __create_join_quant_following_job(self):
        section = 'JoinQuant'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        quant_client = JoinQuantClient(**options)
        return OnlineQuantFollowingJob(self._client, quant_client, client_aliases, '{}FollowingJob'.format(section),
                                       **options)

    def __create_rice_quant_following_job(self):
        section = 'RiceQuant'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        quant_client = RiceQuantClient(**options)
        return OnlineQuantFollowingJob(self._client, quant_client, client_aliases, '{}FollowingJob'.format(section),
                                       **options)

    def __create_uqer_following_job(self):
        section = 'Uqer'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        quant_client = UqerClient(**options)
        return OnlineQuantFollowingJob(self._client, quant_client, client_aliases, '{}FollowingJob'.format(section),
                                       **options)

    def __create_guorn_sync_job(self):
        section = 'Guorn'
        options = self.__build_options(section)
        client_aliases = self.__filter_client_aliases(section)
        quant_client = GuornClient(**options)
        return OnlineQuantSyncJob(self._client, quant_client, client_aliases, '{}SyncJob'.format(section),
                                  **options)

    def __build_options(self, section):
        if not self._config.has_section(section):
            return dict()

        options = dict(self._config.items(section))
        options['enabled'] = bool(distutils.util.strtobool(options['enabled']))
        return options

    def __filter_client_aliases(self, section):
        if not self._config.has_section(section):
            return dict()

        all_client_aliases = dict(self._config.items('ClientAliases'))
        client_aliases = [client_alias.strip() for client_alias in
                          filter(None, self._config.get(section, 'clients').split(','))]
        return collections.OrderedDict(
            (client_alias, all_client_aliases[client_alias]) for client_alias in client_aliases)
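
# Hypothetical example of ~/.shipane_sdk/config/scheduler.ini for the Scheduler above,
# expressed as a string. Only the section names and the 'enabled'/'clients' keys are
# read explicitly by the code; the ShiPanE connection options and the 'schedule' key
# are assumptions about what Client() and the job classes expect.
SAMPLE_SCHEDULER_INI = """
[ShiPanE]
host = localhost
port = 8888

[ClientAliases]
account1 = my-broker-account

[NewStocks]
enabled = true
clients = account1
schedule = 0 0 9 * * *
"""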
示例#48
0
class HarryBotter(object):
    def __init__(self, debug=False, stopcb=None):
        oplogs("Harry Botter is rebooting")
        self.__init_robot(debug)
        self.stopcb = stopcb

        #self.__init_db()
        self.__init_jobs()
        oplogs("Harry Botter is online")

    def __del__(self):
        oplogs("Harry Botter is shutting down~~~")
        self.stop_jobs()
        #self.close_db()

    def __init_robot(self, debug):
        self.enable = True
        self.mouth = None  # output callback
        self.ear = None  # input for text and audio callback
        self.eye = None  # input for graph callback
        self.brain = None  # processor modules
        self.memory = []  # memory for what heard or seen

        self.support_groups = []
        self.debug = debug
        self.sched = None
        self.starttime = datetime.now()

    def subscribe(self, msgSend):
        self.mouth = msgSend
        oplogs('Robot unmuted.')

    def unsubscribe(self):
        self.mouth = None
        oplogs('Robot muted.')

    def install_mods(self, hmod):
        self.stockmod = hmod  #StockMod(self.debug)

    def __init_db(self):
        # Create the database used for persistent memory
        self.conn = sqlite3.connect('messages.db')
        self.cursor = self.conn.cursor()
        self.cursor.execute(
            'create table if not exists msgqueue (id integer primary key autoincrement, date TEXT, groupname TEXT, user TEXT, msg TEXT)'
        )
        self.cursor.execute(
            'create table if not exists monitorstocks (id integer primary key autoincrement, groupname TEXT, code TEXT)'
        )
        self.conn.commit()

        oplogs("messages.db connected")

    def __close_db(self):
        self.conn.close()

    def __init_jobs(self):
        self.sched = BackgroundScheduler()

        # Add scheduled jobs

        # Clean the cache once per day
        #self.sched.add_job(self.job_clean_cache, trigger='cron', day_of_week='*',hour=1, minute=0, second=0)

        # 5-minute stock scan for each group
        self.sched.add_job(self.job_stock_monitor,
                           trigger='cron',
                           id='job_stock_monitor',
                           minute="*/5")  #, next_run_time=None)

        # Generate the opening report at 9:30:15 on trading days
        self.sched.add_job(self.job_open_scan,
                           trigger='cron',
                           day_of_week='0-4',
                           hour=9,
                           minute=30,
                           second=15)

        # Pause scanning between 11:30 and 13:00 on trading days
        self.sched.add_job(self.job_close_scan,
                           trigger='cron',
                           day_of_week='0-4',
                           hour=11,
                           minute=30,
                           second=15)
        self.sched.add_job(self.job_open_scan,
                           trigger='cron',
                           day_of_week='0-4',
                           hour=13,
                           minute=0,
                           second=0)
        # Generate the closing report at 15:05:00 on trading days

        self.sched.add_job(self.job_close_scan,
                           trigger='cron',
                           day_of_week='0-4',
                           hour=15,
                           minute=5,
                           second=0)

        # Start the scheduler
        self.sched.start()

        oplogs("schedulers started")

    def stop_jobs(self):
        if self.sched is not None:
            self.sched.shutdown()
        oplogs("schedulers stopped")

    def job_stock_monitor(self, valve=3.0):
        if self.enable is False:
            return

        oplogs("job_stock_monitor triggerred")
        group_alarms = self.stockmod.get_alarms(valve)
        for (group, alarms) in group_alarms.items():
            notice = ""
            for alarm in alarms:
                # alarm = [stockname,price,change,highlow,closeopen]
                alarm_content = "5分钟涨跌\n[%s %.2f%%]幅度:%.2f%%, 波动:%.2f%%\n" % (
                    alarm[0], alarm[2], alarm[4], alarm[3])
                notice += alarm_content
            if len(notice) > 0:
                print(notice)
                self.mouth(group, notice)
                time.sleep(1)

    def job_open_scan(self):
        oplogs("job_open_scan triggerred")
        self.job_stock_monitor()
        self.sched.resume_job(job_id="job_stock_monitor")

    def job_close_scan(self):
        oplogs("job_close_scan triggerred")
        self.sched.pause_job(job_id="job_stock_monitor")
        oplogs("job_stock_monitor paused")

    def job_forget(self, persist=False):
        # Clear memory
        if persist is True:
            # save memory before forgetting it
            pass
        self.memory.clear()
        oplogs("memory erased")

    def listen(self, group, user, msg):
        # Add one record to memory
        record = [time.strftime("%Y-%m-%d %H:%M:%S"), group, user, msg]
        self.stockmod.scan_stock(group, msg)
        self.memory.append(record)

    def save_to_db(self):
        oplogs("save_to_db called")
        # Save messages
        for msg in self.memory:
            self.cursor.execute(
                "INSERT INTO msgqueue (date, groupname, user, msg) VALUES (?, ?, ?, ?)",
                (msg[0], msg[1], msg[2], msg[3]))

        # Save the monitor list
        for (key, value) in self.stockmod.monitor_queue.items():
            for code in value:
                # groupname TEXT, code TEXT
                self.cursor.execute(
                    "INSERT INTO monitorstocks (groupname, code) VALUES (?, ?)",
                    (key, code))

        self.conn.commit()

    def load_from_db(self):
        pass

    def isCmd(self, cmd):
        if USER_CMD in cmd:
            return True
        for keyword in syscmd:
            if keyword in cmd:
                return True

        return False

    def action_user(self, cmd, group, user):

        cmds = cmd.lower().split()

        #"命令格式:
        # [添加监控]harry 股票名
        # [删除监控]harry del 股票名
        # [显示群推荐]harry 本群推荐
        # [显示群数据统计]harry stat
        # [显示报告]harry report"]
        # user 用户命令
        if len(cmds) == 1:
            return random.choice(auto_replys)

        if ('list' in cmds[1]) or ('本群推荐' in cmds[1]):
            oplogs("action_user:本群推荐 [%s]" % cmd)
            return self.stockmod.get_group_stock_price(group)
        elif 'del' == cmds[1]:
            return self.stockmod.del_from_list(group, cmds[2])
        elif 'stat' in cmds[1]:
            boottime = datetime.now() - self.starttime

            boothour = int(boottime.seconds / 3600)
            bootmin = int(boottime.seconds % 3600 / 60)
            stat = "机器人已运行:{}天{}小时{}分钟\n股价扫描任务:{}".format(
                boottime.days, boothour, bootmin, self.sched.state)

            return stat

        elif 'report' in cmds[1]:
            return "建设中"
        elif 'help' in cmds[1]:
            oplogs("action_user:help called")
            return auto_replys[0]

        elif 'ver' in cmds[1]:
            return HR__VERSION
        elif '启动' in cmds[1]:
            return "臣在"
        elif '关闭' in cmds[1]:
            return "跪安"
        else:
            oplogs("action_user:harry [stock] called %s" % cmd)
            if self.stockmod.isvalid_stock(cmds[1]):
                self.add_stock(group, cmds[1])
                return self.show_stock(cmds[1])

        return ""

    def action(self, cmd):
        cmds = cmd.split()

        # Admin backend control commands
        if not self.isCmd(cmds[0]):
            return ""

        # Strip the '/' and extract the control-command function name
        _cmd = cmds[0].strip('/')
        if len(cmds) > 1:
            return eval("self.{}({})".format(_cmd, cmds[1:]))
        else:
            return eval("self.{}()".format(_cmd))

    def ping(self, cmds=[]):
        #self.mouth('量化技术讨论','test')
        return 'pong'

    def help(self, cmds=[]):
        # Show command help
        return ("Help\n" + ",".join(syscmd))

    def show(self, cmds=[]):
        # Show online information and data
        # show gmsg/stock [stockname]/monitor/stats
        print("/show called")
        if cmds[0] == 'memory':
            return "Robot has %d messages in memory" % (len(self.memory))
        if cmds[0] == 'stock':
            return self.show_stock(cmds[1])
        if cmds[0] == 'groups':  # Show all supported groups
            return ",".join(self.support_groups)
        if cmds[0] == 'group':  # Show the stocks monitored for the given group
            if len(cmds) > 1 and cmds[1].isdigit():
                groupid = int(cmds[1])
            else:
                groupid = 0
            return self.action_user("harry 本群推荐", self.support_groups[groupid],
                                    "robot")
        if cmds[0] == 'mq':
            return str(self.stockmod.monitor_queue)
        if cmds[0] == 'stats':
            return "no supported cmd"
        return "no supported cmd"

    def show_stock(self, stock):
        if '机器人' in stock or '300024' in stock:
            return "harry拒绝关注其他机器人"

        if stock.isdigit():
            return self.stockmod.get_stock_price(stock)
        else:
            return self.stockmod.get_stock_price_by_name(stock)

    def cfg(self):
        # Modify the online configuration
        return ("/cfg called")

    def add(self, cmds=[]):
        if len(cmds) >= 2:
            # /add group [groupname], add a supported group
            if cmds[0] == "group":
                self.support_groups.append(cmds[1])
                return "group added"
        return "not supported command"

    def add_stock(self, group, stock):
        self.stockmod.add_to_monitor_group(group, stock)

    def delete(self, cmds=[]):
        if len(cmds) >= 2:
            # Remove a supported group
            if cmds[0] == "group":
                self.support_groups.remove(cmds[1])
                return "group deleted"
        return "not supported command"

    def is_support_group(self, group):
        return group in self.support_groups

    def test(self, cmd):
        return "test"

    def restart(self):
        # Restart the robot
        return "restarted"

    def start(self):
        # Start the robot
        self.enable = True
        return "臣在"

    def pause(self):
        self.enable = False
        return "跪安"

    def stop(self):
        print("stop called")
        # Take the robot offline
        self.enable = False
        self.stopcb()

    def use(self):
        # Module loading control
        # module management to be implemented here
        return "All mods loaded"

    def add_msg(self, msg):
        # save message
        self.memory.append(msg)

    def add_groups(self, groups):
        for group in groups:
            if group not in self.support_groups:
                self.support_groups.append(group)
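
# Rough usage sketch (hypothetical wiring) for the bot above. `my_stock_mod` stands in
# for a StockMod-like object, the group name is illustrative, and it assumes 'show' is
# one of the registered admin keywords in `syscmd`.
bot = HarryBotter(debug=True, stopcb=lambda: None)
bot.subscribe(lambda group, text: print("[%s] %s" % (group, text)))  # output callback
bot.install_mods(my_stock_mod)
bot.add_groups(['量化技术讨论'])
bot.listen('量化技术讨论', 'alice', 'harry 600000')   # recorded and scanned for stock codes
print(bot.action('/show memory'))                     # admin command dispatched via action()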
示例#49
0
class Scheduler:
    def __init__(self, timezone='UTC'):
        """Initialize scheduler."""
        self._scheduler = BackgroundScheduler(timezone=timezone)
        self._scheduler.add_listener(self._event_job_executed,
                                     EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self._scheduler.add_listener(self._event_job_added, EVENT_JOB_ADDED)
        self._scheduler.add_listener(self._event_job_removed,
                                     EVENT_JOB_REMOVED)

        self._status, self._data = {}, {}

    def _event_job_executed(self, event):
        """Update job data after execution."""
        t = arrow.utcnow().to(self._scheduler.timezone).format()
        d = str(event.exception) if event.exception else event.retval
        self._data[event.job_id].append([t, d])

    def _event_job_added(self, event):
        """Update job status and data after adding."""
        self._status[event.job_id] = 'started'
        self._data.setdefault(event.job_id, [])

    def _event_job_removed(self, event):
        """Update job status after removing."""
        self._status[event.job_id] = 'nonexistent'

    def start(self):
        """Start scheduler."""
        self._scheduler.start()

    def shutdown(self, wait=False):
        """Shutdown scheduler."""
        self._scheduler.shutdown(wait=wait)

    def add(self, id, func, trigger, **kwargs):
        """Add job."""
        self.remove(id)
        self._scheduler.add_job(id=id, func=func, trigger=trigger, **kwargs)

    def remove(self, id):
        """Remove job."""
        if self._status.get(id) in ('started', 'paused'):
            self._scheduler.remove_job(id)

    def job_result(self, id, latest=True):
        """Get job results."""
        r = self._data.get(id)
        return r if not latest else r[-1] if r else None

    def job_status(self, id):
        """Get job status.

        :return 'started', 'stopped', 'nonexistent'.
        """
        if self._status.get(id, None) is None:
            return 'nonexistent'
        return self._status.get(id)

    def job_next_run_time(self, id, fmt='YYYY-MM-DD HH:mm:ssZZ'):
        """Get job next run time as string."""
        if self._status.get(id):
            t = self._scheduler.get_job(id).next_run_time
            return arrow.get(t).format(fmt=fmt)
        return None

    def print(self):
        """Print jobs information."""
        self._scheduler.print_jobs()
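
# Usage sketch for the wrapper above; `fetch_quotes` is a stand-in job callable.
def fetch_quotes():
    return 'ok'

sched = Scheduler(timezone='UTC')
sched.start()
sched.add('quotes', fetch_quotes, 'interval', seconds=30)
print(sched.job_status('quotes'))         # 'started'
print(sched.job_next_run_time('quotes'))  # e.g. '2021-01-01 00:00:30+00:00'
print(sched.job_result('quotes'))         # latest [timestamp, result] once the job has run
sched.shutdown()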
示例#50
0
    def DScheduler(cls,
                   action,
                   start_date=None,
                   execute_date=None,
                   end_date=None,
                   execute_interval=3,
                   tz=None,
                   **kwargs):
        """
        一个依托于时间驱动的实时任务,action所挂载的任务由相应的时间驱动,这跟run方法由K线更新驱动不一样
        :param action:
        :param start_date:like '09:30:00'
        :param execute_date:like '09:30:00-11:30:00' or '09:30:00-11:30:00 13:00:00-15:00:00'
        :param end_date:like '15:00:00'
        :param execute_interval:连续任务的执行时间间隔,以秒计
        :param tz:时区
        :return:
        """
        fmt = '%Y-%m-%d %H:%M:%S'
        if start_date is not None:
            try:
                sdt = dt.datetime.strptime('2000-01-01 ' + start_date, fmt)
            except Exception:
                raise TypeError(
                    'start_date should be a time string like "09:30:00"')
        if execute_date is not None:
            try:
                xdt = []
                dts = execute_date.split(' ')
                for et in dts:
                    t = et.split('-')
                    s = dt.datetime.strptime('2000-01-01 ' + t[0], fmt)
                    e = dt.datetime.strptime('2000-01-01 ' + t[1], fmt)
                    # if s > e:
                    # If the execute start is later than the end, the window runs from today's end time to the next day's start
                    # raise TypeError('execute start datetime must less than end')
                    xdt.append([s, e])
                    del s, e, t
                del dts
            except Exception:
                raise TypeError(
                    'execute_date should be like "09:30:00-11:30:00" or'
                    ' "09:30:00-11:30:00 13:00:00-15:00:00"')
        if end_date is not None:
            try:
                edt = dt.datetime.strptime('2000-01-01 ' + end_date, fmt)
            except Exception:
                raise TypeError(
                    'end_date should be a time string like "15:00:00"')
        if tz is not None:
            if tz not in pytz.all_timezones:
                raise ValueError('this tz: %s not in pytz time zones' % tz)
            else:
                tz = pytz.timezone(tz)
        from apscheduler.triggers.date import DateTrigger
        from apscheduler.triggers.interval import IntervalTrigger
        from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
        while 1:
            # scheduler = BlockingScheduler(daemonic=False)
            # crt = CalfDateTime.now(tz, offset)
            crt = dt.datetime.now() if tz is None else dt.datetime.now(
                tz=tz).replace(tzinfo=None)
            tdy = dt.datetime(crt.year, crt.month, crt.day)
            # Non-trading day
            if not action.is_trade_day(tdy):
                print(fontcolor.F_RED + '-' * 80)
                print('Note:Non-transaction date;Datetime:' + str(crt))
                print('-' * 80 + fontcolor.END)
                delta = (tdy + dt.timedelta(days=1) - crt).seconds
                delta = 1 if delta < 1 else delta
                time.sleep(delta)  # sleep to next day
                continue
            # Trading day
            else:
                try:
                    nsds = list()
                    executors = {
                        'default': ThreadPoolExecutor(4),
                        'processpool': ProcessPoolExecutor(4)
                    }
                    job_defaults = {'coalesce': True, 'max_instances': 1}
                    scheduler = BackgroundScheduler(executors=executors,
                                                    job_defaults=job_defaults,
                                                    daemonic=False,
                                                    timezone=tz)
                    if start_date is not None:
                        d = tdy + dt.timedelta(hours=sdt.hour,
                                               minutes=sdt.minute,
                                               seconds=sdt.second)
                        nsds.append(d + dt.timedelta(days=1))

                        def action_start(**args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:start task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:
                                action.start(**args)
                            except Exception as ep:
                                ExceptionInfo(ep)

                        scheduler.add_job(func=action_start,
                                          trigger=DateTrigger(d),
                                          id='action_start',
                                          kwargs=kwargs)
                    if execute_date is not None:

                        def action_execute(**args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:execute task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:
                                action.execute(**args)
                            except Exception as ep:
                                ExceptionInfo(ep)

                        for x in xdt:
                            sd = tdy + dt.timedelta(hours=x[0].hour,
                                                    minutes=x[0].minute,
                                                    seconds=x[0].second)
                            ed = tdy + dt.timedelta(hours=x[1].hour,
                                                    minutes=x[1].minute,
                                                    seconds=x[1].second)
                            if sd > ed:
                                sd = sd + dt.timedelta(days=1)
                                scheduler.add_job(func=action_execute,
                                                  trigger=IntervalTrigger(
                                                      seconds=execute_interval,
                                                      start_date=ed,
                                                      end_date=sd),
                                                  kwargs=kwargs)
                            else:
                                scheduler.add_job(func=action_execute,
                                                  trigger=IntervalTrigger(
                                                      seconds=execute_interval,
                                                      start_date=sd,
                                                      end_date=ed),
                                                  kwargs=kwargs)
                            nsds.append(sd + dt.timedelta(days=1))

                    if end_date is not None:

                        def action_end(**args):
                            print(fontcolor.F_GREEN + '-' * 80)
                            print('Calf-Note:end task running on ',
                                  dt.datetime.now(tz=tz))
                            print('-' * 80 + fontcolor.END)
                            try:
                                action.end(**args)
                            except Exception as ep:
                                ExceptionInfo(ep)

                        d = tdy + dt.timedelta(hours=edt.hour,
                                               minutes=edt.minute,
                                               seconds=edt.second)
                        nsds.append(d + dt.timedelta(days=1))
                        scheduler.add_job(func=action_end,
                                          trigger=DateTrigger(d),
                                          id='action_end',
                                          kwargs=kwargs)
                    print(fontcolor.F_GREEN + '-' * 80)
                    print('Note:enter Calf real task and mount these tasks:')
                    scheduler.print_jobs()
                    print('Datetime:' + str(crt))
                    print('-' * 80 + fontcolor.END)
                    scheduler.start()
                    # Compute how long to sleep before the next start
                    if len(nsds) == 0:
                        break
                    # ed = CalfDateTime.now(tz, offset)
                    nd = dt.datetime.now() if tz is None else dt.datetime.now(
                        tz=tz).replace(tzinfo=None)
                    delta = (min(nsds) - nd)
                    delta = delta.seconds + delta.days * 86400
                    print(fontcolor.F_YELLOW + '-' * 80)
                    print(
                        'Note:Calf will sleep {0} seconds and restart on {1}:'.
                        format(delta, min(nsds)))
                    print('Datetime:', str(crt))
                    print('-' * 80 + fontcolor.END)
                    delta = 1 if delta < 1 else delta
                    time.sleep(delta)
                    scheduler.shutdown(wait=False)
                    del scheduler
                except Exception as e:
                    ExceptionInfo(e)
            pass
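
# Hypothetical invocation sketch for DScheduler above. The owning class name (`Calf`,
# inferred from the log strings) and `my_action` (an object providing
# is_trade_day/start/execute/end) are assumptions; the call blocks in its while loop.
# Calf.DScheduler(
#     action=my_action,
#     start_date='09:30:00',
#     execute_date='09:30:00-11:30:00 13:00:00-15:00:00',
#     end_date='15:00:00',
#     execute_interval=3,
#     tz='Asia/Shanghai',
# )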
示例#51
0
class JobQueue:
    """This class allows you to periodically perform tasks with the bot. It is a convenience
    wrapper for the APScheduler library.

    Attributes:
        scheduler (:class:`apscheduler.schedulers.background.BackgroundScheduler`): The APScheduler
        bot (:class:`telegram.Bot`): The bot instance that should be passed to the jobs.
            DEPRECATED: Use :attr:`set_dispatcher` instead.

    """
    def __init__(self) -> None:
        self._dispatcher: 'Dispatcher' = None  # type: ignore[assignment]
        self.logger = logging.getLogger(self.__class__.__name__)
        self.scheduler = BackgroundScheduler(timezone=pytz.utc)
        self.scheduler.add_listener(self._update_persistence,
                                    mask=EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # Dispatch errors and don't log them in the APS logger
        def aps_log_filter(record):  # type: ignore
            return 'raised an exception' not in record.msg

        logging.getLogger('apscheduler.executors.default').addFilter(
            aps_log_filter)
        self.scheduler.add_listener(self._dispatch_error, EVENT_JOB_ERROR)

    def _build_args(self,
                    job: 'Job') -> List[Union[CallbackContext, 'Bot', 'Job']]:
        if self._dispatcher.use_context:
            return [CallbackContext.from_job(job, self._dispatcher)]
        return [self._dispatcher.bot, job]

    def _tz_now(self) -> datetime.datetime:
        return datetime.datetime.now(self.scheduler.timezone)

    def _update_persistence(self, event: JobEvent) -> None:  # pylint: disable=W0613
        self._dispatcher.update_persistence()

    def _dispatch_error(self, event: JobEvent) -> None:
        try:
            self._dispatcher.dispatch_error(None, event.exception)
        # Errors should not stop the thread.
        except Exception:
            self.logger.exception(
                'An error was raised while processing the job and an '
                'uncaught error was raised while handling the error '
                'with an error_handler.')

    @overload
    def _parse_time_input(self, time: None, shift_day: bool = False) -> None:
        ...

    @overload
    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime,
                    datetime.time],
        shift_day: bool = False,
    ) -> datetime.datetime:
        ...

    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime,
                    datetime.time, None],
        shift_day: bool = False,
    ) -> Optional[datetime.datetime]:
        if time is None:
            return None
        if isinstance(time, (int, float)):
            return self._tz_now() + datetime.timedelta(seconds=time)
        if isinstance(time, datetime.timedelta):
            return self._tz_now() + time
        if isinstance(time, datetime.time):
            date_time = datetime.datetime.combine(
                datetime.datetime.now(
                    tz=time.tzinfo or self.scheduler.timezone).date(), time)
            if date_time.tzinfo is None:
                date_time = self.scheduler.timezone.localize(date_time)
            if shift_day and date_time <= datetime.datetime.now(pytz.utc):
                date_time += datetime.timedelta(days=1)
            return date_time
        # isinstance(time, datetime.datetime):
        return time

    def set_dispatcher(self, dispatcher: 'Dispatcher') -> None:
        """Set the dispatcher to be used by this JobQueue. Use this instead of passing a
        :class:`telegram.Bot` to the JobQueue, which is deprecated.

        Args:
            dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher.

        """
        self._dispatcher = dispatcher
        if dispatcher.bot.defaults:
            self.scheduler.configure(
                timezone=dispatcher.bot.defaults.tzinfo or pytz.utc)

    def run_once(
        self,
        callback: Callable[['CallbackContext'], None],
        when: Union[float, datetime.timedelta, datetime.datetime,
                    datetime.time],
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs once and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            when (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                         \
                  :obj:`datetime.datetime` | :obj:`datetime.time`):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (``datetime.tzinfo``) is :obj:`None`,
                  the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (``time.tzinfo``) is :obj:`None`, the
                  default timezone of the bot will be used.

            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)
        date_time = self._parse_time_input(when, shift_day=True)

        j = self.scheduler.add_job(
            callback,
            name=name,
            trigger='date',
            run_date=date_time,
            args=self._build_args(job),
            timezone=date_time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_repeating(
        self,
        callback: Callable[['CallbackContext'], None],
        interval: Union[float, datetime.timedelta],
        first: Union[float, datetime.timedelta, datetime.datetime,
                     datetime.time] = None,
        last: Union[float, datetime.timedelta, datetime.datetime,
                    datetime.time] = None,
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs at specified intervals and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            interval (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta`): The interval in which
                the job will run. If it is an :obj:`int` or a :obj:`float`, it will be interpreted
                as seconds.
            first (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (``datetime.tzinfo``) is :obj:`None`,
                  the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (``time.tzinfo``) is :obj:`None`, the
                  default timezone of the bot will be used.

                Defaults to ``interval``
            last (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Latest possible time for the job to run. This parameter will be interpreted
                depending on its type. See ``first`` for details.

                If ``last`` is :obj:`datetime.datetime` or :obj:`datetime.time` type
                and ``last.tzinfo`` is :obj:`None`, the default timezone of the bot will be
                assumed.

                Defaults to :obj:`None`.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        Note:
             `interval` is always respected "as-is". That means that if DST changes during that
             interval, the job might not run at the time one would expect. It is always recommended
             to pin servers to UTC time, then time related behaviour can always be expected.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        dt_first = self._parse_time_input(first)
        dt_last = self._parse_time_input(last)

        if dt_last and dt_first and dt_last < dt_first:
            raise ValueError("'last' must not be before 'first'!")

        if isinstance(interval, datetime.timedelta):
            interval = interval.total_seconds()

        j = self.scheduler.add_job(
            callback,
            trigger='interval',
            args=self._build_args(job),
            start_date=dt_first,
            end_date=dt_last,
            seconds=interval,
            name=name,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_monthly(
        self,
        callback: Callable[['CallbackContext'], None],
        when: datetime.time,
        day: int,
        context: object = None,
        name: str = None,
        day_is_strict: bool = True,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs on a monthly basis and adds it to the queue.

        Args:
            callback (:obj:`callable`):  The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            when (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``when.tzinfo``) is :obj:`None`, the default timezone of the bot will be used.
            day (:obj:`int`): Defines the day of the month whereby the job would run. It should
                be within the range of 1 and 31, inclusive.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            day_is_strict (:obj:`bool`, optional): If :obj:`False` and day > month.days, will pick
                the last day in the month. Defaults to :obj:`True`.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        if day_is_strict:
            j = self.scheduler.add_job(
                callback,
                trigger='cron',
                args=self._build_args(job),
                name=name,
                day=day,
                hour=when.hour,
                minute=when.minute,
                second=when.second,
                timezone=when.tzinfo or self.scheduler.timezone,
                **job_kwargs,
            )
        else:
            trigger = OrTrigger([
                CronTrigger(
                    day=day,
                    hour=when.hour,
                    minute=when.minute,
                    second=when.second,
                    timezone=when.tzinfo or self.scheduler.timezone,
                ),
                CronTrigger(
                    day='last',
                    hour=when.hour,
                    minute=when.minute,
                    second=when.second,
                    timezone=when.tzinfo or self.scheduler.timezone,
                ),
            ])
            j = self.scheduler.add_job(callback,
                                       trigger=trigger,
                                       args=self._build_args(job),
                                       name=name,
                                       **job_kwargs)

        job.job = j
        return job

    def run_daily(
        self,
        callback: Callable[['CallbackContext'], None],
        time: datetime.time,
        days: Tuple[int, ...] = Days.EVERY_DAY,
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs on a daily basis and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            time (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``time.tzinfo``) is :obj:`None`, the default timezone of the bot will be used.
            days (Tuple[:obj:`int`], optional): Defines on which days of the week the job should
                run (where ``0-6`` correspond to monday - sunday). Defaults to ``EVERY_DAY``
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        j = self.scheduler.add_job(
            callback,
            name=name,
            args=self._build_args(job),
            trigger='cron',
            day_of_week=','.join([str(d) for d in days]),
            hour=time.hour,
            minute=time.minute,
            second=time.second,
            timezone=time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_custom(
        self,
        callback: Callable[['CallbackContext'], None],
        job_kwargs: JSONDict,
        context: object = None,
        name: str = None,
    ) -> 'Job':
        """Creates a new customly defined ``Job``.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            job_kwargs (:obj:`dict`): Arbitrary keyword arguments. Used as arguments for
                ``scheduler.add_job``.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to ``None``.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        name = name or callback.__name__
        job = Job(callback, context, name, self)

        j = self.scheduler.add_job(callback,
                                   args=self._build_args(job),
                                   name=name,
                                   **job_kwargs)

        job.job = j
        return job

    def start(self) -> None:
        """Starts the job_queue thread."""
        if not self.scheduler.running:
            self.scheduler.start()

    def stop(self) -> None:
        """Stops the thread."""
        if self.scheduler.running:
            self.scheduler.shutdown()

    def jobs(self) -> Tuple['Job', ...]:
        """
        Returns a tuple of all *pending/scheduled* jobs that are currently in the ``JobQueue``.
        """
        return tuple(
            Job.from_aps_job(job, self) for job in self.scheduler.get_jobs())

    def get_jobs_by_name(self, name: str) -> Tuple['Job', ...]:
        """Returns a tuple of all *pending/scheduled* jobs with the given name that are currently
        in the ``JobQueue``"""
        return tuple(job for job in self.jobs() if job.name == name)
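
# Usage sketch for the JobQueue above, following the context-based callback API from
# the docstrings. The chat id and the `updater` (a python-telegram-bot Updater set up
# elsewhere) are illustrative assumptions.
import datetime
import pytz

def remind(context):
    context.bot.send_message(chat_id=context.job.context, text='Reminder!')

job_queue = updater.job_queue
job_queue.run_once(remind, when=30, context=123456789, name='one-off reminder')
job_queue.run_daily(remind,
                    time=datetime.time(hour=9, tzinfo=pytz.utc),
                    context=123456789)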
示例#52
0
        # fig.set_facecolor('#222222')
        # ax.set_facecolor('#222222')
        if download_plot:
            plt.savefig("data.png",
                        bbox_inches='tight',
                        pad_inches=0.05,
                        dpi=150)
        # close the plot
        plt.close('all')
        return

    plot_vaccine(merged_totals, dates, True)


scheduler = BackgroundScheduler()
scheduler.add_job(scheduled_update, 'interval', seconds=30)
scheduler.start()
atexit.register(lambda: scheduler.shutdown(wait=False))


def hello():
    response = jsonify("hello")
    response.headers.add("Access-Control-Allow-Origin", "*")
    return response


# Shut down the scheduler when exiting the app

web_site.run(host='0.0.0.0', port=PORT)
# web_site.run(host='0.0.0.0', port=8080)

# DEPRECATED FUNCTIONS FROM THE V1 WEBSITE
示例#53
0
def Assign(request,pk):
    homework_course = get_object_or_404(course, pk=pk)
    user = User.objects.get(name=request.session.get('user_name'))
    print(homework_course.course_name)
    #email_title = 'test'
    #email_body = '你该交作业啦!'
    #email = '*****@*****.**'  # recipient's email address
    #email_from = user.email
    #send_status = send_mail(email_title, email_body, email_from, [email])
    if request.method == "POST":
        print("343434")
        form = AssignForm(request.POST,request.FILES)
        if form.is_valid():
            print("ccccc")
            deadline = form.cleaned_data['deadline']
            name = form.cleaned_data['name']
            #new_homework = models.Homework.objects.create()
            # new_homework.name = name
            # new_homework.content = content
            # new_homework.course = homework_course
            # # new_homework.deadline = deadline
            # new_homework.save()
            c = datetime.datetime.now()
            d = datetime.datetime.strptime(deadline, "%Y-%m-%d %H:%M:%S")
            if d < c:
                message = "deadline不能设置过去的时间"
                return redirect('new_homework', pk=pk)
            else:
                print("dddd")
                homework=form.save(commit=False)
                homework.course = homework_course
                homework.save()
            try:
                sched = BackgroundScheduler()
                @sched.scheduled_job('interval', seconds=1)
                def timed_job():
                    a=datetime.datetime.now()
                    b=datetime.datetime.strptime(deadline,"%Y-%m-%d %H:%M:%S")

                    for user in homework.course.users.all():
                        if user.identity == "student":
                            flag = 0
                            for submit in homework.submit.all():
                                if submit.author.name == user.name:
                                    flag = 1

                            if flag == 0:
                                if b > a:
                                    if (b - a).seconds == 86399:
                                        email_title = '请尽快提交作业——作业提醒'
                                        email_body = '点击此处提交作业http://127.0.0.1:8000/course/'+pk+'/homework/'
                                        email = user.email  # recipient's email address
                                        send_status = send_mail(email_title, email_body, EMAIL_FROM, [email])
                sched.start()
            except Exception as e:
                print(e)
                sched.shutdown()

            return redirect('homework_list',pk=pk)
        print("Nononono")
        print(form.errors)
    else:
        print("ttt")
        assign_form = AssignForm()
    # print(models.Homework.objects.all())
    return render(request,'login/assign.html',locals())
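
# Alternative sketch: instead of the per-second polling job above, schedule a single
# one-shot reminder 24 hours before the deadline. `send_reminder` is a hypothetical
# helper wrapping the send_mail() loop.
from datetime import timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def schedule_deadline_reminder(deadline_dt, send_reminder):
    sched = BackgroundScheduler()
    sched.add_job(send_reminder, 'date', run_date=deadline_dt - timedelta(days=1))
    sched.start()
    return sched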
示例#54
0
def worker_loop(queue,
                parsed_args,
                setup_pyenv=False,
                single_experiment=False,
                fetch_artifacts=False,
                timeout=0):

    logger = logging.getLogger('worker_loop')

    hold_period = 4
    while queue.has_next():

        first_exp, ack_key = queue.dequeue(acknowledge=False)

        experiment_key = json.loads(first_exp)['experiment']['key']
        config = json.loads(first_exp)['config']
        parsed_args.config = config
        verbose = model.parse_verbosity(config.get('verbose'))
        logger.setLevel(verbose)

        logger.debug(
            'Received experiment {} with config {} from the queue'.format(
                experiment_key, config))

        executor = LocalExecutor(parsed_args)
        experiment = executor.db.get_experiment(experiment_key)

        if allocate_resources(experiment, config, verbose=verbose):

            def hold_job():
                queue.hold(ack_key, hold_period)

            hold_job()
            sched = BackgroundScheduler()
            sched.add_job(hold_job, 'interval', minutes=hold_period / 2)
            sched.start()

            try:
                if setup_pyenv:
                    logger.info('Setting up python packages for experiment')
                    pipp = subprocess.Popen(['pip', 'install'] +
                                            experiment.pythonenv,
                                            stdout=subprocess.PIPE,
                                            stderr=subprocess.STDOUT)

                    pipout, _ = pipp.communicate()
                    logger.info("pip output: \n" + pipout)

                    # pip.main(['install'] + experiment.pythonenv)

                for tag, art in experiment.artifacts.iteritems():
                    if fetch_artifacts or 'local' not in art.keys():
                        logger.info('Fetching artifact ' + tag)
                        if tag == 'workspace':
                            # art['local'] = executor.db.store.get_artifact(
                            #    art, '.', only_newer=False)
                            art['local'] = executor.db.store.get_artifact(
                                art, only_newer=False)
                        else:
                            art['local'] = executor.db.store.get_artifact(art)
                executor.run(experiment)
            finally:
                sched.shutdown()
                queue.acknowledge(ack_key)

            if single_experiment:
                logger.info('single_experiment is True, quitting')
                return
        else:
            logger.info('Cannot run experiment ' + experiment.key +
                        ' due to lack of resources. Will retry')
            time.sleep(config['sleep_time'])

        wait_for_messages(queue, timeout, logger)

        # queue = glob.glob(fs_tracker.get_queue_directory() + "/*")

    logger.info("Queue in {} is empty, quitting".format(
        fs_tracker.get_queue_directory()))
示例#55
0
class ListenerService(MashService):
    """
    Base class for MASH services that live in the image listener.
    """
    def post_init(self):
        """Initialize base service class and job scheduler."""
        self.listener_queue = 'listener'
        self.service_queue = 'service'
        self.job_document_key = 'job_document'
        self.listener_msg_key = 'listener_msg'

        self.jobs = {}

        # setup service job directory
        self.job_directory = self.config.get_job_directory(
            self.service_exchange)
        os.makedirs(self.job_directory, exist_ok=True)

        self.prev_service = self._get_previous_service()

        if not self.custom_args:
            self.custom_args = {}

        if 'job_factory' not in self.custom_args:
            raise MashListenerServiceException(
                'Job factory is required as a custom arg in listener service.')
        else:
            self.job_factory = self.custom_args['job_factory']

        logfile_handler = setup_logfile(
            self.config.get_log_file(self.service_exchange))
        self.log.addHandler(logfile_handler)

        self.bind_queue(self.service_exchange, self.job_document_key,
                        self.service_queue)
        self.bind_queue(self.prev_service, self.listener_msg_key,
                        self.listener_queue)

        thread_pool_count = self.custom_args.get(
            'thread_pool_count', self.config.get_base_thread_pool_count())
        executors = {'default': ThreadPoolExecutor(thread_pool_count)}
        self.scheduler = BackgroundScheduler(executors=executors, timezone=utc)
        self.scheduler.add_listener(
            self._process_job_result,
            events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)
        self.scheduler.add_listener(self._process_job_missed,
                                    events.EVENT_JOB_MISSED)

        signal.signal(signal.SIGINT, self.stop)
        signal.signal(signal.SIGTERM, self.stop)

        restart_jobs(self.job_directory, self._add_job)
        self.start()

    def _add_job(self, job_config):
        """
        Create job using job factory if job id does not already exist.

        Job config is persisted to disk if not already done.
        """
        job_id = job_config['id']

        if job_id not in self.jobs:
            try:
                job = self.job_factory.create_job(job_config, self.config)
            except Exception as error:
                self.log.error('Invalid job: {0}.'.format(error))
            else:
                self.jobs[job.id] = job
                job.log_callback = self.log

                if 'job_file' not in job_config:
                    job_config['job_file'] = '{0}job-{1}.json'.format(
                        self.job_directory, job_id)
                    persist_json(job_config['job_file'], job_config)
                    job.job_file = job_config['job_file']

                self.log.info('Job queued, awaiting listener message.',
                              extra=job.get_job_id())
        else:
            self.log.warning('Job already queued.', extra={'job_id': job_id})

    def _cleanup_job(self, job_id):
        """
        Job failed upstream.

        Delete job and notify the next service.
        """
        job = self.jobs[job_id]

        self.log.warning('Failed upstream.', extra=job.get_job_id())
        self._delete_job(job.id)

        message = self._get_status_message(job)
        self._publish_message(message, job.id)

    def _delete_job(self, job_id):
        """
        Remove job from file store and delete from listener queue.

        Also attempt to remove any running instances of the job.
        """
        if job_id in self.jobs:
            job = self.jobs[job_id]
            self.log.info('Deleting job.', extra=job.get_job_id())

            del self.jobs[job_id]
            remove_file(job.job_file)
        else:
            self.log.warning('Job deletion failed, job is not queued.',
                             extra={'job_id': job_id})

    def _get_previous_service(self):
        """
        Return the previous service based on the current exchange.
        """
        services = self.config.get_service_names()

        try:
            index = services.index(self.service_exchange) - 1
        except ValueError:
            return None

        if index < 0:
            return None

        return services[index]

    def _get_status_message(self, job):
        """
        Build and return json message.

        Message contains completion status to post to next service exchange.
        """
        key = '{0}_result'.format(self.service_exchange)
        return JsonFormat.json_message({key: job.get_status_message()})

    def _handle_listener_message(self, message):
        """
        Callback for listener messages.
        """
        listener_msg = self._get_listener_msg(
            message.body, '{0}_result'.format(self.prev_service))

        job_id = None
        if listener_msg:
            status = listener_msg['status']
            job_id = listener_msg['id']

        if job_id and job_id in self.jobs:
            job = self.jobs[listener_msg['id']]
            job.listener_msg = message
            job.set_status_message(listener_msg)

            if status == SUCCESS:
                self._schedule_job(job.id)
                return  # Don't ack message until job finishes
            else:
                self._cleanup_job(job_id)

        message.ack()

    def _handle_service_message(self, message):
        """
        Callback for events from jobcreator.
        """
        job_key = '{0}_job'.format(self.service_exchange)
        try:
            job_desc = json.loads(message.body)
            self._add_job(job_desc[job_key])
        except Exception as e:
            self.log.error('Error adding job: {0}.'.format(e))

        message.ack()

    def _process_job_result(self, event):
        """
        Callback when job background process finishes.

        Handles exceptions and errors that occur and logs info to the job log.
        """
        job_id = event.job_id
        job = self.jobs[job_id]
        metadata = job.get_job_id()

        self._delete_job(job_id)

        if event.exception:
            job.status = EXCEPTION
            msg = 'Exception in {0}: {1}'.format(self.service_exchange,
                                                 event.exception)
            job.add_error_msg(msg)
            self.log.error(msg, extra=metadata)
        elif job.status == SUCCESS:
            self.log.info('{0} successful.'.format(self.service_exchange),
                          extra=metadata)
        else:
            self.log.error('Error occurred in {0}.'.format(
                self.service_exchange),
                           extra=metadata)

        message = self._get_status_message(job)
        self._publish_message(message, job.id)
        job.listener_msg.ack()

    def _process_job_missed(self, event):
        """
        Callback when job background process misses execution.

        This should not happen since jobs are not scheduled on a timer; log any occurrences.
        """
        job_id = event.job_id
        job = self.jobs[job_id]
        metadata = job.get_job_id()

        self.log.warning('Job missed during {0}.'.format(
            self.service_exchange),
                         extra=metadata)

    def _publish_message(self, message, job_id):
        """
        Publish message to next service exchange.
        """
        try:
            self.publish_job_result(self.service_exchange, message)
        except AMQPError:
            self.log.warning('Message not received: {0}'.format(message),
                             extra={'job_id': job_id})

    def _schedule_job(self, job_id):
        """
        Schedule new job in background scheduler for job based on id.
        """
        try:
            self.scheduler.add_job(self._start_job,
                                   args=(job_id, ),
                                   id=job_id,
                                   max_instances=1,
                                   misfire_grace_time=None,
                                   coalesce=True)
        except ConflictingIdError:
            self.log.warning(
                'Job already running. Received multiple '
                'listener messages.',
                extra={'job_id': job_id})

    def _start_job(self, job_id):
        """
        Process job based on job id.
        """
        job = self.jobs[job_id]
        job.process_job()

    def _get_listener_msg(self, message, key):
        """Load json and attempt to get message by key."""
        try:
            listener_msg = json.loads(message)[key]
        except Exception:
            self.log.error('Invalid listener message: {0}, '
                           'missing key: {1}'.format(message, key))
            listener_msg = None

        return listener_msg

    def publish_job_result(self, exchange, message):
        """
        Publish the result message to the listener queue on given exchange.
        """
        self._publish(exchange, self.listener_msg_key, message)

    def start(self):
        """
        Start listener service.
        """
        self.scheduler.start()
        self.consume_queue(self._handle_service_message, self.service_queue,
                           self.service_exchange)
        self.consume_queue(self._handle_listener_message, self.listener_queue,
                           self.prev_service)

        try:
            self.channel.start_consuming()
        except Exception:
            self.stop()
            raise

    def stop(self, signum=None, frame=None):
        """
        Gracefully stop the service.

        Shutdown scheduler and wait for running jobs to finish.
        Close AMQP connection.
        """
        if signum:
            self.log.info(
                'Got a TERM/INTERRUPT signal, shutting down gracefully.')
        else:
            self.log.info('An unhandled Exception occurred in event loop, '
                          'shutting down gracefully.')

        self.scheduler.shutdown()
        self.close_connection()
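
# A minimal, self-contained sketch of the listener pattern ListenerService
# relies on: a job added without a trigger runs once, as soon as possible,
# and completion or failure is observed through add_listener(). The work()
# function, the on_done() callback and the job id are illustrative only.
import time

from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from apscheduler.schedulers.background import BackgroundScheduler


def work():
    print('processing job...')


def on_done(event):
    if event.exception:
        print('job {0} failed: {1}'.format(event.job_id, event.exception))
    else:
        print('job {0} finished'.format(event.job_id))


scheduler = BackgroundScheduler()
scheduler.add_listener(on_done, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
scheduler.start()
scheduler.add_job(work, id='demo-job', max_instances=1, coalesce=True)

time.sleep(2)  # give the one-shot job time to run
scheduler.shutdown()
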
Example #56
    time.sleep(600)
    pi.write(station[5], 1)
    time.sleep(1)
    pi.write(station[6], 0)
    time.sleep(600)
    pi.write(station[6], 1)
    running = False


if __name__ == '__main__':
    # Make sure that all stations are turned off initially
    all_off()

    scheduler = BackgroundScheduler()
    # Change this to manage what days and time you want to run your job
    scheduler.add_job(job1,
                      'cron',
                      day_of_week='mon,wed,fri,sun',
                      hour=6,
                      minute=30)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    server = ThreadedTCPServer(('', 5555), ThreadedServer)
    timer = ThreadedTimer()

    try:
        server.serve_forever()
    except (KeyboardInterrupt, SystemExit):
        pass

    # Clean up once the blocking server loop is interrupted
    all_off()
    client.close()
    scheduler.shutdown(wait=False)
Example #57
def runtimeReport():
    print('*' * 15 + 'Time Report! The time is: %s' % datetime.now())
    robot_timereport.sendTextEmail('Running report', 'program is running',
                                   'timereport')
    print('*' * 15 + 'Time Report over! The time is: %s' % datetime.now())


def Start():
    runtimeStart()
    run360()
    runWooYun()
    runFreeBuf()


if __name__ == '__main__':
    Start()
    scheduler = BackgroundScheduler()
    scheduler.add_job(run360, 'interval', seconds=1800)
    scheduler.add_job(runWooYun, 'interval', seconds=300)
    scheduler.add_job(runFreeBuf, 'interval', seconds=3600)
    scheduler.add_job(runtimeReport, 'interval', seconds=43200)
    scheduler.start()
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled, but should be done if possible
        scheduler.shutdown()
Example #58
class WebApp(Flask):
    """
    A web application.
    """

    def __init__(self, name, config):
        """
        Create a new web application.
        :param name: the application name.
        :param config: the configuration.
        """
        Flask.__init__(self, name)
        self.config.from_object(config)
        self.api = Api(self)

        # JSON Response
        self.json_encoder = SimpleJSONEncoder
        self.api.representations = OrderedDict(REPRESENTATIONS)

        # Error Handling
        for exc in default_exceptions:
            self.register_error_handler(exc, error_ctrl.handle_exception)
        self.register_error_handler(Exception, error_ctrl.handle_exception)

        # Logging
        level = logging._nameToLevel[self.config["LOG_LEVEL"]]

        logging.basicConfig(level=level, handlers=[ConsoleHandler(level, FORMATTER)])
        logging.getLogger("apscheduler.scheduler").setLevel(logging.ERROR)
        logging.getLogger("apscheduler.executors.default").setLevel(logging.ERROR)
        logging.getLogger("urllib3").setLevel(logging.ERROR)
        logging.getLogger("redis_lock").setLevel(logging.ERROR)
        self.logger.setLevel(level)
        for hdlr in self.logger.handlers:
            hdlr.setFormatter(FORMATTER)

        # Scheduler
        self.scheduler = None

    def start(self, host="0.0.0.0", port=None):
        """
        Start the application.
        :param host: the server address.
        :param port: the server port number.
        :return: None
        """
        if self.scheduler is not None:
            self.scheduler.start()
        Flask.run(self, host, port, threaded=True, use_reloader=False)

    def shutdown(self):
        """
        Shutdown the application.
        :return: None
        """
        fn_shutdown = request.environ.get("werkzeug.server.shutdown")
        if fn_shutdown is None:
            raise RuntimeError("Not running with the Werkzeug Server")
        fn_shutdown()

    def add_rest_api(self, res, url):
        """
        Register a REST interface.
        :param res: the resource.
        :param url: the url.
        :return: None
        """
        self.api.add_resource(res, url)

    def add_teardown_hook(self, func, *args, **kwargs):
        """
        Register a teardown hook.
        :param func: the function.
        :param args: optional arguments to pass to func.
        :param kwargs: optional keyword arguments to pass to func
        :return: None
        """
        self.teardown_appcontext(func)

    def add_shutdown_hook(self, func, *args, **kwargs):
        """
        Register a shutdown hook.
        :param func: the function.
        :param args: optional arguments to pass to func.
        :param kwargs: optional keyword arguments to pass to func
        :return: None
        """
        # Forward any provided arguments to the hook when it runs at exit
        atexit.register(func, *args, **kwargs)

    def add_scheduled_job(self, job):
        """
        Add a scheduler.
        :param job: a scheduler job
        :return: None
        """
        if self.scheduler is None:
            self.scheduler = BackgroundScheduler()
            self.add_shutdown_hook(self._shutdown_scheduler)

        self.scheduler.add_job(id=job.name, func=job.func, kwargs=job.kwargs,
                               trigger=job.trigger, max_instances=1, coalesce=True)

    def _start_scheduler(self):
        """
        Start the app scheduler.
        :return: None
        """
        self.scheduler.start()

    def _shutdown_scheduler(self):
        """
        Shut down the app scheduler.
        :return: None
        """
        if self.scheduler is not None and self.scheduler.state != STATE_STOPPED:
            self.scheduler.shutdown()
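
# A minimal sketch of the lazy-scheduler pattern WebApp uses: the
# BackgroundScheduler is only created when the first job is registered, and
# an atexit hook guarantees it is shut down with the process. The heartbeat
# job and its 30-second interval are illustrative values, not from the source.
import atexit

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.base import STATE_STOPPED

_scheduler = None


def add_scheduled_job(job_id, func, trigger='interval', **trigger_args):
    global _scheduler
    if _scheduler is None:
        _scheduler = BackgroundScheduler()
        _scheduler.start()
        atexit.register(_shutdown_scheduler)
    _scheduler.add_job(id=job_id, func=func, trigger=trigger,
                       max_instances=1, coalesce=True, **trigger_args)


def _shutdown_scheduler():
    if _scheduler is not None and _scheduler.state != STATE_STOPPED:
        _scheduler.shutdown()


add_scheduled_job('heartbeat', lambda: print('still alive'), seconds=30)
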
Example #59
class ChromecastDriver(object):
    class Listener(object):
        def __init__(self, expected_url_prefix,
                     callback_another_cast_started):
            self.expected_url_prefix = expected_url_prefix
            self.callback_another_cast_started = callback_another_cast_started
            self.last_media = None

        def new_media_status(self, status):
            # Sometimes an empty status appears. It should be safe to ignore,
            # since if content_id is None then no one can be casting. I hope.
            if status.content_id is None:
                return

            self.last_media = status.content_id

            # If content_id doesn't have our server prefix someone else started
            # casting and we should seppuku
            if self.last_media.find(self.expected_url_prefix) != 0:
                self.callback_another_cast_started()

    def __init__(self, logger, target_chromecast_name, img_url_provider, interval_seconds):
        """
        Run a background task: every $interval seconds to load a new url
        in a chromecast, as provided by $img_url_provider
        Constructor will look for all chromcasts in the network (ie: slow)
        target_chromecast: Name of CC to use

        $img_url_provider should provide a unique URL each time it's called
        """
        self.cleanup_on_exit = True
        self.logger = logger
        self.target_chromecast_name = target_chromecast_name
        self.img_url_provider = img_url_provider
        self.interval_seconds = interval_seconds

        # Try to find the right chromecast
        self.logger.info('Looking for all Chromecasts in the network')
        all_casts = pychromecast.get_chromecasts()
        try:
            self.cast = next(cc for cc in all_casts
                            if cc.device.friendly_name == target_chromecast_name)
        except StopIteration:
            all_casts_names = [cc.device.friendly_name for cc in all_casts]
            self.logger.error('Chromecast {} not found. These are available: {}'.format(
                                target_chromecast_name, all_casts_names))
            raise pychromecast.NoChromecastFoundError()

        self.logger.info('Found {}, connecting...'.format(self.target_chromecast_name))
        self.cast.wait()
        self.cast.quit_app()
        self.cast.wait()

        # Register callback for status changes
        self.cc_listener = ChromecastDriver.Listener(img_url_provider.get_url_prefix(),
                                self.on_another_cast_started)
        self.cast.media_controller.register_status_listener(self.cc_listener)

        # Call show_image once to load the first one (otherwise we need to
        # wait for the first interval trigger)
        self.show_image()

        # Call self every $interval_seconds to reload image
        self.scheduler = BackgroundScheduler()
        self.sched_job_obj = self.scheduler.add_job(func=self.show_image,
                               trigger="interval", seconds=interval_seconds)
        self.scheduler.start()

        # TODO: Move from atexit to main obj?
        atexit.register(self.disconnect)

    def on_another_cast_started(self):
        self.logger.info('Someone else started casting to {}! Will shutdown...'.\
                            format(self.target_chromecast_name))
        # pychromecast doesn't like shutting down while on a listener thread, so
        # instead we remove our 'show new image' job and schedule a disconnect
        self.sched_job_obj.remove()
        self.cleanup_on_exit = False
        print("RQ CLEAN SHUTDOWN")
        self.scheduler.add_job(func=self.disconnect, 
                               trigger="interval", seconds=1)

    def disconnect(self):
        self.logger.info('Shutdown: disconnecting from Chromecast')
        self.scheduler.shutdown()

        if self.cleanup_on_exit:
            self.cast.quit_app()
            self.cast.wait()

        self.cast.disconnect()
        self.cast.join()

    def show_image(self):
        url = self.img_url_provider.get_random_image_url()
        self.logger.info('Asking CC {} to load image {}'.format(self.target_chromecast_name, url))
        # TODO: Hardcoded mime type might break
        self.cast.play_media(url=url, content_type='image/jpeg')
        self.cast.wait()

        # TODO: Configure timeout count
        timeout_count = 5
        while timeout_count > 0:
            if self.cc_listener.last_media == url:
                self.logger.debug('Image should be shown')
                break

            timeout_count -= 1
            time.sleep(1)

        if timeout_count == 0:
            self.logger.error('Image display seems to have failed')
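
# A minimal sketch of the job-handle pattern ChromecastDriver uses: keep the
# Job object returned by add_job() so a single job can be removed later (for
# example from a callback) without tearing down the whole scheduler, and defer
# heavier cleanup to a separate one-shot job. tick() and cleanup() are
# illustrative stand-ins for show_image() and disconnect().
import time

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print('refreshing image...')


def cleanup():
    print('disconnecting...')


scheduler = BackgroundScheduler()
refresh_job = scheduler.add_job(func=tick, trigger='interval', seconds=2)
scheduler.start()

time.sleep(5)
refresh_job.remove()             # cancel only the refresh job
scheduler.add_job(func=cleanup)  # one-shot job, runs as soon as possible

time.sleep(1)
scheduler.shutdown()
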
Example #60
class Client(object):
    """The propsd client

  Keyword Args:
    propsd_server (str): The server hostname/ip address (default localhost)
    propsd_port (int): The server port (default 9100)
  """
    def __init__(self, propsd_server='localhost', propsd_port=9100):
        self.propsd_server = propsd_server
        self.propsd_port = propsd_port
        self.__update_callbacks = []
        self.__update_properties_previous = {}
        self.__update_scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'class': 'apscheduler.jobstores.memory:MemoryJobStore',
            },
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '1'
            },
            'apscheduler.job_defaults.coalesce': 'true',
            'apscheduler.job_defaults.max_instances': '1',
            'apscheduler.timezone': 'UTC',
        })
        self.__update_job = self.__update_scheduler.add_job(
            self.__update_properties,
            'interval',
            seconds=1,
            id='update-check-job')
        self.__update_scheduler.start()

    def get(self, key):
        """Gets a specific property

    Args:
      key (str): The key to retrieve

    Returns:
      str: The property value, or None.
    """
        try:
            response = requests.get(
                "http://%s:%d/v1/conqueso/api/roles/default/properties/%s" %
                (self.propsd_server, self.propsd_port, key))
            return response.text
        except Exception:
            log.warn("Could not retrieve property value")

    def properties(self):
        """Gets all propsd properties

    Returns:
      dict: The complete propsd property set
    """
        try:
            response = requests.get("http://%s:%d/v1/properties" %
                                    (self.propsd_server, self.propsd_port))
            return json.loads(response.text)
        except Exception:
            log.warn("Could not retrieve property value")

    def status(self):
        """Gets the status of the propsd service

    Returns:
      dict: A dictionary containing the status parameters.
    """
        response = requests.get("http://%s:%d/v1/status" %
                                (self.propsd_server, self.propsd_port))
        return json.loads(response.text)

    def health(self):
        """Gets the health of the propsd service

    Returns:
      dict: A dictionary containing the health parameters.
    """
        response = requests.get("http://%s:%d/v1/health" %
                                (self.propsd_server, self.propsd_port))
        return json.loads(response.text)

    def subscribe(self, search, callback):
        """Subscribe to document changes

    Args:
      search (str): The objectpatch search string
      callback (object): The function to call
    """
        self.__update_callbacks.append({
            'search': search,
            'callback': callback
        })

    def shutdown(self):
        """Shuts down the propsd client
    """
        self.__update_scheduler.shutdown()

    def __update_properties(self):
        properties = self.properties()
        for item in self.__update_callbacks:
            search = item['search']
            thistree = objectpath.Tree(properties)
            thisresult = thistree.execute(search)
            thattree = objectpath.Tree(self.__update_properties_previous)
            thatresult = thattree.execute(search)

            if thisresult != thatresult:
                item['callback'](search, properties, thisresult)

        self.__update_properties_previous = properties
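
# For comparison with the 'apscheduler.*' config dict used by Client above,
# this is a minimal sketch of the equivalent explicit configuration: a
# single-worker thread pool, an in-memory job store, coalescing, one instance
# per job, and UTC. The poll() function stands in for __update_properties.
from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.schedulers.background import BackgroundScheduler
from pytz import utc


def poll():
    print('checking for property changes...')


scheduler = BackgroundScheduler(
    jobstores={'default': MemoryJobStore()},
    executors={'default': ThreadPoolExecutor(max_workers=1)},
    job_defaults={'coalesce': True, 'max_instances': 1},
    timezone=utc)
scheduler.add_job(poll, 'interval', seconds=1, id='update-check-job')
scheduler.start()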