Example #1
class LightManager(object):
    def __init__(self):
        self.setterZones = [LightSetter(zone.mode, zone.pinout, description=zone.description) for zone in zones]
        self.defaultSetterZone = self.setterZones[0]
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def setLights(self, color, zone=None):
        if not zone:
            self.defaultSetterZone.setLights(color)
        elif int(zone) in range(len(self.setterZones)):
            self.setterZones[int(zone)].setLights(color)
        else:
            raise InvalidZoneException

    def setEvent(self, secondsUntilEvent, color, zone=None):
        eventTime = datetime.now()+timedelta(seconds=secondsUntilEvent)
        eventId = str(hash((eventTime, color)))
        self.scheduler.add_job(self.setLights, args=(color,zone), next_run_time=eventTime, id=eventId)
        return eventId

    def cancelEvent(self, eventId):
        self.scheduler.remove_job(eventId)

    def getZoneInfo(self):
        return {str(i): {'type': zone.mode, 'description': zone.description} for i, zone in enumerate(self.setterZones)}
Example #2
def main():
	plugin_list= cf.sections()
	print(plugin_list)
	notif = pynsca.NSCANotifier(cf.monitor_server, password=cf.nsca_pass)
	plugin_scheduler = BackgroundScheduler()
	try:	
		for svc_name in plugin_list:
			if str(svc_name) == 'defaults': pass
			else:
				logging.info("%s loading" % str(svc_name))
				cmd=cf.g(svc_name,"check")
				if cmd is None:
					logging.error("%s check does not exist" % svc_name)
					sys.exit(2)
				plugin_interval = cf.g(svc_name,"interval")
				if plugin_interval is None:
					continue
				plugin_scheduler.add_job(func=check_execute, args=[cmd, svc_name, notif], trigger='interval', seconds=plugin_interval, id=svc_name)
				logging.info("%s loaded" %str(svc_name))
		plugin_scheduler.start()
		while True:
			time.sleep(2)
	except Exception as e:
		logging.info("Agent boot up error: %s" % str(e))
		sys.exit(1)
Example #3
class TestPySpout(basesinfonierspout.BaseSinfonierSpout):

    def __init__(self):

        basesinfonierspout.BaseSinfonierSpout.__init__(self)

    def useropen(self):

        # Using deque as a queue
        self.queue = deque()
        
        self.frequency = int(self.getParam("frequency"))
        
        # This scheduler launches self.job function every X seconds
        self.sched = BackgroundScheduler()
        self.sched.add_job(self.job, "interval", seconds=self.frequency, id="testpy")
        self.sched.start()

    def usernextTuple(self):

        # If there are items in self.queue, get the first one (.popleft()), do what you want with it and emit the tuple
        if self.queue:
            self.addField("timestamp",self.queue.popleft())
            self.emit()
    
    def userclose(self):
        
        pass
    
    def job(self):
        
        self.queue.append(str(int(time.time())))
Example #4
def start():
    print('cron started')
    scheduler = BackgroundScheduler()
    scheduler.add_job(run_daily, 'interval', days=1)
    scheduler.add_job(run_sync, 'interval', minutes=5,
                      max_instances=1, next_run_time=datetime.now())
    scheduler.start()
Example #5
def start_scheduler():
    """ Creates the scheduler and adds all basic tasks.
    Returns:

    """
    global scheduler
    scheduler = BackgroundScheduler(executors=app.config['SCHEDULER_EXECUTORS'],
                                    job_defaults=app.config['SCHEDULER_DEFAULTS'])
    scheduler.remove_all_jobs()
    scheduler.add_job(meassure, 'cron', minute='0,15,30,45', id='meassure')
    # Light devices.
    for light_device in LightDevice.get_active():
        start_light_tasks(light_device)
    # Water devices.
    for active_water_device in WaterDevice.get_turned_on():
        if active_water_device.switch_off_time <= datetime.now():  # switch now if event in past
            stop_water(active_water_device.id)
        else:
            scheduler.add_job(stop_water, 'date', run_date=active_water_device.switch_off_time,
                              args=[active_water_device.id],
                              misfire_grace_time=10000000,
                              id='water_off_' + active_water_device.name)
    # Subscribers.
    scheduler.add_job(update_subscribers, 'cron',
                      hour='15', id='update_subscribers')
    # Webcam
    scheduler.add_job(webcam, 'cron',
                      minute='1,31', id='webcam')
    webcam()
    # Time Lapse
    # scheduler.add_job(time_lapse, 'cron',
    #                   hour='12,18', id='timelapse')
    # time_lapse()
    print("Scheduler started")
    scheduler.start()
Example #6
def main():
    scheduler = Scheduler()
    servermap = ServerMap()
    scheduler.add_job(servermap.reload, trigger='cron', minute='*/5')
    scheduler.add_job(servermap.main, trigger='cron', minute='*/1')
    scheduler.start()
    dashboard.run()
Example #7
def schedule_notices():
    sched = BackgroundScheduler()
    sched.start()


    trigger = CronTrigger(day_of_week='*', hour=17)
    sched.add_job(send_notice, trigger)
Example #8
def initialize():
	"""Setting a schedule for the background process"""
	with app.app_context():
		print("Scheduling...")
		apsched = BackgroundScheduler()
		apsched.add_job(run_check, 'interval', seconds=60)
		apsched.start()
Example #9
def go(managedNamespace):
	statusMgr = statusDbManager.StatusResource()
	managedNamespace.run = True
	managedNamespace.serverRun = True

	checkInitDbs()

	server_process = multiprocessing.Process(target=serverProcess, args=(managedNamespace,))

	sched = BackgroundScheduler()

	scheduleJobs(sched, managedNamespace)
	server_process.start()
	sched.start()

	loopCtr = 0
	while managedNamespace.run:
		time.sleep(0.1)

		if loopCtr % 100 == 0:
			for job in sched.get_jobs():
				statusMgr.updateNextRunTime(job.name, job.next_run_time.timestamp())
		loopCtr += 1

	sched.shutdown()
	server_process.join()
Example #10
class JobScheduler(object):

    def __init__(self, config):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(
            self.job_process,
            'interval',
            seconds=config['JOB_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.scheduler.add_job(
            self.notification_job_process,
            'interval',
            seconds=config['DELIVERY_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.config = config

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown(wait=True)

    def job_process(self):
        process_jobs(self.config)

    def notification_job_process(self):
        process_notification_job(self.config)
Example #11
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler=BlockingScheduler()
        self.scheduler.start()
    def start(self):
        self.oncescheduler.start()
    def addschedule(self, event=None, day_of_week='0-6', hour='11', minute='57', second='0', id='', type='cron', run_date='', args=None):
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == 'date':
            if run_date == '':
                self.oncescheduler.add_job(event, args=args)
            else:
                self.oncescheduler.add_job(event, 'date', run_date=run_date, args=args)
        elif type == 'back':
            self.oncescheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
    def removeschedule(self,id):
        self.scheduler.remove_job(id)
Example #12
def mail2diaspora(config_pathname):

    # configure logging
    logger = logging.getLogger(__name__)
    configure_logging(logging.INFO)

    logger.info("Start mail2diaspora application")
    config.initialize(config_pathname)

    os.chdir(config.get(config.TEMP))

    # cron email fetcher
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        diaspora.mail_poll, "interval", seconds=config.getInt(config.MAIL_POLLING)
    )
    scheduler.start()

    print("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()

    logger.info("Stop mail2diaspora application")
Example #13
def go():
	preflight()

	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*60)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*15)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*5)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=20)
	startTime = datetime.datetime.now()+datetime.timedelta(seconds=10)
	scheduleJobs(sched, startTime)
	sched.start()

	# spinwait for ctrl+c, and exit when it's received.
	loops = 0
	while runStatus.run:
		time.sleep(0.1)
		# loops += 1
		# if loops > 100:
		# 	logging_tree.printout()
		# 	loops = 0

	print("Scraper stopping scheduler")
	sched.shutdown()
	nt.dirNameProxy.stop()
Example #14
def run_scheduler():
    scheduler = BackgroundScheduler()
    # scheduler.add_job(func, "interval", days=1)
    scheduler.add_job(check_overdue, "interval", days=1)
    # scheduler.add_job(send_mail_test, "interval", minutes=1)
    scheduler.start()
    print("Scheduler started!")
Example #15
File: quote.py  Project: xujhao/py
class RealTimeQuote(object):
    def __init__(self, cf, codelist, eventEngine_):
        self._codelist = codelist
        logger.info("codelist:%s", self._codelist)
        self._eventEngine = eventEngine_
        #self._eventEngine.register(EVENT_TIMER, self.TimerCall)
        self._sched  = BackgroundScheduler()

    def start(self):
        self._sched.add_job(self.TimerCall, 'interval',  seconds=3)
        self._sched.start()
        logger.info('RealTimeQuote start')

    def stop(self):
        logger.info('RealTimeQuote stop')
        self._sched.shutdown()

    def TimerCall(self):
        '''
        Periodically fetch the latest quotes for the code list
        :return:
        '''
        if len(self._codelist) < 1:
            return

        rtQuote = GetRealTimeQuote(self._codelist)
        for i in range(rtQuote.shape[0]):
            itQuote = rtQuote.iloc[i]
            if float(itQuote['amount']) <= 0.01:
                continue
            event = Event(type_=EVENT_MARKETDATA_CONTRACT + itQuote['code'])
            event.dict_['tick'] = itQuote
            self._eventEngine.put(event)
Example #16
def init():
    scheduler = BackgroundScheduler()
    scheduler.start()
    scheduler.add_job(dochatcleanup, 'interval',
                      minutes=int(plugin.config.config('sleep')),
                      id='dochatcleanup', replace_existing=True)
    return
Example #17
    def __init__(self, profile):
        self._logger = logging.getLogger(__name__)
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = []
        
        self._logger.debug('Initializing Notifier...')

        if 'gmail_address' in profile and 'gmail_password' in profile:
            self.notifiers.append(self.NotificationClient(
                self.handleEmailNotifications, None))
        else:
            self._logger.warning('gmail_address or gmail_password not set ' +
                                 'in profile, Gmail notifier will not be used')

        if 'ssh_auth_log' in profile:
            self.notifiers.append(self.NotificationClient(
                    self.handleSSHAuthNotifications, None))
        else:
            self._logger.warning('ssh_auth_log not set, ' +
                                 'SSH login notifier will not be used')

        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        sched = BackgroundScheduler(timezone="UTC", job_defaults=job_defaults)
        sched.start()
        sched.add_job(self.gather, 'interval', seconds=30)
        atexit.register(lambda: sched.shutdown(wait=False))
        
        # put the scheduler in Notifier object for reference
        self._sched = sched
Example #18
class timer:
    '''
    process = timer(function, [para1, para2, ...], intervalseconds, id)
    process.run()
    '''
    def __init__(self, func, paras, seconds, id):
        self.func = func
        self.paras = paras
        self.time = seconds
        self.id = id
        self.scheduler = None
        self.setTimer()

    def setTimer(self):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(self.func, 'interval', args=self.paras, seconds=self.time, id=self.id)

    def add_job(self, func, seconds, id):
        self.scheduler.add_job(func, 'interval', seconds=seconds, id=id)

    def remove_job(self, id):
        self.scheduler.remove_job(id)


    def run(self):
        self.scheduler.start()
Example #19
File: __init__.py  Project: c2corg/v6_api
def configure_scheduler_from_config(settings):
    scheduler = BackgroundScheduler()
    scheduler.start()

    # run `purge_account` job at 0:00
    scheduler.add_job(
        purge_account,
        id='purge_account',
        name='Purge accounts which were not activated',
        trigger='cron',
        hour=0,
        minute=0
    )

    # run `purge_token` job at 0:30
    scheduler.add_job(
        purge_token,
        id='purge_token',
        name='Purge expired tokens',
        trigger='cron',
        hour=0,
        minute=30
    )

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)

    atexit.register(lambda: scheduler.shutdown())
Example #20
    def post(self, action, position = ''):
        global scheduler
        self.checkStartup()
        
        if action == 'play':
            runCommand('mpc play ' + position)
            #Settings.set('radio', 'state', 'play')
            
            if scheduler is None:
                scheduler = BackgroundScheduler()
                scheduler.add_job(self.checkStatus, 'interval', seconds=30, id='checkStatus', replace_existing=True)
                scheduler.start()
        elif action == 'stop':
            runCommand('mpc stop')
            #Settings.set('radio', 'state', 'stop')
            
            if scheduler is not None:
                scheduler.remove_job('checkStatus')
                scheduler.shutdown()
                scheduler = None
            return {'playMode': 'stopped'}
        elif action =='pause':
            runCommand('mpc pause')
        elif action =='next':
            runCommand('mpc next')
        elif action =='previous':
            runCommand('mpc prev')
        else:
            return {'playMode': 'invalid'}

        (out, err) = runCommand('mpc status')
        if err:
            return {'error': err}, 500
        return {'playMode': Parser.parsePlayMode(out)}
Example #21
def startScheduler():
    db.create_all()
    #create default roles!
    if not db.session.query(models.Role).filter(models.Role.name == "admin").first():
        admin_role = models.Role(name='admin', description='Administrator Role')
        user_role = models.Role(name='user', description='User Role')
        db.session.add(admin_role)
        db.session.add(user_role)
        db.session.commit()
        
    try:
        import tzlocal

        tz = tzlocal.get_localzone()
        logger.info("local timezone: %s" % tz)
    except:
        tz = None

    if not tz or tz.zone == "local":
        logger.error('Local timezone name could not be determined. Scheduler will display times in UTC for any log '
                     'messages. To resolve this set up /etc/timezone with the correct time zone name.')
        tz = pytz.utc
    # in debug mode this is executed twice :(
    # DON'T run flask in auto-reload mode when testing this! (a guard is sketched after this example)
    scheduler = BackgroundScheduler(logger=sched_logger, timezone=tz)
    scheduler.add_job(notify.task, 'interval', seconds=config.SCAN_INTERVAL, max_instances=1,
                      start_date=datetime.datetime.now(tz) + datetime.timedelta(seconds=2))
    scheduler.start()
    sched = scheduler
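
The double execution noted in the comments above happens because Flask's debug reloader imports the module twice: once in the monitoring parent process and once in the child that actually serves requests. A minimal guard, sketched here under the assumption that the project's Flask application object is available as app (it is not shown in the snippet), checks the environment variable Werkzeug sets in the serving child:

import os

def start_scheduler_once(app):
    # Werkzeug's reloader sets WERKZEUG_RUN_MAIN to "true" only in the child
    # process that actually serves requests, so starting the scheduler there
    # (or whenever debug mode is off) avoids launching two schedulers.
    if not app.debug or os.environ.get("WERKZEUG_RUN_MAIN") == "true":
        startScheduler()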
Example #22
def test6():
    """Schedule a job to run repeatedly, pause it, resume it, and inspect the Job instance."""
    start_time = time.time()
    scheduler = BackgroundScheduler()
    job = scheduler.add_job(my_job, 'interval', args=('123',), seconds=1, id='my_job_id')  # run my_job every second; args are passed to my_job; id may be omitted
    print("job id: {}, job name: {}, job args: {}, job func: {}, trigger: {}".format(job.id, job.name, job.args, job.func, job.trigger))
    scheduler.start()  # start() does not block: the code below keeps running while the job executes in the background
    print('reached this point 1')
    while scheduler.state:
        if time.time() - start_time > 5:
            print('pausing the job')
            # scheduler.pause()  # pause the whole scheduler
            job.pause()  # pause this single job
            break
    print('resuming the job')
    if time.time() - start_time > 5:
        # scheduler.resume()  # resume the whole scheduler
        job.resume()  # resume this single job
    time.sleep(4)
    print('current job list: {}'.format(scheduler.get_jobs()))  # get_jobs() returns all Job instances known to the scheduler
    scheduler.get_job('my_job_id')  # fetch the Job instance whose id is 'my_job_id'

    scheduler.print_jobs()  # print a formatted list of all jobs

    print('removing the job')
    # scheduler.remove_job('my_job_id')  # remove the job with id 'my_job_id'
    # scheduler.remove_all_jobs()  # remove all jobs
    job.remove()  # remove this single job
Example #23
File: mainWeb.py  Project: GDXN/MangaCMS
def run_web():

	nt.dirNameProxy.startDirObservers()


	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
	sched.start()


	x = 60
	for name, classInstance in nt.__dict__.items():

		# look up all class instances in nameTools. If they have the magic attribute "NEEDS_REFRESHING",
		# that means they support scheduling, so schedule the class in question.
		# To support auto-refreshing, the class needs to define:
		# cls.NEEDS_REFRESHING = {anything, just checked for existance}
		# cls.REFRESH_INTERVAL = {number of seconds between refresh calls}
		# cls.refresh()        = Call to do the refresh operation. Takes no arguments.
		#
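		# (a minimal, self-contained sketch of a class following this convention appears after this example)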
		if  isinstance(classInstance, type) or not hasattr(classInstance, "NEEDS_REFRESHING"):
			continue

		sched.add_job(classInstance.refresh,
					trigger='interval',
					seconds=classInstance.REFRESH_INTERVAL,
					start_date=datetime.datetime.now()+datetime.timedelta(seconds=20+x),
					jobstore='main_jobstore')

		x += 60*2.5


	# It looks like cherrypy installs a ctrl+c handler, so I don't need to.
	webserver_process.serverProcess()
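
The comment block in run_web() above describes the refresh convention (NEEDS_REFRESHING, REFRESH_INTERVAL, refresh()) without showing a conforming class. A minimal, self-contained sketch of what such a class could look like follows; the name and cache logic are illustrative only and are not taken from the MangaCMS project:

class DirNameLookup(object):
    # run_web() only checks that this attribute exists; its value is not used.
    NEEDS_REFRESHING = True

    # Seconds between scheduled refresh() calls.
    REFRESH_INTERVAL = 60 * 15

    def __init__(self):
        self.cache = self.buildCache()

    def refresh(self):
        # Rebuild whatever cached state the instance holds; takes no arguments.
        self.cache = self.buildCache()

    def buildCache(self):
        return {}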
Example #24
File: like.py  Project: detorto/tulen
class Processor:

	def like_wall(self):
		print("Will like a wall!")
		try:
			news = self.user.get_news(random.randint(10, 100))
			print("News: ", len(news))
			for n in news:
				likes = n.get("likes", None)
				if likes and likes["user_likes"] == 0 and likes["can_like"] == 1:
					print("LIKE", n["post_id"])
					self.user.like_post(n["post_id"], n["source_id"])
					print("Sleeep")
					time.sleep(random.uniform(0, 5))
					print("Done")

		except Exception:
			print("Error in like")


	def __init__(self, user):
		self.user = user
		self.sched = BackgroundScheduler()
		self.sched.add_job(self.like_wall, "interval", seconds=60)
		self.sched.start()


	def process_message(self, message, chatid, userid):
		return
Example #25
def scheduler(event):
    scheduler = BackgroundScheduler()
    settings = event.app.registry.settings
    jobstores = {'default': SQLAlchemyJobStore(url=settings['scheduler.url'])}
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
    event.app.registry.registerUtility(scheduler, IScheduler)
Example #26
class TimerTrigger(BaseTrigger):
    name = "timer"
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config={}, sched=None, connection=None):
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = "timer"
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def stop(self):
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        def toList(item):
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self, types=["timer"], timespecs=toList(trigger["time"]))

            efilters.append(f)

        return efilters

    def postConfig(self):
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error(
                            "Unable to parse time value '%s' " "defined in pipeline %s" % (timespec, pipeline.name)
                        )
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour, minute=minute, second=second)

                    self.apsched.add_job(self._onTrigger, trigger=trigger, args=(pipeline.name, timespec))
Example #27
class cScheduler():
    def __init__(self):
        jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///__secret//jobs.db')
        }
        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': 1
        }
        self.the_sched = BackgroundScheduler()
        self.the_sched.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        self.telegram_server = None
        self.job_queue = None

    def start(self):
        self.the_sched.start()

    def set_telegram_server(self, telegram_server):
        self.telegram_server = telegram_server
        self.telegram_job_queue = telegram_server.get_job_queue()

    def start_main_schedule(self):
        self.telegram_job_queue.put(self.telegram_server.extjob_send_all, 5, repeat=True)
Example #28
    def start(self):
        if self.config.webui['enable']:
            scheduler = BackgroundScheduler(
                logger=logging.getLogger('schedule'))
            logging.info("Using Background Scheduler")
        else:
            scheduler = BlockingScheduler(logger=logging.getLogger('schedule'))
            logging.info("Using Blocking Scheduler")

        scheduler.add_job(lambda: self.fetch(),
                          'cron', minute=30, id='fetch')
        scheduler.add_job(lambda: self.sort(),
                          'cron', minute=00, id='sort')

        # run once at launch
        self.fetch()
        self.sort()

        scheduler.start()

        if self.config.webui['enable']:
            logging.debug("Setting up WebUI")
            from fetcherd.webui import WebUI
            self.webui = WebUI(self, self.config)
            logging.debug("Starting WebUI")
            self.webui.run()
Example #29
def nameserver_check_scheduler(heartbeat_obj):
    """ Schedule the check using the heartbeat object """
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(heartbeat_obj.nameserver_check,
                  'cron',
                  second=("*/%s" % int(heartbeat_obj.configuration['heartbeat']['default']['interval'])))

    retries_check = int(heartbeat_obj.configuration['heartbeat']['default']['init_retries'])
    retry_wait = int(10)

    while(retries_check != 0):
        try:
            heartbeat_obj.nameservers.next()
        except StopIteration:
            pretty_log("Heartbeat scheduler not initialized yet... Will retry %s times..." % retries_check)
            pretty_log("Will retry in %s seconds" % retry_wait)
            retries_check -= 1
            sleep(retry_wait)
        else:
            pretty_log("Heartbeat scheduler initialized...")
            return True
    else:
        pretty_log("Heartbeat scheduler error!")
        return False
Example #30
def main(args):
    scheduler = BackgroundScheduler(coalesce=True, misfire_grace_time=4)
    taskparser = TaskParser(args['f'])
    taskparser.parse()

    yml_handler = YmlFileEventHandler(patterns=["*.yml"])
    yml_handler.set_scheduler(scheduler)
    yml_handler.set_parser(taskparser)
    file_observer = Observer()
    file_observer.schedule(yml_handler, path=args['f'], recursive=False)
    file_observer.start()

    # Initial parsing of the task folder
    for t in taskparser.task_list:
        addJob(t, scheduler)
        # Spread tasks from each other to prevent overload/task miss
        time.sleep(1)

    scheduler.start()

    # Update jobs while running
    while True:
        try:
            time.sleep(15)
        except KeyboardInterrupt:
            break

    scheduler.shutdown()
Example #31
            user[key] = datetime.strptime(user[key], "%Y-%m-%dT%H:%M:%S")
        # Save current state of account
        db.witness.update({'_id': user['owner']}, user, upsert=True)
        # Create our Snapshot dict
        snapshot = user.copy()
        _id = user['owner'] + '|' + now.strftime('%Y%m%d')
        snapshot.update({
          '_id': _id,
          'created': scantime
        })
        # Save Snapshot in Database
        db.witness_history.update({'_id': _id}, snapshot, upsert=True)

def run():
    update_witnesses()
    check_misses()

if __name__ == '__main__':
    # Start job immediately
    run()
    # Schedule it to run every 30 seconds
    scheduler = BackgroundScheduler()
    scheduler.add_job(run, 'interval', seconds=30, id='run')
    scheduler.start()
    # Loop
    try:
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #32
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(update_event_scores, 'interval', hours=1)
    scheduler.start()
Example #33
from googlefinance import getQuotes
from kafka import KafkaProducer
from kafka.errors import KafkaError, KafkaTimeoutError
from apscheduler.schedulers.background import BackgroundScheduler

import atexit
import logging   # write into log
import time
import json
import argparse


# scheduler, automatically do something
schedule = BackgroundScheduler()
schedule.add_executor('threadpool')
schedule.start()


# logger configuration
logging.basicConfig()
logger = logging.getLogger('data-producer')
logger.setLevel(logging.DEBUG) # used during development
# logger.setLevel(logging.ERROR) # used after development

symbol = 'AAPL'
topic_name = 'stock'
kafka_broker = '127.0.0.1:9092'

"""
release resources properly
    1. thread pool
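
The docstring above breaks off mid-list. A plausible shape for the resource-release hook it begins to describe, sketched under the assumption that a KafkaProducer instance named producer exists elsewhere in the file (it is not shown in this excerpt), would be:

def shutdown_hook(producer):
    # Flush any buffered messages, close the Kafka producer, then stop the
    # scheduler's thread pool before the process exits.
    try:
        producer.flush(10)
        producer.close(10)
    except KafkaError as kafka_error:
        logger.warning('Failed to close Kafka producer cleanly: %s', kafka_error)
    finally:
        schedule.shutdown()

# Typically registered once at startup, e.g. atexit.register(shutdown_hook, producer).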
Example #34
def tick_challenge():
    print('Starting the trending-challenges spider! The time is: %s' % datetime.now())
    subprocess.Popen(["scrapy", "crawl", "categoryVideoSpider"])


def tick_music():
    print('Starting the trending-music spider! The time is: %s' % datetime.now())
    subprocess.Popen(["scrapy", "crawl", "douyinSpider"])


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick_list, 'cron', day='*', hour=0, minute=0)  # run every day at midnight

    scheduler.add_job(tick_challenge, 'cron', day='*', hour=1, minute=0)
    scheduler.add_job(tick_music, 'cron', day='*', hour=1, minute=30)
    scheduler.start()  # the scheduled jobs run in their own thread
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        # This is here to simulate application activity (which keeps the main
        # thread alive).
        while True:
            time.sleep(2)  # the other jobs execute in separate threads
            print('sleep!')
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done
        # if possible
        scheduler.shutdown()
        print('Exit The Job!')
Example #35
def run_trade(stock):
    now = datetime.datetime.now()
    current_time = now.strftime('%H%M')
    global C0_LIST
    global ts_dict
    global un_buy_open
    global un_sell_open
    global un_sell_close
    global un_buy_close
    global close_axis_dict
    position_short = 0
    position_long = 0
    last_min_data = quotes[stock]  # latest quote
    new_price = last_min_data.last_price
    price_tick = last_min_data.price_tick
    if positions.__contains__(stock):
        # check the short position
        position_short = positions[stock].pos_short
        log.logger.info('{0} current short position: {1}'.format(stock, position_short))
        # check the long position
        position_long = positions[stock].pos_long
        log.logger.info('{0} current long position: {1}'.format(stock, position_long))
    ts_dict, C0 = sg.Trade_Signal(klines_dict[stock], para['BAR_NUM'])
    log.logger.info('C0 value: {0}'.format(C0))
    C0_DICT[stock] = C0  # update the indicator value
    log.logger.info("{0} signal values: ts_dict['b_open']={1}, ts_dict['s_open']={2}, ts_dict['b_close']={3}, ts_dict['s_close']={4}".format(\
        stock, ts_dict['b_open'], ts_dict['s_open'], ts_dict['b_close'], ts_dict['s_close']))
    # close long position
    if ts_dict['s_close'] == 1:
        if abs(max(C0_DICT[stock][-5:])) <= 2:
            close_axis_dict[stock]['s_close'] += 1
            if close_axis_dict[stock]['s_close'] < para['CLOSE_AXIS']:
                return
        close_axis_dict[stock]['s_close'] = 0
        if position_long > 0:
            log.logger.info('{0} holding a long position, closing it >>>>>>'.format(stock))
            # before closing, cancel any earlier unfilled reduction orders
            for ord in un_sell_close[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('{0} has a pre-set close-long order [{1}], cancelling it >>>>>>'.format(
                        stock, ord))
                    api.cancel_order(ord)
            un_sell_close[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                if positions[stock].pos_long_his > 0:
                    log.logger.info('{0} has {1} lots of old long position, closing them >>>>>>'.format(
                        stock, positions[stock].pos_long_his))
                    order = api.insert_order(symbol=stock, direction="SELL", offset="CLOSE", volume=positions[stock].pos_long_his,\
                                             limit_price=new_price-price_tick*2)
                elif positions[stock].pos_long_today > 0:
                    log.logger.info("{0} has {1} lots of today's long position, closing them >>>>>>".format(
                        stock, positions[stock].pos_long_today))
                    order = api.insert_order(symbol=stock, direction="SELL", offset="CLOSETODAY",volume=positions[stock].pos_long_today,\
                                             limit_price=new_price-price_tick*2)
            else:
                log.logger.info('{0} has {1} lots of long position, closing them >>>>>>'.format(
                    stock, position_long))
                order = api.insert_order(symbol=stock,
                                         direction="SELL",
                                         offset="CLOSE",
                                         volume=position_long)
            api.wait_update()
        else:
            log.logger.info("{0} no long position, nothing to close >>>>>>".format(stock))
        checkOpenAdd('s_close', stock, position_long, new_price - price_tick)
    # close short position
    if ts_dict['b_close'] == 1:
        if abs(min(C0_DICT[stock][-5:])) <= 2:
            close_axis_dict[stock]['b_close'] += 1
            if close_axis_dict[stock]['b_close'] < para['CLOSE_AXIS']:
                return
        close_axis_dict[stock]['b_close'] = 0
        if position_short > 0:
            log.logger.info('{0} holding a short position, closing it >>>>>>'.format(stock))
            for ord in un_buy_close[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('pre-set close-short order [{0}] exists, cancelling it >>>>>>'.format(ord))
                    api.cancel_order(ord)
            un_buy_close[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                if positions[stock].pos_short_his > 0:
                    log.logger.info('{0} has {1} lots of old short position, closing them >>>>>>'.format(
                        stock, positions[stock].pos_short_his))
                    order = api.insert_order(symbol=stock, direction="BUY", offset="CLOSE", volume=positions[stock].pos_short_his,\
                                             limit_price=new_price+price_tick*2)
                elif positions[stock].pos_short_today > 0:
                    log.logger.info("{0} has {1} lots of today's short position, closing them >>>>>>".format(
                        stock, positions[stock].pos_short_today))
                    order = api.insert_order(symbol=stock, direction="BUY", offset="CLOSETODAY",volume=positions[stock].pos_short_today,\
                                             limit_price=new_price+price_tick*2)
            else:
                log.logger.info('{0} has {1} lots of short position, closing them >>>>>>'.format(
                    stock, position_short))
                order = api.insert_order(symbol=stock,
                                         direction="BUY",
                                         offset="CLOSE",
                                         volume=position_short)
            api.wait_update()
        else:
            log.logger.info('{0} no short position, nothing to close >>>>>>'.format(stock))
        checkOpenAdd('b_close', stock, position_long, new_price + price_tick)
    # open long position
    if ts_dict['b_open'] == 1:
        if position_long > 0:
            log.logger.info('{0} already holding a long position, skip opening long >>>>>>'.format(stock))
        else:
            log.logger.info('{0} opening long >>>>>> {1} lots, limit price: {2}'.format(
                stock, para['OPEN_NUM'], new_price))
            order = api.insert_order(symbol=stock,
                                     direction="BUY",
                                     offset="OPEN",
                                     volume=para['OPEN_NUM'],
                                     limit_price=new_price + price_tick)
            un_buy_open[stock].append(order.order_id)
            # ----- schedule a check n seconds later to verify the order status and cancel if needed -----
            scheduler = BackgroundScheduler()
            scheduler.add_job(
                cancelJob,
                'date',
                run_date=now +
                datetime.timedelta(seconds=para['CANCEL_SECOND']),
                args=[order.order_id, stock, "BUY"])
            # the job runs in its own thread once the scheduler starts
            scheduler.start()
            while order.status != "FINISHED":
                api.wait_update()
            if positions.__contains__(stock) and positions[stock].pos_long > 0:
                offsetStr = "CLOSETODAY" if stock.find(
                    'SHFE') >= 0 or stock.find('INE') >= 0 else "CLOSE"
                close_order1 = api.insert_order(symbol=stock,
                                                direction="SELL",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM'],
                                                limit_price=new_price +
                                                price_tick * 25)
                close_order2 = api.insert_order(symbol=stock,
                                                direction="SELL",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM2'],
                                                limit_price=new_price +
                                                price_tick * 50)
                api.wait_update()
                un_sell_close[stock].append(close_order1.order_id)
                un_sell_close[stock].append(close_order2.order_id)
    # open short position
    if ts_dict['s_open'] == 1:
        if position_short > 0:
            log.logger.info('{0} already holding a short position, skip opening short >>>>>>'.format(stock))
        else:
            log.logger.info('{0} opening short >>>>>> {1} lots, limit price: {2}'.format(
                stock, para['OPEN_NUM'], new_price))
            order = api.insert_order(symbol=stock,
                                     direction="SELL",
                                     offset="OPEN",
                                     volume=para['OPEN_NUM'],
                                     limit_price=new_price - price_tick)
            un_sell_open[stock].append(order.order_id)
            # ----- schedule a check n seconds later to verify the order status and cancel if needed -----
            scheduler2 = BackgroundScheduler()
            scheduler2.add_job(
                cancelJob,
                'date',
                run_date=now +
                datetime.timedelta(seconds=para['CANCEL_SECOND']),
                args=[order.order_id, stock, "SELL"])
            # the job runs in its own thread once the scheduler starts
            scheduler2.start()
            while order.status != "FINISHED":
                api.wait_update()
            if positions.__contains__(
                    stock) and positions[stock].pos_short > 0:
                offsetStr = "CLOSETODAY" if stock.find(
                    'SHFE') >= 0 or stock.find('INE') >= 0 else "CLOSE"
                close_order1 = api.insert_order(symbol=stock,
                                                direction="BUY",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM'],
                                                limit_price=new_price -
                                                price_tick * 35)
                close_order2 = api.insert_order(symbol=stock,
                                                direction="BUY",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM2'],
                                                limit_price=new_price -
                                                price_tick * 50)
                api.wait_update()
                un_buy_close[stock].append(close_order1.order_id)
                un_buy_close[stock].append(close_order2.order_id)
Example #36
logger = logging.getLogger(__name__)

sched = BackgroundScheduler()


@sched.scheduled_job('cron', hour=16, minute=00)
def run():
    while True:
        try:
            week_kdata = ChinaStockKdataRecorder(
                level=IntervalLevel.LEVEL_1WEEK)
            week_kdata.run()

            mon_kdata = ChinaStockKdataRecorder(level=IntervalLevel.LEVEL_1MON)
            mon_kdata.run()

            break
        except Exception as e:
            logger.exception('quote runner error:{}'.format(e))
            time.sleep(60)


if __name__ == '__main__':
    init_process_log('eastmoney_quote.log')

    run()

    sched.start()

    sched._thread.join()
Example #37
def start_rewards():
    scheduler = BackgroundScheduler()
    scheduler.add_job(rewardsRoutine, 'cron', day_of_week='sun', hour=2, minute=50)
    scheduler.start()
Example #38
class AutoExportingCounters(object):
    """
    A wrapper around collections.Counter that adds periodic backup.

    NOTE: Not to be confused with remote_datatypes.Counter, which wraps a live
          redis counter. This counter is save-only and offers only weak durability.
          This is appropriate for monitoring and performance measurements, not
          for operational counters that require strict semantics.

    At the specified interval and program exit, the value in the counters will be
    sent to the provided channel.
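
    A minimal usage sketch follows the class definition.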
    """

    def __init__(self,
                 name,
                 host=None,
                 export_interval_secs=None,
                 counter_type=None,
                 config=None,
                 redis=None,
                 counter_names=None,
                 timer_names=None):
        config = config or forge.get_config()
        self.channel = forge.get_metrics_sink(redis)
        self.export_interval = export_interval_secs or config.core.metrics.export_interval
        self.name = name
        self.host = host or get_random_id()
        self.type = counter_type or name

        self.counter_schema = set(counter_names)
        self.timer_schema = set(timer_names)

        self.counts = None
        self.lock = threading.Lock()
        self.scheduler = None
        self.reset()

        assert self.channel
        assert(self.export_interval > 0)

    # noinspection PyUnresolvedReferences
    def start(self):
        from apscheduler.schedulers.background import BackgroundScheduler
        import atexit

        self.scheduler = BackgroundScheduler(daemon=True)
        self.scheduler.add_job(self.export, 'interval', seconds=self.export_interval)
        self.scheduler.start()

        atexit.register(lambda: self.stop())

    def reset(self):
        with self.lock:
            old, self.counts = self.counts, Counters({key: 0 for key in self.counter_schema})
            self.counts.update({key + '.t': 0 for key in self.timer_schema})
            self.counts.update({key + '.c': 0 for key in self.timer_schema})
            self.counts['type'] = self.type
            self.counts['name'] = self.name
            self.counts['host'] = self.host

        return old

    def stop(self):
        if self.scheduler:
            self.scheduler.shutdown(wait=False)
            self.scheduler = None
        self.export()

    def export(self):
        try:
            # To avoid blocking increments on the redis operation
            # we only hold the lock long enough to do a copy.
            thread_copy = dict(self.reset().items())
            self.channel.publish(thread_copy)
            log.debug(f"{pprint.pformat(thread_copy)}")

            return thread_copy
        except Exception:
            log.exception("Exporting counters")

    def increment(self, name, increment_by=1):
        try:
            if name not in self.counter_schema:
                raise ValueError(f"{name} is not an accepted counter for this module: f{self.counter_schema}")
            with self.lock:
                self.counts[name] += increment_by
                return increment_by
        except Exception:  # Don't let increment fail anything.
            log.exception("Incrementing counter")
            return 0

    def increment_execution_time(self, name, execution_time):
        try:
            if name not in self.timer_schema:
                raise ValueError(f"{name} is not an accepted counter for this module: f{self.timer_schema}")
            with self.lock:
                self.counts[name + ".c"] += 1
                self.counts[name + ".t"] += execution_time
                return execution_time
        except Exception:  # Don't let increment fail anything.
            log.exception("Incrementing counter")
            return 0
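
The class above ties BackgroundScheduler and atexit together to export counter values periodically and once more at shutdown. A minimal usage sketch follows; the counter and timer names are illustrative only, and the forge/redis plumbing pulled in by the constructor is assumed to be configured by the surrounding project:

import time

counters = AutoExportingCounters(
    name='ingester',
    export_interval_secs=5,
    counter_names=['submissions', 'errors'],
    timer_names=['fetch'])
counters.start()  # schedules export() every export_interval_secs and registers stop() via atexit

started = time.time()
counters.increment('submissions')
counters.increment_execution_time('fetch', time.time() - started)

counters.stop()  # shuts the scheduler down and performs one final export()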
Example #39
File: monitor.py  Project: xlybaby/VAR
class PooledWebDriverManager(threading.Thread):

  def __init__(self, p_queue, p_request_queue):
    #Process.__init__(self)  
    threading.Thread.__init__(self) 
    self._interval = Configure.configure().value("headless.webdriver.browserIdleTimeMonitorInterval")
    self._max_idle_time = Configure.configure().value("headless.webdriver.maxBrowserIdleTime")
    
    self._alertMinAvailableNum = Configure.configure().value("headless.webdriver.alertMinAvailableNum")
    self._alertMaxAvailableNum = Configure.configure().value("headless.webdriver.alertMaxAvailableNum")
    self._monitorMinAvailableNum = Configure.configure().value("headless.webdriver.monitorMinAvailableNum")
    self._monitorMaxAvailableNum = Configure.configure().value("headless.webdriver.monitorMaxAvailableNum")
    
    self._iniBrowserNum = Configure.configure().value("headless.webdriver.iniBrowserNum")
    self._iniWinHeight = Configure.configure().value("headless.webdriver.iniBrowserWinHeight")
    self._iniWinWidth = Configure.configure().value("headless.webdriver.iniBrowserWinWidth")
    self._driver_path = Configure.configure().value("headless.webdriver.path")

    self._alert_used_rate = 0.5
    #self._alert_job_interval = 5
    self._driver_queue = p_queue
    self._request_queue = p_request_queue
    
  def run(self):
    executors = {
                'default': apscheduler.executors.pool.ThreadPoolExecutor(2),
                'processpool': apscheduler.executors.pool.ProcessPoolExecutor(2)
                 }
    job_defaults = {
                'coalesce': True,
                'max_instances': 1
                 }
    self._scheduler = BackgroundScheduler(executors=executors, job_defaults=job_defaults, timezone=utc)  
    #self._scheduler.add_job(self.checkLess,'interval',seconds=self._monitorMinAvailableNum)
    #self._scheduler.add_job(self.checkOverload,'interval',seconds=self._monitorMaxAvailableNum)
    self._scheduler.add_job(self.check,'interval',seconds=self._monitorMinAvailableNum)
    Logger.getLogger().info("Web driver pool manager starts")
    self._scheduler.start()
  
  def check(self):
    drivernum = self._driver_queue.qsize()
    requestnum = self._request_queue.qsize() 
    print ("current driver num is %d, waiting request num is %d"%(drivernum, requestnum))
    if round(requestnum/drivernum, 4) >= 0.5:
      print ("waiting request num is half of driver num")  
      for i in range(self._iniBrowserNum):
        try:  
          timeout = int(Configure.configure().value("headless.webdriver.addedNewDriverProcessAliveTimeout")) + i  
          proc = DriverProcess(p_request_queue=self._request_queue, p_alive_timeout=timeout)
          self._driver_queue.put(proc, block=False)
          proc.start()    
        except:
          pass   
      
  def checkOverload(self):
    cursize = self._driver_queue.qsize()
    Logger.getLogger().info ("*** check idle driver count, current queue size: %d"%(cursize))
    if cursize >= self._alertMaxAvailableNum:
      Logger.getLogger().info ("Current queue size is greater than the alert idle value: %d >= %d"%(cursize, self._alertMaxAvailableNum) )
      num = cursize - self._alertMaxAvailableNum
      for i in range(num):
        try:
          proc = self._driver_queue.get(block=False)
          proc.raiseExc(SystemExit)
          #proc.getInputQueue.put("Over")
#           driverwrapper = proc.getDriverwrapper()
#           driver = driverwrapper["driver"]
#           driver.quit()
#           driver = None
#           driverwrapper = None
        except:
          pass
      
  def checkLess(self):
    cursize = self._driver_queue.qsize()
    Logger.getLogger().info ("*** check minimum driver count, current queue size: %d"%(cursize))
    if cursize <= self._alertMinAvailableNum:
      Logger.getLogger().info ("Current queue size is less than alert minimum value: %d <= %d"%(cursize, self._alertMinAvailableNum) )
      for i in range(self._iniBrowserNum):
        try:  
          proc = DriverProcess()
          self._driver_queue.put(proc, block=False)
          proc.start()    
#           chrome_options = Options()
#           chrome_options.add_argument("--headless")
#           chrome_options.add_argument("--window-size="+str(self._iniWinWidth)+"x"+str(self._iniWinHeight))
#           driver = webdriver.Chrome(chrome_options=chrome_options, executable_path=self._driver_path)
#           driverwrapper = {"driver": driver, "instancetime": datetime.datetime.now(), "lastactivetime": datetime.datetime.now(), "usetimes": 0 }
#           self._queue.put(driverwrapper, block=False)
        except:
          pass
Example #40
def init_scheduler_infomation(scheduler):
    """
    :param scheduler:
    :return:
    """
    ok, all_report_uuids = storage.get_all_report_uuid()
    if not ok:
        raise RuntimeError(f"Fail to get reports info: {all_report_uuids}")
    if not all_report_uuids:
        print("No reports info in db")
        return

    for report_uuid in all_report_uuids:
        cook_report_uuid(scheduler=scheduler,
                         report_uuid=report_uuid,
                         action="add")


if __name__ == "__main__":
    back_scheduler = BackgroundScheduler()
    back_scheduler.start()

    init_scheduler_infomation(back_scheduler)

    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(ReportApp(back_scheduler),
                                                xheaders=True)

    http_server.listen(options.port, address="127.0.0.1")
    tornado.ioloop.IOLoop.instance().start()
Example #41
def setScheduler():
    sched = BackgroundScheduler()
    sched.add_job(closeJob, 'cron', minute='*', second=10)
    sched.start()
Example #42
def closeJob():
    global C0_LIST
    global ts_dict
    global un_buy_open
    global un_sell_open
    global un_sell_close
    global un_buy_close
    global close_axis_dict
    now = datetime.datetime.now()
    for stock in SEC_LIST:
        if len(C0_DICT[stock]) == 0:
            continue
        last_min_data = quotes[stock]  # latest quote
        new_price = last_min_data.last_price
        price_tick = last_min_data.price_tick
        C1 = min(C0_DICT[stock][-5:])  # crossing below opens a short, crossing above closes it
        C2 = max(C0_DICT[stock][-5:])  # crossing above opens a long, crossing below closes it
        position_long = positions[stock].pos_long
        position_short = positions[stock].pos_short
        print(stock, ' position_long: ', position_long, ', position_short: ',
              position_short)
        #print(un_buy_close)
        #print(un_sell_close)
        if C1 > 0 and position_short > 0:
            log.logger.info('there is still a short position to close >>>>>>')
            # before closing, cancel any earlier unfilled reduction orders
            for ord in un_buy_close[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('pre-set close order [{0}] exists, cancelling it >>>>>>'.format(ord))
                    api.cancel_order(ord)
            un_buy_close[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                if positions[stock].pos_short_his > 0:
                    log.logger.info('{0} has {1} lots of old short position, closing them >>>>>>'.format(
                        stock, positions[stock].pos_short_his))
                    order = api.insert_order(symbol=stock, direction="BUY", offset="CLOSE",
                                             volume=positions[stock].pos_short_his, \
                                             limit_price=new_price)
                elif positions[stock].pos_short_today > 0:
                    log.logger.info("{0} has {1} lots of today's short position, closing them >>>>>>".format(
                        stock, positions[stock].pos_short_today))
                    order = api.insert_order(symbol=stock, direction="BUY", offset="CLOSETODAY",
                                             volume=positions[stock].pos_short_today, \
                                             limit_price=new_price)
            else:
                log.logger.info('{0} has {1} lots of short position, closing them >>>>>>'.format(
                    stock, position_short))
                order = api.insert_order(symbol=stock,
                                         direction="BUY",
                                         offset="CLOSE",
                                         volume=position_short)
        if C2 < 0 and position_long > 0:
            log.logger.info('there is still a long position to close >>>>>>')
            for ord in un_sell_close[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('pre-set close order [{0}] exists, cancelling it >>>>>>'.format(ord))
                    api.cancel_order(ord)
            un_sell_close[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                if positions[stock].pos_long_his > 0:
                    log.logger.info('{0} has {1} lots of old long position, closing them >>>>>>'.format(
                        stock, positions[stock].pos_long_his))
                    order = api.insert_order(symbol=stock, direction="SELL", offset="CLOSE",
                                             volume=positions[stock].pos_long_his, \
                                             limit_price=new_price)
                elif positions[stock].pos_long_today > 0:
                    log.logger.info("{0} has {1} lots of today's long position, closing them >>>>>>".format(
                        stock, positions[stock].pos_long_today))
                    order = api.insert_order(symbol=stock, direction="SELL", offset="CLOSETODAY",
                                             volume=positions[stock].pos_long_today, \
                                             limit_price=new_price)
            else:
                log.logger.info('{0} has {1} lots of long position, closing them >>>>>>'.format(
                    stock, position_long))
                order = api.insert_order(symbol=stock,
                                         direction="SELL",
                                         offset="CLOSE",
                                         volume=position_long)
        if C2 > 0 and (max(C0_DICT[stock][-6:-1]) < 0 or
                       max(C0_DICT[stock][-7:-2]) < 0) and position_long == 0:
            # before re-opening, cancel any earlier unfilled opening orders
            for ord in un_buy_open[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('{0} has an unfilled open-long order [{1}], cancelling it >>>>>>'.format(
                        stock, ord))
                    api.cancel_order(ord)
            un_buy_open[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                log.logger.info('{0} re-opening long >>>>>> {1} lots, limit price {2}'.format(
                    stock, para['OPEN_NUM'], new_price - price_tick))
                order = api.insert_order(symbol=stock,
                                         direction="BUY",
                                         offset="OPEN",
                                         volume=para['OPEN_NUM'],
                                         limit_price=new_price + price_tick)
            else:
                log.logger.info('{0} re-opening long >>>>>> {1} lots at market price'.format(
                    stock, para['OPEN_NUM']))
                order = api.insert_order(symbol=stock,
                                         direction="BUY",
                                         offset="OPEN",
                                         volume=para['OPEN_NUM'])
            # ----- schedule a check 5 seconds later to verify the order status and cancel if needed -----
            scheduler = BackgroundScheduler()
            scheduler.add_job(
                cancelJob,
                'date',
                run_date=now +
                datetime.timedelta(seconds=para['CANCEL_SECOND']),
                args=[order.order_id, stock, "BUY"])
            # the job runs in its own thread once the scheduler starts
            scheduler.start()
            time.sleep(2)  # wait two seconds for a fill before checking the position and placing the reduction orders
            if positions.__contains__(stock) and positions[stock].pos_long > 0:
                offsetStr = "CLOSETODAY" if stock.find(
                    'SHFE') >= 0 or stock.find('INE') >= 0 else "CLOSE"
                close_order1 = api.insert_order(symbol=stock,
                                                direction="SELL",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM'],
                                                limit_price=new_price +
                                                price_tick * 5)
                close_order2 = api.insert_order(symbol=stock,
                                                direction="SELL",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM2'],
                                                limit_price=new_price +
                                                price_tick * 10)
                un_sell_close[stock].append(close_order1.order_id)
                un_sell_close[stock].append(close_order2.order_id)
        if C1 < 0 and (min(C0_DICT[stock][-6:-1]) > 0 or
                       min(C0_DICT[stock][-7:-2]) > 0) and position_short == 0:
            for ord in un_sell_open[stock]:
                qry_order = api.get_order(ord)
                if qry_order.status != "FINISHED":
                    log.logger.info('{0} 存在未成交开空委托,单号[{1}],对其撤单>>>>>>'.format(
                        stock, ord))
                    api.cancel_order(ord)
            un_sell_open[stock] = []
            if stock.find('SHFE') >= 0 or stock.find('INE') >= 0:
                log.logger.info('{0} 补开空仓>>>>>>{1}手,委托价格{2}'.format(
                    stock, para['OPEN_NUM'], new_price - price_tick))
                order = api.insert_order(symbol=stock,
                                         direction="SELL",
                                         offset="OPEN",
                                         volume=para['OPEN_NUM'],
                                         limit_price=new_price - price_tick)
            else:
                log.logger.info('{0} 补开空仓>>>>>>{1}手,以市价委托'.format(
                    stock, para['OPEN_NUM']))
                order = api.insert_order(symbol=stock,
                                         direction="SELL",
                                         offset="OPEN",
                                         volume=para['OPEN_NUM'])
            # ----- Schedule a job CANCEL_SECOND seconds later to check the order status and cancel it if unfilled -----
            scheduler2 = BackgroundScheduler()
            scheduler2.add_job(
                cancelJob,
                'date',
                run_date=now +
                datetime.timedelta(seconds=para['CANCEL_SECOND']),
                args=[order.order_id, stock, "SELL"])
            # Start the job in a separate background thread
            scheduler2.start()
            time.sleep(2)  # wait two seconds for a possible fill, then check the position before placing the reducing orders
            if positions.__contains__(
                    stock) and positions[stock].pos_short > 0:
                offsetStr = "CLOSETODAY" if stock.find(
                    'SHFE') >= 0 or stock.find('INE') >= 0 else "CLOSE"
                close_order1 = api.insert_order(symbol=stock,
                                                direction="BUY",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM'],
                                                limit_price=new_price -
                                                price_tick * 5)
                close_order2 = api.insert_order(symbol=stock,
                                                direction="BUY",
                                                offset=offsetStr,
                                                volume=para['CLOSE_NUM2'],
                                                limit_price=new_price -
                                                price_tick * 10)
                un_buy_close[stock].append(close_order1.order_id)
                un_buy_close[stock].append(close_order2.order_id)
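A minimal sketch of the cancel-on-timeout pattern the example above relies on: after placing an order, a one-shot 'date' job fires a few seconds later and cancels the order if it has not filled. The broker_api object with get_order/cancel_order and the CANCEL_SECOND constant are hypothetical stand-ins, not part of the original code; only the "FINISHED" status string comes from the example.

import datetime

from apscheduler.schedulers.background import BackgroundScheduler

CANCEL_SECOND = 5  # assumed timeout before an unfilled order is cancelled


def check_and_cancel(broker_api, order_id):
    # Cancel the order if it is still unfilled when the job fires
    order = broker_api.get_order(order_id)
    if order.status != "FINISHED":
        broker_api.cancel_order(order_id)


def schedule_cancel(broker_api, order_id):
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        check_and_cancel,
        'date',
        run_date=datetime.datetime.now() + datetime.timedelta(seconds=CANCEL_SECOND),
        args=[broker_api, order_id])
    scheduler.start()  # the one-shot job runs in a background thread
    return scheduler

In a long-running strategy it is usually better to reuse a single scheduler rather than create a new BackgroundScheduler per signal, as the original does; each scheduler keeps its own thread pool alive.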
示例#43
0
 def get(self, request, *args, **kwargs):
     '''
         Receive the parameters from the front end and start a thread that runs the scheduled task
     '''
     id = request.GET.get("id", "")
     ploy = request.GET.get("ploy", "")
     notification = request.GET.get("notification", "")
     start_date = request.GET.get("start_time", "")
     end_date = request.GET.get("end_time", "")
     admin_url = request.GET.get("admin_url", "")
     username = request.GET.get("username", "")
     password = request.GET.get("password", "")
     # day-of-month is not supported; only week numbers (1-53) are supported
     second, minute, hour, week, month, day_of_week = self.determine_ploy(
         ploy)
     if not second:
         error_code["error"] = "请输入合法的ploy"
         return Response(error_code)
     # Query all case sets under this task
     case_set = ExecutePlanCases.objects.filter(parent=id).values_list(
         "relevance_id", flat=True)
     print("查询任务下所有的用例集:" + str(case_set))
     if not case_set:
         error_code["error"] = "请您先在任务下添加用例集!"
         return Response(error_code)
     # Query all cases under those case sets
     cases = RelevanceCaseSet.objects.filter(
         parent__in=case_set).values_list("relevance_id", flat=True)
     print("查询所有用例集下所有的用例:" + str(cases))
     # Query all interfaces under those cases; the order of this result list is arbitrary
     interface_id_list = InterFaceCaseData.objects.filter(
         parent__in=cases).values_list("id", flat=True).distinct()
     print("查询所有用例集下所有的接口:" + str(interface_id_list))
     # # Batch-update the request headers of all interfaces to be executed
     # token = self.get_token(interface_id_list,admin_url,username,password)
     # if "error" in token:
     #     return Response(token)
     # Instantiate a non-blocking (background) scheduler
     scheduler = BackgroundScheduler()
     uid = str(uuid.uuid4())
     obj = ExecutePlan.objects.get(id=id)
     # Save the report info
     dic = {
         "parent": id,
         "status": False,
         "all_case_count": len(cases),
         "start_time": start_date,
         "end_time": end_date
     }
     serializer = ExecutePlanReporttSer(data=dic)
     if serializer.is_valid():
         serializer.save()
     # Get the most recently created report object
     report_obj = ExecutePlanReport.objects.filter().last()
     scheduler.add_job(self.job_func,
                       'cron',
                       start_date=start_date,
                       id=uid,
                       month=month,
                       week=week,
                       day_of_week=day_of_week,
                       hour=hour,
                       minute=minute,
                       second=second,
                       args=(cases, end_date, scheduler, uid, notification,
                             obj, interface_id_list, admin_url, username,
                             password, report_obj))
     # Start the scheduled job
     scheduler.start()
     job = str(scheduler.get_job(uid))
     print(str(job))
     # Get the next scheduled run time
     task_notification = job.split(",")[-1][1:-1]
     right_code["msg"] = task_notification
     return Response(right_code)
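The view above recovers the next fire time by splitting the string representation of the job. A simpler sketch, assuming nothing beyond APScheduler itself, reads the job's next_run_time attribute once the scheduler is running:

from apscheduler.schedulers.background import BackgroundScheduler


def job_func():
    print("running scheduled task")


scheduler = BackgroundScheduler()
scheduler.add_job(job_func, 'cron', second='*/10', id='demo')
scheduler.start()

# next_run_time is a timezone-aware datetime once the scheduler has started
print(scheduler.get_job('demo').next_run_time)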
class DistanceSensor:
    def __init__(self, dist):
        self.threshold_distance = dist
        self.ipcon = None
        self.device = None
        self.tfIDs = []
        self.triggered = False
        self.thr_start = threading.Thread(target=self.triggerPlayer, args=(1,))
        self.thr_stop = threading.Thread(target=self.stopPlayer, args=(1,))
        self.deviceIDs = [ i[0] for i in deviceIdentifiersList ]
        self.scheduler = None
        self.counter = _DELAY
        self.distance = 200.0

        if dist:
            self.setThresholdFromSettings()

        if self.threshold_distance:
            self.poll()
        else:
            logger("Test distance sensor created")

    def setThresholdFromSettings(self):
        try:
            d = self.loadSettings()
            self.threshold_distance = d
            logger("Threshold set to: " + str(d) + "cm")
        except Exception as e:
            logger("ERROR: could not get distance setting from the usb stick, using default value ..." + e)

    def getIdentifier(self, ID):
        deviceType = ""
        for t in range(len(self.deviceIDs)):
            if ID[1]==deviceIdentifiersList[t][0]:
                deviceType = deviceIdentifiersList[t][1]
        return(deviceType)

    def loadSettings(self):
        settings_json = Settings.get_settings()
        settings_json = settings_json.copy()
        print("Distance threshold in settings: ", settings_json["detection_distance"])
        return int(settings_json["detection_distance"])

    # Tinkerforge sensors enumeration
    def cb_enumerate(self, uid, connected_uid, position, hardware_version, firmware_version,
                    device_identifier, enumeration_type):
        self.tfIDs.append([uid, device_identifier])

    def tick(self):
        print("Triggered: " + str(self.triggered))
        print("Distance: " + str(self.distance))
        print("Counter: " + str(self.counter))

        if self.triggered:
            self.counter = _DELAY

        elif not self.triggered:
            self.counter -= 1
            if self.counter < 0:
                print("Stopping player")
                self.stopPlayer()
                self.device.set_distance_callback_configuration(_ENTRY_CALLBACK_PERIOD, True, "x", 0, 0)
            if self.counter < 0:
                self.counter = 0

    def poll(self):

        self.ipcon = IPConnection() # Create IP connection
        self.ipcon.connect(HOST, PORT) # Connect to brickd
        self.ipcon.register_callback(IPConnection.CALLBACK_ENUMERATE, self.cb_enumerate)

        # Trigger Enumerate
        self.ipcon.enumerate()

        time.sleep(0.7)

        for tf in self.tfIDs:
            if len(tf[0])<=3: # if the device UID is 3 characters it is a bricklet
                if tf[1] in self.deviceIDs:
                    print(tf[0],tf[1], self.getIdentifier(tf))
                    if tf[1] == 25: # DISTANCE IR BRICKLET
                        print("Registering %s as active Distance IR sensor 1.2" % tf[0])
                        self.device = BrickletDistanceIR(tf[0], self.ipcon) # Create device object
                        # Don't use device before ipcon is connected

                        self.device.register_callback(self.device.CALLBACK_DISTANCE, self.cb_distance)

                        # Get threshold callbacks with a debounce time of 10 seconds (10000ms)
                        # self.device.set_debounce_period(_DEBOUNCE_TIME)
                        self.device.set_distance_callback_period(_ENTRY_CALLBACK_PERIOD)
                    elif tf[1] == 2125: # DISTANCE IR BRICKLET V2.0
                        print("Registering %s as active Distance IR sensor 2.0" % tf[0])
                        self.device = BrickletDistanceIRV2(tf[0], self.ipcon) # Create device object
                        # Don't use device before ipcon is connected

                        self.device.register_callback(self.device.CALLBACK_DISTANCE, self.cb_distance_v2)

                        self.device.set_distance_callback_configuration(_ENTRY_CALLBACK_PERIOD, True, "x", 0, 0)

                    self.scheduler = BackgroundScheduler({
                        'apscheduler.executors.processpool': {
                            'type': 'processpool',
                            'max_workers': '1'
                        }}, timezone="Europe/London")
                    self.scheduler.add_job(self.tick, 'interval', seconds=_TICK_TIME, misfire_grace_time=5, max_instances=1, coalesce=False)
                    self.scheduler.start(paused=False)
                    logging.getLogger('apscheduler').setLevel(logging.CRITICAL)


        print("Polling the TF distance sensor for distance measurement... ")
        print("Threshold distance is set to ", self.threshold_distance, "cm")

        # except Exception as e:
        #     print("ERROR: There is a problem with the Distance Sensor!")
        #     print("Why:", e)
        #     self.__del__()

    # Callback function for distance polling
    # Is only called if the distance has changed within _CALLBACK_PERIOD

    def cb_distance(self, distance):
        logger("Distance: " + str(distance/10.0) + " cm")
        d = distance/10.0
        t = None
        self.distance = d
        if d <= self.threshold_distance:
            self.triggerPlayer()
            self.device.set_distance_callback_period(_EXIT_CALLBACK_PERIOD)
            self.triggered = True
        elif d > self.threshold_distance:
            self.triggered = False

    def cb_distance_v2(self, distance):
        logger("Distance: " + str(distance/10.0) + " cm")
        d = distance/10.0
        t = None
        self.distance = d
        if d <= self.threshold_distance:
            self.triggerPlayer()
            self.device.set_distance_callback_configuration(_EXIT_CALLBACK_PERIOD, True, "x", 0, 0)
            self.triggered = True
        elif ( d > self.threshold_distance ):
            self.triggered = False

    def triggerPlayer(self, path="/media/usb/uploads/01_scentroom.mp3", start_position=0, test=False):
        try:
            if self.triggered or test:
                postFields = { \
                            'trigger' : "start", \
                            'upload_path': str(path), \
                            'start_position': str(start_position), \
                        }

                playerRes = requests.post('http://localhost:' + os.environ.get("PLAYER_PORT", "8080") + '/scentroom-trigger', json=postFields)
                print("INFO: res from start: ", playerRes)
        except Exception as e:
            logging.error("HTTP issue with player trigger")
            print("Why: ", e)

    def stopPlayer(self, test=False):
        try:
            if not self.triggered or test :
                postFields = { \
                            'trigger': "stop" \
                        }

                playerRes = requests.post('http://localhost:' + os.environ.get("PLAYER_PORT", "8080") + '/scentroom-trigger', json=postFields)
                print("INFO: res from stop: ", playerRes)
        except Exception as e:
            logging.error("HTTP issue with player stop")


    def __del__(self):
        try:
            self.ipcon.disconnect()
        except Exception as e:
            logger("Cannot destroy the Tinkerforge IP connection gracefully...")
            print("Why: ", e)
            logger("It's likely there was no connection to begin with!")
            logger("Distance sensor ")
        self.device = None
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(schedule_api, 'interval', seconds=5)
    scheduler.start()
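The tick()/counter logic above is effectively a watchdog: every interval it counts down and stops the player once nothing has re-triggered it for _DELAY ticks. A stripped-down sketch of that pattern, with an assumed on_idle callback standing in for the player/HTTP calls of the original:

import time

from apscheduler.schedulers.background import BackgroundScheduler

_DELAY = 5        # assumed number of ticks to wait before going idle
_TICK_TIME = 1    # assumed tick interval in seconds


class Watchdog:
    """Counts down once per tick and calls on_idle() when kick() has not
    re-armed it for _DELAY consecutive ticks."""

    def __init__(self, on_idle):
        self.counter = _DELAY
        self.on_idle = on_idle

    def kick(self):
        # call this whenever activity is detected to re-arm the countdown
        self.counter = _DELAY

    def tick(self):
        self.counter -= 1
        if self.counter == 0:
            self.on_idle()


watchdog = Watchdog(on_idle=lambda: print("idle: stopping player"))
scheduler = BackgroundScheduler()
scheduler.add_job(watchdog.tick, 'interval', seconds=_TICK_TIME,
                  max_instances=1, coalesce=True)
scheduler.start()

time.sleep(8)          # without kick() the watchdog fires after _DELAY ticks
scheduler.shutdown()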
示例#46
0
def create_app():
    app = Flask(__name__)
    app.config.from_object(CONFIG_CLASS)

    init_logging()

    socketio = SocketIO(app, cors_allowed_origins="*")

    if os.getenv("FLASK_ENV") == "development":
        app = register_teardown_request(app)

    # read directory mount based config into Flask config
    try:
        conf_data = get_user_conf()
        app.config.update(conf_data)
    except Exception:
        app.logger.warning("Failed to load config.json")

    app.config["ORCHEST_REPO_TAG"] = get_repo_tag()

    # create thread for non-cpu bound background tasks, e.g. requests
    scheduler = BackgroundScheduler(
        job_defaults={
            # Infinite amount of grace time, so that if a task cannot be
            # instantly executed (e.g. if the webserver is busy) then it
            # will eventually be.
            "misfire_grace_time": 2**31,
            "coalesce": False,
            # So that the same job can be in the queue an infinite
            # amount of times, e.g. for concurrent requests issuing the
            # same tasks.
            "max_instances": 2**31,
        })
    app.config["SCHEDULER"] = scheduler
    scheduler.start()

    app.logger.info("Flask CONFIG: %s" % app.config)

    # Create the database if it does not exist yet. Roughly equal to a
    # "CREATE DATABASE IF NOT EXISTS <db_name>" call.
    if not database_exists(app.config["SQLALCHEMY_DATABASE_URI"]):
        create_database(app.config["SQLALCHEMY_DATABASE_URI"])
    db.init_app(app)
    ma.init_app(app)
    # necessary for migration
    Migrate().init_app(app, db)

    with app.app_context():
        # Upgrade to the latest revision. This also takes care of
        # bringing an "empty" db (no tables) on par.
        upgrade()
        initialize_default_datasources(db, app)

    # Telemetry
    if not app.config["TELEMETRY_DISABLED"]:
        # initialize posthog
        posthog.api_key = base64.b64decode(
            app.config["POSTHOG_API_KEY"]).decode()
        posthog.host = app.config["POSTHOG_HOST"]

        # send a ping now
        analytics_ping(app)

        # and every 15 minutes
        scheduler.add_job(
            analytics_ping,
            "interval",
            minutes=app.config["TELEMETRY_INTERVAL"],
            args=[app],
        )

    # static file serving
    @app.route("/public/<path:path>")
    def send_files(path):
        return send_from_directory("../static", path)

    register_views(app, db)
    register_orchest_api_views(app, db)
    register_background_tasks_view(app, db)
    register_socketio_broadcast(db, socketio)
    register_analytics_views(app, db)

    processes = []

    if process_start_gate():

        file_dir = os.path.dirname(os.path.realpath(__file__))

        # log_streamer process
        log_streamer_process = Popen(
            ["python3", "-m", "scripts.log_streamer"],
            cwd=os.path.join(file_dir, ".."),
            stderr=subprocess.STDOUT,
        )

        app.logger.info("Started log_streamer.py")
        processes.append(log_streamer_process)

    return app, socketio, processes
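A reduced sketch of the pattern used above: the scheduler is stored on app.config so request handlers can hand off slow, non-CPU-bound work (for example outbound HTTP calls) and return immediately. The /ping route and notify function are illustrative stand-ins, not part of the original application:

from datetime import datetime

from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask, current_app


def notify(url):
    # placeholder for a slow, non-CPU-bound call such as an HTTP request
    print("would call", url)


app = Flask(__name__)
scheduler = BackgroundScheduler(job_defaults={"misfire_grace_time": 2**31,
                                              "coalesce": False,
                                              "max_instances": 2**31})
app.config["SCHEDULER"] = scheduler
scheduler.start()


@app.route("/ping")
def ping():
    # hand the slow work to the scheduler so the request returns immediately
    current_app.config["SCHEDULER"].add_job(
        notify, "date", run_date=datetime.now(), args=["http://example.com"])
    return "queued"


if __name__ == "__main__":
    app.run()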
示例#47
0
def create_app():
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_object(config)
    app.config.from_pyfile('config.py')

    # set logging level
    logging.basicConfig(level=app.config['LOGGING_LEVEL'])

    # ensure the instance folder exists
    try:
        os.makedirs(app.instance_path)
    except OSError:
        pass

    # register the database commands
    from providentia.db import this as db
    db.init_app(app)

    # apply the blueprints to Providentia
    logging.debug('Applying blueprints to routes.')
    from providentia.views import new_job, dataset, database, benchmark, analysis, classifier, logs, queries, \
        kate, review_trends, city_sentiment, sim1, sim2, sim3

    app.register_blueprint(benchmark.bp, url_prefix='/api/benchmark')
    app.register_blueprint(dataset.bp, url_prefix='/api/dataset')
    app.register_blueprint(database.bp, url_prefix='/api/database')
    app.register_blueprint(analysis.bp, url_prefix='/api/analysis')
    app.register_blueprint(new_job.bp, url_prefix='/api/new-job')
    app.register_blueprint(classifier.bp, url_prefix='/api/classifier')
    app.register_blueprint(logs.bp, url_prefix='/api/logs')
    app.register_blueprint(queries.bp, url_prefix="/api/queries")
    app.register_blueprint(kate.bp, url_prefix="/api/result/kate")
    app.register_blueprint(review_trends.bp,
                           url_prefix="/api/result/review-trends")
    app.register_blueprint(city_sentiment.bp,
                           url_prefix="/api/result/city-sentiment")
    app.register_blueprint(sim1.bp, url_prefix="/api/result/sim1")
    app.register_blueprint(sim2.bp, url_prefix="/api/result/sim2")
    app.register_blueprint(sim3.bp, url_prefix="/api/result/sim3")

    # establish analysis database for this app
    logging.debug('Establishing database connections.')
    from providentia.db import this
    app.teardown_appcontext(this.close_db)

    # restart any incomplete jobs
    from providentia.repository.tbl_benchmark import reset_processing_jobs
    with app.app_context():
        reset_processing_jobs()

    # test connections to benchmark databases
    test_database_connections(app)

    # register CORS
    logging.debug('Registering CORS filter.')
    CORS(app, resources={r"/api/*": {"origins": app.config['CORS_ORIGINS']}})

    # enable scheduler
    logging.debug('Starting background jobs.')
    from apscheduler.schedulers.background import BackgroundScheduler
    from providentia.analysis.periodic_jobs import log_server_state, execute_waiting
    from providentia.classifier import sentiment, fake
    from datetime import datetime, timedelta

    classifier_start_train = datetime.now() + timedelta(0, 10)

    scheduler = BackgroundScheduler()
    scheduler.add_job(func=execute_waiting,
                      id='execute_waiting',
                      trigger='interval',
                      seconds=10)
    scheduler.add_job(func=log_server_state,
                      id='log_server_state',
                      trigger='interval',
                      seconds=1)
    # train the classifier model if it is enabled
    if app.config['ENABLE_SENTIMENT'] is True:
        logging.debug(
            '[SENTIMENT] Checking necessary NLTK resources are installed')
        check_nltk_deps()
        # Check if model exists else train one
        if os.path.exists(
                "./models/naivebayes.pickle") is True and os.path.exists(
                    "./models/features.pickle") is True:
            try:
                sentiment.deserialize_model()
                logging.info("Sentiment classifier ready!")
            except OSError as e:
                logging.error(
                    "Unable to deserialize Naive Bayes model! Creating a new one. %s",
                    e)
                scheduler.add_job(func=sentiment.train_model,
                                  id='train_sentiment',
                                  trigger='date',
                                  next_run_time=classifier_start_train,
                                  args=[app.config['SENTIMENT_DATA'], app])
        else:
            scheduler.add_job(func=sentiment.train_model,
                              id='train_sentiment',
                              trigger='date',
                              next_run_time=classifier_start_train,
                              args=[app.config['SENTIMENT_DATA'], app])
    filter_apscheduler_logs()
    scheduler.start()
    # shut down the scheduler when exiting the app
    atexit.register(lambda: scheduler.shutdown())

    return app
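The conditional training job above uses a 'date' trigger so the model is built once, shortly after startup, while atexit stops the worker threads when the app exits. A self-contained sketch of that run-once-then-clean-up pattern, with a placeholder train_model in place of the real sentiment training:

import atexit
from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler


def train_model():
    print("training started at", datetime.now())


classifier_start_train = datetime.now() + timedelta(seconds=10)

scheduler = BackgroundScheduler()
scheduler.add_job(train_model, 'date',
                  run_date=classifier_start_train,
                  id='train_sentiment')
scheduler.start()

# stop the scheduler's threads when the process exits
atexit.register(lambda: scheduler.shutdown())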
示例#48
0
class PeerToPeer():
    def __init__(self, addr):
        """
        PeerToPeer network initialization routine, generates miner ID and synchronizes blockchain (blocks and participants)
        """
        self.master_node = "localhost:5000"
        random.seed()
        self.generate_miner_id()
        self.address = addr
        self.participant_list = []
        self.get_current_participant_list()
        self.advertise()
        self.blockchain = Blockchain()

        while self.blockchain.empty():
            self.get_current_blockchain()
        # Valid addresses you can issue a vote to
        self.valid_addresses = [{
            "name": "Candidate 1",
            "address": "12345"
        }, {
            "name": "Candidate 2",
            "address": "5678"
        }, {
            "name": "Candidate 3",
            "address": "9999"
        }]
        self.sched = BackgroundScheduler(daemon=True)
        self.sched.start()

    def get_current_participant_list(self):
        """
        Get current participant list from other peers
        """
        logger.info("Get Current Participant List")
        if self.address == self.master_node:
            logger.info("Assuming this node as Master")
            return

        if len(self.participant_list) == 0:
            logger.info("Use master node as source")
            # Treat as the first list insertion
            try:
                r = requests.get("http://" + self.master_node + "/list")
                logger.debug("Request return: {}".format(r.status_code))
                if r.status_code == 200:
                    self.participant_list = r.json()
            except:
                logger.error("Could not get data from master node")

        else:
            logger.info("Use current participants as source")
            done = False
            while not done:
                random_participant = random.randint(
                    0,
                    len(self.participant_list) - 1)
                r = requests.get(
                    "http://" +
                    self.participant_list[random_participant]["address"] +
                    "/list")
                if r.status_code == 200:
                    self.participant_list = r.json()
                    done = True
                elif r.status_code == 408:
                    # Client timeout, remove participant from list and try another
                    self.participant_list.pop(random_participant)
                    if len(self.participant_list) == 0:
                        logger.error(
                            "Participant list is empty now, will keep trying on master node for now"
                        )
                        done = True
                else:
                    logger.error("Invalid return code, finish operation")
                    done = True

    def get_current_blockchain(self):
        """
        Request current blockchain from another peer and save it.
        """
        logger.info("Get current blockchain")
        if self.participant_list is not None:
            if len(self.participant_list) > 1:
                random_participant = random.randint(
                    0,
                    len(self.participant_list) - 1)
                # Check if the participant is not himself. If it is just abort with error
                if self.participant_list[random_participant][
                        "address"] == self.address:
                    return
                r = requests.get(
                    "http://" +
                    self.participant_list[random_participant]["address"] +
                    "/blockchain")
                logger.debug("Request result: {}".format(r.status_code))
                if r.status_code == 200:
                    self.blockchain.setup_new_chain(r.json())
            else:
                logger.info(
                    "Current node is the only one in the participant list")
                if self.blockchain.empty():
                    self.blockchain.create_genesis_block(
                        self.private_key, self.miner_id)
        else:
            # There should never be an empty participant list
            logger.error(
                "Empty participant list, won't get blockchain for now")

    def get_current_transaction_pool(self):
        """
        Request transaction pool from other peer and save it.
        """
        logger.info("Get current transaction pool")
        if self.participant_list is not None:
            random_participant = random.randint(0,
                                                len(self.participant_list) - 1)
            r = requests.get(
                "http://" +
                self.participant_list[random_participant]["address"] + "/pool")
            logger.debug("Request result: {}".format(r.status_code))
            if r.status_code == 200:
                self.transaction_pool = r.json()
                logger.debug("Current Transaction Pool {}".format(
                    self.transaction_pool))
        else:
            # There should never be an empty participant list
            logger.error(
                "Empty participant list, won't get transaction pool for now")

    def create_and_add_transaction(self, addr_to):
        """
        Used to make a transaction from current node to another
        """
        logger.info("Add transaction to pool")
        if self.has_to_vote():
            if self.check_valid_address(addr_to):
                transaction = Transaction(self.miner_id, addr_to)
                self.blockchain.add_transaction_to_pool(
                    transaction.get_signed_json(self.private_key))
                self.propagate_transaction(
                    transaction.get_signed_json(self.private_key))
                if len(self.sched.get_jobs()) == 0:
                    logger.info("Start block schedule")
                    self.sched.add_job(self.create_and_add_block,
                                       'date',
                                       run_date=datetime.now() +
                                       timedelta(seconds=5))
            else:
                logger.error("Cannot vote for this ledger, check the address")

    def has_to_vote(self):
        """
        Check if node still hasn't voted
        """
        logger.info("Checking blockchain for node votes")
        if not self.blockchain.empty():
            if self.blockchain.check_double_spending(self.miner_id):
                return False
            if self.blockchain.has_transaction_in_pool(self.miner_id):
                return False
        return True

    def check_valid_address(self, address):
        """
        Check if the value inserted is a valid candidate in the list
        """
        logger.info("Check destination address")
        for valid_candidate in self.valid_addresses:
            if address in valid_candidate["address"]:
                return True
        return False

    def create_and_add_block(self):
        """
        Create block using current transaction pool as data
        """
        block = self.blockchain.create_and_add_block(self.miner_id)
        self.propagate_block(block.get_json())
        return

    def generate_miner_id(self):
        """
        Check if there is already an ID for this node and load it, otherwise create.
        Also, keep a private key to use when signing transactions
        """
        if os.path.isfile("private_key.pem"):
            logger.info("Loading private key")
            with open("private_key.pem") as fr:
                self.private_key = RSA.importKey(fr.read())

            with open("miner_id.txt") as fr:
                self.miner_id = fr.read()

        else:
            logger.info("Create new miner ID")
            self.miner_id = str(random.randint(0, 10000))
            logger.info("Saving miner ID")
            with open("miner_id.txt", "w") as fw:
                fw.write(self.miner_id)

            logger.info("Create private key")
            self.private_key = RSA.generate(1024)
            with open("private_key.pem", "w") as fw:
                fw.write(self.private_key.exportKey("PEM").decode())

        return

    def propagate_transaction(self, transaction):
        """
        Post generated transaction to all the peers in the list
        """
        logger.info("Propagate transaction")
        for peer in self.participant_list:
            if peer["address"] != self.address:
                r = requests.post("http://" + peer["address"] + "/update_pool",
                                  json=transaction)
                if r.status_code == 200:
                    logger.info("Sent transaction to {}".format(
                        peer["address"]))

    def propagate_block(self, block):
        """
        Post generated block to all the peers in the list
        """
        logger.info("Propagate block")
        for peer in self.participant_list:
            if peer["address"] != self.address:
                r = requests.post("http://" + peer["address"] +
                                  "/add_new_block",
                                  json=block)
                if r.status_code == 200:
                    logger.info("Sent block to {}".format(peer["address"]))

    def validate_and_add_block(self, block):
        """
        Validate received block and add it to local chain
        """
        self.blockchain.validate_and_add_block(block)
        return

    def validate_and_add_transaction(self, transaction):
        """
        Validate received transaction and add it to transaction pool
        """
        logger.info("Transaction received: {}".format(transaction))
        # First check if the signature is ok
        if self.blockchain.validate_transaction(transaction):
            # Then check the destination address
            logger.info("Verified signature")
            for valid_addr in self.valid_addresses:
                if transaction["addr_to"] == valid_addr["address"]:
                    self.blockchain.add_transaction_to_pool(transaction)
                    if len(self.sched.get_jobs()) == 0:
                        logger.info("Start block schedule")
                        self.sched.add_job(self.create_and_add_block,
                                           'date',
                                           run_date=datetime.now() +
                                           timedelta(seconds=5))
        return

    def add_participant_to_list(self, peer):
        """
        Receive peer advertisement and add him to the list
        """
        if peer not in self.participant_list:
            self.participant_list.append(peer)

    def advertise(self):
        """
        Post registration message to the current peers of the network
        """
        logger.info("Advertise node to other peers")
        if len(self.participant_list) > 0:
            advertisement = {
                "miner_id": self.miner_id,
                "address": self.address
            }
            for peer in self.participant_list:
                if peer["address"] != self.address:
                    try:
                        requests.post("http://" + peer["address"] +
                                      "/advertise",
                                      json=advertisement)
                    except:
                        logger.error("Error advertising for {}".format(
                            peer["address"]))
        # Add current node to its own list
        self.participant_list.append({
            "miner_id": self.miner_id,
            "address": self.address
        })
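create_and_add_transaction and validate_and_add_transaction both use the scheduler as a debounce: a block-creation job is scheduled five seconds out only if no job is already pending, so a burst of transactions produces a single block. A minimal sketch of that debounce pattern, with a placeholder flush_pool in place of the real block creation:

import time
from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler

sched = BackgroundScheduler(daemon=True)
sched.start()


def flush_pool():
    print("creating block from pooled transactions")


def on_transaction():
    # schedule one flush 5 seconds from now, but only if none is pending yet
    if len(sched.get_jobs()) == 0:
        sched.add_job(flush_pool, 'date',
                      run_date=datetime.now() + timedelta(seconds=5))


for _ in range(3):       # several transactions arrive in quick succession
    on_transaction()     # only the first one schedules a flush
    time.sleep(1)

time.sleep(6)            # let the one-shot job fire before exiting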
示例#49
0
class JenniferBrain(object):

    UNSURE_TEXT = "Sorry, I can't help with that"
    MULTIPLE_LESSONS_APPLY = 'Which one of my lessons applies here?'

    def __init__(self, allow_network_plugins=False, always_allow_plugins=None):
        self._initialize_paths()

        # Lessons + Settings
        self.allow_network_plugins = allow_network_plugins
        self.always_allow_plugins = always_allow_plugins or []
        self.responders = []
        self.notifiers = []
        self.notification_queue = Queue.PriorityQueue()
        self._load_profile_and_settings()

        # Requires self.database & self.settings
        self._load_lessons()

        # Just to save time later
        self.nltktagger = PerceptronTagger()
        self.tagset = None

        # Notifications
        self.notification_clients = []
        self._initialize_background_tasks()

    def _initialize_paths(self):
        """Create the paths needed"""
        self.base_path = os.path.join(os.path.dirname(__file__), '..')
        self.profile_file = os.path.join(self.base_path, 'profile.json')
        self.lessons_path = os.path.join(self.base_path, 'lessons')

    def _load_lessons(self):
        """
        Search the lessons/ package for lessons & store them in sorted order by priority
        :return:
        """
        pkgs = [
            n for _, n, _ in pkgutil.iter_modules(['lessons']) if n != 'base'
        ]
        for name in pkgs:
            exec 'import lessons.' + name + '.plugin'

        responders = [
            cls(self).set_profile(self.database['profile'])
            for cls in JenniferResponsePlugin.__subclasses__()
            if self._is_lesson_allowed(cls)
        ]
        self.notifiers = [
            cls(self).set_profile(self.database['profile'])
            for cls in JenniferNotificationPlugin.__subclasses__()
            if self._is_lesson_allowed(cls)
        ]

        for r in (responders + self.notifiers):
            r.set_settings(self._get_settings_for_lesson(r))

        self.responders = sorted(responders, key=lambda l: l.PRIORITY)

    def _is_lesson_allowed(self, lesson_cls):
        if lesson_cls in self.always_allow_plugins:
            return True
        if lesson_cls.REQUIRES_NETWORK and not self.allow_network_plugins:
            return False
        return True

    def _load_profile_and_settings(self):
        """
        Load the profile
        :return:
        """
        try:
            with open(self.profile_file, 'r+') as profile_file:
                data = json.loads(profile_file.read(), strict=False)
                self.database = data
                if 'profile' in self.database and 'settings' in self.database:
                    profile_file.close()
                    return
        except (IOError, ValueError):
            self.database = {}
            self._init_profile()
            self._save_profile_to_file()

    def _get_settings_for_lesson(self, lesson, lesson_name=None):
        """
        Get the settings dict for the lesson
        (Must be called from a lesson)
        :return:
        """
        if not lesson_name:
            lesson_name = unicode(lesson.settings_name)

        try:
            return self.database['settings'][lesson_name]
        except KeyError:
            if self._test_if_settings_template_exists(lesson):
                print "--------{} SETTINGS--------".format(lesson_name)
                self._add_lesson_to_settings_and_write(lesson)
                return self._get_settings_for_lesson(lesson)
            return {}

    def _settings_template_path_for_lesson(self, lesson):
        """Gets a settings_template for a given lesson"""
        lesson_settings_name = lesson.settings_name
        return os.path.join(self.lessons_path, lesson_settings_name,
                            'settings_template.json')

    def _test_if_settings_template_exists(self, lesson):
        """Returns if a settings_template for a given lesson"""
        return os.path.isfile(self._settings_template_path_for_lesson(lesson))

    def _add_lesson_to_settings_and_write(self, lesson):
        """Loads a lesson's settings_template, runs an initialization function if available, and copies into DB"""
        lesson_settings_name = lesson.settings_name
        with open(self._settings_template_path_for_lesson(lesson)) as template:
            try:
                # Try to load initial template
                settings_template_dict = json.loads(template.read(),
                                                    strict=False)
                settings_template_dict = lesson.initialize_settings(
                    settings_template_dict)

                # Push to DB & save
                self.database['settings'][
                    lesson_settings_name] = settings_template_dict
                self._save_profile_to_file()
            except ValueError:
                exit("{} has an invalid settings_template.json".format(
                    lesson_settings_name))

    def _save_profile_to_file(self):
        """Writes to profile.json"""
        with open(self.profile_file, "w+") as f:
            plain_text = json.dumps(self.database, indent=4, sort_keys=True)
            f.write(plain_text)
            f.close()

    def _init_profile(self):
        """Should be run if profile.json doesn't exist"""
        fields = [
            ('first name', 'firstName'),
            ('last name', 'lastName'),
        ]
        location_fields = [
            ('city', 'city', 'New York City'),
            ('region', 'region', 'NY'),
            ('country', 'country', 'US'),
            ('zip', 'zip'),
        ]

        if 'profile' not in self.database:
            # initialize the profile once, not on every loop iteration
            self.database.update({'profile': {'location': {}}})
            for field in fields:
                print "What is your {}?".format(field[0])
                self.database['profile'][field[1]] = raw_input("> ")

            self.database['profile']['location'] = {}
            for field in location_fields:
                txt = "What is your {}?".format(field[0])

                if len(field) >= 3:
                    txt += " example: ({})".format(field[2])

                print txt
                self.database['profile']['location'][field[1]] = raw_input(
                    "> ")

            while True:
                print "What is your timezone? example: ({})".format(
                    random.choice(common_timezones))
                tz = raw_input('> ')
                try:
                    timezone(tz)
                    self.database['profile']['location']['timezone'] = tz
                    break
                except Exception:
                    # pytz raises UnknownTimeZoneError for invalid names
                    print "Invalid timezone"

        if 'settings' not in self.database:
            self.database.update(
                {'settings': {
                    'notifications': {
                        'quiet_hours': []
                    }
                }})

    def _get_profile(self):
        """Get the user's profile"""
        return self.database['profile']

    def take_input(self, text_input, client):
        """
        Search all lessons for lessons that can respond
        :param text_input:
        :return:
        """
        text_input = text_input.lower()
        tokens = nltk.word_tokenize(text_input)
        tags = nltk.tag._pos_tag(tokens, self.tagset, self.nltktagger)

        # TODO: extrap this out to a custom stopwords
        try:
            tags.remove(
                ('please', 'NN')
            )  # It's common to say 'please' when asking Jennifer something
        except:
            pass

        # Find the lessons that can answer
        respond_to = None
        matching_lessons = [
            lesson for lesson in self.responders if lesson.can_respond(
                tags=tags, client=client, brain=self, plain_text=text_input)
        ]

        # No answer
        if len(matching_lessons) == 0:
            self.respond_or_unsure(None, tags, client, text_input)

        # Only one module can respond
        elif len(matching_lessons) == 1:
            respond_to = matching_lessons[0]

        # Multiple lessons can response
        else:
            priority_counts = {}
            for lesson in matching_lessons:
                key = lesson.PRIORITY
                priority_counts.setdefault(key, []).append(lesson)

            # Now we have something like {999: [TimePlugin(), LowPriorityTimePlugin()], 0: [ImportantTimePlugin()]}
            min_priority = min(priority_counts.keys())

            if len(priority_counts[min_priority]) == 1:
                respond_to = priority_counts[min_priority][0]
            else:
                client.give_output_string("brain", self.MULTIPLE_LESSONS_APPLY)
                for lesson in priority_counts[min_priority]:
                    if client.confirm("brain", lesson.VERBOSE_NAME + "?"):
                        # TODO: would be nice to remember this decision.. that's v3.0 though.
                        respond_to = lesson
                        break

        return self.respond_or_unsure(respond_to, tags, client, text_input)

    def respond_or_unsure(self, respond_to, tags, client, text_input):
        try:
            return respond_to.respond(tags=tags,
                                      client=client,
                                      brain=self,
                                      plain_text=text_input)
        except Exception as e:
            return JenniferResponse(
                self, [JenniferTextResponseSegment(self.UNSURE_TEXT)])

    def _initialize_background_tasks(self):
        self.scheduler = BackgroundScheduler(timezone="UTC", daemon=True)
        self.scheduler.start()
        self.scheduler.add_job(self._collect_notifications_from_notifiers,
                               'interval',
                               seconds=10)
        self.scheduler.add_job(self.push_notifications_to_clients,
                               'interval',
                               seconds=2)
        atexit.register(lambda: self.scheduler.shutdown(wait=False))

    def _collect_notifications_from_notifiers(self):
        for notification_provider in self.notifiers:
            while not notification_provider.queue.empty():
                self.notification_queue.put(notification_provider.queue.get())

    def register_notification_client(self, client):
        self.notification_clients.append(client)

    def push_notifications_to_clients(self):
        while not self.notification_queue.empty():
            notification = self.notification_queue.get()
            for client in self.notification_clients:
                client.give_output_string("brain", notification[1])
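_initialize_background_tasks above wires two interval jobs around a priority queue: a slower producer that collects notifications and a faster consumer that pushes them to clients. A compact sketch of that producer/consumer pairing (using Python 3's queue module and shorter intervals for a quick demo, whereas the original is Python 2):

import atexit
import queue
import time

from apscheduler.schedulers.background import BackgroundScheduler

notifications = queue.PriorityQueue()


def collect():
    # producer: push (priority, message) pairs onto the queue
    notifications.put((1, "low priority ping"))


def push_to_clients():
    # consumer: drain the queue and deliver messages
    while not notifications.empty():
        _, message = notifications.get()
        print("notify:", message)


scheduler = BackgroundScheduler(timezone="UTC", daemon=True)
scheduler.start()
scheduler.add_job(collect, 'interval', seconds=3)
scheduler.add_job(push_to_clients, 'interval', seconds=1)
atexit.register(lambda: scheduler.shutdown(wait=False))

time.sleep(8)  # keep the main thread alive long enough to see a few cycles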
示例#50
0
File: app.py  Project: navin604/sba
def init_scheduler():
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(populate_stats,
                  'interval',
                  seconds=app_config['scheduler']['period_sec'])
    sched.start()
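Because BackgroundScheduler runs its jobs in daemon threads, a standalone script like the one above exits (and the jobs never fire) unless something keeps the main thread alive; inside a web app the server process does that, but a plain script needs its own loop. A minimal sketch, with a placeholder populate_stats:

import time

from apscheduler.schedulers.background import BackgroundScheduler


def populate_stats():
    print("collecting stats")


sched = BackgroundScheduler(daemon=True)
sched.add_job(populate_stats, 'interval', seconds=5)
sched.start()

# the scheduler threads are daemonic, so keep the process alive
try:
    while True:
        time.sleep(1)
except (KeyboardInterrupt, SystemExit):
    sched.shutdown()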
示例#51
0
class BaseController:

    log_severity = {"error": error, "info": info, "warning": warning}

    get_endpoints = [
        "/administration",
        "/calendar/run",
        "/calendar/task",
        "/dashboard",
        "/login",
        "/table/changelog",
        "/table/device",
        "/table/event",
        "/table/pool",
        "/table/link",
        "/table/run",
        "/table/server",
        "/table/service",
        "/table/syslog",
        "/table/task",
        "/table/user",
        "/view/network",
        "/view/site",
        "/workflow_builder",
    ]

    json_endpoints = [
        "multiselect_filtering",
        "save_configuration",
        "table_filtering",
        "view_filtering",
    ]

    form_endpoints = [
        "add_edge",
        "add_service_to_workflow",
        "copy_service_in_workflow",
        "calendar_init",
        "clear_results",
        "clear_configurations",
        "compare",
        "connection",
        "counters",
        "count_models",
        "create_label",
        "database_deletion",
        "delete_edge",
        "delete_instance",
        "delete_label",
        "delete_node",
        "duplicate_workflow",
        "export_service",
        "export_to_google_earth",
        "export_topology",
        "get",
        "get_all",
        "get_cluster_status",
        "get_device_data",
        "get_device_logs",
        "get_exported_services",
        "get_git_content",
        "get_service_logs",
        "get_properties",
        "get_result",
        "get_runtimes",
        "get_view_topology",
        "get_service_state",
        "import_service",
        "import_topology",
        "migration_export",
        "migration_import",
        "query_netbox",
        "query_librenms",
        "query_opennms",
        "reset_status",
        "run_service",
        "save_parameters",
        "save_pool_objects",
        "save_positions",
        "scan_cluster",
        "scan_playbook_folder",
        "scheduler",
        "skip_services",
        "stop_workflow",
        "task_action",
        "topology_import",
        "update",
        "update_parameters",
        "update_pool",
        "update_all_pools",
    ]

    rest_endpoints = [
        "get_cluster_status",
        "get_git_content",
        "update_all_pools",
        "update_database_configurations_from_git",
    ]

    def __init__(self):
        self.config = config
        self.path = Path.cwd()
        self.custom_properties = self.load_custom_properties()
        self.init_scheduler()
        if config["tacacs"]["active"]:
            self.init_tacacs_client()
        if config["ldap"]["active"]:
            self.init_ldap_client()
        if config["vault"]["active"]:
            self.init_vault_client()
        if config["syslog"]["active"]:
            self.init_syslog_server()
        if config["paths"]["custom_code"]:
            sys_path.append(config["paths"]["custom_code"])
        self.fetch_version()
        self.init_logs()
        self.init_connection_pools()

    def configure_database(self):
        self.init_services()
        Base.metadata.create_all(bind=engine)
        configure_mappers()
        configure_events(self)
        self.init_forms()
        self.clean_database()
        if not fetch("user", allow_none=True, name="admin"):
            self.configure_server_id()
            self.create_admin_user()
            Session.commit()
            if self.config["app"]["create_examples"]:
                self.migration_import(name="examples",
                                      import_export_types=import_classes)
                self.update_credentials()
            else:
                self.migration_import(name="default",
                                      import_export_types=import_classes)
            self.get_git_content()
            Session.commit()

    def clean_database(self):
        for run in fetch("run",
                         all_matches=True,
                         allow_none=True,
                         status="Running"):
            run.status = "Aborted (app reload)"
        Session.commit()

    def fetch_version(self):
        with open(self.path / "package.json") as package_file:
            self.version = load(package_file)["version"]

    def configure_server_id(self):
        factory(
            "server",
            **{
                "name": str(getnode()),
                "description": "Localhost",
                "ip_address": "0.0.0.0",
                "status": "Up",
            },
        )

    def create_admin_user(self) -> None:
        admin = factory("user", **{"name": "admin"})
        if not admin.password:
            admin.password = "******"

    def update_credentials(self):
        with open(self.path / "files" / "spreadsheets" / "usa.xls",
                  "rb") as file:
            self.topology_import(file)

    def get_git_content(self):
        repo = self.config["app"]["git_repository"]
        if not repo:
            return
        local_path = self.path / "network_data"
        try:
            if exists(local_path):
                Repo(local_path).remotes.origin.pull()
            else:
                local_path.mkdir(parents=True, exist_ok=True)
                Repo.clone_from(repo, local_path)
        except Exception as exc:
            self.log("error", f"Git pull failed ({str(exc)})")
        self.update_database_configurations_from_git()

    def load_custom_properties(self):
        filepath = self.config["paths"]["custom_properties"]
        if not filepath:
            custom_properties = {}
        else:
            with open(filepath, "r") as properties:
                custom_properties = yaml.load(properties)
        property_names.update(
            {k: v["pretty_name"]
             for k, v in custom_properties.items()})
        public_custom_properties = {
            k: v
            for k, v in custom_properties.items()
            if not v.get("private", False)
        }
        device_properties.extend(list(custom_properties))
        pool_device_properties.extend(list(public_custom_properties))
        for properties_table in table_properties, filtering_properties:
            properties_table["device"].extend(list(public_custom_properties))
        device_diagram_properties.extend(
            list(p for p, v in custom_properties.items()
                 if v["add_to_dashboard"]))
        private_properties.extend(
            list(p for p, v in custom_properties.items()
                 if v.get("private", False)))
        return custom_properties

    def init_logs(self):
        log_level = self.config["app"]["log_level"].upper()
        folder = self.path / "logs"
        folder.mkdir(parents=True, exist_ok=True)
        basicConfig(
            level=getattr(import_module("logging"), log_level),
            format="%(asctime)s %(levelname)-8s %(message)s",
            datefmt="%m-%d-%Y %H:%M:%S",
            handlers=[
                RotatingFileHandler(folder / "enms.log",
                                    maxBytes=20_000_000,
                                    backupCount=10),
                StreamHandler(),
            ],
        )

    def init_connection_pools(self):
        self.request_session = RequestSession()
        retry = Retry(**self.config["requests"]["retries"])
        for protocol in ("http", "https"):
            self.request_session.mount(
                f"{protocol}://",
                HTTPAdapter(
                    max_retries=retry,
                    **self.config["requests"]["pool"],
                ),
            )

    def init_scheduler(self):
        self.scheduler = BackgroundScheduler({
            "apscheduler.jobstores.default": {
                "type": "sqlalchemy",
                "url": "sqlite:///jobs.sqlite",
            },
            "apscheduler.executors.default": {
                "class": "apscheduler.executors.pool:ThreadPoolExecutor",
                "max_workers": "50",
            },
            "apscheduler.job_defaults.misfire_grace_time":
            "5",
            "apscheduler.job_defaults.coalesce":
            "true",
            "apscheduler.job_defaults.max_instances":
            "3",
        })

        self.scheduler.start()

    def init_forms(self):
        for file in (self.path / "eNMS" / "forms").glob("**/*.py"):
            spec = spec_from_file_location(
                str(file).split("/")[-1][:-3], str(file))
            spec.loader.exec_module(module_from_spec(spec))

    def init_services(self):
        path_services = [self.path / "eNMS" / "services"]
        if self.config["paths"]["custom_services"]:
            path_services.append(Path(self.config["paths"]["custom_services"]))
        for path in path_services:
            for file in path.glob("**/*.py"):
                if "init" in str(file):
                    continue
                if not self.config["app"][
                        "create_examples"] and "examples" in str(file):
                    continue
                info(f"Loading service: {file}")
                spec = spec_from_file_location(
                    str(file).split("/")[-1][:-3], str(file))
                try:
                    spec.loader.exec_module(module_from_spec(spec))
                except InvalidRequestError as e:
                    error(f"Error loading custom service '{file}' ({str(e)})")

    def init_ldap_client(self):
        self.ldap_client = Server(self.config["ldap"]["server"], get_info=ALL)

    def init_tacacs_client(self):
        self.tacacs_client = TACACSClient(self.config["tacacs"]["address"], 49,
                                          environ.get("TACACS_PASSWORD"))

    def init_vault_client(self):
        self.vault_client = VaultClient()
        self.vault_client.url = self.config["vault"]["address"]
        self.vault_client.token = environ.get("VAULT_TOKEN")
        if self.vault_client.sys.is_sealed(
        ) and self.config["vault"]["unseal"]:
            keys = [environ.get(f"UNSEAL_VAULT_KEY{i}") for i in range(1, 6)]
            self.vault_client.sys.submit_unseal_keys(filter(None, keys))

    def init_syslog_server(self):
        self.syslog_server = SyslogServer(self.config["syslog"]["address"],
                                          self.config["syslog"]["port"])
        self.syslog_server.start()

    def update_parameters(self, **kwargs):
        Session.query(models["parameters"]).one().update(**kwargs)
        self.__dict__.update(**kwargs)

    def delete_instance(self, cls, instance_id):
        return delete(cls, id=instance_id)

    def get(self, cls, id):
        return fetch(cls, id=id).serialized

    def get_properties(self, cls, id):
        return fetch(cls, id=id).get_properties()

    def get_all(self, cls):
        return [instance.get_properties() for instance in fetch_all(cls)]

    def update(self, cls, **kwargs):
        try:
            must_be_new = kwargs.get("id") == ""
            for arg in ("name", "scoped_name"):
                if arg in kwargs:
                    kwargs[arg] = kwargs[arg].strip()
            kwargs["last_modified"] = self.get_time()
            kwargs["creator"] = getattr(current_user, "name", "admin")
            instance = factory(cls, must_be_new=must_be_new, **kwargs)
            Session.flush()
            return instance.serialized
        except Exception as exc:
            Session.rollback()
            if isinstance(exc, IntegrityError):
                return {
                    "error": (f"There already is a {cls} with the same name")
                }
            return {"error": str(exc)}

    def log(self, severity, content):
        factory(
            "changelog",
            **{
                "severity": severity,
                "content": content,
                "user": getattr(current_user, "name", "admin"),
            },
        )
        self.log_severity[severity](content)

    def count_models(self):
        return {
            "counters": {cls: count(cls)
                         for cls in diagram_classes},
            "properties": {
                cls: Counter(
                    str(getattr(instance, type_to_diagram_properties[cls][0]))
                    for instance in fetch_all(cls))
                for cls in diagram_classes
            },
        }

    def compare(self, type, result1, result2):
        first = self.str_dict(getattr(fetch(type, id=result1),
                                      "result")).splitlines()
        second = self.str_dict(getattr(fetch(type, id=result2),
                                       "result")).splitlines()
        opcodes = SequenceMatcher(None, first, second).get_opcodes()
        return {"first": first, "second": second, "opcodes": opcodes}

    def build_filtering_constraints(self, obj_type, **kwargs):
        model, constraints = models[obj_type], []
        for property in filtering_properties[obj_type]:
            value = kwargs["form"].get(property)
            if not value:
                continue
            filter = kwargs["form"].get(f"{property}_filter")
            if value in ("bool-true", "bool-false"):
                constraint = getattr(model, property) == (value == "bool-true")
            elif filter == "equality":
                constraint = getattr(model, property) == value
            elif filter == "inclusion" or DIALECT == "sqlite":
                constraint = getattr(model, property).contains(value)
            else:
                regex_operator = "regexp" if DIALECT == "mysql" else "~"
                constraint = getattr(model, property).op(regex_operator)(value)
            constraints.append(constraint)
        for related_model, relation_properties in relationships[obj_type].items():
            relation_ids = [
                int(id) for id in kwargs["form"].get(related_model, [])
            ]
            filter = kwargs["form"].get(f"{related_model}_filter")
            if filter == "none":
                constraint = ~getattr(model, related_model).any()
            elif not relation_ids:
                continue
            elif relation_properties["list"]:
                constraint = getattr(model, related_model).any(
                    models[relation_properties["model"]].id.in_(relation_ids))
                if filter == "not_any":
                    constraint = ~constraint
            else:
                constraint = or_(
                    getattr(model, related_model).has(id=relation_id)
                    for relation_id in relation_ids)
            constraints.append(constraint)
        return constraints

    def multiselect_filtering(self, type, **params):
        model = models[type]
        results = Session.query(model).filter(
            model.name.contains(params.get("term")))
        return {
            "items": [
                {"text": r.ui_name, "id": str(r.id)}
                for r in results.limit(10).offset((int(params["page"]) - 1) * 10).all()
            ],
            "total_count": results.count(),
        }

    def table_filtering(self, table, **kwargs):
        model, properties = models[table], table_properties[table]
        operator = and_ if kwargs["form"].get("operator",
                                              "all") == "all" else or_
        column_index = int(kwargs["order"][0]["column"])
        if column_index < len(properties):
            order_property = getattr(model, properties[column_index])
            order_function = getattr(order_property, kwargs["order"][0]["dir"],
                                     None)
        else:
            order_function = None
        constraints = self.build_filtering_constraints(table, **kwargs)
        if table == "result":
            constraints.append(
                getattr(
                    models["result"],
                    "service" if "service" in kwargs["instance"]["type"] else
                    kwargs["instance"]["type"],
                ).has(id=kwargs["instance"]["id"]))
            if kwargs.get("runtime"):
                constraints.append(
                    models["result"].parent_runtime == kwargs["runtime"])
        result = Session.query(model).filter(operator(*constraints))
        if order_function:
            result = result.order_by(order_function())
        return {
            "draw": int(kwargs["draw"]),
            "recordsTotal": Session.query(func.count(model.id)).scalar(),
            "recordsFiltered": get_query_count(result),
            "data": [
                [
                    getattr(obj, f"table_{property}", getattr(obj, property))
                    for property in properties
                ] + obj.generate_row(table)
                for obj in result.limit(int(kwargs["length"]))
                .offset(int(kwargs["start"]))
                .all()
            ],
        }

    def allowed_file(self, name, allowed_modules):
        # Short-circuit so that a name without an extension cannot raise an IndexError
        return "." in name and name.rsplit(".", 1)[1].lower() in allowed_modules

    def get_time(self):
        return str(datetime.now())

    def send_email(
        self,
        subject,
        content,
        sender=None,
        recipients=None,
        filename=None,
        file_content=None,
    ):
        sender = sender or self.config["mail"]["sender"]
        recipients = recipients or self.config["mail"]["recipients"]
        message = MIMEMultipart()
        message["From"] = sender
        message["To"] = recipients
        message["Date"] = formatdate(localtime=True)
        message["Subject"] = subject
        message.attach(MIMEText(content))
        if filename:
            attached_file = MIMEApplication(file_content, Name=filename)
            attached_file["Content-Disposition"] = f'attachment; filename="{filename}"'
            message.attach(attached_file)
        server = SMTP(self.config["mail"]["server"],
                      self.config["mail"]["port"])
        if self.config["mail"]["use_tls"]:
            server.starttls()
            server.login(self.config["mail"]["username"],
                         environ.get("MAIL_PASSWORD"))
        server.sendmail(sender, recipients.split(","), message.as_string())
        server.close()

    def str_dict(self, input, depth=0):
        tab = "\t" * depth
        if isinstance(input, list):
            result = "\n"
            for element in input:
                result += f"{tab}- {self.str_dict(element, depth + 1)}\n"
            return result
        elif isinstance(input, dict):
            result = ""
            for key, value in input.items():
                result += f"\n{tab}{key}: {self.str_dict(value, depth + 1)}"
            return result
        else:
            return str(input)

    def strip_all(self, input):
        return input.translate(str.maketrans("", "", f"{punctuation} "))

    def update_database_configurations_from_git(self):
        for dir in scandir(self.path / "network_data"):
            if dir.name == ".git":
                continue
            device = fetch("device", allow_none=True, name=dir.name)
            if device:
                with open(Path(dir.path) / "data.yml") as data:
                    # SafeLoader avoids PyYAML's unsafe default loader (and its >= 5.1 warning)
                    parameters = yaml.load(data, Loader=yaml.SafeLoader)
                    device.update(**{"dont_update_pools": True, **parameters})

                for data in ("configuration", "operational_data"):
                    filepath = Path(dir.path) / dir.name / data
                    if not filepath.exists():
                        continue
                    with open(filepath) as file:
                        setattr(device, data, file.read())
        Session.commit()
        for pool in fetch_all("pool"):
            if pool.device_configuration or pool.device_operational_data:
                pool.compute_pool()
示例#52
0
def main():

    # Internal function to get help message
    def get_help_message():
        return (
            "To transition ground station into these modes, enter commands: \n"
            "Contact mode:                      [C] \n"
            # "Downlink mode: [D] \n"
            "Keep beacons quiet:                [Q] \n"
            "Turn on beacons:                   [U] \n"
            "Terminate Script:                  [Z] \n"
            "Display this help message:         [H]\n"
        )

    try:
        # Check for mission folder
        if not os.path.exists(GROUND_STN_MISSION_FOLDER_PATH):
            os.makedirs(GROUND_STN_MISSION_FOLDER_PATH)

        # Check for hk logs folder
        if not os.path.exists(GROUND_STN_OBC_HK_FOLDER_PATH):
            os.makedirs(GROUND_STN_OBC_HK_FOLDER_PATH)

        # Initialize serial ports for TT&C transceiver
        ttnc_port = input("Enter COM port for TT&C transceiver: ")
        serial_ttnc = serial.Serial(ttnc_port, 9600, timeout=10)

        # Create pipes to communicate with beacon process
        conn_process_beacon, conn_main_process = Pipe(duplex=True)

        # Initialize serial ports for payload transceiver
        payload_port = input("Enter COM port for Payload transceiver: ")
        serial_payload = serial.Serial(payload_port, 115200, timeout=None)

        # Initialize background scheduler for Downlink task
        scheduler = BackgroundScheduler()
        scheduler.start()

        # Enter Autonomous mode to wait for beacons
        process_beacon_collection = Process(target=handle_incoming_beacons,
                                            args=(serial_ttnc,
                                                  conn_process_beacon),
                                            daemon=True)

        run_flag = True

        while run_flag:

            # Initial begin
            print()
            print("---- GROUND STATION ----")
            init_response = input("To begin, enter [Y]... ")
            if init_response.lower() == 'y':
                # Carry on running script
                print()
                pass
            else:
                print()
                print("Exiting script...")
                break

            # Begin Autonomous Mode
            print("Entering Autonomous Mode...")
            print()
            process_beacon_collection.start()

            # Wait for trigger to enter other modes
            print("---- WAITING FOR COMMANDS ----")
            print(get_help_message())

            while run_flag:
                choice = input()
                print()

                if choice.lower() == 'h':
                    print(get_help_message())

                elif choice.lower() == 'c':

                    # Stop beacon receiving process
                    conn_main_process.send("stop")
                    process_beacon_collection.join()

                    # Start contact mode process
                    print("Start Contact mode process")
                    telecommand_type, ts = handle_contact_mode(serial_ttnc)

                    # Schedule downlink task
                    if telecommand_type == TELECOMMAND_TYPE_MISSION_DOWNLINK:
                        # Subtract 2 mins from time stamp
                        ts = ts - timedelta(minutes=2)

                        scheduler.add_job(handle_downlink_task,
                                          next_run_time=ts,
                                          args=[serial_payload])

                        print("Scheduled downlink job")
                        print()

                    # Resume beacon collection after contact mode process ends
                    print("Restart beacon collection process")
                    print()
                    process_beacon_collection = Process(
                        target=handle_incoming_beacons,
                        args=(serial_ttnc, conn_process_beacon),
                        daemon=True)
                    process_beacon_collection.start()

                elif choice.lower() == 'q':
                    print("Verbose mode now\n")
                    conn_main_process.send("verbose on")

                elif choice.lower() == 'u':
                    print("Verbose mode off\n")
                    conn_main_process.send("verbose off")

                elif choice.lower() == 'z':
                    conn_main_process.send("stop")
                    process_beacon_collection.join()
                    run_flag = False

                else:
                    print("Command not found...")
                    print()

    except KeyboardInterrupt:
        run_flag = False

    except serial.serialutil.SerialException:
        print("Invalid Serial port!")
        sys.exit()

    serial_payload.close()
    serial_ttnc.close()

    conn_main_process.close()
    conn_process_beacon.close()

    print("Terminated script")
    sys.exit()
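For reference, the one-off downlink job above relies on add_job's next_run_time override; an equivalent, slightly more explicit form uses APScheduler's 'date' trigger. A minimal standalone sketch, with a stand-in downlink routine and an invented COM port:

from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def handle_downlink_task(port):
    # stand-in for the real downlink routine
    print(f"downlink via {port}")

scheduler = BackgroundScheduler()
scheduler.start()

# Run once, two minutes before a hypothetical contact timestamp
contact_ts = datetime.now() + timedelta(minutes=10)
scheduler.add_job(handle_downlink_task, trigger='date',
                  run_date=contact_ts - timedelta(minutes=2), args=["COM3"])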
示例#53
0
def run_schedule() -> None:
    """定期実行ジョブのスケジューリング
    """
    sched = BackgroundScheduler(standalone=True, coalesce=True)
    sched.add_job(job_update_session, 'interval', minutes=1)  # refresh the session
    sched.start()
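One thing the snippet above leaves implicit: BackgroundScheduler runs its jobs from daemon threads, so the calling script has to keep its main thread alive or the interval job never fires. A minimal sketch with a stand-in job_update_session:

import time
from apscheduler.schedulers.background import BackgroundScheduler

def job_update_session():
    # stand-in for the real session-refresh job
    print("session refreshed")

sched = BackgroundScheduler()
sched.add_job(job_update_session, 'interval', minutes=1)
sched.start()
try:
    while True:
        time.sleep(1)  # keep the main thread alive so the daemon scheduler can run
except (KeyboardInterrupt, SystemExit):
    sched.shutdown()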
示例#54
0
 def run(self, output_fn, **kwargs):
     '''Process the task queue stored in the database.'''
     # Load the MySQL configuration
     from Functions import AppServer
     db_name = AppServer().getConfValue('Databases', 'MysqlDB')
     db_user = AppServer().getConfValue('Databases', 'MysqlUser')
     db_pass = AppServer().getConfValue('Databases', 'MysqlPass')
     db_ip = AppServer().getConfValue('Databases', 'MysqlHost')
     db_port = AppServer().getConfValue('Databases', 'MysqlPort')
     dbconn = 'mysql://%s:%s@%s:%s/%s' % (db_user, db_pass, db_ip,
                                          int(db_port), db_name)
     from MySQL import writeDb
     # Try to clear any job records previously stored in the database
     try:
         sql = """delete from apscheduler_jobs ;"""
         writeDb(sql, )
     except Exception:
         pass
     # Dynamically import the task functions
     moduleSrc = 'TaskFunctions'
     dylib = importlib.import_module(moduleSrc)
     # Reload the job queue (pick whichever of the two scheduler types fits the situation)
     from apscheduler.schedulers.background import BackgroundScheduler
     from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
     job_defaults = {'max_instances': 1}
     executors = {
         'default': ThreadPoolExecutor(20),
         'processpool': ProcessPoolExecutor(5)
     }
     scheduler = BackgroundScheduler(timezone='Asia/Shanghai',
                                     executors=executors,
                                     job_defaults=job_defaults)
     # sqlite or mysql
     scheduler.add_jobstore('sqlalchemy', url='%s' % dbconn)
     from MySQL import readDb
     sql = """ Select id,timedesc from taskconf where status='1' """
     result = readDb(sql, )
     for taskobject in result:
         Taskid = 'TaskID_%s' % taskobject.get('id')
         FunName = 'TaskFunc_%s' % taskobject.get('id')
         function = getattr(dylib, FunName)
         cronlist = taskobject.get('timedesc').strip().split(' ')
         print(cronlist)
         if len(cronlist) == 5:
             scheduler.add_job(func=function,
                               trigger='cron',
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         elif len(cronlist) == 6:
             scheduler.add_job(func=function,
                               trigger='cron',
                               day_of_week=cronlist[5],
                               month=cronlist[4],
                               day=cronlist[3],
                               hour=cronlist[2],
                               minute=cronlist[1],
                               second=cronlist[0],
                               id=Taskid)
         else:
             continue
     scheduler.start()
     fd = open(output_fn, 'a')
     try:
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Starting all Tasks...\n' % dtnow
         fd.write(line)
         fd.flush()
         while True:
             time.sleep(1)  # keep the main thread alive without busy-waiting
     except KeyboardInterrupt:  # catch Ctrl+C; has no effect inside this script, works from a console
         dtnow = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
         line = '\n[%s]: System Stopping all Tasks, please wait...\n' % dtnow
         fd.write(line)
         fd.close()
         scheduler.shutdown()
         time.sleep(1)
         os._exit(0)
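The loop above maps each space-separated timedesc field positionally: second, minute, hour, day, month, and optionally day_of_week. A small standalone sketch of that mapping for a hypothetical row '0 30 2 * *' (the function and job id are invented):

from apscheduler.schedulers.background import BackgroundScheduler

def nightly_report():
    print("running nightly report")

scheduler = BackgroundScheduler(timezone='Asia/Shanghai')

# '0 30 2 * *' -> second=0, minute=30, hour=2, every day, every month
second, minute, hour, day, month = '0 30 2 * *'.split(' ')
scheduler.add_job(nightly_report, trigger='cron', month=month, day=day,
                  hour=hour, minute=minute, second=second, id='TaskID_demo')
scheduler.start()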
示例#55
0
def start_week():
    scheduler = BackgroundScheduler()
    scheduler.add_job(weekRoutine, 'cron', day_of_week='tue-sun', hour=2, minute=50)
    scheduler.start()
示例#56
0
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(scrape_data, 'interval', minutes=1)
    scheduler.start()
示例#57
0
def before_first_request():
    if not app.debug or environ.get("WERKZEUG_RUN_MAIN") == "true":
        sched = BackgroundScheduler()
        sched.add_job(receiveRouteUpdate, 'interval', seconds=10)
        sched.start()
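The WERKZEUG_RUN_MAIN guard above exists because Flask's debug reloader runs the application in two processes; without it, two schedulers would poll in parallel. A sketch of how such a hook might be wired into a Flask app, assuming a Flask version that still provides before_first_request (removed in Flask 2.3) and a stand-in receiveRouteUpdate:

from os import environ

from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask

app = Flask(__name__)

def receiveRouteUpdate():
    # stand-in for the real route-update poller
    print("polling for route updates")

@app.before_first_request
def start_polling():
    # Only the reloader child (WERKZEUG_RUN_MAIN == "true") should own the scheduler,
    # otherwise the job would run once per process.
    if not app.debug or environ.get("WERKZEUG_RUN_MAIN") == "true":
        sched = BackgroundScheduler()
        sched.add_job(receiveRouteUpdate, 'interval', seconds=10)
        sched.start()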
示例#58
0
class AlarmClock:
    def __init__(self, loggingLevel=logging.INFO):
        self.log = rcpy.setupQueueLogger("AlarmClock", loggingLevel)

        self._alarmActive = True
        self._alarmWeekends = True
        self._alarmTime = datetime.time(6, 30)

        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.alarmJob = None

        self.mqtt = rcMQTT.RCMQTTclient(loggingLevel=logging.DEBUG)

        self.scheduleAlarm()

        self.log.debug("Alarm Setup Complete")

    @property
    def alarmTime(self):
        return self._alarmTime

    @alarmTime.setter
    def alarmTime(self, set_time):
        self._alarmTime = set_time
        self.scheduleAlarm()

    @property
    def alarmActive(self):
        return self._alarmActive

    @alarmActive.setter
    def alarmActive(self, is_active):
        self._alarmActive = is_active
        self.scheduleAlarm()

    @property
    def alarmWeekends(self):
        return self._alarmWeekends

    @alarmWeekends.setter
    def alarmWeekends(self, is_active):
        self._alarmWeekends = is_active
        self.scheduleAlarm()

    def updateDisplay(self, device):
        pass

    def scheduleAlarm(self):
        if self.alarmJob:
            self.alarmJob.remove()
            self.alarmJob = None

        if self.alarmActive is True:
            daysOfWeek = 'mon-sun' if self.alarmWeekends else 'mon-fri'
            self.alarmJob = self.scheduler.add_job(
                self.alarmFunction,
                'cron',
                hour=self.alarmTime.hour,
                minute=self.alarmTime.minute,
                day_of_week=daysOfWeek)
            self.log.info("Alarm Scheduled for: {} on {}".format(
                self.alarmTime, daysOfWeek))
            self.log.debug("Alarm using scheduler job: > {}".format(
                self.alarmJob))

    def alarmFunction(self):
        self.log.info("Alarm Triggered! Wake-up!")

        self.mqtt.publish("smartthings/Bedroom Light/switch/cmd", "on")

        self.log.debug("Connecting to Chromecast")
        chromecasts = pychromecast.get_chromecasts()
        cast = next(cc for cc in chromecasts
                    if cc.device.friendly_name == "Bedroom")
        self.log.debug("Chromecast Status: " + str(cast.status))

        mc = cast.media_controller
        cast.set_volume(0.00)
        self.log.debug("Audio zero")
        time.sleep(2)
        self.log.debug("Playing")
        # mc.play_media('http://techslides.com/demos/samples/sample.m4a', 'audio/mp4') # Bart Simpson
        # mc.play_media('https://github.com/akosma/Ringtones/raw/master/DaleHendrix.m4r', 'audio/mp4')
        mc.play_media('http://ice1.somafm.com/groovesalad-128-aac',
                      'audio/aac')
        mc.block_until_active()
        cast.set_volume(0.0)
        self.log.debug("Blocking complete, waiting 2")
        time.sleep(5)
        self.log.debug("Ramping")

        volume_inc = 0.01
        ramp_time = 60
        max_volume = 0.4
        volume_steps = int(max_volume / volume_inc)

        for i in range(volume_steps):
            time.sleep(ramp_time / volume_steps)
            volume = i / volume_steps * max_volume
            self.log.debug("Setting Volume: {}".format(volume))
            cast.set_volume(volume)

        self.log.debug("Ramp Complete")
        time.sleep(2)
        self.log.debug("Ending...")
示例#59
0
import wx
from apscheduler.schedulers.background import BackgroundScheduler

scheduler_ = BackgroundScheduler()
scheduler_.start()
jobId = "myjob"
jobTime = 2


def job_function():
    print("---job---")


def startBtnClick(evt):
    if scheduler_.get_job(jobId):
        print("--- The job is already running; do not start it twice ---")
    else:
        print("Starting the job")
        scheduler_.add_job(job_function, 'interval', seconds=jobTime, id=jobId)


def endBtnClick(evt):
    if scheduler_.get_job(jobId):
        scheduler_.remove_job(jobId)
        print("Stopping the job")
    else:
        print("The job has already been stopped")


app = wx.App()
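The snippet stops right after creating the wx.App, so the GUI wiring is not shown. Purely as an assumption about how the two handlers might be attached, a possible continuation (frame, button labels and positions are invented) could look like this:

# Hypothetical continuation -- not part of the original snippet
frame = wx.Frame(None, title="APScheduler demo", size=(260, 120))
panel = wx.Panel(frame)
start_btn = wx.Button(panel, label="Start", pos=(20, 20))
stop_btn = wx.Button(panel, label="Stop", pos=(130, 20))
start_btn.Bind(wx.EVT_BUTTON, startBtnClick)  # reuse the handlers defined above
stop_btn.Bind(wx.EVT_BUTTON, endBtnClick)
frame.Show()
app.MainLoop()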
示例#60
0
class JobQueue:
    """This class allows you to periodically perform tasks with the bot. It is a convenience
    wrapper for the APScheduler library.

    Attributes:
        scheduler (:class:`apscheduler.schedulers.background.BackgroundScheduler`): The APScheduler
            scheduler that backs this job queue.
        bot (:class:`telegram.Bot`): The bot instance that should be passed to the jobs.
            DEPRECATED: Use :attr:`set_dispatcher` instead.

    """
    def __init__(self) -> None:
        self._dispatcher: 'Dispatcher' = None  # type: ignore[assignment]
        self.logger = logging.getLogger(self.__class__.__name__)
        self.scheduler = BackgroundScheduler(timezone=pytz.utc)
        self.scheduler.add_listener(self._update_persistence,
                                    mask=EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

        # Dispatch errors and don't log them in the APS logger
        def aps_log_filter(record):  # type: ignore
            return 'raised an exception' not in record.msg

        logging.getLogger('apscheduler.executors.default').addFilter(
            aps_log_filter)
        self.scheduler.add_listener(self._dispatch_error, EVENT_JOB_ERROR)

    def _build_args(self,
                    job: 'Job') -> List[Union[CallbackContext, 'Bot', 'Job']]:
        if self._dispatcher.use_context:
            return [CallbackContext.from_job(job, self._dispatcher)]
        return [self._dispatcher.bot, job]

    def _tz_now(self) -> datetime.datetime:
        return datetime.datetime.now(self.scheduler.timezone)

    def _update_persistence(self, event: JobEvent) -> None:  # pylint: disable=W0613
        self._dispatcher.update_persistence()

    def _dispatch_error(self, event: JobEvent) -> None:
        try:
            self._dispatcher.dispatch_error(None, event.exception)
        # Errors should not stop the thread.
        except Exception:
            self.logger.exception(
                'An error was raised while processing the job and an '
                'uncaught error was raised while handling the error '
                'with an error_handler.')

    @overload
    def _parse_time_input(self, time: None, shift_day: bool = False) -> None:
        ...

    @overload
    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime,
                    datetime.time],
        shift_day: bool = False,
    ) -> datetime.datetime:
        ...

    def _parse_time_input(
        self,
        time: Union[float, int, datetime.timedelta, datetime.datetime,
                    datetime.time, None],
        shift_day: bool = False,
    ) -> Optional[datetime.datetime]:
        if time is None:
            return None
        if isinstance(time, (int, float)):
            return self._tz_now() + datetime.timedelta(seconds=time)
        if isinstance(time, datetime.timedelta):
            return self._tz_now() + time
        if isinstance(time, datetime.time):
            date_time = datetime.datetime.combine(
                datetime.datetime.now(
                    tz=time.tzinfo or self.scheduler.timezone).date(), time)
            if date_time.tzinfo is None:
                date_time = self.scheduler.timezone.localize(date_time)
            if shift_day and date_time <= datetime.datetime.now(pytz.utc):
                date_time += datetime.timedelta(days=1)
            return date_time
        # isinstance(time, datetime.datetime):
        return time

    def set_dispatcher(self, dispatcher: 'Dispatcher') -> None:
        """Set the dispatcher to be used by this JobQueue. Use this instead of passing a
        :class:`telegram.Bot` to the JobQueue, which is deprecated.

        Args:
            dispatcher (:class:`telegram.ext.Dispatcher`): The dispatcher.

        """
        self._dispatcher = dispatcher
        if dispatcher.bot.defaults:
            self.scheduler.configure(
                timezone=dispatcher.bot.defaults.tzinfo or pytz.utc)

    def run_once(
        self,
        callback: Callable[['CallbackContext'], None],
        when: Union[float, datetime.timedelta, datetime.datetime,
                    datetime.time],
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs once and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            when (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                         \
                  :obj:`datetime.datetime` | :obj:`datetime.time`):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (``datetime.tzinfo``) is :obj:`None`,
                  the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (``time.tzinfo``) is :obj:`None`, the
                  default timezone of the bot will be used.

            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)
        date_time = self._parse_time_input(when, shift_day=True)

        j = self.scheduler.add_job(
            callback,
            name=name,
            trigger='date',
            run_date=date_time,
            args=self._build_args(job),
            timezone=date_time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_repeating(
        self,
        callback: Callable[['CallbackContext'], None],
        interval: Union[float, datetime.timedelta],
        first: Union[float, datetime.timedelta, datetime.datetime,
                     datetime.time] = None,
        last: Union[float, datetime.timedelta, datetime.datetime,
                    datetime.time] = None,
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs at specified intervals and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            interval (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta`): The interval in which
                the job will run. If it is an :obj:`int` or a :obj:`float`, it will be interpreted
                as seconds.
            first (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Time in or at which the job should run. This parameter will be interpreted
                depending on its type.

                * :obj:`int` or :obj:`float` will be interpreted as "seconds from now" in which the
                  job should run.
                * :obj:`datetime.timedelta` will be interpreted as "time from now" in which the
                  job should run.
                * :obj:`datetime.datetime` will be interpreted as a specific date and time at
                  which the job should run. If the timezone (``datetime.tzinfo``) is :obj:`None`,
                  the default timezone of the bot will be used.
                * :obj:`datetime.time` will be interpreted as a specific time of day at which the
                  job should run. This could be either today or, if the time has already passed,
                  tomorrow. If the timezone (``time.tzinfo``) is :obj:`None`, the
                  default timezone of the bot will be used.

                Defaults to ``interval``.
            last (:obj:`int` | :obj:`float` | :obj:`datetime.timedelta` |                        \
                   :obj:`datetime.datetime` | :obj:`datetime.time`, optional):
                Latest possible time for the job to run. This parameter will be interpreted
                depending on its type. See ``first`` for details.

                If ``last`` is :obj:`datetime.datetime` or :obj:`datetime.time` type
                and ``last.tzinfo`` is :obj:`None`, the default timezone of the bot will be
                assumed.

                Defaults to :obj:`None`.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        Note:
            ``interval`` is always respected "as-is". That means that if DST changes during that
            interval, the job might not run at the time one would expect. It is therefore
            recommended to pin servers to UTC so that time-related behaviour stays predictable.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        dt_first = self._parse_time_input(first)
        dt_last = self._parse_time_input(last)

        if dt_last and dt_first and dt_last < dt_first:
            raise ValueError("'last' must not be before 'first'!")

        if isinstance(interval, datetime.timedelta):
            interval = interval.total_seconds()

        j = self.scheduler.add_job(
            callback,
            trigger='interval',
            args=self._build_args(job),
            start_date=dt_first,
            end_date=dt_last,
            seconds=interval,
            name=name,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_monthly(
        self,
        callback: Callable[['CallbackContext'], None],
        when: datetime.time,
        day: int,
        context: object = None,
        name: str = None,
        day_is_strict: bool = True,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs on a monthly basis and adds it to the queue.

        Args:
            callback (:obj:`callable`):  The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            when (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``when.tzinfo``) is :obj:`None`, the default timezone of the bot will be used.
            day (:obj:`int`): Defines the day of the month whereby the job would run. It should
                be within the range of 1 and 31, inclusive.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            day_is_strict (:obj:`bool`, optional): If :obj:`False` and ``day`` exceeds the number
                of days in that month, the job will run on the last day of the month instead.
                Defaults to :obj:`True`.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        if day_is_strict:
            j = self.scheduler.add_job(
                callback,
                trigger='cron',
                args=self._build_args(job),
                name=name,
                day=day,
                hour=when.hour,
                minute=when.minute,
                second=when.second,
                timezone=when.tzinfo or self.scheduler.timezone,
                **job_kwargs,
            )
        else:
            # job_kwargs are forwarded to add_job below; CronTrigger does not accept them
            trigger = OrTrigger([
                CronTrigger(
                    day=day,
                    hour=when.hour,
                    minute=when.minute,
                    second=when.second,
                    timezone=when.tzinfo or self.scheduler.timezone,
                ),
                CronTrigger(
                    day='last',
                    hour=when.hour,
                    minute=when.minute,
                    second=when.second,
                    timezone=when.tzinfo or self.scheduler.timezone,
                ),
            ])
            j = self.scheduler.add_job(callback,
                                       trigger=trigger,
                                       args=self._build_args(job),
                                       name=name,
                                       **job_kwargs)

        job.job = j
        return job

    def run_daily(
        self,
        callback: Callable[['CallbackContext'], None],
        time: datetime.time,
        days: Tuple[int, ...] = Days.EVERY_DAY,
        context: object = None,
        name: str = None,
        job_kwargs: JSONDict = None,
    ) -> 'Job':
        """Creates a new ``Job`` that runs on a daily basis and adds it to the queue.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            time (:obj:`datetime.time`): Time of day at which the job should run. If the timezone
                (``time.tzinfo``) is :obj:`None`, the default timezone of the bot will be used.
            days (Tuple[:obj:`int`], optional): Defines on which days of the week the job should
                run (where ``0-6`` correspond to Monday through Sunday). Defaults to ``EVERY_DAY``.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to :obj:`None`.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.
            job_kwargs (:obj:`dict`, optional): Arbitrary keyword arguments to pass to the
                ``scheduler.add_job()``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        Note:
            For a note about DST, please see the documentation of `APScheduler`_.

        .. _`APScheduler`: https://apscheduler.readthedocs.io/en/stable/modules/triggers/cron.html
                           #daylight-saving-time-behavior

        """
        if not job_kwargs:
            job_kwargs = {}

        name = name or callback.__name__
        job = Job(callback, context, name, self)

        j = self.scheduler.add_job(
            callback,
            name=name,
            args=self._build_args(job),
            trigger='cron',
            day_of_week=','.join([str(d) for d in days]),
            hour=time.hour,
            minute=time.minute,
            second=time.second,
            timezone=time.tzinfo or self.scheduler.timezone,
            **job_kwargs,
        )

        job.job = j
        return job

    def run_custom(
        self,
        callback: Callable[['CallbackContext'], None],
        job_kwargs: JSONDict,
        context: object = None,
        name: str = None,
    ) -> 'Job':
        """Creates a new customly defined ``Job``.

        Args:
            callback (:obj:`callable`): The callback function that should be executed by the new
                job. Callback signature for context based API:

                    ``def callback(CallbackContext)``

                ``context.job`` is the :class:`telegram.ext.Job` instance. It can be used to access
                its ``job.context`` or change it to a repeating job.
            job_kwargs (:obj:`dict`): Arbitrary keyword arguments. Used as arguments for
                ``scheduler.add_job``.
            context (:obj:`object`, optional): Additional data needed for the callback function.
                Can be accessed through ``job.context`` in the callback. Defaults to ``None``.
            name (:obj:`str`, optional): The name of the new job. Defaults to
                ``callback.__name__``.

        Returns:
            :class:`telegram.ext.Job`: The new ``Job`` instance that has been added to the job
            queue.

        """
        name = name or callback.__name__
        job = Job(callback, context, name, self)

        j = self.scheduler.add_job(callback,
                                   args=self._build_args(job),
                                   name=name,
                                   **job_kwargs)

        job.job = j
        return job

    def start(self) -> None:
        """Starts the job_queue thread."""
        if not self.scheduler.running:
            self.scheduler.start()

    def stop(self) -> None:
        """Stops the thread."""
        if self.scheduler.running:
            self.scheduler.shutdown()

    def jobs(self) -> Tuple['Job', ...]:
        """
        Returns a tuple of all *pending/scheduled* jobs that are currently in the ``JobQueue``.
        """
        return tuple(
            Job.from_aps_job(job, self) for job in self.scheduler.get_jobs())

    def get_jobs_by_name(self, name: str) -> Tuple['Job', ...]:
        """Returns a tuple of all *pending/scheduled* jobs with the given name that are currently
        in the ``JobQueue``"""
        return tuple(job for job in self.jobs() if job.name == name)
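For context, this JobQueue is normally reached through python-telegram-bot's Updater rather than built by hand; a minimal v13-style usage sketch (token and chat id are placeholders):

import datetime
from telegram.ext import CallbackContext, Updater

def heartbeat(context: CallbackContext):
    context.bot.send_message(chat_id=123456789, text="still alive")  # placeholder chat id

updater = Updater("BOT_TOKEN")      # placeholder token
job_queue = updater.job_queue       # an instance of the JobQueue shown above

job_queue.run_repeating(heartbeat, interval=60, first=10)    # every minute, starting in 10 s
job_queue.run_daily(heartbeat, time=datetime.time(hour=8))   # 08:00 in the bot's default timezone

updater.start_polling()
updater.idle()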