Example no. 1
class LightManager(object):
    def __init__(self):
        # zones is assumed to be a module-level list of zone configurations
        self.setterZones = [LightSetter(zone.mode, zone.pinout, description=zone.description) for zone in zones]
        self.defaultSetterZone = self.setterZones[0]
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()

    def setLights(self, color, zone=None):
        if not zone:
            self.defaultSetterZone.setLights(color)
        elif int(zone) in range(len(self.setterZones)):
            self.setterZones[int(zone)].setLights(color)
        else:
            raise InvalidZoneException

    def setEvent(self, secondsUntilEvent, color, zone=None):
        eventTime = datetime.now()+timedelta(seconds=secondsUntilEvent)
        eventId = str(hash((eventTime, color)))
        self.scheduler.add_job(self.setLights, args=(color,zone), next_run_time=eventTime, id=eventId)
        return eventId

    def cancelEvent(self, eventId):
        self.scheduler.remove_job(eventId)

    def getZoneInfo(self):
        return {str(i): {'type': zone.mode, 'description': zone.description} for i, zone in enumerate(self.setterZones)}
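The setEvent method above leans on an APScheduler detail worth noting: calling add_job with a next_run_time but no trigger runs the job exactly once at that time, and the returned job id can later be handed to remove_job to cancel it. A minimal sketch of that pattern, with an illustrative flash job:

from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

def flash():
    print("lights changed at", datetime.now())

sched = BackgroundScheduler()
sched.start()
run_at = datetime.now() + timedelta(seconds=5)
# with no trigger argument the job fires once at next_run_time and is then removed
job = sched.add_job(flash, next_run_time=run_at, id="flash-once")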
Example no. 2
def main():
	plugin_list = cf.sections()
	print(plugin_list)
	notif = pynsca.NSCANotifier(cf.monitor_server, password=cf.nsca_pass)
	plugin_scheduler = BackgroundScheduler()
	try:
		for svc_name in plugin_list:
			if str(svc_name) == 'defaults':
				continue
			logging.info("%s loading" % str(svc_name))
			cmd = cf.g(svc_name, "check")
			if cmd is None:
				logging.error("%s check does not exist" % svc_name)
				sys.exit(2)
			plugin_interval = cf.g(svc_name, "interval")
			if plugin_interval is None:
				continue
			# config values are assumed to arrive as strings, hence the int() cast
			plugin_scheduler.add_job(func=check_execute, args=[cmd, svc_name, notif], trigger='interval', seconds=int(plugin_interval), id=svc_name)
			logging.info("%s loaded" % str(svc_name))
		plugin_scheduler.start()
		while True:
			time.sleep(2)
	except Exception as e:
		logging.info("Agent boot up error: %s" % str(e))
		sys.exit(1)
Example no. 3
    def start(self):
        if self.config.webui['enable']:
            scheduler = BackgroundScheduler(
                logger=logging.getLogger('schedule'))
            logging.info("Using Background Scheduler")
        else:
            scheduler = BlockingScheduler(logger=logging.getLogger('schedule'))
            logging.info("Using Blocking Scheduler")

        scheduler.add_job(self.fetch, 'cron', minute=30, id='fetch')
        scheduler.add_job(self.sort, 'cron', minute=0, id='sort')

        # run once at launch
        self.fetch()
        self.sort()

        scheduler.start()

        if self.config.webui['enable']:
            logging.debug("Setting up WebUI")
            from fetcherd.webui import WebUI
            self.webui = WebUI(self, self.config)
            logging.debug("Starting WebUI")
            self.webui.run()
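A minimal sketch of the selection logic above: BackgroundScheduler runs its jobs in a daemon thread and start() returns immediately, while BlockingScheduler.start() takes over the calling thread, which is why the background variant is chosen whenever the web UI also needs to run. The tick job is illustrative:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.blocking import BlockingScheduler

def tick():
    print("tick")

use_background = True  # e.g. when a web UI still needs the main thread
sched = BackgroundScheduler() if use_background else BlockingScheduler()
sched.add_job(tick, "interval", seconds=10)
sched.start()  # returns at once for BackgroundScheduler; blocks for BlockingScheduler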
Example no. 4
class TestPySpout(basesinfonierspout.BaseSinfonierSpout):

    def __init__(self):

        basesinfonierspout.BaseSinfonierSpout.__init__(self)  # call the base initializer on self, not on a throwaway instance

    def useropen(self):

        # Using deque as a queue
        self.queue = deque()
        
        self.frequency = int(self.getParam("frequency"))
        
        # This scheduler launches self.job function every X seconds
        self.sched = BackgroundScheduler()
        self.sched.add_job(self.job, "interval", seconds=self.frequency, id="testpy")
        self.sched.start()

    def usernextTuple(self):

        # If there are items in self.queue, get the first one (.popleft()), do what you want with it and emit the tuple
        if self.queue:
            self.addField("timestamp",self.queue.popleft())
            self.emit()
    
    def userclose(self):
        
        pass
    
    def job(self):
        
        self.queue.append(str(int(time.time())))
Example no. 5
def start():
    print('cron started')
    scheduler = BackgroundScheduler()
    scheduler.add_job(run_daily, 'interval', days=1)
    scheduler.add_job(run_sync, 'interval', minutes=5,
                      max_instances=1, next_run_time=datetime.now())
    scheduler.start()
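Note the next_run_time=datetime.now() on the run_sync job: an interval job normally waits one full period before its first run, and this forces an immediate first execution. A sketch of just that pattern, with an illustrative job body:

from datetime import datetime
from apscheduler.schedulers.background import BackgroundScheduler

def run_sync():
    print("sync")

sched = BackgroundScheduler()
# fires immediately, then every 5 minutes thereafter
sched.add_job(run_sync, 'interval', minutes=5, next_run_time=datetime.now())
sched.start()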
Example no. 6
def main():
    scheduler = Scheduler()
    servermap = ServerMap()
    scheduler.add_job(servermap.reload, trigger='cron', minute='*/5')
    scheduler.add_job(servermap.main, trigger='cron', minute='*/1')
    scheduler.start()
    dashboard.run()
Example no. 7
def schedule_notices():
    sched = BackgroundScheduler()
    sched.start()


    trigger = CronTrigger(day_of_week='*', hour=17)
    sched.add_job(send_notice, trigger)
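Passing a CronTrigger instance is interchangeable with the string shorthand: add_job(func, 'cron', ...) builds the same trigger internally, and day_of_week='*' is already the default. A sketch of the shorter form:

from apscheduler.schedulers.background import BackgroundScheduler

def send_notice():
    print("notice sent")

sched = BackgroundScheduler()
sched.start()
sched.add_job(send_notice, "cron", hour=17)  # every day at 17:00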
Example no. 8
def initialize():
	"""Setting a schedule for the background process"""
	with app.app_context():
		print("Scheduling...")
		apsched = BackgroundScheduler()
		apsched.add_job(run_check, 'interval', seconds=60)
		apsched.start()
Example no. 9
def nameserver_check_scheduler(heartbeat_obj):
    """ Schedule the check using the heartbeat object """
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(heartbeat_obj.nameserver_check,
                  'cron',
                  second=("*/%s" % int(heartbeat_obj.configuration['heartbeat']['default']['interval'])))

    retries_check = int(heartbeat_obj.configuration['heartbeat']['default']['init_retries'])
    retry_wait = 10

    while retries_check != 0:
        try:
            next(heartbeat_obj.nameservers)
        except StopIteration:
            pretty_log("Heartbeat scheduler not initialized yet... Will retry %s more times..." % retries_check)
            pretty_log("Will retry in %s seconds" % retry_wait)
            retries_check -= 1
            sleep(retry_wait)
        else:
            pretty_log("Heartbeat scheduler initialized...")
            return True
    else:
        # this while-else branch runs only once the retries are exhausted
        pretty_log("Heartbeat scheduler error!")
        return False
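The second=("*/%s" % interval) field above makes a cron job fire every N seconds within each minute; unlike an 'interval' trigger, it is aligned to the wall clock. A sketch with a fixed five-second step instead of the config value:

from apscheduler.schedulers.background import BackgroundScheduler

def check():
    print("heartbeat check")

sched = BackgroundScheduler()
sched.start()
sched.add_job(check, "cron", second="*/5")  # at :00, :05, :10, ... of every minute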
Example no. 10
def mail2diaspora(config_pathname):

    # configure logging
    logger = logging.getLogger(__name__)
    configure_logging(logging.INFO)

    logger.info("Start mail2diaspora application")
    config.initialize(config_pathname)

    os.chdir(config.get(config.TEMP))

    # cron email fetcher
    scheduler = BackgroundScheduler()
    scheduler.add_job(
        diaspora.mail_poll, "interval", seconds=config.getInt(config.MAIL_POLLING)
    )
    scheduler.start()

    print("Press Ctrl+{0} to exit".format("Break" if os.name == "nt" else "C"))
    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()

    logger.info("Stop mail2diaspora application")
Example no. 11
    def build(cube_name_list, buildType, start_time=None, end_time=None):
        run_cube_job_id = '1'
        check_cube_job_id = '2'
        scheduler = BackgroundScheduler()
        CubeWorker.job_instance_dict = {}

        for cube_name in cube_name_list:
            CubeWorker.job_instance_dict[cube_name] = None

        CubeWorker.scheduler = scheduler
        CubeWorker.run_cube_job_id = run_cube_job_id
        CubeWorker.check_cube_job_id = check_cube_job_id
        # start the run cube job immediately
        CubeWorker.run_cube_job(buildType, start_time, end_time)

        scheduler.add_job(CubeWorker.run_cube_job, 'interval', seconds=30, id=run_cube_job_id, args=[buildType, start_time, end_time])
        scheduler.add_job(CubeWorker.check_cube_job, 'interval', seconds=30, id=check_cube_job_id)
        scheduler.start()

        while True:
            if CubeWorker.all_finished():
                print "all cube jobs are finished"
                scheduler.remove_job(check_cube_job_id)
                scheduler.remove_job(run_cube_job_id)
                scheduler.shutdown()
                
                status = CubeWorker.get_status()
                print('Build exited with status %s' % status)
                return status == CubeWorkerStatus.SUCCESS

            time.sleep(settings.KYLIN_CHECK_STATUS_INTERVAL)
Example no. 12
def run_scheduler():
    scheduler = BackgroundScheduler()
    # scheduler.add_job(func, "interval", days=1)
    scheduler.add_job(check_overdue, "interval", days=1)
    # scheduler.add_job(send_mail_test, "interval", minutes=1)
    scheduler.start()
    print("Scheduler started!")
Example no. 13
File: quote.py Project: xujhao/py
class RealTimeQuote(object):
    def __init__(self, cf, codelist, eventEngine_):
        self._codelist = codelist
        logger.info("codelist:%s", self._codelist)
        self._eventEngine = eventEngine_
        #self._eventEngine.register(EVENT_TIMER, self.TimerCall)
        self._sched = BackgroundScheduler()

    def start(self):
        self._sched.add_job(self.TimerCall, 'interval',  seconds=3)
        self._sched.start()
        logger.info('RealTimeQuote start')

    def stop(self):
        logger.info('RealTimeQuote stop')
        self._sched.shutdown()

    def TimerCall(self):
        '''
        Periodically fetch the latest quotes for the codes in the watch list.
        :return:
        '''
        if len(self._codelist) < 1:
            return

        rtQuote = GetRealTimeQuote(self._codelist)
        for i in range(rtQuote.shape[0]):
            itQuote = rtQuote.iloc[i]  # positional row access; .ix is removed in modern pandas
            if float(itQuote['amount']) <= 0.01:
                continue
            event = Event(type_=EVENT_MARKETDATA_CONTRACT + itQuote['code'])
            event.dict_['tick'] = itQuote
            self._eventEngine.put(event)
Example no. 14
class TimerTrigger(BaseTrigger):
    name = "timer"
    log = logging.getLogger("zuul.Timer")

    def __init__(self, trigger_config={}, sched=None, connection=None):
        super(TimerTrigger, self).__init__(trigger_config, sched, connection)
        self.apsched = BackgroundScheduler()
        self.apsched.start()

    def _onTrigger(self, pipeline_name, timespec):
        for project in self.sched.layout.projects.values():
            event = TriggerEvent()
            event.type = "timer"
            event.timespec = timespec
            event.forced_pipeline = pipeline_name
            event.project_name = project.name
            self.log.debug("Adding event %s" % event)
            self.sched.addEvent(event)

    def stop(self):
        self.apsched.shutdown()

    def getEventFilters(self, trigger_conf):
        def toList(item):
            if not item:
                return []
            if isinstance(item, list):
                return item
            return [item]

        efilters = []
        for trigger in toList(trigger_conf):
            f = EventFilter(trigger=self, types=["timer"], timespecs=toList(trigger["time"]))

            efilters.append(f)

        return efilters

    def postConfig(self):
        for job in self.apsched.get_jobs():
            job.remove()
        for pipeline in self.sched.layout.pipelines.values():
            for ef in pipeline.manager.event_filters:
                if ef.trigger != self:
                    continue
                for timespec in ef.timespecs:
                    parts = timespec.split()
                    if len(parts) < 5 or len(parts) > 6:
                        self.log.error(
                            "Unable to parse time value '%s' " "defined in pipeline %s" % (timespec, pipeline.name)
                        )
                        continue
                    minute, hour, dom, month, dow = parts[:5]
                    if len(parts) > 5:
                        second = parts[5]
                    else:
                        second = None
                    trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour, minute=minute, second=second)

                    self.apsched.add_job(self._onTrigger, trigger=trigger, args=(pipeline.name, timespec))
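A condensed sketch of the timespec parsing above, using a hypothetical entry "*/30 * * * *" (every 30 minutes); note that, as in the code, the month field is parsed but never passed to the trigger:

from apscheduler.triggers.cron import CronTrigger

timespec = "*/30 * * * *"
minute, hour, dom, month, dow = timespec.split()[:5]
trigger = CronTrigger(day=dom, day_of_week=dow, hour=hour, minute=minute, second=None)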
Example no. 15
    def __init__(self, profile):
        self._logger = logging.getLogger(__name__)
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = []
        
        self._logger.debug('Initializing Notifier...')

        if 'gmail_address' in profile and 'gmail_password' in profile:
            self.notifiers.append(self.NotificationClient(
                self.handleEmailNotifications, None))
        else:
            self._logger.warning('gmail_address or gmail_password not set ' +
                                 'in profile, Gmail notifier will not be used')

        if 'ssh_auth_log' in profile:
            self.notifiers.append(self.NotificationClient(
                    self.handleSSHAuthNotifications, None))
        else:
            self._logger.warning('ssh_auth_log not set, ' +
                                 'SSH login notifier will not be used')

        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        sched = BackgroundScheduler(timezone="UTC", job_defaults=job_defaults)
        sched.start()
        sched.add_job(self.gather, 'interval', seconds=30)
        atexit.register(lambda: sched.shutdown(wait=False))
        
        # put the scheduler in Notifier object for reference
        self._sched = sched
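The job_defaults passed above are worth spelling out: coalesce=True collapses a backlog of missed runs into a single run, and max_instances=1 stops a slow gather from overlapping itself. A minimal sketch of the same configuration with an illustrative job:

from apscheduler.schedulers.background import BackgroundScheduler

def gather():
    print("gathering notifications")

job_defaults = {'coalesce': True, 'max_instances': 1}
sched = BackgroundScheduler(timezone="UTC", job_defaults=job_defaults)
sched.start()
sched.add_job(gather, 'interval', seconds=30)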
Example no. 16
class Processor:

	def like_wall(self):
		print("Will like a wall!")
		try:
			news = self.user.get_news(random.randint(10, 100))
			print("News: ", len(news))
			for n in news:
				likes = n.get("likes", None)
				if likes and likes["user_likes"] == 0 and likes["can_like"] == 1:
					print("LIKE", n["post_id"])
					self.user.like_post(n["post_id"], n["source_id"])
					print("Sleep")
					time.sleep(random.uniform(0, 5))
					print("Done")
		except Exception:
			print("Error in like")


	def __init__(self, user):
		self.user = user
		self.sched = BackgroundScheduler()
		self.sched.add_job(self.like_wall, "interval", seconds=60)
		self.sched.start()

	def process_message(self, message, chatid, userid):
		return
Example no. 17
class Bot():
    def __init__(self, cfg):
        self.token = cfg.TOKEN_BOT
       
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': cfg.POSTGRESQL_DB
            },
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '20'
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '5'
            },
            'apscheduler.job_defaults.coalesce': 'false',
            'apscheduler.job_defaults.max_instances': '3',
            'apscheduler.timezone': 'UTC',
        })

    def post(self, text, channel):
        url = 'https://{domain}/services/hooks/slackbot?token={token}&channel=%23{channel}'
        r = requests.post(url.format(domain=cfg.TEAMDOMAIN, token=self.token, channel=channel), data=text)

    def test(self, args=None):
        print('Scheduler test')
        if args:
            print('job args: {0}'.format(' '.join(args)))

    def add_reminder(self):
        self.scheduler.add_job(self.test, 'interval', minutes=1, id='job_id', jobstore='default')
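The constructor above uses APScheduler's flat, prefixed configuration dict: every key beginning with 'apscheduler.' is routed to the matching jobstore, executor, or job default. A sketch of the same style with the SQLAlchemy jobstore dropped in favour of the in-memory default, so the snippet runs without PostgreSQL:

from apscheduler.schedulers.background import BackgroundScheduler

sched = BackgroundScheduler({
    'apscheduler.executors.default': {
        'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
        'max_workers': '20'
    },
    'apscheduler.job_defaults.coalesce': 'false',
    'apscheduler.job_defaults.max_instances': '3',
    'apscheduler.timezone': 'UTC',
})
sched.start()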
Example no. 18
    def post(self, action, position = ''):
        global scheduler
        self.checkStartup()
        
        if action == 'play':
            runCommand('mpc play ' + position)
            #Settings.set('radio', 'state', 'play')
            
            if scheduler is None:
                scheduler = BackgroundScheduler()
                scheduler.add_job(self.checkStatus, 'interval', seconds=30, id='checkStatus', replace_existing=True)
                scheduler.start()
        elif action == 'stop':
            runCommand('mpc stop')
            #Settings.set('radio', 'state', 'stop')
            
            if scheduler is not None:
                scheduler.remove_job('checkStatus')
                scheduler.shutdown()
                scheduler = None
            return {'playMode': 'stopped'}
        elif action == 'pause':
            runCommand('mpc pause')
        elif action == 'next':
            runCommand('mpc next')
        elif action == 'previous':
            runCommand('mpc prev')
        else:
            return {'playMode': 'invalid'}

        (out, err) = runCommand('mpc status')
        if err:
            return {'error': err}, 500  # the original built a set literal here by mistake
        return {'playMode': Parser.parsePlayMode(out)}
Example no. 19
def run_web():

	nt.dirNameProxy.startDirObservers()


	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
	sched.start()


	x = 60
	for name, classInstance in nt.__dict__.items():

		# look up all class instances in nameTools. If they have the magic attribute "NEEDS_REFRESHING",
		# that means they support scheduling, so schedule the class in question.
		# To support auto-refreshing, the class needs to define:
		# cls.NEEDS_REFRESHING = {anything, just checked for existence}
		# cls.REFRESH_INTERVAL = {number of seconds between refresh calls}
		# cls.refresh()        = Call to do the refresh operation. Takes no arguments.
		#
		if isinstance(classInstance, type) or not hasattr(classInstance, "NEEDS_REFRESHING"):
			continue

		sched.add_job(classInstance.refresh,
					trigger='interval',
					seconds=classInstance.REFRESH_INTERVAL,
					start_date=datetime.datetime.now()+datetime.timedelta(seconds=20+x),
					jobstore='main_jobstore')

		x += 60*2.5


	# It looks like cherrypy installs a ctrl+c handler, so I don't need to.
	webserver_process.serverProcess()
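The snippet above assumes jobstores, executors and job_defaults dicts defined elsewhere; a guess at a matching setup (the jobstore name mirrors the jobstore='main_jobstore' reference, everything else is illustrative) might look like:

import datetime
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor

jobstores = {'main_jobstore': MemoryJobStore()}
executors = {'default': ThreadPoolExecutor(10)}
job_defaults = {'coalesce': True}

sched = BackgroundScheduler(jobstores=jobstores, executors=executors,
                            job_defaults=job_defaults)
sched.start()
sched.add_job(lambda: print("refresh"), trigger='interval', seconds=3600,
              start_date=datetime.datetime.now() + datetime.timedelta(seconds=80),
              jobstore='main_jobstore')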
Example no. 20
class timer:
    '''
    process = timer(function, [para1,para2,...], intervalseconds)
    process.run()
    '''
    def __init__(self, func, paras, seconds, id):
        self.func = func
        self.paras = paras
        self.time = seconds
        self.id = id
        self.scheduler = None
        self.setTimer()

    def setTimer(self):
        self.scheduler = BackgroundScheduler()
        # pass the stored parameters to the job; the original dropped self.paras
        self.scheduler.add_job(self.func, 'interval', args=self.paras, seconds=self.time, id=self.id)

    def add_job(self, func, seconds, id):
        # seconds and id must be keyword arguments; positionally they would land in args/kwargs
        self.scheduler.add_job(func, 'interval', seconds=seconds, id=id)

    def remove_job(self, id):
        self.scheduler.remove_job(id)


    def run(self):
        self.scheduler.start()
Example no. 21
def startScheduler():
    db.create_all()
    #create default roles!
    if not db.session.query(models.Role).filter(models.Role.name == "admin").first():
        admin_role = models.Role(name='admin', description='Administrator Role')
        user_role = models.Role(name='user', description='User Role')
        db.session.add(admin_role)
        db.session.add(user_role)
        db.session.commit()
        
    try:
        import tzlocal

        tz = tzlocal.get_localzone()
        logger.info("local timezone: %s" % tz)
    except Exception:
        tz = None

    if not tz or tz.zone == "local":
        logger.error('Local timezone name could not be determined. Scheduler will display times in UTC for any log '
                     'messages. To resolve this, set up /etc/timezone with the correct time zone name.')
        tz = pytz.utc
    #in debug mode this is executed twice :(
    #DONT run flask in auto reload mode when testing this!
    scheduler = BackgroundScheduler(logger=sched_logger, timezone=tz)
    scheduler.add_job(notify.task, 'interval', seconds=config.SCAN_INTERVAL, max_instances=1,
                      start_date=datetime.datetime.now(tz) + datetime.timedelta(seconds=2))
    scheduler.start()
    sched = scheduler
Example no. 22
def getScheduler():
  "getScheduler constructs and returns a scheduler object"

  #Define default background scheduler
  scheduler = BackgroundScheduler()

  #Define ownerJob to be scheduled
  def ownerJob():
    "ownerJob updates the active_owners file with owner information"

    #Get current list of owners
    data = DevicesAttached().getActiveOwners()

    #Obtain local time and unpack the struct fields
    timeStruct = time.localtime()
    [year,month,day,hour,minute,second,weekDay,yearDay,isDst] = timeStruct
    data["timestamp"] = "{:02d}:{:02d}:{:02d}".format(hour,minute,second)

    #Save file with date name
    filename = "active_owners_{:02d}_{:02d}_{:02d}.csv".format(day,month,year)
    saveRow(filename,data)

    #If end of a day upload log to dropbox
    if ((hour == 23) and (minute > 57)):
      try:
        DropboxAPI().saveFile(filename)
      except Exception as exception:
        print(str(exception))

  ownerJob()

  scheduler.add_job(ownerJob,'interval',minutes=2,id="owner-job")
  return scheduler
Example no. 23
def configure_scheduler_from_config(settings):
    scheduler = BackgroundScheduler()
    scheduler.start()

    # run `purge_account` job at 0:00
    scheduler.add_job(
        purge_account,
        id='purge_account',
        name='Purge accounts which were not activated',
        trigger='cron',
        hour=0,
        minute=0
    )

    # run `purge_token` job at 0:30
    scheduler.add_job(
        purge_token,
        id='purge_token',
        name='Purge expired tokens',
        trigger='cron',
        hour=0,
        minute=30
    )

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)

    atexit.register(lambda: scheduler.shutdown())
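exception_listener is not shown in this example; a plausible implementation, assuming it only needs to log failed jobs, would be:

import logging
from apscheduler.events import EVENT_JOB_ERROR

def exception_listener(event):
    # a JobExecutionEvent carries the failed job's id and the exception raised
    logging.getLogger(__name__).error(
        "Job %s raised %r", event.job_id, event.exception)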
Example no. 24
def init():
    scheduler = BackgroundScheduler()
    scheduler.start()
    scheduler.add_job(dochatcleanup, 'interval',
                      minutes=int(plugin.config.config('sleep')),
                      id='dochatcleanup', replace_existing=True)
    return
Example no. 25
class JobScheduler(object):

    def __init__(self, config):
        self.scheduler = BackgroundScheduler()
        self.scheduler.add_job(
            self.job_process,
            'interval',
            seconds=config['JOB_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.scheduler.add_job(
            self.notification_job_process,
            'interval',
            seconds=config['DELIVERY_POLL_INTERVAL_SECONDS'],
            max_instances=1)
        self.config = config

    def start(self):
        self.scheduler.start()

    def shutdown(self):
        self.scheduler.shutdown(wait=True)

    def job_process(self):
        process_jobs(self.config)

    def notification_job_process(self):
        process_notification_job(self.config)
Example no. 26
class schedulecontrol:
    def __init__(self):
        self.scheduler = BackgroundScheduler()
        self.oncescheduler = BlockingScheduler()
        self.scheduler.start()

    def start(self):
        self.oncescheduler.start()

    def addschedule(self, event=None, day_of_week='0-6', hour='11', minute='57', second='0', id='', type='cron', run_date='', args=None):
        # APScheduler's day_of_week field spans 0-6 (mon-sun); the original default of '0-7' would be rejected
        if id == '':
            id = str(time.strftime("%Y-%m-%d %X", time.localtime()))
        if type == 'date':
            if run_date == '':
                self.oncescheduler.add_job(event, args=args)
            else:
                self.oncescheduler.add_job(event, 'date', run_date=run_date, args=args)
        elif type == 'back':
            self.oncescheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)
        else:
            self.scheduler.add_job(event, type, day_of_week=day_of_week, hour=hour, minute=minute, second=second, id=id)

    def removeschedule(self, id):
        self.scheduler.remove_job(id)
Example no. 27
    def setup_tasks(self):
        scheduler = BackgroundScheduler()

        def remove_dead_inbound_connections():
            self.remove_dead_inbound_connections()

        scheduler.add_job(remove_dead_inbound_connections, CronTrigger(second='*/15'))
        scheduler.start()
Example no. 28
def test_aps_eventlet():
    def showMessage():
        print("Show this message")

    sh = BackgroundScheduler()
    sh.start()
    sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
    time.sleep(10)
Example no. 29
def interval():
    """
    Setup the scheduler.
    """
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=10)
    scheduler.start()
    print('Scheduler Started')
Example no. 30
def initialize():
    Database.initialize()
    session['email'] = session.get('email')
    session['name'] = session.get('name')

    scheduler = BackgroundScheduler()
    scheduler.add_job(check_alert, "cron", day_of_week="0-4", hour="16", minute=30)
    scheduler.start()
Example no. 31
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(rateApi.update_rate, 'interval', minutes=45)
    scheduler.start()
Example no. 32
class MyWindow(Gtk.Window):
    def __init__(self):
        Gtk.Window.__init__(self, title="Hello PyObject")
        imgH, imgW = [800, 600]
        self.set_default_size(imgH, imgW)  # set_size_request(imgH, imgW)
        self.GtkWindowType = Gtk.WindowType.TOPLEVEL
        # hide the window title bar
        # self.set_decorated(False)
        # keyname = Gdk.keyval_name(self.key)
        self.connect("key-press-event", self.on_key_press_event)
        self.data_path = path.join(path.abspath(path.dirname(__file__)),
                                   "data")
        self.media_path = path.join(path.abspath(path.dirname(__file__)),
                                    "media")

        # box = Gtk.VBox()

        # label = Gtk.Label("Insert text you want to search for:")
        # box.add(label)

        #        self.entry = Gtk.Entry()
        #        box.add(self.entry)

        self.main_area = Gtk.Stack()
        self.main_area.set_transition_type(
            Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
        self.main_area.set_transition_duration(1000)

        self.conWin1 = Gtk.Box()
        self.main_area.add_titled(self.conWin1, "window1", "window1")
        fixCon = Gtk.Fixed()
        self.conWin1.add(fixCon)
        filePath = path.join(self.media_path, "img1.jpg")
        newImage = ipImage("demoImage", filePath, 800, 600)
        fixCon.put(newImage, 0, 0)

        # self.labelS = Gtk.Label()
        # self.label_txt = """<big><i>you have choice to runn the scan directly or after setup the scanning process you want to follow on your target</i></big>"""
        # self.labelS.set_markup(self.label_txt)
        # self.labelS.set_line_wrap(True)

        self.conWin2 = Gtk.Box()
        self.main_area.add_titled(self.conWin2, "window2", "window2")
        fixCon2 = Gtk.Fixed()
        self.conWin2.add(fixCon2)
        filePath2 = path.join(self.media_path, "img5.jpg")
        newImage2 = ipImage("demoImage2", filePath2, 800, 600)
        fixCon2.put(newImage2, 0, 0)

        # self.our_stackSwitcher = Gtk.StackSwitcher()
        # self.our_stackSwitcher.set_stack(self.main_area)

        # box.add(self.our_stackSwitcher)
        # box.add(self.main_area)
        self.add(self.main_area)

        self.show_all()

        self.iScheduler = BackgroundScheduler()
        exec_date = datetime.now() + timedelta(seconds=5)
        print(exec_date)
        self.iScheduler.add_job(self.switchClipWin, "date", run_date=exec_date)
        self.iScheduler.start()

    def switchClipWin(self):
        print("switchClipWin")
        self.main_area.set_visible_child_name("window2")

    def on_key_press_event(self, widget, event):
        # print("Key press on widget: ", widget)
        # print("          Modifiers: ", event.state)
        # print("      Key val, name: ", event.keyval, Gdk.keyval_name(event.keyval))

        # check the event modifiers (can also use SHIFTMASK, etc)
        ctrl = event.state & Gdk.ModifierType.CONTROL_MASK

        # see if we recognise a keypress
        if ctrl and event.keyval == Gdk.KEY_q:
            print("Quit App")
            Gtk.main_quit()
Example no. 33
	global articles # fix this later or something lol

	print('updating articles...')
	if scrape_new:
		print('scraping new...')
		run_engine()

	print('loading new data...')
	with open('data.json', 'rb') as file_in:
		articles = json.load(file_in)

	articles = sorted(articles, key=lambda x:x['neutrality'])
	make_nosummary()

scheduler = BackgroundScheduler()
scheduler.add_job(func=update, trigger='cron', hour='11', minute='0')
#scheduler.start()

update(scrape_new=False)

def filter(query, articles):
	results = []
	for art in articles:
		if query is None or query.lower() in art['title'].lower():
			results.append(art)
	return results

def similar(idx, articles):
	results = []

	sims = articles[idx]['sims']
Example no. 34
    # time.sleep(20)
    if book_to_list:
        for i in book_to_list:
            content = "%s,您借用的%s已到归还时间,请及时归还给实验室管理员。" % (i['username'], i['bookname'])  # "%s, the %s you borrowed is due; please return it to the lab administrator promptly."
            msg = MIMEText(
                content, _subtype='plain', _charset='utf-8')
            msg['Subject'] = Header(sub, "utf-8")
            msg['From'] = MAIL['user']
            msg['To'] = i['usermail']
            send_email(to_addr=i['usermail'], msg=msg)




scheduler = BackgroundScheduler()
scheduler.add_job(send_lend_mail, 'cron', day_of_week='mon-fri', hour=10, minute=24)

if __name__ == "__main__":
    # get_books_list()
    # MSG1 = MIMEText(
    #     '测试', _subtype='plain', _charset='utf-8')
    # MSG1['Subject'] = 'test'
    # MSG1['From'] = MAIL['user']
    # MSG1['To'] = '*****@*****.**'
    # # send_email(to_addr='*****@*****.**', msg=MSG1)
    # get_books_list()
    # send_lend_mail(sub='测试')
    # 
    scheduler.start()
    try:
        while True:
Example no. 35
        format(newDaily.total_cases, newDaily.total_deaths,
               newDaily.total_recoveries))
    newDaily.calculate(previousCases, previousDeaths, previousRecoveries)
    time = newDaily.date
    try:
        db.session.add(newDaily)
        db.session.commit()
    except Exception as e:
        logger.error("Key already exists")
        logger.error(e)
    logger.info("Successfuly added to database")


#----------------TASK SCHEDULER---------------#
scheduler = BackgroundScheduler(daemon=True)
scheduler.add_job(dataAdd, 'interval', args=[db], days=1)
scheduler.start()


#---------ROUTING---------------#
@app.route('/')
def index():
    data = get_covid_data()
    return "Cases: {}   Deaths: {}    Recoveries: {}".format(
        data.totalCases, data.totalDeaths, data.totalRecoveries)


@app.route('/home')
def home():
    currentData = get_covid_data()
    logger.debug(currentData.totalDeaths)
Example no. 36

# This job runs once an hour
def try_and_run_trial():
    global TRIAL_TO_TRAIN  # assigned below, so it must be declared global
    # Check if behavenet model is running
    proc_running = checkProcRunning('python behavenet')
    print('Process currently running: ', proc_running)
    if not proc_running:
        # 1) Update decoding_data file with new trial
        with open('../.behavenet/decoding_data.json', 'r+') as f:
            print('Updating JSON file for new trial ', TRIAL_TO_TRAIN)
            data = json.load(f)  # the original used json.loads on an append-mode handle, which cannot be read
            data['session'] = TRIAL_TO_TRAIN
            f.seek(0)
            json.dump(data, f)
            f.truncate()

        print('Kicking off new process...')
        # 'nohup python' as a single argv element cannot execute; the shell redirection also needs shell=True
        proc = subprocess.Popen(
            'nohup python behavenet/fitting/decoder_grid_search.py'
            ' --data_config ~/.behavenet/decoding_data.json'
            ' --model_config ~/.behavenet/decoding_ae_model.json'
            ' --training_config ~/.behavenet/decoding_training.json'
            ' --compute_config ~/.behavenet/decoding_compute.json'
            ' > plaw_ctypes.out 2>&1 &', shell=True)
        print('Updating trial_count...')
        TRIAL_TO_TRAIN += 1
        if TRIAL_TO_TRAIN > 4:
            scheduler.shutdown()


print('Adding job to scheduler')
scheduler.add_job(try_and_run_trial, 'interval', minutes=60)
print('Starting scheduler...')
scheduler.start()
Example no. 37
    def jobomatic(buggo):
        #Use buggo to pass outlet instance instead of the ever changing global outlet
        scheduler.remove_job(str(outlet.num)+'on')
        scheduler.remove_job(str(outlet.num)+'off')
        scheduler.add_job(lambda: spiffyon(buggo), 'cron',
                          hour=outlet.t_on.hour,
                          minute=outlet.t_on.minute,
                          id=str(outlet.num) + 'on')
        scheduler.add_job(lambda: spiffyoff(buggo), 'cron',
                          hour=outlet.t_off.hour,
                          minute=outlet.t_off.minute,
                          id=str(outlet.num) + 'off')
    def jobdummystart():
        for x in range(1,6):
            scheduler.add_job(do_nothing, 'cron',
                              hour=20,
                              id=str(x) + 'off')
            scheduler.add_job(do_nothing, 'cron',
                              hour=8,
                              id=str(x) + 'on')
    #Set up dummy jobs so it doesn't complain
    #Set up interval based jobs
    jobdummystart()
    scheduler.add_job(checker,'interval', minutes=1)
    scheduler.add_job(camerago, 'cron',hour=12)
    socketio.run(app, debug="False", host='0.0.0.0')


Outlets.cleanup()
Example no. 38
            Metric="BLENDED_COST"
        )
        forecast = r["Total"]["Amount"][0]
        print("Updated AWS Forecast cost: %s" %(forecast))
        g_forecast.set(float(forecast))

    print("Finished calculating costs")

    return 0



@app.route('/metrics/')
def metrics():
    return Response(generate_latest(), mimetype=CONTENT_TYPE_LATEST)

@app.route('/health')
def health():
    return "OK"

scheduler.start()
scheduler.add_job(
    func=aws_query,
    trigger=IntervalTrigger(seconds=int(QUERY_PERIOD),start_date=(datetime.now() + timedelta(seconds=5))),
    id='aws_query',
    name='Run AWS Query',
    replace_existing=True
    )
# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
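The explicit IntervalTrigger above is interchangeable with the 'interval' shorthand; its start_date delays the first run by five seconds rather than waiting a full period. A sketch of the same job wiring with illustrative values:

from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.interval import IntervalTrigger

def aws_query():
    print("querying")

sched = BackgroundScheduler()
sched.start()
sched.add_job(aws_query,
              trigger=IntervalTrigger(seconds=60,
                                      start_date=datetime.now() + timedelta(seconds=5)),
              id='aws_query', name='Run AWS Query', replace_existing=True)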
Example no. 39
        mydb.commit()

        ################### LINUX ONLY ################
        ################# MOD_WSGI MODE ###############
        shutil.rmtree('var/www/FlaskApp/bluechipApi/static/' + provider_token +
                      '/' + name + '')

        return ('successfully deleted')

    except Exception:
        return 'error'

    finally:
        mycursor.close()
        mydb.close()


@app.errorhandler(404)
def page_not_found(e):
    return "<h1>404</h1><p>The resource could not be found.</p>", 404


################# TASK RUNNING AT EVERY 1 MINUTE ################
scheduler = BackgroundScheduler()
job = scheduler.add_job(image_process_to_obj, 'interval', minutes=1)
scheduler.start()

#app.run()
if __name__ == "__main__":
    app.run()
Example no. 40
        for i in resp:
            d = {}
            for j in i.keys():
                if (j == "order_id" or j == "_id"):
                    d[j] = str(i[j])
                else:
                    d[j] = i[j]
            print(d)
            details['det'].append(d)
        response = jsonify(details)
        response.headers.add('Access-Control-Allow-Origin', '*')
        return response, 200

    else:
        return 405


client = pymongo.MongoClient(
    "mongodb+srv://WalPoolAdmin:[email protected]/test?retryWrites=true&w=majority"
)
order_db = client.Order
cab_db = client.Cab
order_collection = order_db.OrderCollection
cab_details = cab_db.CabDetails

from apscheduler.schedulers.background import BackgroundScheduler
scheduler = BackgroundScheduler()
scheduler.add_job(update_cab_location, 'interval', seconds=10)
scheduler.start()

app.run(host='localhost', port=8000)
Example no. 41
class Base():
    def __init__(self):
        self.history = History()
        self.instance_list = {}
        self.token_list = {}
        self.scheduler = BackgroundScheduler()
        self.scheduler.start()
        self.scheduler.add_job(
            func=self._remove_staleinstances,
            trigger=IntervalTrigger(seconds=120),
            id='stale_instance_remover',
            name='Remove stale instances if no heartbeat in 120 seconds',
            replace_existing=True
        )
        self.scheduler.add_job(
            func=self._update_history_count,
            trigger=IntervalTrigger(seconds=30),
            id='update history',
            name='update client and instance count every 30 seconds',
            replace_existing=True
        )

    def _update_history_count(self):
        servers = [instance.servers for instance in self.instance_list.values()]
        servers = [inner for outer in servers for inner in outer]
        client_num = 0
        # accumulate the client count across all servers
        for server in servers:
            client_num += server.clientnum
        self.history.add_client_history(client_num)
        self.history.add_instance_history(len(self.instance_list))

    def _remove_staleinstances(self):
        for key, value in list(self.instance_list.items()):
            if int(time.time()) - value.last_heartbeat > 120:
                print('[_remove_staleinstances] removing stale instance {id}'.format(id=key))
                del self.instance_list[key]
                del self.token_list[key]
        print('[_remove_staleinstances] {count} active instances'.format(count=len(self.instance_list.items())))

    def get_instances(self):
        return self.instance_list.values()

    def get_instance_count(self):
        return len(self.instance_list)  # dicts have no .count attribute

    def get_instance(self, id):
        return self.instance_list[id]

    def instance_exists(self, instance_id):
        if instance_id in self.instance_list:
            return instance_id
        else:
            return False  # the original fell through without returning here

    def add_instance(self, instance):
        if instance.id in self.instance_list:
            print('[add_instance] instance {id} already added, updating instead'.format(id=instance.id))
            return self.update_instance(instance)
        else:
            print('[add_instance] adding instance {id}'.format(id=instance.id))
            self.instance_list[instance.id] = instance

    def update_instance(self, instance):
        if instance.id not in self.instance_list:
            print('[update_instance] instance {id} not added, adding instead'.format(id=instance.id))
            return self.add_instance(instance)
        else:
            print('[update_instance] updating instance {id}'.format(id=instance.id))
            self.instance_list[instance.id] = instance

    def add_token(self, instance_id, token):
        print('[add_token] adding {token} for id {id}'.format(token=token, id=instance_id))
        self.token_list[instance_id] = token

    def get_token(self, instance_id):
        try:
            return self.token_list[instance_id]
        except KeyError:
            return False
Example no. 42
            global FIRSTFLAG
            FIRSTFLAG = False  # set the creation flag; create a slave worker container as needed
            createSlave()
    elif diff < 0:  #no of slave is more than required slave.Scaling down
        for i in range(abs(diff)):
            deleteSlave()  #killing extra slave containers
    else:
        print("NO change")  #no of slaves required and present are same

    resetRequestCount()  # reset the read-request counter to zero after the 2-minute interval


# Setting a timer which calls checkRequestCount method every 2 minutes
cron = BackgroundScheduler(daemon=True)
cron.add_job(checkRequestCount, 'interval', seconds=120)

client = docker.from_env()  #connecting to docker daemon
countFile = "myCount.txt"

timerStart = False  #timer starts once a request is made to the orchestrator


# To get a new slave name which'll be assigned to a freshly created slave.
def slaveName():
    name = ""
    flag1 = False
    while not flag1:
        name = "slave" + str(randint(1, 10**3))
        flag2 = False
        for c in client.containers.list():
Example no. 43
datasets = {}
version_datasets = requests.get(
    'https://api.github.com/repos/ccodwg/Covid19CanadaArchive/commits?path=datasets.json'
).headers['last-modified']
load_data_datasets(temp_dir)

## archive file index
global archive, version_archive_index
archive = {}
version_archive_index = client(
    's3', config=Config(signature_version=UNSIGNED)).get_object(
        Bucket='data.opencovid.ca', Key='archive/file_index.csv'
    )['ResponseMetadata']['HTTPHeaders']['last-modified']
load_data_archive_index(temp_dir)

# check for data updates
scheduler = BackgroundScheduler()
job_ts = scheduler.add_job(update_data_ts,
                           'interval',
                           minutes=5,
                           args=[temp_dir])
job_datasets = scheduler.add_job(update_data_datasets,
                                 'interval',
                                 minutes=5,
                                 args=[temp_dir])
job_archive_index = scheduler.add_job(update_data_archive_index,
                                      'interval',
                                      minutes=30,
                                      args=[temp_dir])
scheduler.start()
Example no. 44
from datetime import datetime
import time
import os

from apscheduler.schedulers.background import BackgroundScheduler


def tick():
    print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=3)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
Example no. 45
def start():
    scheduler = BackgroundScheduler()
    scheduler.add_job(BackgroundClass.power_allot_func, 'interval', minutes=1)
    scheduler.add_job(BackgroundClass.device_consumption, 'interval', minutes=1)
    scheduler.start()
Example no. 46
    def set_sheduler(self, args):
        scheduler = BackgroundScheduler()
        scheduler.add_job(self.startTest,
                          'date',
                          run_date=self.run_config['ExecTime'],
                          args=(args, ))
        # note: this snippet never calls scheduler.start(), so the job will not fire
Example no. 47
    if test.time <= datetime.datetime.now():
        test.time = datetime.datetime.now() + datetime.timedelta(seconds=10)
        print(str(test.time))
        test.state = not test.state

    print(test.state)


app = Flask(__name__)


@app.route("/")
def hello():
    now = datetime.datetime.now()
    templateData = {'title': 'HELLO!', 'time': now}
    return render_template('main.html', **templateData)
    """obj.test2 = obj.test2 * 2
    return "value: " + str(obj.test2)"""


"""app.config['SECRET_KEY'] = 'secret!'"""
socketio = SocketIO(app)

if __name__ == "__main__":
    obj = MyClass(1)
    socketio.run(app)
    sched = BackgroundScheduler(daemon=True)
    sched.add_job(sensor, 'interval', seconds=5, id='my_job', args=[obj])
    sched.start()
    app.run(host='0.0.0.0', port=91, use_reloader=False)
Example no. 48
def timing_background(func, timing_type='interval', seconds=2):
    scheduler = BackgroundScheduler()
    scheduler.daemonic = False  # APScheduler 2.x attribute; in 3.x use the daemon option (see the sketch below)
    scheduler.add_job(func, timing_type, seconds=seconds)
    scheduler.start()
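Setting scheduler.daemonic as a plain attribute is APScheduler 2.x style; in 3.x the background thread's daemon flag is a constructor option instead. A sketch of the helper under that assumption:

from apscheduler.schedulers.background import BackgroundScheduler

def timing_background(func, timing_type='interval', seconds=2):
    # daemon=False keeps the scheduler thread running independently of the caller
    scheduler = BackgroundScheduler(daemon=False)
    scheduler.add_job(func, timing_type, seconds=seconds)
    scheduler.start()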
Example no. 49
"""
 Copyright 2021 Ela El-Heni
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

# A3: BackgroundScheduler to download the city's data every day at
# midnight. This bit of code can be moved wherever you like; watch out
# for circular imports.

# pip3 install apscheduler
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
import manage

# set scheduler
sched = BackgroundScheduler(daemon=True)
sched.add_job(manage.data_handler, 'cron', hour=0)  # pass the function itself, not the result of calling it
sched.start()
atexit.register(lambda: sched.shutdown())
Example no. 50
from server.app import app
from server.api.v1.api import api
from server.controllers.cron import cron_job
from flask import Flask
from flask_restful import Api
from apscheduler.schedulers.background import BackgroundScheduler
import sys

app.register_blueprint(api.blueprint, url_prefix='/api/v1')

print("Initializing Background Scheduler")
sched = BackgroundScheduler()
sched.add_job(cron_job, trigger='interval', days=1)
sched.start()
cron_job()
Example no. 51
class HeartbeatFormatter(object):
    def __init__(self, sender, log, config=None, redis=None):
        self.sender = sender
        self.log = log

        self.config = config or forge.get_config()
        self.datastore = forge.get_datastore(self.config)

        self.redis = redis or get_client(
            host=self.config.core.redis.nonpersistent.host,
            port=self.config.core.redis.nonpersistent.port,
            private=False,
        )
        self.redis_persist = get_client(
            host=self.config.core.redis.persistent.host,
            port=self.config.core.redis.persistent.port,
            private=False,
        )
        self.status_queue = CommsQueue(STATUS_QUEUE, self.redis)
        self.dispatch_active_hash = Hash(DISPATCH_TASK_HASH,
                                         self.redis_persist)
        self.dispatcher_submission_queue = NamedQueue(SUBMISSION_QUEUE,
                                                      self.redis)
        self.dispatcher_file_queue = NamedQueue(FILE_QUEUE, self.redis)
        self.ingest_scanning = Hash('m-scanning-table', self.redis_persist)
        self.ingest_unique_queue = PriorityQueue('m-unique',
                                                 self.redis_persist)
        self.ingest_queue = NamedQueue(INGEST_QUEUE_NAME, self.redis_persist)
        self.alert_queue = NamedQueue(ALERT_QUEUE_NAME, self.redis_persist)

        constants = forge.get_constants(self.config)
        self.c_rng = constants.PRIORITY_RANGES['critical']
        self.h_rng = constants.PRIORITY_RANGES['high']
        self.m_rng = constants.PRIORITY_RANGES['medium']
        self.l_rng = constants.PRIORITY_RANGES['low']
        self.c_s_at = self.config.core.ingester.sampling_at['critical']
        self.h_s_at = self.config.core.ingester.sampling_at['high']
        self.m_s_at = self.config.core.ingester.sampling_at['medium']
        self.l_s_at = self.config.core.ingester.sampling_at['low']

        self.to_expire = {k: 0 for k in metrics.EXPIRY_METRICS}
        if self.config.core.expiry.batch_delete:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}/DAY]"
        else:
            self.delete_query = f"expiry_ts:[* TO {self.datastore.ds.now}-{self.config.core.expiry.delay}" \
                f"{self.datastore.ds.hour}]"

        self.scheduler = BackgroundScheduler(daemon=True)
        self.scheduler.add_job(
            self._reload_expiry_queues,
            'interval',
            seconds=self.config.core.metrics.export_interval * 4)
        self.scheduler.start()

    def _reload_expiry_queues(self):
        try:
            self.log.info("Refreshing expiry queues...")
            for collection_name in metrics.EXPIRY_METRICS:
                try:
                    collection = getattr(self.datastore, collection_name)
                    self.to_expire[collection_name] = collection.search(
                        self.delete_query, rows=0, fl='id')['total']
                except SearchException:
                    self.to_expire[collection_name] = 0
        except Exception:
            self.log.exception(
                "Unknown exception occurred while reloading expiry queues:")

    def send_heartbeat(self, m_type, m_name, m_data, instances):
        if m_type == "dispatcher":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "inflight": {
                            "max": self.config.core.dispatcher.max_inflight,
                            "outstanding": self.dispatch_active_hash.length()
                        },
                        "instances": instances,
                        "metrics": m_data,
                        "queues": {
                            "ingest":
                            self.dispatcher_submission_queue.length(),
                            "files": self.dispatcher_file_queue.length(),
                        },
                        "component": m_name,
                    }
                }
                self.status_queue.publish(
                    DispatcherMessage(msg).as_primitives())
                self.log.info(f"Sent dispatcher heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating DispatcherMessage")

        elif m_type == "ingester":
            try:
                c_q_len = self.ingest_unique_queue.count(*self.c_rng)
                h_q_len = self.ingest_unique_queue.count(*self.h_rng)
                m_q_len = self.ingest_unique_queue.count(*self.m_rng)
                l_q_len = self.ingest_unique_queue.count(*self.l_rng)

                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": instances,
                        "metrics": m_data,
                        "processing": {
                            "inflight": self.ingest_scanning.length()
                        },
                        "processing_chance": {
                            "critical": 1 - drop_chance(c_q_len, self.c_s_at),
                            "high": 1 - drop_chance(h_q_len, self.h_s_at),
                            "low": 1 - drop_chance(l_q_len, self.l_s_at),
                            "medium": 1 - drop_chance(m_q_len, self.m_s_at)
                        },
                        "queues": {
                            "critical": c_q_len,
                            "high": h_q_len,
                            "ingest": self.ingest_queue.length(),
                            "low": l_q_len,
                            "medium": m_q_len
                        }
                    }
                }
                self.status_queue.publish(IngestMessage(msg).as_primitives())
                self.log.info(f"Sent ingester heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating IngestMessage")

        elif m_type == "alerter":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": instances,
                        "metrics": m_data,
                        "queues": {
                            "alert": self.alert_queue.length()
                        }
                    }
                }
                self.status_queue.publish(AlerterMessage(msg).as_primitives())
                self.log.info(f"Sent alerter heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating AlerterMessage")

        elif m_type == "expiry":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": instances,
                        "metrics": m_data,
                        "queues": self.to_expire
                    }
                }
                self.status_queue.publish(ExpiryMessage(msg).as_primitives())
                self.log.info(f"Sent expiry heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating ExpiryMessage")

        elif m_type == "archive":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": instances,
                        "metrics": m_data
                    }
                }
                self.status_queue.publish(ArchiveMessage(msg).as_primitives())
                self.log.info(f"Sent archive heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating ArchiveMessage")

        elif m_type == "scaler":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": instances,
                        "metrics": m_data,
                    }
                }
                self.status_queue.publish(ScalerMessage(msg).as_primitives())
                self.log.info(f"Sent scaler heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating WatcherMessage")

        elif m_type == "scaler-status":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "service_name": m_name,
                        "metrics": m_data,
                    }
                }
                self.status_queue.publish(
                    ScalerStatusMessage(msg).as_primitives())
                self.log.info(f"Sent scaler status heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating WatcherMessage")

        elif m_type == "service":
            try:
                busy, idle = get_working_and_idle(self.redis, m_name)
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances": len(busy) + len(idle),
                        "metrics": m_data,
                        "activity": {
                            'busy': len(busy),
                            'idle': len(idle)
                        },
                        "queue": get_service_queue(m_name,
                                                   self.redis).length(),
                        "service_name": m_name
                    }
                }
                self.status_queue.publish(ServiceMessage(msg).as_primitives())
                self.log.info(f"Sent service heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating ServiceMessage")

        elif m_type == "watcher":
            try:
                msg = {
                    "sender": self.sender,
                    "msg": {
                        "instances":
                        instances,
                        "metrics":
                        m_data,
                        "watching":
                        UniquePriorityQueue(WATCHER_QUEUE,
                                            self.redis).length(),
                    }
                }
                self.status_queue.publish(WatcherMessage(msg).as_primitives())
                self.log.info(f"Sent watcher heartbeat: {msg['msg']}")
            except Exception:
                self.log.exception(
                    "An exception occurred while generating WatcherMessage")

        else:
            self.log.warning(
                f"Skipping unknown counter: {m_name} [{m_type}] ==> {m_data}")
Example no. 52
            for month in range(1, 13):
                days = monthrange(year, month)[1]
                for day in range(1, days+1):
                    if f"{type}{country}{year}{str(month).zfill(2)}{str(day).zfill(2)}.json" in file_names:
                        print(f"{type}{country}{year}{month}{day} exists in S3, skipping")
                    else:
                        if not dt.datetime(year=int(year), month=int(month), day=int(day)) > dt.datetime.today():
                            print(f"{year}/{month}/{day}/{type}/{country}")
                            day = str(day).zfill(2)
                            month = str(month).zfill(2)
                            link = f"https://promo.betfair.com/betfairsp/prices/" \
                                   f"dwbfprices{country}{type}{day}{month}{year}.csv"
                            scheduler.add_job(func=download_sp_from_link, id=str(hash(link)), kwargs={
                                'link': link, 'country': country, 'type': type,
                                'day': day, 'month': month, 'year': year,
                                'mode': 'overwrite' if not table_refreshed else 'append'},
                                              misfire_grace_time=999999999)
                            table_refreshed = True

scheduler.start()
time.sleep(1)
print(f"Jobs left: {len(scheduler.get_jobs())}")
time.sleep(1)
while len(scheduler.get_jobs()) > 0:
    print(f"Jobs left: {len(scheduler.get_jobs())}")
    time.sleep(1)
scheduler.shutdown()

# # Run crawler
# session = boto3.session.Session(
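
A note on the scheduling pattern above: jobs added without a trigger run once, as soon as a worker thread is free, and the very large misfire_grace_time stops queued one-shot jobs from being discarded while earlier downloads keep the pool busy; APScheduler also accepts misfire_grace_time=None, which disables the lateness check entirely. A minimal, self-contained sketch of the same submit-then-drain pattern (the job body is a stand-in, not the real downloader):

import time
from apscheduler.schedulers.background import BackgroundScheduler

def fetch(link):
    # stand-in for download_sp_from_link
    time.sleep(0.1)
    print(f"fetched {link}")

scheduler = BackgroundScheduler()
for i in range(5):
    # no trigger: run once, as soon as possible; None disables misfire checks
    scheduler.add_job(fetch, args=[f"https://example.invalid/{i}"],
                      id=str(i), misfire_grace_time=None)
scheduler.start()
while scheduler.get_jobs():
    time.sleep(0.5)
scheduler.shutdown()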
Esempio n. 53
0
    def __init__(self):
        cmd.Cmd.__init__(self)
        self.trader = 'trader'

    def do_s(self, args):
        print('pressed s')

    def do_bb(self, args):
        print('pressed b')

    def do_q(self, args):
        exit(0)

    def do_t(self, args):
        global bsstart
        if bsstart:
            bs.pause()
        else:
            # start() may only be called once per scheduler; after a pause()
            # it has to be woken up with resume() instead
            if bs.state == STATE_STOPPED:
                bs.start()
            else:
                bs.resume()
        bsstart = not bsstart


def say():
    print(datetime.datetime.now())


# `bs`, `bsstart` and STATE_STOPPED are used above but never defined in this
# snippet; the following module-level definitions are assumed from usage.
from apscheduler.schedulers.base import STATE_STOPPED

bs = BackgroundScheduler()
bsstart = False

bs.add_job(say, 'interval', seconds=2, id='my_job_id')

if __name__ == '__main__':
    InteractiveShell().cmdloop()
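
For reference, a scheduler moves between three states (STATE_STOPPED, STATE_RUNNING and STATE_PAUSED, all importable from apscheduler.schedulers.base), and calling start() on a scheduler that has already left the stopped state raises SchedulerAlreadyRunningError, which is why the toggle above distinguishes start() from resume(). A quick standalone check:

from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.schedulers.base import STATE_PAUSED, STATE_RUNNING

s = BackgroundScheduler()
s.start()
assert s.state == STATE_RUNNING
s.pause()    # jobs stop being processed
assert s.state == STATE_PAUSED
s.resume()   # processing continues; calling start() here would raise
assert s.state == STATE_RUNNING
s.shutdown()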
Esempio n. 54
0
	return False

def confirmarDomotica():
	ejecutarConsulta('confirmar')

def ejecutarDomotica():
	if buscarAccionDomotica():
		confirmarDomotica()
	

if __name__ == '__main__':
	constantes.constantesLola()
	logging.basicConfig(filename='./log/domotica.log', level=logging.INFO)

	scheduler = BackgroundScheduler()
	scheduler.add_job(ejecutarDomotica, 'interval', seconds=4)
	scheduler.start()
	
	print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

	try:
		# This is here to simulate application activity (which keeps the main thread alive).
		while True:
			time.sleep(200)
	except (KeyboardInterrupt, SystemExit):
		# Not strictly necessary if daemonic mode is enabled but should be done if possible
		scheduler.shutdown()



Esempio n. 55
0
def schedule():
    scheduler = BackgroundScheduler()
    scheduler.add_job(process, 'interval', seconds=2)
    scheduler.start()
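
One caveat with a helper like schedule() above: BackgroundScheduler runs jobs on daemon threads, so if the caller returns and the main thread exits, process() silently stops firing. The usual remedy, as in the other examples here, is to keep the main thread alive (process is a hypothetical stand-in):

import time
from apscheduler.schedulers.background import BackgroundScheduler

def process():
    print("tick")

scheduler = BackgroundScheduler()
scheduler.add_job(process, 'interval', seconds=2)
scheduler.start()
try:
    while True:
        time.sleep(1)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()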
Esempio n. 56
0
                continue
            attrpropa = itemdl.find_all('a', target="_blank", class_="linkblack")
            attrs = itemdl.a['href']  # href of the first <a> tag under this element
            reply = itemdl.find('span', class_='fontblue').string
            if "viewthread" not in attrs:
                continue
            props = attrs.split('=')
            pid = int(props[1])
            replycount = int(reply)
            # print('thread %d reply count: %d' % (pid, replycount))
            if replycount < 2:
                auto_post_thread(pid)

get_properties_from_remote()

scheduler = BackgroundScheduler()
scheduler.add_job(get_properties_from_remote, 'interval', seconds=600)
# the scheduled job runs on its own thread
scheduler.start()

try:
    # everything else runs on separate threads while the scheduler ticks
    while True:
        time.sleep(50)
except (KeyboardInterrupt, SystemExit):
    scheduler.shutdown()
    print('Exit The Job!')
#print(html)


Esempio n. 57
0
    elif event_name == 'live':  # juchao_kuaixun
        JuchaoLiveNewsSpider().start()
    elif event_name == 'fin':  # juchao_ant_finance
        JuchaoFinanceSpider().start()

    # new-style spiders
    elif event_name == 'his':  # spy_announcement_data
        JuchaoHistorySpy().start()
    elif event_name == 'ref':  # an_announcement_secu_ref
        AnnSecuRef().daily_sync()

    # reporting module
    elif event_name == 'ding':
        utils.send_crawl_overview()


for data in task_info:
    ap_scheduler.add_job(
        func=handle,
        trigger=data['trigger'],
        minutes=data['time_interval'],
        args=(data['task_id'], ),
        name=data['task_name'],
        max_instances=1,
    )

ap_scheduler.start()

while True:
    time.sleep(10)
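
task_info is not defined in this snippet; from the add_job call it is presumably a list of dicts along these lines (values here are hypothetical):

task_info = [
    {'task_id': 'his', 'task_name': 'juchao history sync',
     'trigger': 'interval', 'time_interval': 30},
    {'task_id': 'ding', 'task_name': 'crawl overview broadcast',
     'trigger': 'interval', 'time_interval': 60},
]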
Esempio n. 58
0
    # iterate over key snapshots: entries are popped below, and mutating a
    # dict while iterating it raises RuntimeError on Python 3
    for i in list(uL.keys()):  # chat id
        for j in list(uL[i].keys()):
            if uL[i][j][3] + 15 <= int(time.time()):
                try:
                    bot.delete_message(int(i), uL[i][j][2])
                except:
                    pass
            if uL[i][j][3] + 60 <= int(time.time()):
                bot.kick_chat_member(int(i), int(j), until_date=None)
                bot.restrict_chat_member(int(i),
                                         int(j),
                                         until_date=None,
                                         can_send_messages=True)
                bot.restrict_chat_member(int(i),
                                         int(j),
                                         until_date=None,
                                         can_send_messages=False)
                userList[i].pop(j)
        if len(userList[i]) == 0:
            userList.pop(i)


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(clean_list, 'interval', seconds=15)
    scheduler.start()
    try:
        bot.polling()
    except BaseException:
        pass
Esempio n. 59
0
class RoleManager:
	def __init__(self, cfg, section):
		h = logging.StreamHandler()
		h.setFormatter(logging.Formatter('%(levelname)s:%(name)s:%(message)s'))
		log = logging.getLogger('apscheduler.executors.default')
		log.setLevel(logging.INFO)
		log.addHandler(h)

		self.section	= section
		self.cfg 		= cfg
		self.conn 		= None
		self.cursor 	= None
		self.restAPI	= RestAPI()
		self.hostList   = self.cfg.get('ENM_SERVER_MAPPING', 'ENM_API_SERVER_LIST').split(',') 

		self.initRepo()

		self.scheduler = BackgroundScheduler()
		#self.scheduler.add_job(self.initCommandRepo, 'cron', minute='*/{}'.format(INIT_INTERVAL), second='0', id='RoleManager')
		self.scheduler.add_job(self.checkJob, 'cron', minute='*', second='0', id='RoleManager')
		self.scheduler.add_job(self.checkRoleStatus, 'cron', minute='*/5', second='0', id='RoleCheckManager')
		#self.scheduler.add_job(self.checkRoleStatus, 'cron', minute='*', second='0', id='RoleCheckManager')
		self.scheduler.start()
		__LOG__.Trace('start!')

	def initRepo(self):
		repoStr = getContents(os.path.join(self.cfg.get(self.section, 'DUMP_PATH'), 'schedule_dump.json'))
		self.jobRepo = json.loads(repoStr)

	def stdOut(self, msg):
		sys.stdout.write(msg+'\n')
		sys.stdout.flush()
		# print(msg, file=sys.stdout)
		__LOG__.Trace('OUT: %s' % msg)
		
	def stdErr(self, msg):
		sys.stderr.write('%s\n' % (msg))
		sys.stderr.flush()
		__LOG__.Trace('ERR: %s' % msg)

	def shutdown(self):
		try :
			df = open(os.path.join(self.cfg.get(self.section, 'DUMP_PATH'), 'schedule_dump.json'), 'w')
			df.write(json.dumps(self.jobRepo, encoding='utf-8'))
			df.close()
			if self.scheduler :
				self.scheduler.shutdown()
				__LOG__.Trace('scheduler shutdown')
			else :
				__LOG__.Trace('scheduler is None')
		except :
			__LOG__.Exception()

	def disConnect(self,conn,cursor):
		if cursor != None:
			try : cursor.close()
			except : pass
		if conn != None :	
			try : conn.close()
			except : pass

	def initConnect(self):
		self.conn = M6.Connection(self.IRIS, self.IRIS_ID, self.IRIS_PASS, Database='tacs')
		__LOG__.Trace('IRIS Connect!')
		try :
			self.cursor = self.conn.cursor()
			self.cursor.SetFieldSep('|^|')
			self.cursor.SetRecordSep('|^-^|')
		except :
			__LOG__.Exception()
		finally :
			self.conn.commit()

	def run(self, section):
		__LOG__.Trace('RoleManager start!!')
		#self.initConnect()
		while not SHUTDOWN:
			try : 
				strIn = sys.stdin.readline()
				strLine = strIn.strip()
				if strLine == '' : 
					self.stdErr(strIn)
				else :
					if os.path.exists(strLine) : 
						jsonStr 	= getContents(strLine)
				#jsonStr 	= getContents('/home/tacs/DATA/WORKINFO/RAN_EMS/O190429000001_192.168.100.55.json')
						jsonObj		= json.loads(jsonStr)
						__LOG__.Trace(jsonObj)
						repoKey = jsonObj['workId']
						self.jobRepo[repoKey] = jsonObj

			except :
				__LOG__.Exception()

			finally :
				self.stdErr(strIn)
				

		#self.disConnect(self.conn, self.cursor)


	def checkJob(self):
		nDate 	= datetime.datetime.now()
		nStr 	= nDate.strftime('%Y%m%d%H%M00')
		gabStr  = (nDate - datetime.timedelta(minutes=1)).strftime('%Y%m%d%H%M00')
		__LOG__.Trace('nStr : %s' % nStr)

		# iterate over a snapshot of the keys: jobs may be deleted from jobRepo below
		for key in list(self.jobRepo.keys()):
			oneJob 		= self.jobRepo[key]
			staDate 	= oneJob['workStaDate']
			endDate 	= oneJob['workEndDate']
			__LOG__.Trace('%s : %s ~ %s, ENABLED:%s' % (oneJob['workId'], staDate, endDate, 'ENABLED' in oneJob))
			
			if 'ENABLED' not in oneJob and (staDate <= nStr and gabStr <= staDate) : self.addRole(oneJob)
			elif 'ENABLED' in oneJob and endDate <= nStr : self.delRole(oneJob)
			else :
				if nStr < staDate or nStr < endDate : __LOG__.Trace('keep : %s' % oneJob['workId'])
				else :
					del self.jobRepo[oneJob['workId']]
					__LOG__.Trace('delete: %s' % oneJob)

	def addRole(self, jsonObj):
		enmApiServer = self.cfg.get('ENM_SERVER_MAPPING', jsonObj['emsIp'])
		__LOG__.Trace('addRole : %s, %s, %s' % (enmApiServer, jsonObj['workId'], jsonObj['oprrId']))
		self.restAPI.changeUserRole(jsonObj['emsIp'],'ADD',jsonObj['oprrId'])
		self.jobRepo[jsonObj['workId']]['ENABLED'] = True
		
	def delRole(self, jsonObj):
		enmApiServer = self.cfg.get('ENM_SERVER_MAPPING', jsonObj['emsIp'])
		__LOG__.Trace('delRole : %s, %s, %s' % (enmApiServer, jsonObj['workId'], jsonObj['oprrId']))
		self.restAPI.changeUserRole(jsonObj['emsIp'],'REMOVE',jsonObj['oprrId'])
		del self.jobRepo[jsonObj['workId']]

	def checkRoleStatus(self) :
		nDate   	= datetime.datetime.now()
		yymmdd 		= nDate.strftime('%Y%m%d')
		hhmm 		= nDate.strftime('%H%M')
		evntDate 	= nDate.strftime('%Y%m%d%H%M%S')
		for oneHost in self.hostList :
			result = self.checkAllUserRole(oneHost)
			if len(result) > 0 :
				f = open('/home/tacs/DATA2/AUDIT_LOG/AUDIT_17/%s_%s_%s.audit' % (yymmdd, hhmm, oneHost), 'a')
				for oneInfo in result :
					oneInfo['evntDate'] = evntDate
					f.write('%s\n' % JSON.dumps(oneInfo, encoding='utf-8'))
				f.close()

	def getAllUser(self, host) :
		uri = '/oss/idm/usermanagement/users'
		result = '[]'
		try :
			code, result = self.restAPI.execute(host, 'GET', uri)
		except :
			__LOG__.Exception()

		userList = json.loads(result)

		return userList
		

	def getUserRole(self, host, userId) :
		if userId is None or userId == '' : return None
		uri = '/oss/idm/usermanagement/users/%s/privileges' % userId
		code, result = self.restAPI.execute(host, 'GET', uri)
		userInfo = json.loads(result)
		
		return userInfo

	def checkAllUserRole(self, host) :
		userRoleList = []
		userList = self.getAllUser(host)
		for oneUser in userList :
			if oneUser['username'] == 'SKT_TACS' : continue
			userRoleInfo = self.getUserRole(host, oneUser['username'])
			for oneRole in userRoleInfo :
				if ENM_WORK_ROLE == oneRole['role'] :
					__LOG__.Trace('Host: %s ,User : %s, Role : %s' % (host, oneUser['username'], oneRole['role']))
					nDate   = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
					userRoleList.append({'workUserId':oneUser['username'], 'beforePriv':oneRole['role'], 'checkDate':nDate})

			time.sleep(1)

		currentOprrIdList = []
		for key in self.jobRepo.keys() :
			oneJob      = self.jobRepo[key]
			if 'ENABLED' in oneJob :
				oprrIdList = oneJob['oprrId'].split(';')
				oprrIdList = [x for x in oprrIdList if x]
				currentOprrIdList = currentOprrIdList + oprrIdList
		
		result = []
		for oneUserInfo in userRoleList :
			if not oneUserInfo['workUserId'] in currentOprrIdList :
				result.append(oneUserInfo)		
				
		return result
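
A quick note on the cron triggers used above: minute='*' with second='0' fires at the top of every minute, while minute='*/5' fires every five minutes; the same triggers in standalone form:

from apscheduler.schedulers.background import BackgroundScheduler

sched = BackgroundScheduler()
sched.add_job(lambda: print('every minute'), 'cron', minute='*', second='0')
sched.add_job(lambda: print('every five minutes'), 'cron', minute='*/5', second='0')
sched.start()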
Esempio n. 60
0
class WeChatBot:
    def __init__(self, user):
        self.user = '******'
        self.status = Status['init']

        self.movie = json.load(open('m.json', 'r'))
        self.movie_index = 0
        self.max_movie = len(self.movie)
        self.is_send_movie_list = False

        self.schedule = BackgroundScheduler()

        self.function = {
            Status['init']: self.handle_text,
            Status['weather']: self.handle_weather,
            Status['schedule']: self.handle_schedule,
            Status['package']: self.handle_package,
            Status['movie']: self.handle_movie,
        }

    def entry(self, text):
        self.function[self.status](text)

    """
    trigger:
       功能
    """

    def handle_text(self, text):
        if re.search(r"定时", text):
            self.handle_schedule(text)
        elif re.search(r"快递", text):
            self.handle_package(text)
        elif re.search(r"天气", text) or re.search(r"气温", text):
            self.handle_weather(text)
        elif re.search(r"电影", text) or re.search(r"正在热映", text) or re.search(
                r"院线热映", text):
            self.handle_movie(text)
        elif re.search(r"功能", text):
            reply = "本机器人有如下功能:[快递查询],[天气查询],[院线热映],[设定定时任务]"
            send_msg(reply, self.user)
        else:
            reply = '机器人自动回复:' + get_response(text)
            send_msg(reply, self.user)

    """
    trigger:
       定时
    """

    def handle_schedule(self, text):
        try:
            temp = text.split('+')[1:]
            time = temp[0]
            msg = temp[1]
            name = temp[2]
            self.schedule.add_job(send_msg,
                                  'date',
                                  run_date=time,
                                  kwargs={
                                      "msg": msg,
                                      "name": name
                                  })
            reply = '设置成功'  # "set successfully"
            # start() raises SchedulerAlreadyRunningError on a second call,
            # so only start the scheduler on first use
            if not self.schedule.running:
                self.schedule.start()
        except:
            reply = "设置定时任务请输入:定时发送+时间+msg+姓名,如:定时发送+2019-2-16 21:51:00+晚上好+filehelper"
        send_msg(reply, self.user)

    """
    trigger:
        快递
    """

    def handle_package(self, text):
        try:
            pack_num = re.search(r"(快递)(\+)([0-9]+)", text).group(3)
            reply = get_package(pack_num)
        except:
            reply = "查询快递请输入:快递+运单号,如:快递+12345"
        send_msg(reply, self.user)

    """
    trigger:
       天气
        今日天气,七日天气
    """

    def handle_weather(self, text):
        if re.search(r"今日天气", text):
            try:
                city_name = re.search(r"(今日天气)(\+)(.*)", text).group(3)
                reply = get_forecast('今日天气', city_name)
            except:
                reply = '查询今日天气请输入今日天气 + 城市名,如: 今日天气 + 西安.'
        elif re.search(r"七日天气", text):
            try:
                city_name = re.search(r"(七日天气)(\+)(.*)", text).group(3)
                reply = get_forecast('七日天气', city_name)
            except:
                reply = '查询七日天气请输入七日天气+城市名,如:七日天气+西安'
        else:
            reply = '''查询今日天气请输入今日天气+城市名,如:今日天气+西安.\n查询七日天气请输入七日天气+城市名,如:七日天气+西安。'''
        send_msg(reply, self.user)

    """
    trigger:
        电影,正在热映,院线热映
        in:
            \d 选择
            影院:(一个电影中)查看电影院,
            简介:(一个电影中)查看简介,
            l 展示列表
            q 退出

    """

    def send_list(self):
        temp = ''
        if not self.is_send_movie_list:
            for i, m in enumerate(self.movie):
                temp += f"{i+1}.{m['name']}\n"
            send_msg(temp, self.user)
            self.is_send_movie_list = True

    def handle_movie(self, text):
        self.status = Status['movie']
        self.send_list()
        if text.isdigit():
            try:
                index = int(text) - 1
                if 0 <= index < self.max_movie:
                    self.movie_index = index
                    reply1 = self.movie[index]['basic']
                    img = self.movie[index]['img']
                    send_img(img, self.user)
                    send_msg(reply1, self.user)
                else:
                    send_msg("请输入正确的数字范围1~" + str(self.max_movie), self.user)
            except:
                send_msg("请输入合法的数字1~" + str(self.max_movie), self.user)
        elif re.search(r'影院', text):
            reply2 = ''
            for i in self.movie[self.movie_index]['cinema']:
                reply2 += i + '\n'
            send_msg(reply2, self.user)
        elif re.search(r'简介', text):
            reply3 = self.movie[self.movie_index]['intro']
            send_msg(reply3, self.user)
        elif text.strip() == 'l':
            # exact match: re.search(r'l', text) would fire on any text containing "l"
            self.is_send_movie_list = False
            self.send_list()
        elif text.strip() == 'q':
            self.status = Status['init']
            self.movie_index = 0
            self.is_send_movie_list = False
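
For the 'date' trigger used in handle_schedule above, run_date accepts either a datetime or a string such as '2019-2-16 21:51:00'; a standalone sketch of the same one-shot pattern:

from datetime import datetime, timedelta
from apscheduler.schedulers.background import BackgroundScheduler

sched = BackgroundScheduler()
sched.start()
# fire once, three seconds from now
sched.add_job(print, 'date',
              run_date=datetime.now() + timedelta(seconds=3),
              args=['scheduled message'])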