Example #1
1
def main():
	plugin_list= cf.sections()
	print plugin_list
	notif = pynsca.NSCANotifier(cf.monitor_server, password=cf.nsca_pass)
	plugin_scheduler = BackgroundScheduler()
	try:	
		for svc_name in plugin_list:
			if str(svc_name) == 'defaults': pass
			else:
				logging.info("%s loading" % str(svc_name))
				cmd=cf.g(svc_name,"check")
				if cmd is None:
					logging.error("%s check does not exist" % svc_name)
					sys.exit(2)
				plugin_interval = cf.g(svc_name,"interval")
				if plugin_interval is None:
					continue
				plugin_scheduler.add_job(func=check_execute,args=[cmd,svc_name,notif],trigger='interval',seconds = plugin_interval, id = svc_name)
				logging.info("%s loaded" %str(svc_name))
		plugin_scheduler.start()
		while True:
			time.sleep(2)
	except Exception,e:
		logging.info("Agent boot up error: %s" % str(e))
		sys.exit(1)
Example #2
0
def go():
	preflight()

	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)

	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*60)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*15)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*5)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=20)
	startTime = datetime.datetime.now()+datetime.timedelta(seconds=10)
	scheduleJobs(sched, startTime)
	sched.start()

	# Spin-wait for Ctrl+C and exit when it's received (a sketch of one way runStatus.run might be set follows this example).
	loops = 0
	while runStatus.run:
		time.sleep(0.1)
		# loops += 1
		# if loops > 100:
		# 	logging_tree.printout()
		# 	loops = 0

	print("Scraper stopping scheduler")
	sched.shutdown()
	nt.dirNameProxy.stop()
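Example #2 loops on runStatus.run, which is defined elsewhere in that project. A minimal, hypothetical sketch of how such a stop flag could be wired to Ctrl+C with the standard signal module (the names below are illustrative, not from the original source):

import signal
import time

from apscheduler.schedulers.background import BackgroundScheduler


class RunStatus:
    run = True

runStatus = RunStatus()

def _handle_sigint(signum, frame):
    # Flip the flag; the spin-wait loop notices it on its next pass.
    runStatus.run = False

signal.signal(signal.SIGINT, _handle_sigint)

sched = BackgroundScheduler()
sched.start()
while runStatus.run:
    time.sleep(0.1)
sched.shutdown()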
Example #3
0
class cScheduler():
    def __init__(self):
        jobstores = {
            'default': SQLAlchemyJobStore(url='sqlite:///__secret//jobs.db')
        }
        executors = {
            'default': {'type': 'threadpool', 'max_workers': 20},
            'processpool': ProcessPoolExecutor(max_workers=5)
        }
        job_defaults = {
            'coalesce': False,
            'max_instances': 1
        }
        self.the_sched = BackgroundScheduler()
        self.the_sched.configure(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone=utc)

        self.telegram_server = None
        self.job_queue = None

    def start(self):
        self.the_sched.start()

    def set_telegram_server(self, telegram_server):
        self.telegram_server = telegram_server
        self.telegram_job_queue = telegram_server.get_job_queue()

    def start_main_schedule(self):
        self.telegram_job_queue.put(self.telegram_server.extjob_send_all, 5, repeat=True)
Example #4
0
def run_web():

	nt.dirNameProxy.startDirObservers()


	sched = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults)
	sched.start()


	x = 60
	for name, classInstance in nt.__dict__.items():

		# Look up all class instances in nameTools. If they have the magic attribute "NEEDS_REFRESHING",
		# that means they support scheduling, so schedule the class in question.
		# To support auto-refreshing, the class needs to define:
		# cls.NEEDS_REFRESHING = {anything, just checked for existence}
		# cls.REFRESH_INTERVAL = {number of seconds between refresh calls}
		# cls.refresh()        = Call to do the refresh operation. Takes no arguments.
		# (A sketch of a class following this convention appears after this example.)
		if  isinstance(classInstance, type) or not hasattr(classInstance, "NEEDS_REFRESHING"):
			continue

		sched.add_job(classInstance.refresh,
					trigger='interval',
					seconds=classInstance.REFRESH_INTERVAL,
					start_date=datetime.datetime.now()+datetime.timedelta(seconds=20+x),
					jobstore='main_jobstore')

		x += 60*2.5


	# It looks like cherrypy installs a ctrl+c handler, so I don't need to.
	webserver_process.serverProcess()
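The comment block in Example #4 describes a NEEDS_REFRESHING / REFRESH_INTERVAL / refresh() convention. A minimal sketch of a class that would satisfy it (the class name and interval are hypothetical, not taken from nameTools):

class CachedLookup(object):
    # Presence of this attribute is all run_web() checks for.
    NEEDS_REFRESHING = True
    # Seconds between refresh() calls.
    REFRESH_INTERVAL = 60 * 15

    def refresh(self):
        # Re-read whatever backing data this instance caches. Takes no arguments.
        pass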
Example #5
0
def schedule_notices():
    sched = BackgroundScheduler()
    sched.start()


    trigger = CronTrigger(day_of_week='*', hour=17)
    sched.add_job(send_notice, trigger)
Example #6
0
def scheduler(event):
    scheduler = BackgroundScheduler()
    settings = event.app.registry.settings
    jobstores = {'default': SQLAlchemyJobStore(url=settings['scheduler.url'])}
    executors = {
        'default': {
            'type': settings['scheduler.executors.type'],
            'max_workers': settings['scheduler.executors.max_workers']
        },
        'processpool': ProcessPoolExecutor(
            max_workers=settings['scheduler.executors.processpool.max_workers']
        )
    }
    job_defaults = {
        'coalesce': False,
        'max_instances': settings['scheduler.job_defaults.max_instances']
    }
    scheduler.configure(
        jobstores=jobstores,
        executors=executors,
        job_defaults=job_defaults,
        timezone=timezone('UTC')
    )
    if settings['scheduler.autostart'] == 'true':
        scheduler.start()
    event.app.registry.registerUtility(scheduler, IScheduler)
Example #7
0
class MyScheduler(object):
	"""scheduler class, difine scheduler and jobs"""
	def __init__(self):
		super(MyScheduler, self).__init__()
		self.scheduler = BackgroundScheduler()
		self.wish_time = setting.WISH_DELAY
		self.book_time = setting.BOOK_LAST_TIME

	def start(self):
		self.scheduler.start()

	def shutdown(self):
		self.scheduler.shutdown()

	def add_check_wish_status_job(self, wish_id):
		# print datetime.now()
		run_date = datetime.now() + timedelta(seconds=self.wish_time)
		self.scheduler.add_job(_reset_wish_status, 'date', args=[wish_id],
			run_date=run_date)

	def add_set_book_removed_job(self, book_id):
		run_date = datetime.now() + timedelta(days=self.book_time)
		self.scheduler.add_job(_set_book_removed, 'date', args=[book_id],
			run_date=run_date)

	def add_xapian_reindex_job(self, xapian_tool):
		self.scheduler.add_job(xapian_tool.index, 'interval', minutes=15)
Example #8
def main(args):
    scheduler = BackgroundScheduler(coalesce=True, misfire_grace_time=4)
    taskparser = TaskParser(args['f'])
    taskparser.parse()

    yml_handler = YmlFileEventHandler(patterns=["*.yml"])
    yml_handler.set_scheduler(scheduler)
    yml_handler.set_parser(taskparser)
    file_observer = Observer()
    file_observer.schedule(yml_handler, path=args['f'], recursive=False)
    file_observer.start()

    # Initial parsing of the task folder
    for t in taskparser.task_list:
        addJob(t, scheduler)
        # Spread tasks from each other to prevent overload/task miss
        time.sleep(1)

    scheduler.start()

    # Update jobs while running
    while True:
        try:
            time.sleep(15)
        except KeyboardInterrupt:
            break

    scheduler.shutdown()
Example #9
0
def run_scheduler():
    scheduler = BackgroundScheduler()
    # scheduler.add_job(func, "interval", days=1)
    scheduler.add_job(check_overdue, "interval", days=1)
   # scheduler.add_job(send_mail_test(), "interval", minutes=1)
    scheduler.start()
    print "Scheduler started!"
Example #10
0
class Bot():
    def __init__(self, cfg):
        self.token = cfg.TOKEN_BOT
       
        self.scheduler = BackgroundScheduler({
            'apscheduler.jobstores.default': {
                'type': 'sqlalchemy',
                'url': cfg.POSTGRESQL_DB
            },
            'apscheduler.executors.default': {
                'class': 'apscheduler.executors.pool:ThreadPoolExecutor',
                'max_workers': '20'
            },
            'apscheduler.executors.processpool': {
                'type': 'processpool',
                'max_workers': '5'
            },
            'apscheduler.job_defaults.coalesce': 'false',
            'apscheduler.job_defaults.max_instances': '3',
            'apscheduler.timezone': 'UTC',
        })

    def post(self, text, channel):
        url = 'https://{domain}/services/hooks/slackbot?token={token}&channel=%23{channel}'
        r = requests.post(url.format(domain=cfg.TEAMDOMAIN, token=self.token, channel=channel), data=text)

    def test(self, args=None):
        print 'Scheduler test'
        if args:
            print 'job args: {0}'.format(' '.join(args))

    def add_reminder(self):
        self.scheduler.add_job(self.test, 'interval', minutes=1, id='job_id', jobstore='default')
Example #11
0
def init():
    scheduler = BackgroundScheduler()
    scheduler.start()
    scheduler.add_job(dochatcleanup, 'interval',
                      minutes=int(plugin.config.config('sleep')),
                      id='dochatcleanup', replace_existing=True)
    return
Example #12
0
def startScheduler():
    db.create_all()
    #create default roles!
    if not db.session.query(models.Role).filter(models.Role.name == "admin").first():
        admin_role = models.Role(name='admin', description='Administrator Role')
        user_role = models.Role(name='user', description='User Role')
        db.session.add(admin_role)
        db.session.add(user_role)
        db.session.commit()
        
    try:
        import tzlocal

        tz = tzlocal.get_localzone()
        logger.info("local timezone: %s" % tz)
    except:
        tz = None

    if not tz or tz.zone == "local":
        logger.error('Local timezone name could not be determined. Scheduler will display times in UTC for any log '
                     'messages. To resolve this, set up /etc/timezone with the correct time zone name.')
        tz = pytz.utc
    # In debug mode this is executed twice :(
    # DON'T run Flask in auto-reload mode when testing this! (A possible workaround is sketched after this example.)
    scheduler = BackgroundScheduler(logger=sched_logger, timezone=tz)
    scheduler.add_job(notify.task, 'interval', seconds=config.SCAN_INTERVAL, max_instances=1,
                      start_date=datetime.datetime.now(tz) + datetime.timedelta(seconds=2))
    scheduler.start()
    sched = scheduler
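Regarding the debug-mode double execution noted in the comments above: one common workaround (an assumption, not taken from this project) is to start the scheduler only in the Werkzeug reloader's child process, which sets the WERKZEUG_RUN_MAIN environment variable:

import os

# Hedged sketch: skip scheduling in the reloader's parent process.
# Assumes a Flask `app` object is in scope.
if not app.debug or os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
    startScheduler()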
Example #13
0
def nameserver_check_scheduler(heartbeat_obj):
    """ Schedule the check using the heartbeat object """
    sched = BackgroundScheduler()
    sched.start()
    sched.add_job(heartbeat_obj.nameserver_check,
                  'cron',
                  second=("*/%s" % int(heartbeat_obj.configuration['heartbeat']['default']['interval'])))

    retries_check = int(heartbeat_obj.configuration['heartbeat']['default']['init_retries'])
    retry_wait = int(10)

    while(retries_check != 0):
        try:
            heartbeat_obj.nameservers.next()
        except StopIteration:
            pretty_log("Heartbeat scheduler not initialized yet... Will retry %s times..." % retries_check)
            pretty_log("Will retry in %s seconds" % retry_wait)
            retries_check -= 1
            sleep(retry_wait)
        else:
            pretty_log("Heartbeat scheduler initalized...")
            return True
    else:
        pretty_log("Heartbeat scheduler error!")
        return False
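The cron trigger above fires via second='*/N', which aligns to wall-clock seconds within each minute. When that distinction does not matter (and N divides 60 evenly), an interval trigger expresses the same intent more directly; a sketch assuming the same heartbeat_obj:

# Roughly equivalent scheduling with an interval trigger instead of a cron second field.
sched.add_job(heartbeat_obj.nameserver_check,
              'interval',
              seconds=int(heartbeat_obj.configuration['heartbeat']['default']['interval']))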
Example #14
0
def initialize():
	"""Setting a schedule for the background process"""
	with app.app_context():
		print("Scheduling...")
		apsched = BackgroundScheduler()
		apsched.add_job(run_check, 'interval', seconds=60)
		apsched.start()
Example #15
0
def getScheduler():
  "getScheduler constructs and returns a scheduler object"

  #Define default background scheduler
  scheduler = BackgroundScheduler()

  #Define ownerJob to be scheduled
  def ownerJob():
    "ownerJob updates the active_owners file with owner information"

    #Get current list of owners
    data = DevicesAttached().getActiveOwners()

    #Obtain local time and build a timestamp string
    timeStruct = time.localtime()
    [year,month,day,hour,minute,second,weekDay,yearDay,isDes] = timeStruct
    data["timestamp"] = "{:02d}:{:02d}:{:02d}".format(hour,minute,second)

    #Save file with date name
    filename = "active_owners_{:02d}_{:02d}_{:02d}.csv".format(day,month,year)
    saveRow(filename,data)

    #If end of a day upload log to dropbox
    if ((hour == 23) and (minute > 57)):
      try:
        DropboxAPI().saveFile(filename)
      except Exception as exception:
        print str(exception)

  ownerJob()

  scheduler.add_job(ownerJob,'interval',minutes=2,id="owner-job")
  return scheduler
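getScheduler() only constructs the scheduler; it is up to the caller to start it. A minimal, hypothetical call site:

scheduler = getScheduler()
scheduler.start()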
Example #16
class TestPySpout(basesinfonierspout.BaseSinfonierSpout):

    def __init__(self):

        basesinfonierspout.BaseSinfonierSpout.__init__(self)

    def useropen(self):

        # Using deque as a queue
        self.queue = deque()
        
        self.frequency = int(self.getParam("frequency"))
        
        # This scheduler launches self.job function every X seconds
        self.sched = BackgroundScheduler()
        self.sched.add_job(self.job, "interval", seconds=self.frequency, id="testpy")
        self.sched.start()

    def usernextTuple(self):

        # If there are items in self.queue, get the first one (.popleft()), do what you want with it and emit the tuple
        if self.queue:
            self.addField("timestamp",self.queue.popleft())
            self.emit()
    
    def userclose(self):
        
        pass
    
    def job(self):
        
        self.queue.append(str(int(time.time())))
Example #17
0
def configure_scheduler_from_config(settings):
    scheduler = BackgroundScheduler()
    scheduler.start()

    # run `purge_account` job at 0:00
    scheduler.add_job(
        purge_account,
        id='purge_account',
        name='Purge accounts which were not activated',
        trigger='cron',
        hour=0,
        minute=0
    )

    # run `purge_token` job at 0:30
    scheduler.add_job(
        purge_token,
        id='purge_token',
        name='Purge expired tokens',
        trigger='cron',
        hour=0,
        minute=30
    )

    scheduler.add_listener(exception_listener, EVENT_JOB_ERROR)

    atexit.register(lambda: scheduler.shutdown())
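exception_listener is not defined in this snippet. A possible shape for it, assuming APScheduler's standard events API (listeners registered for EVENT_JOB_ERROR receive a JobExecutionEvent with job_id and exception attributes); the logging itself is illustrative:

import logging

def exception_listener(event):
    # Called for EVENT_JOB_ERROR; event.exception holds the raised exception.
    logging.getLogger(__name__).error('Job %s raised %r', event.job_id, event.exception)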
Example #18
0
class Processor:

	def like_wall(self):
		print "Will like a wall!"
		try:
			news = self.user.get_news(random.randint(10,100))
			print "News: ", len(news)
			for n in news:
				likes = n.get("likes",None)
				if likes and likes["user_likes"] == 0 and likes["can_like"] == 1:
					print "LIKE", n["post_id"]
					self.user.like_post(n["post_id"],n["source_id"])
					print "Sleeep"
					time.sleep(random.uniform(0, 5))
					print "Done"
						
		except:
			print "Error in like"
			pass


	def __init__(self, user):
		self.user = user
		self.sched = BackgroundScheduler()
		self.sched.add_job(self.like_wall, "interval", seconds=60)
		self.sched.start()


	def process_message(self, message, chatid, userid):
		return;
Example #19
0
def initialize():
    Database.initialize()
    session['email'] = session.get('email')
    session['name'] = session.get('name')

    scheduler = BackgroundScheduler()
    scheduler.add_job(check_alert, "cron", day_of_week="0-4", hour="16", minute=30)
    scheduler.start()
Example #20
0
 def __init__(self,interval=2):
     global se_scheduler
     se_scheduler = BackgroundScheduler()
     se_scheduler.add_jobstore("mongodb", database="felix_se",
                               collection="ScheduledJobs")
     se_scheduler.start()
     super(SESchedulerService, self).__init__("SESchedulerService",interval)
     self.first_time = True
Example #21
0
def interval():
    """
    Setup the scheduler.
    """
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', seconds=10)
    scheduler.start()
    print('Scheduler Started')
Example #22
def test_aps_eventlet():
    def showMessage():
        print "Show this message"

    sh = BackgroundScheduler()
    sh.start()
    sh.add_job(showMessage, 'interval', seconds=2, timezone=utc)
    time.sleep(10)
Example #23
0
    def setup_tasks(self):
        scheduler = BackgroundScheduler()

        def remove_dead_inbound_connections():
            self.remove_dead_inbound_connections()

        scheduler.add_job(remove_dead_inbound_connections, CronTrigger(second='*/15'))
        scheduler.start()
Example #24
0
def run_timer():
    global RUNNING_TIMER
    if not RUNNING_TIMER:
        RUNNING_TIMER = True
        # Sync article click counts every 30 minutes
        scheduler = BackgroundScheduler()
        scheduler.add_job(sync_click, 'interval', minutes=30)
        scheduler.start()
Example #25
def set_scheduler_initial():
    print(' -------------- SETTING INITIAL SCHEDULER ---------------------')
    global scheduler
    scheduler = BackgroundScheduler()
    scheduler.start()
    log.info(os.environ.get('SCHEDULER_HOUR'))
    log.info(os.environ.get('SCHEDULER_MIN'))

    set_scheduler(os.environ.get('SCHEDULER_HOUR'), os.environ.get('SCHEDULER_MIN'))
Example #26
0
class scheduler(object):
    def __init__(self):
        logging.basicConfig()
        self.sched = BackgroundScheduler()
        self.sched.start()

    def addjob(self, function, time, arguments):
        print "in addjob"
        self.sched.add_job(function, 'date', run_date=time, args= arguments)
Example #27
0
class Pokeradar():
    def __init__(self):
        self.api = PGoApi()
        self.scheduler = BackgroundScheduler()
        self.logged_in = False
        self.nearby_pokemon = None

        self.latitude = None
        self.longitude = None

        with open(os.path.join(os.path.dirname(__file__), 'data/pokemon.json')) as f:
            self.pokemon_names = json.load(f)


    def login(self, form_data):
        username = form_data['username']
        password = form_data['password']
        latitude = form_data['latitude']
        longitude = form_data['longitude']

        if not self.logged_in:
            if not self.api.login('ptc', username, password):
                return 'Login failed (either servers are down or invalid credentials).', 401

            self.latitude = float(latitude)
            self.longitude = float(longitude)

            self.api.set_position(self.latitude, self.longitude, 0)
            self.api.get_player()

            if self.api.call() == False:
                logging.warning("Pokémon GO servers are currently down.")
                return 'Pokémon GO servers are currently down. Try again later.', 502

            self.scheduler.start()
            self.scheduler.add_job(self.update_nearby, 'interval', seconds=30)

            self.logged_in = True

        return 'Success!', 200


    def update_nearby(self):
        pokemon = find_nearby_pokemon(self.api, self.latitude, self.longitude)
        self.nearby_pokemon = []

        for key, poke in pokemon.items():
            dex_num = int(poke['pokemon_data']['pokemon_id'])
            name = self.pokemon_names[format(dex_num, '03')]
            time_til_hidden = poke['time_till_hidden_ms'] // 1000
            distance = poke['distance']

            if time_til_hidden > 0 and distance < 200:
                p = [dex_num, name, time_til_hidden, distance]
                self.nearby_pokemon.append(p)

        self.nearby_pokemon.sort(key=lambda x: x[2], reverse=True)
Example #28
0
def timer():
    global scheduler
    global job
    if scheduler is not None:
        job = scheduler.add_job(watering_task,'interval',hours=INTERVAL_BETWN_WATER_HR)
    else: 
        scheduler = BackgroundScheduler()
        job = scheduler.add_job(watering_task,'interval',hours=INTERVAL_BETWN_WATER_HR)
        scheduler.start()
    return 'ok'
Example #29
0
class Scheduler_publish(object):
    def __init__(self):
        self.run_date = datetime.datetime.now() + datetime.timedelta(seconds=3)
        self.run_date = self.run_date.strftime('%Y-%m-%d %H:%M:%S')
        self.tm = time.strftime('%Y%m%d%H%M',time.localtime())
        self.scheduler = BackgroundScheduler({'apscheduler.job_defaults.max_instances': '5'})
        self.scheduler.configure(timezone=pytz.timezone('Asia/Shanghai'))
    def Scheduler_mem(self,func,publish_key = 'None',taskKey = 'None'):
        self.scheduler.add_job(func,'date', run_date=self.run_date,args=[publish_key,taskKey],id='%s_%s'%(taskKey,self.tm),replace_existing=False)
        return self.scheduler
Example #30
0
    def post(self, action, position = ''):
        global scheduler
        self.checkStartup()
        
        if action == 'play':
            runCommand('mpc play ' + position)
            #Settings.set('radio', 'state', 'play')
            
            if scheduler is None:
                scheduler = BackgroundScheduler()
                scheduler.add_job(self.checkStatus, 'interval', seconds=30, id='checkStatus', replace_existing=True)
                scheduler.start()
        elif action == 'stop':
            runCommand('mpc stop')
            #Settings.set('radio', 'state', 'stop')
            
            if scheduler is not None:
                scheduler.remove_job('checkStatus')
                scheduler.shutdown()
                scheduler = None
            return {'playMode': 'stopped'}
        elif action =='pause':
            runCommand('mpc pause')
        elif action =='next':
            runCommand('mpc next')
        elif action =='previous':
            runCommand('mpc prev')
        else:
            return {'playMode': 'invalid'}

        (out, err) = runCommand('mpc status')
        if err:
            return {'error': err}, 500
        return {'playMode': Parser.parsePlayMode(out)}
Example #31
0
class Core(object):
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = BackgroundScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

        self.USER_AGENT = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))

        self.SYS_ENCODING = get_sys_encoding()

        # patch modules with encoding kludge
        patch_modules()

    def start(self):
        self.started = True

        # thread name
        threading.currentThread().setName('CORE')

        # Check if we need to perform a restore first
        if os.path.exists(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore'))):
            success = restoreSR(
                os.path.abspath(os.path.join(sickrage.DATA_DIR, 'restore')),
                sickrage.DATA_DIR)
            print("Restoring SiCKRAGE backup: %s!\n" %
                  ("FAILED", "SUCCESSFUL")[success])
            if success:
                shutil.rmtree(os.path.abspath(
                    os.path.join(sickrage.DATA_DIR, 'restore')),
                              ignore_errors=True)

        # migrate old database file names to new ones
        if os.path.isfile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db'))):
            if os.path.isfile(os.path.join(sickrage.DATA_DIR, 'sickrage.db')):
                helpers.moveFile(
                    os.path.join(sickrage.DATA_DIR, 'sickrage.db'),
                    os.path.join(
                        sickrage.DATA_DIR, '{}.bak-{}'.format(
                            'sickrage.db',
                            datetime.datetime.now().strftime(
                                '%Y%m%d_%H%M%S'))))

            helpers.moveFile(
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickbeard.db')),
                os.path.abspath(os.path.join(sickrage.DATA_DIR,
                                             'sickrage.db')))

        # load config
        self.srConfig.load()

        # set socket timeout
        socket.setdefaulttimeout(self.srConfig.SOCKET_TIMEOUT)

        # setup logger settings
        self.srLogger.logSize = self.srConfig.LOG_SIZE
        self.srLogger.logNr = self.srConfig.LOG_NR
        self.srLogger.logFile = self.srConfig.LOG_FILE
        self.srLogger.debugLogging = self.srConfig.DEBUG
        self.srLogger.consoleLogging = not sickrage.QUITE

        # start logger
        self.srLogger.start()

        # user agent
        if self.srConfig.RANDOM_USER_AGENT:
            self.USER_AGENT = UserAgent().random

        urlparse.uses_netloc.append('scgi')
        urllib.FancyURLopener.version = self.USER_AGENT

        # Check available space
        try:
            total_space, available_space = getFreeSpace(sickrage.DATA_DIR)
            if available_space < 100:
                self.srLogger.error(
                    'Shutting down as SiCKRAGE needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left',
                    available_space)
                return
        except:
            self.srLogger.error('Failed getting diskspace: %s',
                                traceback.format_exc())

        # perform database startup actions
        for db in [self.mainDB, self.cacheDB, self.failedDB]:
            # initialize database
            db.initialize()

            # check integrity of database
            db.check_integrity()

            # migrate database
            db.migrate()

            # misc database cleanups
            db.cleanup()

        # compact main database
        if not self.srConfig.DEVELOPER and self.srConfig.LAST_DB_COMPACT < time.time(
        ) - 604800:  # 7 days
            self.mainDB.compact()
            self.srConfig.LAST_DB_COMPACT = int(time.time())

        # load name cache
        self.NAMECACHE.load()

        # load data for shows from database
        self.load_shows()

        if self.srConfig.DEFAULT_PAGE not in ('home', 'schedule', 'history',
                                              'news', 'IRC'):
            self.srConfig.DEFAULT_PAGE = 'home'

        # cleanup cache folder
        for folder in ['mako', 'sessions', 'indexers']:
            try:
                shutil.rmtree(os.path.join(sickrage.CACHE_DIR, folder),
                              ignore_errors=True)
            except Exception:
                continue

        # init anidb connection
        if self.srConfig.USE_ANIDB:

            def anidb_logger(msg):
                return self.srLogger.debug("AniDB: {} ".format(msg))

            try:
                self.ADBA_CONNECTION = adba.Connection(keepAlive=True,
                                                       log=anidb_logger)
                self.ADBA_CONNECTION.auth(self.srConfig.ANIDB_USERNAME,
                                          self.srConfig.ANIDB_PASSWORD)
            except Exception as e:
                self.srLogger.warning("AniDB exception msg: %r " % repr(e))

        if self.srConfig.WEB_PORT < 21 or self.srConfig.WEB_PORT > 65535:
            self.srConfig.WEB_PORT = 8081

        if not self.srConfig.WEB_COOKIE_SECRET:
            self.srConfig.WEB_COOKIE_SECRET = generateCookieSecret()

        # attempt to help prevent users from breaking links by using a bad url
        if not self.srConfig.ANON_REDIRECT.endswith('?'):
            self.srConfig.ANON_REDIRECT = ''

        if not re.match(r'\d+\|[^|]+(?:\|[^|]+)*', self.srConfig.ROOT_DIRS):
            self.srConfig.ROOT_DIRS = ''

        self.srConfig.NAMING_FORCE_FOLDERS = check_force_season_folders()
        if self.srConfig.NZB_METHOD not in ('blackhole', 'sabnzbd', 'nzbget'):
            self.srConfig.NZB_METHOD = 'blackhole'

        if self.srConfig.TORRENT_METHOD not in ('blackhole', 'utorrent',
                                                'transmission', 'deluge',
                                                'deluged', 'download_station',
                                                'rtorrent', 'qbittorrent',
                                                'mlnet', 'putio'):
            self.srConfig.TORRENT_METHOD = 'blackhole'

        if self.srConfig.PROPER_SEARCHER_INTERVAL not in ('15m', '45m', '90m',
                                                          '4h', 'daily'):
            self.srConfig.PROPER_SEARCHER_INTERVAL = 'daily'

        if self.srConfig.AUTOPOSTPROCESSOR_FREQ < self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ:
            self.srConfig.AUTOPOSTPROCESSOR_FREQ = self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ

        if self.srConfig.NAMECACHE_FREQ < self.srConfig.MIN_NAMECACHE_FREQ:
            self.srConfig.NAMECACHE_FREQ = self.srConfig.MIN_NAMECACHE_FREQ

        if self.srConfig.DAILY_SEARCHER_FREQ < self.srConfig.MIN_DAILY_SEARCHER_FREQ:
            self.srConfig.DAILY_SEARCHER_FREQ = self.srConfig.MIN_DAILY_SEARCHER_FREQ

        self.srConfig.MIN_BACKLOG_SEARCHER_FREQ = self.BACKLOGSEARCHER.get_backlog_cycle_time(
        )
        if self.srConfig.BACKLOG_SEARCHER_FREQ < self.srConfig.MIN_BACKLOG_SEARCHER_FREQ:
            self.srConfig.BACKLOG_SEARCHER_FREQ = self.srConfig.MIN_BACKLOG_SEARCHER_FREQ

        if self.srConfig.VERSION_UPDATER_FREQ < self.srConfig.MIN_VERSION_UPDATER_FREQ:
            self.srConfig.VERSION_UPDATER_FREQ = self.srConfig.MIN_VERSION_UPDATER_FREQ

        if self.srConfig.SHOWUPDATE_HOUR > 23:
            self.srConfig.SHOWUPDATE_HOUR = 0
        elif self.srConfig.SHOWUPDATE_HOUR < 0:
            self.srConfig.SHOWUPDATE_HOUR = 0

        if self.srConfig.SUBTITLE_SEARCHER_FREQ < self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ:
            self.srConfig.SUBTITLE_SEARCHER_FREQ = self.srConfig.MIN_SUBTITLE_SEARCHER_FREQ

        if self.srConfig.SUBTITLES_LANGUAGES[0] == '':
            self.srConfig.SUBTITLES_LANGUAGES = []

        # add version checker job
        self.srScheduler.add_job(
            self.VERSIONUPDATER.run,
            srIntervalTrigger(
                **{
                    'hours': self.srConfig.VERSION_UPDATER_FREQ,
                    'min': self.srConfig.MIN_VERSION_UPDATER_FREQ
                }),
            name="VERSIONUPDATER",
            id="VERSIONUPDATER")

        # add network timezones updater job
        self.srScheduler.add_job(update_network_dict,
                                 srIntervalTrigger(**{'days': 1}),
                                 name="TZUPDATER",
                                 id="TZUPDATER")

        # add show updater job
        self.srScheduler.add_job(
            self.SHOWUPDATER.run,
            srIntervalTrigger(
                **{
                    'days':
                    1,
                    'start_date':
                    datetime.datetime.now().replace(
                        hour=self.srConfig.SHOWUPDATE_HOUR)
                }),
            name="SHOWUPDATER",
            id="SHOWUPDATER")

        # add show next episode job
        self.srScheduler.add_job(self.SHOWUPDATER.nextEpisode,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="SHOWNEXTEP",
                                 id="SHOWNEXTEP")

        # add daily search job
        self.srScheduler.add_job(self.DAILYSEARCHER.run,
                                 srIntervalTrigger(
                                     **{
                                         'minutes':
                                         self.srConfig.DAILY_SEARCHER_FREQ,
                                         'min':
                                         self.srConfig.MIN_DAILY_SEARCHER_FREQ,
                                         'start_date':
                                         datetime.datetime.now() +
                                         datetime.timedelta(minutes=4)
                                     }),
                                 name="DAILYSEARCHER",
                                 id="DAILYSEARCHER")

        # add backlog search job
        self.srScheduler.add_job(
            self.BACKLOGSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes':
                    self.srConfig.BACKLOG_SEARCHER_FREQ,
                    'min':
                    self.srConfig.MIN_BACKLOG_SEARCHER_FREQ,
                    'start_date':
                    datetime.datetime.now() + datetime.timedelta(minutes=30)
                }),
            name="BACKLOG",
            id="BACKLOG")

        # add auto-postprocessing job
        self.srScheduler.add_job(
            self.AUTOPOSTPROCESSOR.run,
            srIntervalTrigger(
                **{
                    'minutes': self.srConfig.AUTOPOSTPROCESSOR_FREQ,
                    'min': self.srConfig.MIN_AUTOPOSTPROCESSOR_FREQ
                }),
            name="POSTPROCESSOR",
            id="POSTPROCESSOR")

        # add find proper job
        self.srScheduler.add_job(
            self.PROPERSEARCHER.run,
            srIntervalTrigger(
                **{
                    'minutes': {
                        '15m': 15,
                        '45m': 45,
                        '90m': 90,
                        '4h': 4 * 60,
                        'daily': 24 * 60
                    }[self.srConfig.PROPER_SEARCHER_INTERVAL]
                }),
            name="PROPERSEARCHER",
            id="PROPERSEARCHER")

        # add trakt.tv checker job
        self.srScheduler.add_job(self.TRAKTSEARCHER.run,
                                 srIntervalTrigger(**{'hours': 1}),
                                 name="TRAKTSEARCHER",
                                 id="TRAKTSEARCHER")

        # add subtitles finder job
        self.srScheduler.add_job(
            self.SUBTITLESEARCHER.run,
            srIntervalTrigger(
                **{'hours': self.srConfig.SUBTITLE_SEARCHER_FREQ}),
            name="SUBTITLESEARCHER",
            id="SUBTITLESEARCHER")

        # start scheduler service
        self.srScheduler.start()

        # Pause/Resume PROPERSEARCHER job (a plainer equivalent is sketched after this example)
        (self.srScheduler.get_job('PROPERSEARCHER').pause,
         self.srScheduler.get_job('PROPERSEARCHER').resume
         )[self.srConfig.DOWNLOAD_PROPERS]()

        # Pause/Resume TRAKTSEARCHER job
        (self.srScheduler.get_job('TRAKTSEARCHER').pause,
         self.srScheduler.get_job('TRAKTSEARCHER').resume
         )[self.srConfig.USE_TRAKT]()

        # Pause/Resume SUBTITLESEARCHER job
        (self.srScheduler.get_job('SUBTITLESEARCHER').pause,
         self.srScheduler.get_job('SUBTITLESEARCHER').resume
         )[self.srConfig.USE_SUBTITLES]()

        # Pause/Resume POSTPROCESS job
        (self.srScheduler.get_job('POSTPROCESSOR').pause,
         self.srScheduler.get_job('POSTPROCESSOR').resume
         )[self.srConfig.PROCESS_AUTOMATICALLY]()

        # start queue's
        self.SEARCHQUEUE.start()
        self.SHOWQUEUE.start()

        # start webserver
        self.srWebServer.start()

    def shutdown(self, restart=False):
        if self.started:
            self.srLogger.info('SiCKRAGE IS SHUTTING DOWN!!!')

            # shutdown webserver
            self.srWebServer.shutdown()

            # shutdown show queue
            if self.srScheduler:
                self.srLogger.debug("Shutting down scheduler")
                self.srScheduler.shutdown()

            # shutdown show queue
            if self.SHOWQUEUE:
                self.srLogger.debug("Shutting down show queue")
                self.SHOWQUEUE.shutdown()
                del self.SHOWQUEUE

            # shutdown search queue
            if self.SEARCHQUEUE:
                self.srLogger.debug("Shutting down search queue")
                self.SEARCHQUEUE.shutdown()
                del self.SEARCHQUEUE

            # log out of ADBA
            if self.ADBA_CONNECTION:
                self.srLogger.debug("Shutting down ANIDB connection")
                self.ADBA_CONNECTION.stop()

            # save all show and config settings
            self.save_all()

            # close databases
            for db in [self.mainDB, self.cacheDB, self.failedDB]:
                if db.opened:
                    self.srLogger.debug(
                        "Shutting down {} database connection".format(db.name))
                    db.close()

            # shutdown logging
            self.srLogger.close()

        if restart:
            os.execl(sys.executable, sys.executable, *sys.argv)
        elif sickrage.daemon:
            sickrage.daemon.stop()

        self.started = False

    def save_all(self):
        # write all shows
        self.srLogger.info("Saving all shows to the database")
        for SHOW in self.SHOWLIST:
            try:
                SHOW.saveToDB()
            except:
                continue

        # save config
        self.srConfig.save()

    def load_shows(self):
        """
        Populates the showlist with shows from the database
        """

        for dbData in [
                x['doc'] for x in self.mainDB.db.all('tv_shows', with_doc=True)
        ]:
            try:
                self.srLogger.debug("Loading data for show: [%s]",
                                    dbData['show_name'])
                show = TVShow(int(dbData['indexer']),
                              int(dbData['indexer_id']))
                show.nextEpisode()
                self.NAMECACHE.build(show)
                self.SHOWLIST += [show]
            except Exception as e:
                self.srLogger.error("Show error in [%s]: %s" %
                                    (dbData['location'], e.message))
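The (job.pause, job.resume)[flag]() expressions near the end of start() above index a tuple of bound methods with a boolean and call the selected one. A plainer equivalent for one of those jobs, written as it would read inside Core.start() (not a standalone snippet):

        # Equivalent to the tuple-indexing trick for the PROPERSEARCHER job.
        job = self.srScheduler.get_job('PROPERSEARCHER')
        if self.srConfig.DOWNLOAD_PROPERS:
            job.resume()
        else:
            job.pause()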
Example #32
0
class BountyCollector(base_agent.BaseAgent):
    def __init__(self, skale, node_id=None):
        super().__init__(skale, node_id)
        self.logger.info('Start checking logs on blockchain')
        start = time.time()
        try:
            pass
            self.collect_last_bounty_logs()
        except Exception as err:
            self.logger.error(
                f'Error occurred while checking logs from blockchain: {err} ')
        end = time.time()
        self.logger.info(f'Check completed. Execution time = {end - start}')
        self.scheduler = BackgroundScheduler(timezone='UTC',
                                             job_defaults={
                                                 'coalesce':
                                                 True,
                                                 'misfire_grace_time':
                                                 MISFIRE_GRACE_TIME
                                             })

    def get_reward_date(self):
        reward_period = self.skale.constants_holder.get_reward_period()
        reward_date = self.skale.nodes_data.get(
            self.id)['last_reward_date'] + reward_period
        return datetime.utcfromtimestamp(reward_date) + timedelta(
            seconds=REWARD_DELAY)

    def collect_last_bounty_logs(self):
        start_date = datetime.utcfromtimestamp(
            self.skale.nodes_data.get(self.id)['start_date'])
        last_block_number_in_db = db.get_bounty_max_block_number()
        if last_block_number_in_db is None:
            start_block_number = find_block_for_tx_stamp(
                self.skale, start_date)
        else:
            start_block_number = last_block_number_in_db + 1
        count = 0
        while True:
            last_block_number = self.skale.web3.eth.blockNumber
            self.logger.debug(f'last block = {last_block_number}')
            end_chunk_block_number = start_block_number + BLOCK_STEP_SIZE - 1

            if end_chunk_block_number > last_block_number:
                end_chunk_block_number = last_block_number + 1
            event_filter = self.skale.manager.contract.events.BountyGot.createFilter(
                argument_filters={'nodeIndex': self.id},
                fromBlock=hex(start_block_number),
                toBlock=hex(end_chunk_block_number))
            logs = event_filter.get_all_entries()

            for log in logs:
                args = log['args']
                tx_block_number = log['blockNumber']
                block_data = self.skale.web3.eth.getBlock(tx_block_number)
                block_timestamp = datetime.utcfromtimestamp(
                    block_data['timestamp'])
                self.logger.debug(log)
                tx_hash = log['transactionHash'].hex()
                gas_used = self.skale.web3.eth.getTransactionReceipt(
                    tx_hash)['gasUsed']
                db.save_bounty_event(block_timestamp, tx_hash,
                                     log['blockNumber'], args['nodeIndex'],
                                     args['bounty'], args['averageDowntime'],
                                     args['averageLatency'], gas_used)
                count += 1
            self.logger.debug(f'count = {count}')
            start_block_number = start_block_number + BLOCK_STEP_SIZE
            if end_chunk_block_number >= last_block_number:
                break

    def get_bounty(self):
        address = self.skale.wallet.address
        eth_bal_before = self.skale.web3.eth.getBalance(address)
        skl_bal_before = self.skale.token.get_balance(address)
        self.logger.info(f'ETH balance before: {eth_bal_before}')

        self.logger.info('--- Getting Bounty ---')
        try:
            self.skale.manager.get_bounty(self.id, dry_run=True)
        except ValueError as err:
            self.logger.info(f'Tx call failed: {err}')
            raise TxCallFailedException
        tx_res = self.skale.manager.get_bounty(self.id, wait_for=True)
        tx_res.raise_for_status()
        tx_hash = tx_res.receipt['transactionHash'].hex()

        self.logger.info(LONG_DOUBLE_LINE)
        self.logger.info('The bounty was successfully received')
        self.logger.info(f'tx hash: {tx_hash}')
        self.logger.debug(f'Receipt: {tx_res.receipt}')

        eth_bal = self.skale.web3.eth.getBalance(address)
        skl_bal = self.skale.token.get_balance(address)
        self.logger.info(f'ETH balance after: {eth_bal}')
        self.logger.info(f'ETH difference: {eth_bal - eth_bal_before}')
        try:
            db.save_bounty_stats(tx_hash, eth_bal_before, skl_bal_before,
                                 eth_bal, skl_bal)
        except Exception as err:
            self.logger.error(f'Cannot save getBounty stats. Error: {err}')

        h_receipt = self.skale.manager.contract.events.BountyGot(
        ).processReceipt(tx_res.receipt, errors=DISCARD)
        self.logger.info(LONG_LINE)
        self.logger.info(h_receipt)
        args = h_receipt[0]['args']
        try:
            db.save_bounty_event(datetime.utcfromtimestamp(args['time']),
                                 str(tx_hash), tx_res.receipt['blockNumber'],
                                 args['nodeIndex'], args['bounty'],
                                 args['averageDowntime'],
                                 args['averageLatency'],
                                 tx_res.receipt['gasUsed'])
        except Exception as err:
            self.logger.error(f'Cannot save getBounty event. Error: {err}')

        return tx_res.receipt['status']

    @tenacity.retry(wait=tenacity.wait_fixed(60),
                    retry=tenacity.retry_if_exception_type(IsNotTimeException)
                    | tenacity.retry_if_exception_type(TxCallFailedException))
    def job(self) -> None:
        """ Periodic job"""
        self.logger.info(f'Job started')

        try:
            reward_date = self.get_reward_date()
        except Exception as err:
            self.logger.error(f'Cannot get reward date: {err}')
            # TODO: notify Skale Admin
            raise

        last_block_number = self.skale.web3.eth.blockNumber
        block_data = self.skale.web3.eth.getBlock(last_block_number)
        block_timestamp = datetime.utcfromtimestamp(block_data['timestamp'])
        self.logger.info(f'Reward date: {reward_date}')
        self.logger.info(f'Timestamp: {block_timestamp}')
        if reward_date > block_timestamp:
            self.logger.info(
                'Current block timestamp is less than reward time. Will try in 1 min'
            )
            raise IsNotTimeException(Exception)
        self.get_bounty()

    def job_listener(self, event):
        if event.exception:
            self.logger.info('The job failed')
            utc_now = datetime.utcnow()
            self.scheduler.add_job(self.job,
                                   'date',
                                   run_date=utc_now + timedelta(seconds=60))
            self.logger.debug(self.scheduler.get_jobs())
        else:
            self.logger.debug('The job finished successfully')
            reward_date = self.get_reward_date()
            self.logger.debug(f'Reward date after job: {reward_date}')
            utc_now = datetime.utcnow()
            if utc_now > reward_date:
                self.logger.debug('Changing reward date for now')
                reward_date = utc_now
            self.scheduler.add_job(self.job, 'date', run_date=reward_date)
            self.scheduler.print_jobs()

    def run(self) -> None:
        """Starts agent"""
        self.logger.debug(f'{self.agent_name} started')
        reward_date = self.get_reward_date()
        self.logger.debug(f'Reward date on agent\'s start: {reward_date}')
        utc_now = datetime.utcnow()
        if utc_now > reward_date:
            reward_date = utc_now
        self.scheduler.add_job(self.job, 'date', run_date=reward_date)
        self.scheduler.print_jobs()
        self.scheduler.add_listener(self.job_listener,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        self.scheduler.start()
        while True:
            time.sleep(1)
            pass
Example #33
0
import atexit
import logging

RELAY_SUNSCREEN = [17, 18, 27, 22]
RELAY_MOTOR_POWER = 25
RELAY_MOTOR_OVERLOAD = 8

MOTOR_1 = 23
MOTOR_2 = 24

# Motor speed 0 (off) to 255 (max speed)
currentMotorSpeed = 0
timestamp_motor_power = 0
status_motor_power = GPIO.LOW

sched = BackgroundScheduler()  #daemon=True
# create a pigpio object that accesses the pigpiod daemon (which handles the PWM to the motors)
# to start the daemon: sudo pigpiod
pi = pigpio.pi()


def control(screen_id, movement, percentage, ms=5000.0):
    ensure_motor_power()

    ss = [screen_id]
    if (screen_id == 0):
        ss = [1, 2, 3, 4]
    print("relais ", ss, " on ")
    for s in ss:
        GPIO.output(RELAY_SUNSCREEN[s - 1], GPIO.LOW)
    time.sleep(1)
Example #34
0
class CardLiveDataManager:
    INSTANCE = None

    def __init__(self, cardlive_home: Path):
        ncbi_db_path = cardlive_home / 'db' / 'taxa.sqlite'
        card_live_data_dir = cardlive_home / 'data' / 'card_live'

        self._data_loader = CardLiveDataLoader(card_live_data_dir)
        self._data_loader.add_data_modifiers([
            AntarcticaNAModifier(np.datetime64('2020-07-20')),
            AddGeographicNamesModifier(region_codes),
            AddTaxonomyModifier(ncbi_db_path),
        ])

        self._card_live_data = self._data_loader.read_or_update_data()

        self._scheduler = BackgroundScheduler(
            jobstores={'default': MemoryJobStore()},
            executors={'default': ThreadPoolExecutor(1)},
            job_defaults={'max_instances': 1})
        self._scheduler.add_job(self.update_job, 'interval', minutes=10)
        self._scheduler.start()

    def update_job(self):
        logger.debug('Updating CARD:Live data.')
        try:
            new_data = self._data_loader.read_or_update_data(
                self._card_live_data)
            if new_data is not self._card_live_data:
                logger.debug(
                    f'Old data has {len(self._card_live_data)} samples, new data has {len(new_data)} samples'
                )
                self._card_live_data = new_data
        except Exception as e:
            logger.info(
                'An exception occurred when attempting to load new data. Skipping new data.'
            )
            logger.exception(e)
        logger.debug('Finished updating CARD:Live data.')

    def data_archive_generator(
        self,
        file_names: Union[List[str], Set[str]] = None
    ) -> Generator[bytes, None, None]:
        """
        Get the CARD:Live JSON files as a zipstream generator.
        :param file_names: The file names to load into the archive.
        :return: A generator which allows streaming of the zip file contents.
        """
        if file_names is None:
            file_names = self.card_data.files()

        return self._data_loader.data_archive_generator(file_names)

    @property
    def card_data(self) -> CardLiveData:
        return self._card_live_data

    @classmethod
    def create_instance(cls, cardlive_home: Path) -> None:
        cls.INSTANCE = CardLiveDataManager(cardlive_home)

    @classmethod
    def get_instance(cls) -> CardLiveDataManager:
        if cls.INSTANCE is not None:
            return cls.INSTANCE
        else:
            raise Exception(f'{cls} does not yet have an instance.')
Example #35
0
        witness = rpc.get_witness_by_account(user)
        pprint(witness)
        for key in ['last_mbd_exchange_update']:
            witness[key] = datetime.strptime(witness[key], "%Y-%m-%dT%H:%M:%S")
        # Convert to Numbers
        for key in ['votes', 'total_missed']:
            witness[key] = float(witness[key])
        witness.update({'account': user})
        db.witness.update({'_id': user}, {'$set': witness}, upsert=True)


def run():
    update_witnesses()
    # check_misses()


if __name__ == '__main__':
    # Start job immediately
    run()
    # Schedule it to run every 1 minute
    scheduler = BackgroundScheduler()
    scheduler.add_job(run, 'interval', minutes=1, id='run')
    scheduler.start()
    # Loop
    try:
        while True:
            sys.stdout.flush()
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        scheduler.shutdown()
Example #36
0
PROG_DIR = None
FULL_PATH = None

ARGS = None
SIGNAL = None

SYS_PLATFORM = None
SYS_ENCODING = None

QUIET = False
VERBOSE = True
DAEMON = False
CREATEPID = False
PIDFILE = None

SCHED = BackgroundScheduler()
SCHED_LOCK = threading.Lock()

INIT_LOCK = threading.Lock()
_INITIALIZED = False
started = False

DATA_DIR = None

CONFIG = None
CONFIG_FILE = None

DB_FILE = None

LOG_LIST = []
Example #37
0
    # the task may not exist if sent using `send_task` which
    # sends tasks by name, so fall back to the default result backend
    # if that is the case.
    task = celery.tasks.get(sender)
    backend = task.backend if task else celery.backend
    backend.store_result(headers['id'], None, 'WAITING')


# Register celery tasks. Removing them will cause the tasks to stop working, so don't remove them.
# It is important to register them after celery is defined, to resolve circular imports.

from .api.helpers import tasks

# import helpers.tasks

scheduler = BackgroundScheduler(timezone=utc)
# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)
# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)
if app.config['ENABLE_ELASTICSEARCH']:
    scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)
    scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)

scheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)
scheduler.add_job(send_event_fee_notification, 'cron', day=1)
scheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)
scheduler.add_job(change_session_state_on_event_completion,
                  'cron',
                  hour=5,
                  minute=30)
scheduler.add_job(expire_pending_tickets, 'cron', minute=45)
scheduler.add_job(send_monthly_event_invoice, 'cron', day=1, month='1-12')
Example #38
0
from apscheduler.schedulers.background import BackgroundScheduler
import atexit
from flask import Flask
from flask_bootstrap import Bootstrap
from SpeedTester import SpeedTester

app = Flask(__name__)
Bootstrap(app)

speed_tester = SpeedTester()
scheduler = BackgroundScheduler()
scheduler.add_job(func=speed_tester.run_speed_test,
                  trigger="interval",
                  minutes=15)
scheduler.start()

# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())
Example #39
0
# -*- coding: utf-8 -*-
import logging
import time

from apscheduler.schedulers.background import BackgroundScheduler

from zvt import init_log
from zvt.domain import *
from zvt.informer.informer import EmailInformer

logger = logging.getLogger(__name__)

sched = BackgroundScheduler()


@sched.scheduled_job('cron', hour=15, minute=30)
def run():
    while True:
        email_action = EmailInformer()

        try:
            Block.record_data(provider='sina')
            BlockStock.record_data(provider='sina')
            BlockMoneyFlow.record_data(provider='sina')

            email_action.send_message("*****@*****.**", 'sina runner finished',
                                      '')
            break
        except Exception as e:
            msg = f'sina runner error:{e}'
            logger.exception(msg)
Example #40
0
import os
import logging
from flask import Flask
from flask_cors import CORS
from back import route
from back.main import main
from apscheduler.schedulers.background import BackgroundScheduler

logging.basicConfig(filename='log_app.log',
                    format='%(asctime)s %(message)s',
                    level=logging.DEBUG)

sched = BackgroundScheduler(daemon=True)
sched.add_job(main, 'interval', seconds=59)
sched.start()

template_dir = os.path.abspath('./front/templates')
app = Flask(__name__, template_folder=template_dir)
CORS(app)
logging.debug("the db is going to initialize")
main()
logging.debug("the db is initialize")

app.add_url_rule('/', view_func=route.entry_point)
app.add_url_rule('/hello_world', view_func=route.hello_world)
app.add_url_rule('/<ville>/stations/<station>', view_func=route.nexttram)
app.add_url_rule('/<ville>/stations/', view_func=route.citystations)
app.add_url_rule('/<ville>/ligne/<ligne>', view_func=route.line_station)
app.add_url_rule('/<ville>/<station>/<ligne>/<direction>',
                 view_func=route.next_to_direction)
app.add_url_rule('/<ville>/stationslike/<station>',
Example #41
0
    def run(self):
        """TODO: Docstring for run.
        :returns: TODO

        """
        SCHED = BackgroundScheduler()
        ACCOUNT_TRIGGER = IntervalTrigger(seconds=3)
        PID_TRIGGER = IntervalTrigger(seconds=5)
        EIP_TRIGGER = IntervalTrigger(seconds=10)

        FUNC_IAM = iam.Iam('/etc/.hwzabbix/config.ini')
        FUNC_ACCOUNT = FUNC_IAM.account_to_redis
        FUNC_PID = FUNC_IAM.pid_to_redis

        FUNC_VPC = vpc.Vpc()
        FUNC_EIP = FUNC_VPC.eip_to_redis

        SCHED.add_job(func=FUNC_ACCOUNT, trigger=ACCOUNT_TRIGGER)
        SCHED.add_job(func=FUNC_PID,
                      args=('cn-north-1', ),
                      trigger=PID_TRIGGER)
        SCHED.add_job(func=FUNC_PID, args=('cn-east-2', ), trigger=PID_TRIGGER)
        SCHED.add_job(func=FUNC_PID,
                      args=('cn-south-1', ),
                      trigger=PID_TRIGGER)
        SCHED.add_job(func=FUNC_EIP,
                      args=('cn-north-1', ),
                      trigger=EIP_TRIGGER)
        SCHED.add_job(func=FUNC_EIP, args=('cn-east-2', ), trigger=EIP_TRIGGER)
        SCHED.add_job(func=FUNC_EIP,
                      args=('cn-south-1', ),
                      trigger=EIP_TRIGGER)

        SCHED.start()
        while True:
            time.sleep(10)
            log.logging.info('service is active.')
Example #42
0
from module import Module
from task import ActiveTask
from apis import api_lib
from apscheduler.schedulers.background import BackgroundScheduler
import string
import random
import dateutil.parser as parser
from functions import jsonentities
from datetime import datetime
from pyowm.utils import timeutils
import json
import log

scheduler = BackgroundScheduler()
scheduler.start()


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    return ''.join(random.choice(chars) for _ in range(size))


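# Callback fired when a scheduled timer expires: print the timer text, remove
# its row from the 'timers' table and show the jobs that remain.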
def timer_function(id, text):
    print(text)
    scheduler.print_jobs()
    api_lib['sql'].delete_item('timers', 'key', id)
    print('Scheduled Timers Remaining..')
    scheduler.print_jobs()


class Timers(ActiveTask):
    def __init__(self):
Example #43
              template_mode='bootstrap3',
              index_view=MyHomeView())

# config celery
celery = celery_module.make_celery(app)

# config logging
logging.basicConfig(level=logging.DEBUG)

# config jwt
jwt = JWTManager(app)

# config firebase
cred = credentials.Certificate(app.config['FIREBASE'])
default_app = firebase_admin.initialize_app(cred)

# config schedule
scheduler = BackgroundScheduler({'apscheduler.timezone': 'Asia/Ho_Chi_Minh'})

# import routes
from project.routes.v1 import *

# schedule init
from project.schedule import *

# start schedule
scheduler.start()

# Shut down the scheduler when exiting the app
atexit.register(lambda: scheduler.shutdown())

Example #44
def sendMsgToskYpe(empName, skypeId):
    try:
        sk = Skype("*****@*****.**",
                   "Terafastnetworkspvtltd")  # connect to Skype
        sk.user
        cont = sk.contacts
        sk.chats
        ch = sk.contacts[skypeId].chat
        ch.sendMsg(
            "Hi %s (cake) On your special day, I wish you good luck. I hope this wonderful day will fill up your heart with joy and blessings. Have a fantastic birthday, celebrate the happiness on every day of your life. Happy Birthday!! (sparkler) (flower) "
            % empName)
        ch.getMsgs()
    except Exception as er:
        print("[WARNING] SEND MESSAGE TO SKYPE EXCEPTION ERROR :: %s " % er)


scheduler = BackgroundScheduler()
scheduler.start()
job = scheduler.add_job(readDataFromExcelSheet,
                        'cron',
                        hour='12',
                        minute='25',
                        second='00')
print "Current job starting....."
while True:
    time.sleep(45)

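# Note: the call below is never reached because the loop above does not exit.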
readDataFromExcelSheet()
Example #45
from flask import Flask

from apscheduler.schedulers.background import BackgroundScheduler

from cryptomon.jobs import *

app = Flask(__name__)
app.config.from_mapping(
    SECRET_KEY='dev'
)

app.config.from_pyfile('config.py', silent=True)
app.config.from_envvar('CRYPTOMON_SETTINGS', silent=True)

cron = BackgroundScheduler()
job_kwargs = {
    'ether_scan_api_root': app.config['ETHER_SCAN_API_ROOT'],
    'ether_scan_api_key':  app.config['ETHER_SCAN_API_KEY'],
    'check_threshold_min': app.config['CHECK_THRESHOLD_MIN'],
    'check_threshold_max': app.config['CHECK_THRESHOLD_MAX'],
    'mail_from':           app.config['MAIL_FROM'],
    'mail_to':             app.config['MAIL_TO'],
    'mail_server':         app.config['MAIL_SERVER'],
    'mail_user':           app.config['MAIL_USER'],
    'mail_pass':           app.config['MAIL_PASS']
}

cron.add_job(last_price_alarm,
             'interval',
             kwargs=job_kwargs,
             minutes=app.config['CHECK_LAST_PRICE_INTERVAL'])
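# last_price_alarm (imported from cryptomon.jobs) presumably compares the
# latest price from the Etherscan API against the configured min/max
# thresholds and mails an alert via the SMTP settings above.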
Example #46
        # Establish session for retrieval of all balances
        with requests.Session() as session:
            balances = []
            # Determine which tokens to load
            targetTokens = tokens
            # If a tokens array is specified in the request, use it
            if 'tokens' in request:
                targetTokens = []
                for token in request.get('tokens'):
                    contract, symbol = token.split(':')
                    targetTokens.append((symbol, contract))
            # Launch async event loop to gather balances
            loop = asyncio.get_event_loop()
            balances = loop.run_until_complete(
                get_balances(request.get('account'), targetTokens))
            # Serve the response
            resp.body = json.dumps(balances)


# Load the initial tokens on startup
get_tokens()

# Schedule tokens to be refreshed from smart contract every minute
scheduler = BackgroundScheduler()
scheduler.add_job(get_tokens, 'interval', minutes=1, id='get_tokens')
scheduler.start()

# Launch falcon API
app = falcon.API()
app.add_route('/v1/chain/get_currency_balances', GetCurrencyBalances())
Example #47
class RateFetcher(object):
    """ Fetches currency exchange rates from a URL (that is defined in a
        configuration file). The new rates are stored in a file (JSON),
        as well as kept in memory.
    """
    def __init__(self):
        self.settings = None
        self.scheduler = BackgroundScheduler()
        self.full_url = ""
        self.rates_file_name = ""
        self.rates = None
        self.update_interval_sec = 86400  # don't lower it

    def apply_settings(self, settingsfile):
        with open(settingsfile, 'r') as f:
            self.settings = json.loads(f.read())
            subsettings = self.settings['fetch_rates']
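            # Note: max() keeps the effective interval at or above the one-day
            # default, per the "don't lower it" remark in __init__.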
            if 'refresh_seconds' in subsettings:
                self.update_interval_sec = max(subsettings['refresh_seconds'],
                                               self.update_interval_sec)
            self.full_url = (subsettings['base_url'] + "?app_id=" +
                             subsettings['app_id'])
        self.rates = self.load_rates()
        if self.settings['test_mode']:
            pass
        else:
            self.start_scheduler()

    def get_rates(self):
        return self.rates

    def load_rates(self):
        if self.settings['test_mode']:  # load from fallback file
            rates_file_name = str(self.settings['common_path'] +
                                  self.settings['fallback_currency_file'])
            rates_file = open(rates_file_name, 'r')
            data = json.loads(rates_file.read())
            return data['rates']
        else:
            rates_file_name = str(self.settings['common_path'] +
                                  self.settings['refreshed_currency_file'])
            self.rates_file_name = rates_file_name
            rates_file = open(rates_file_name, 'w')  # to write!
            newrates = self.download_and_save_rates(rates_file)
            return newrates

    def start_scheduler(self):
        self.scheduler.add_job(self.update_rates,
                               'interval',
                               seconds=self.update_interval_sec)
        self.scheduler.start()

    def update_rates(self):
        rates_file = open(self.rates_file_name, 'w')
        new_rates = self.download_and_save_rates(rates_file)
        self.rates = new_rates

    def download_and_save_rates(self, outfile):
        r = requests.get(self.full_url)
        if 200 != r.status_code:
            print("There was a problem with downloading the conversion rates")
            outfile.close()
            return None
        else:
            data = r.json()
            json.dump(data, outfile)
            outfile.close()
            return data['rates']

    def __del__(self):
        self.scheduler.shutdown()
Example #48
login = LoginManager(app)
login.login_view = 'login'

mail = Mail(app)

bootstrap = Bootstrap(app)
moment = Moment(app)

log_path = os.path.join(app.config['WRITE_PATH'], 'logs')

if not os.path.exists(log_path):
    os.mkdir(log_path)

file_handler = RotatingFileHandler(log_path + '/olirowanxyz_app.log',
                                   maxBytes=10000000,
                                   backupCount=10)
file_handler.setFormatter(
    logging.Formatter(
        '%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'))
file_handler.setLevel(logging.DEBUG)
app.logger.addHandler(file_handler)

app.logger.setLevel(logging.DEBUG)
app.logger.info('STARTED olirowanxyz')

task_schedule = BackgroundScheduler(daemon=True)
task_schedule.start()

from app import routes, models, errors
Example #49
class Filament(Process):
    """Filament initialization and execution engine.

    Filaments are lightweight Python modules which run
    on top of Fibratus. They are often used to enrich/extend the
    functionality of Fibratus by performing any type of logic
    (aggregations, groupings, filters, counters, etc) on the
    kernel event stream.

    Each filament runs in its own execution context, i.e. in
    its own copy of the Python interpreter process.

    """
    def __init__(self):
        """Builds a new instance of the filament.

        Attributes:
        ----------

        filament_module: module
            module which contains the filament logic

        keventq: Queue
            queue where the main process pushes the kernel events
        """
        Process.__init__(self)
        self._filament_module = None
        self._keventq = None
        self._filters = []
        self._cols = []
        self._tabular = None
        self._limit = 10
        self._interval = 1
        self._sort_by = None
        self._sort_desc = True
        self._log_path = None
        self._logger = None
        self.scheduler = BackgroundScheduler()

    def load_filament(self, name):
        """Loads the filament module.

        Finds and loads the python module which
        holds the filament logic. It also looks up
        some essential filament methods and raises an error
        if they can't be found.

        Parameters
        ----------
        name: str
            name of the filament to load

        """
        Filament._assert_root_dir()
        filament_path = self._find_filament_path(name)
        if filament_path:
            loader = SourceFileLoader(name, filament_path)
            self._filament_module = loader.load_module()
            sys.path.append(FILAMENTS_DIR)
            doc = inspect.getdoc(self._filament_module)
            if not doc:
                raise FilamentError('Please provide a short '
                                    'description for the filament')

            on_next_kevent = self._find_filament_func('on_next_kevent')
            if on_next_kevent:
                if self._num_args(on_next_kevent) != 1:
                    raise FilamentError('on_next_kevent method must accept '
                                        'exactly one argument')
                self._initialize_funcs()
            else:
                raise FilamentError('Missing required on_next_kevent '
                                    'method on filament')
        else:
            raise FilamentError('%s filament not found' % name)

    def _initialize_funcs(self):
        """Setup the filament modules functions.

        Functions
        ---------

        set_filter: func
            accepts the comma-separated list of kernel events
            to which the filter should be applied
        set_interval: func
            establishes the fixed repeating interval in seconds
        columns: func
            configures the column set for the table
        add_row: func
            adds a new row to the table
        sort_by: func
            sorts the table by specific column
        """
        def set_filter(*args):
            self._filters = args

        self._filament_module.set_filter = set_filter

        def set_interval(interval):
            if type(interval) is not int:
                raise FilamentError('Interval must be an integer value')
            self._interval = interval

        self._filament_module.set_interval = set_interval

        def columns(cols):
            if not isinstance(cols, list):
                raise FilamentError('Columns must be a list, '
                                    '%s found' % type(cols))
            self._cols = cols
            self._tabular = Tabular(self._cols)
            self._tabular.padding_width = 10
            self._tabular.junction_char = '|'

        def add_row(row):
            if not isinstance(row, list):
                raise FilamentError(
                    'Expected list type for the row, found %s' % type(row))
            self._tabular.add_row(row)

        def sort_by(col, sort_desc=True):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if col not in self._cols:
                raise FilamentError('%s column does not exist' % col)
            self._sort_by = col
            self._sort_desc = sort_desc

        def limit(l):
            if len(self._cols) == 0:
                raise FilamentError('Expected at least 1 column but 0 found')
            if type(l) is not int:
                raise FilamentError('Limit must be an integer value')
            self._limit = l

        def title(text):
            self._tabular.title = text

        self._filament_module.columns = columns
        self._filament_module.title = title
        self._filament_module.sort_by = sort_by
        self._filament_module.limit = limit
        self._filament_module.add_row = add_row
        self._filament_module.render_tabular = self.render_tabular

        # call filament initialization method.
        # This is the right place to perform any
        # initialization logic
        on_init = self._find_filament_func('on_init')
        if on_init and self._zero_args(on_init):
            self._filament_module.on_init()

    def setup_adapters(self, output_adapters):
        """Creates the filament adapters accessors.

        Parameters
        ----------

        output_adapters: dict
            output adapters
        """
        for name, adapter in output_adapters.items():
            adapter_metavariable = AdapterMetaVariable(adapter)
            setattr(self._filament_module, name, adapter_metavariable)

    def run(self):
        """Filament main routine.

        Sets up the repeating interval function and polls for
        kernel events from the queue.
        """
        on_interval = self._find_filament_func('on_interval')
        if on_interval:
            self.scheduler.add_executor(ThreadPoolExecutor(max_workers=4))
            self.scheduler.start()
            self.scheduler.add_job(self._filament_module.on_interval,
                                   'interval',
                                   seconds=self._interval,
                                   max_instances=4,
                                   misfire_grace_time=60)
        while self._poll():
            try:
                kevent = self._keventq.get()
                self._filament_module.on_next_kevent(ddict(kevent))
            except Exception:
                print(traceback.format_exc())
                self._logger.error('Unexpected filament error %s' %
                                   traceback.format_exc())

    def render_tabular(self):
        """Renders the table to the console.
        """
        if len(self._cols) > 0:
            tabular = self._tabular.get_string(start=1, end=self._limit)
            if self._sort_by:
                tabular = self._tabular.get_string(start=1,
                                                   end=self._limit,
                                                   sortby=self._sort_by,
                                                   reversesort=self._sort_desc)
            self._tabular.clear_rows()
            if not _ansi_term.term_ready:
                try:
                    _ansi_term.setup_console()
                except TermInitializationError:
                    panic(
                        'fibratus run: ERROR - console initialization failed')
            _ansi_term.cls()
            _ansi_term.write_output(tabular)

    def close(self):
        on_stop = self._find_filament_func('on_stop')
        if on_stop and self._zero_args(on_stop):
            self._filament_module.on_stop()
        if self.scheduler.running:
            self.scheduler.shutdown()
        _ansi_term.restore_console()
        if self.is_alive():
            self.terminate()

    def _poll(self):
        return True

    @classmethod
    def exists(cls, filament):
        Filament._assert_root_dir()
        return os.path.exists(os.path.join(FILAMENTS_DIR, '%s.py' % filament))

    @classmethod
    def list_filaments(cls):
        Filament._assert_root_dir()
        filaments = {}
        paths = [
            os.path.join(FILAMENTS_DIR, path)
            for path in os.listdir(FILAMENTS_DIR) if path.endswith('.py')
        ]
        for path in paths:
            filament_name = os.path.basename(path)[:-3]
            loader = SourceFileLoader(filament_name, path)
            filament = loader.load_module()
            filaments[filament_name] = inspect.getdoc(filament)
        return filaments

    @classmethod
    def _assert_root_dir(cls):
        if not os.path.exists(FILAMENTS_DIR):
            panic('fibratus run: ERROR - %s path does not exist.' %
                  FILAMENTS_DIR)

    @property
    def keventq(self):
        return self._keventq

    @keventq.setter
    def keventq(self, keventq):
        self._keventq = keventq

    @property
    def filters(self):
        return self._filters

    @property
    def logger(self):
        return self._logger

    @logger.setter
    def logger(self, log_path):
        self._log_path = log_path
        FileHandler(self._log_path).push_application()
        self._logger = Logger(Filament.__name__)

    @property
    def filament_module(self):
        return self._filament_module

    def _find_filament_func(self, func_name):
        """Finds the function in the filament module.

        Parameters
        ----------

        func_name: str
            the name of the function
        """
        functions = inspect.getmembers(self._filament_module,
                                       predicate=inspect.isfunction)
        return next(
            iter([func for name, func in functions if name == func_name]),
            None)

    def _find_filament_path(self, filament_name):
        """Resolves the filament full path from the name

        Parameters
        ----------

        filament_name: str
            the name of the filament whose path is about to be resolved
        """
        return next(
            iter([
                os.path.join(FILAMENTS_DIR, filament)
                for filament in os.listdir(FILAMENTS_DIR)
                if filament.endswith('.py') and filament_name == filament[:-3]
            ]), None)

    def _num_args(self, func):
        return len(inspect.getargspec(func).args)

    def _zero_args(self, func):
        return self._num_args(func) == 0
Example #50
class SchedulerManager(object, metaclass=Singleton):

    def __init__(self):
        print("Initialisation of the scheduler manager")

        self.scheduler = BackgroundScheduler()
        # create the async loop in the main thread
        self.loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self.loop)  # bind event loop to current thread
        asyncio.get_child_watcher().attach_loop(self.loop)
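        # Scheduled jobs run in APScheduler worker threads; they receive this
        # loop explicitly (see play_web_radio below) rather than creating one.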

    def start(self):
        print("Starting scheduler manager")
        if settings.DEBUG:
            # Hook into the apscheduler logger
            logging.basicConfig()
            logging.getLogger('apscheduler').setLevel(logging.DEBUG)
        self.scheduler.start()

        self.add_job_for_each_alarm_clock()

    def play_web_radio(self, url, auto_stop_minutes):
        from restapi.models.backup_file import BackupFile
        backup_files = BackupFile.objects.all()
        backup_file_path = None
        if backup_files is not None and len(backup_files) > 0:
            backup_file_path = backup_files[0].backup_file.url
            print("Path to the backup MP3: {}".format(backup_file_path))
        print("play_web_radio triggered by scheduler")
        PlayerManager().async_start(self.loop, url, auto_stop_minutes, backup_file_path)
        print("play_web_radio thread terminated")

    def add_job_for_each_alarm_clock(self):
        """
        Load all enabled alarm clocks from the DB and create a scheduler job for each.
        """
        from restapi.models import AlarmClock
        all_alarm_clock = AlarmClock.objects.all()

        for alarm in all_alarm_clock:
            if alarm.enabled:
                self.add_new_job(job_id=alarm.id,
                                 day_of_week_string=alarm.get_day_of_week(),
                                 hour=alarm.hour,
                                 minute=alarm.minute,
                                 url=alarm.webradio.url,
                                 auto_stop_minutes=alarm.auto_stop_minutes)

    def add_new_job(self, job_id, day_of_week_string, hour, minute, url, auto_stop_minutes) -> bool:
        print("add a new job with id {}, {}, {}, {}, {}, {}".
              format(job_id, day_of_week_string, hour, minute, url, auto_stop_minutes))
        my_cron = CronTrigger(hour=hour,
                              minute=minute,
                              day_of_week=day_of_week_string)
        self.scheduler.add_job(func=self.play_web_radio,
                               trigger=my_cron,
                               id=str(job_id),
                               args=[url, auto_stop_minutes])
        return True

    def delete_job_by_id(self, job_id):
        print("removing job id {} form the scheduler".format(job_id))
        try:
            self.scheduler.remove_job(str(job_id))
        except JobLookupError:
            print("Job id {} was already deleted".format(job_id))
Example #51
#     sched.start()
#     while True:
#         time.sleep(1)
#
#
# if __name__ == '__main__':
#     main()
import datetime as dt
import os
import time
from datetime import datetime

from apscheduler.executors.pool import ThreadPoolExecutor
from apscheduler.schedulers.background import BackgroundScheduler

scheduler = BackgroundScheduler(
    executors={"processpool": ThreadPoolExecutor(10)})


def tick():
    next_run_at = datetime.now() + dt.timedelta(seconds=3.0)
    print('Tick! The time is: %s, run next at:%s' %
          (datetime.now(), next_run_at))
    scheduler.add_job(tick,
                      next_run_time=next_run_at,
                      executor="processpool",
                      id="hello kitty")
    print("pid:%r" % os.getpid())


if __name__ == '__main__':
    next_run_at = datetime.now() + dt.timedelta(seconds=3.0)
Example #52
    entries_newDF['TournamentTotal'] = entries_newDF['TournamentTotal'].astype(
        'str')

    entries_newDF.to_csv('entries2.csv')

    for i in range(len(entries_newDF)):
        entry_id = entries_newDF.loc[i]['id']
        fields = {
            'R3': entries_newDF.loc[i]['R3'],
            'R3Total': entries_newDF.loc[i]['R3Total'],
            'TournamentTotal': entries_newDF.loc[i]['TournamentTotal']
        }
        airtableEntries.update(entry_id, fields)
        print(i)
        print('Tick! The time is: %s' % datetime.now())


if __name__ == '__main__':
    scheduler = BackgroundScheduler()
    scheduler.add_job(tick, 'interval', minutes=5)
    scheduler.start()
    print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        # Not strictly necessary if daemonic mode is enabled but should be done if possible
        scheduler.shutdown()
Example #53
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dotenv import load_dotenv
from apscheduler.schedulers.background import BackgroundScheduler
from rival_regions_wrapper.rival_regions_wrapper import LocalAuthentication, RemoteAuthentication

load_dotenv()

# database
ENGINE = create_engine(os.environ["DATABASE_URI"])
SESSION = sessionmaker(bind=ENGINE)

# scheduler
SCHEDULER = BackgroundScheduler(
    daemon=True,
    job_defaults={'misfire_grace_time': 300},
)
SCHEDULER.start()

# get logger
LOGGER = logging.getLogger(__name__)
LOGGER.setLevel(logging.INFO)
SCHEDULER_LOGGER = logging.getLogger('apscheduler')
SCHEDULER_LOGGER.setLevel(logging.DEBUG)

# create file handler
FILE_HANDLER = logging.FileHandler('output.log')
FILE_HANDLER.setLevel(logging.DEBUG)

# create console handler
STREAM_HANDLER = logging.StreamHandler()
Example #54
def mainjob(self, name):

    self.filename = '/tmp/dc/django_celery_%s.log' % name

    # prepare
    out_output(str(self.request.retries), self.filename)
    self.default_retry_delay = 5
    self.max_retries = 2
    self.count = 0

    # reply_to = self.request.reply_to
    # out_output(reply_to, self.filename)
    # out_output(dir(reply_to), self.filename)
    # out_output(reply_to.__doc__, self.filename)
    # out_output(type(reply_to), self.filename)

    jlogger = logging.getLogger('apscheduler.scheduler')
    jlogger.setLevel(logging.ERROR)
    scheduler = Scheduler(logger=jlogger)
    self.update_state(state='RUNNING')

    # shutdown 
    def shutdown(a, b):
        output('shutting')
        scheduler.shutdown()
        output('down - %s' % time())

    def terminate(a, b):
        output('terminating')
        scheduler.shutdown()
        output('down - %s' % time())

    def retryer(a, b):
        out_output('before retry', self.filename)
        scheduler.shutdown()
        raise self.retry()
        out_output('after retry', self.filename)


    signal(SIGUSR1, shutdown)
    signal(SIGTERM, terminate)
    signal(SIGALRM, retryer)

    # listen
    def listener(event):
        try:
            raise event.exception
        except Exception as e:
            out_output(e, self.filename)
            [job.pause() for job in scheduler.get_jobs()]
            alarm(1)

    scheduler.add_listener(listener, EVENT_JOB_ERROR)

    # add job and start
    def output(msg):
        self.count += 1
        #if self.count == 9: raise StandardError('haha')
        with open(self.filename, 'a') as file:
            file.write('(%s - %s) %s\n' % (time(), name, msg))

    scheduler.add_job(output, 'interval', args=('--a',), seconds=1)
    sleep(0.5)
    scheduler.add_job(output, 'interval', args=('------b',), seconds=1)
    #scheduler.add_job(output, 'interval', args=('--b',), seconds=3)
    scheduler.start()

    # wait for retry
    pause()
Example #55
def checkForPolling():

	last_update = FileUtils.getObject('last_update.txt')
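	# Poll every tracked tweet; when more than 5 time units (presumably
	# minutes) have passed since its last update, refresh its statistics and
	# persist the new timestamp.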
	for tweet_id,last_updated in last_update.iteritems():
		current_time = TimeUtils.getCurrentTime()
		time_difference = TimeUtils.getTimeDifference(last_updated,current_time)
		
		if time_difference > 5:
			
			last_update[tweet_id] = TimeUtils.getCurrentTime()
			updateTwitterStatistics(tweet_id)
			FileUtils.persist('last_update.txt',last_update)
			


scheduler = BackgroundScheduler()
scheduler.add_job(checkForPolling,'interval',seconds=60)


try:	
    scheduler.start()
except (KeyboardInterrupt, SystemExit):
    pass

try:
	while True:
		time.sleep(1)
except (KeyboardInterrupt, SystemExit):
	pass

Example #56
    def run(self):
        logging.warning("Starting streamming")

        # Initialize a timer
        sched = BackgroundScheduler(daemon=True)
        # check whether the user has requested to stop the server, every TIME_TO_CHECK seconds
        sched.add_job(self.checkStatus,
                      'interval',
                      seconds=int(os.environ['TIME_TO_CHECK']))
        sched.start()

        # read Data for specific user and camera
        if self.checkData():
            username = self.settings[0]['username']
            password = self.settings[0]['password']
            ip = self.settings[0]["ip"]
            port = self.settings[0]["port"]
            URL = self.settings[0]["url_path"]

        else:
            sched.shutdown()
            logging.warning("User doesn't exists or Camera is not configured")
            return "Server stopped"
        try:
            video_capture = cv2.VideoCapture(
                f"rtsp://{username}:{password}@{ip}:{port}/{URL}")
            if not video_capture.isOpened():
                self.suspectData.changeServerStatus(0, "No stream available")
                logging.warning("No stream available, check RTSP settings")
                video_capture.release()
                sched.shutdown()
                return "Server stopped"
            else:
                self.suspectData.changeServerStatus(1, "Streaming")

            # Initialize detection variables
            face_locations = []
            face_encodings = []
            face_names = []
            process_this_frame = True
            log = {}
            # Face detection process; exits when self.stop_running is True
            logging.warning("Streaming")

            while not self.stop_running:

                try:

                    # Grab a single frame of video
                    ret, frame = video_capture.read()
                    if not ret:
                        logging.warning("No Video capture")
                        st = time.time()
                        video_capture = cv2.VideoCapture(
                            f"rtsp://{username}:{password}@{ip}:{port}/{URL}")
                        ret, frame = video_capture.read()
                        continue

                    #if ret == True:

                    # Resize frame of video to 1/4 size for faster face recognition processing
                    small_frame = cv2.resize(frame, (0, 0), fx=0.25, fy=0.25)

                    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
                    rgb_small_frame = small_frame[:, :, ::-1]

                    # Only process every other frame of video to save time
                    if process_this_frame:
                        # Find all the faces and face encodings in the current frame of video
                        face_locations = face_recognition.face_locations(
                            rgb_small_frame)
                        face_encodings = face_recognition.face_encodings(
                            rgb_small_frame, face_locations)

                        face_names = []

                        for face_encoding in face_encodings:
                            # See if the face is a match for the known face(s)
                            matches = face_recognition.compare_faces(
                                self.encodings, face_encoding)
                            name = "Unknown"

                            face_distances = face_recognition.face_distance(
                                self.encodings, face_encoding)
                            best_match_index = np.argmin(face_distances)
                            if matches[best_match_index]:
                                name = self.names[best_match_index]
                                now = datetime.now()

                                if log.get(name) is not None:
                                    if abs(now -
                                           log[name]) < timedelta(minutes=10):
                                        log[name] = now
                                        logging.warning(
                                            f'{name} seen no more than 10 min ago'
                                        )
                                    else:
                                        face_names.append(name)
                                        logging.warning(
                                            f'{name} Return to site')
                                        log[name] = now
                                        date = now.strftime("%m/%d/%y, %H:%M")
                                        self.snapShot(face_locations,
                                                      face_names, frame, name)
                                        self.reportFinding(name, date)

                                else:
                                    #logger(name, self.settings["phone"])
                                    face_names.append(name)
                                    logging.warning(f'{name} seen')
                                    log[name] = now
                                    print(name)
                                    date = now.strftime("%m/%d/%y, %H:%M")
                                    self.snapShot(face_locations, face_names,
                                                  frame, name)
                                    self.reportFinding(name, date)

                            face_names.append(name)

                            # if cv2.waitKey(1) & 0xFF == ord('q'):
                            #     break
                            if self.stop_running:
                                break

                    process_this_frame = not process_this_frame

                    if (self.showVideoVariable):
                        # Display the results
                        for (top, right, bottom,
                             left), name in zip(face_locations, face_names):
                            # Scale back up face locations since the frame we detected in was scaled to 1/4 size
                            top *= 4
                            right *= 4
                            bottom *= 4
                            left *= 4

                            # Draw a box around the face
                            cv2.rectangle(frame, (left, top), (right, bottom),
                                          (0, 0, 255), 2)

                            # Draw a label with a name below the face
                            cv2.rectangle(frame, (left, bottom - 35),
                                          (right, bottom), (0, 0, 255),
                                          cv2.FILLED)
                            font = cv2.FONT_HERSHEY_DUPLEX
                            cv2.putText(frame, name, (left + 6, bottom - 6),
                                        font, 1.0, (255, 255, 255), 1)

                        # Display the resulting image
                        cv2.imshow('Video', frame)

                        # Hit 'q' on the keyboard to quit!
                        if cv2.waitKey(1) & 0xFF == ord('q'):
                            break
                        # if self.stop_running:
                        #         break

                except cv2.error as e:
                    self.suspectData.changeServerStatus(
                        0, "No stream available")
                    logging.warning(
                        "CV2 Error - Streaming failed, check RTSP settings", e)
                    #video_capture.release()
                    sched.shutdown()
                    return "Server stopped"

            # Release handle to the webcam
            video_capture.release()
            cv2.destroyAllWindows()
            sched.shutdown()
            logging.warning("Streamming terminated")
            self.suspectData.changeServerStatus(0, "streamming terminated")
            return "Server stopped"
        except cv2.error as e:
            self.suspectData.changeServerStatus(0, "No stream available")
            logging.warning(
                "CV2 Error - Streaming failed, check RTSP settings", e)
            #video_capture.release()
            sched.shutdown()
            return "Server stopped"
Example #57
    def __init__(self):
        self.started = False

        # process id
        self.PID = os.getpid()

        # generate notifiers dict
        self.notifiersDict = notifiersDict()

        # generate metadata providers dict
        self.metadataProvidersDict = metadataProvidersDict()

        # generate providers dict
        self.providersDict = providersDict()

        # init notification queue
        self.srNotifications = Notifications()

        # init logger
        self.srLogger = srLogger()

        # init config
        self.srConfig = srConfig()

        # init databases
        self.mainDB = MainDB()
        self.cacheDB = CacheDB()
        self.failedDB = FailedDB()

        # init scheduler service
        self.srScheduler = BackgroundScheduler()

        # init web server
        self.srWebServer = srWebServer()

        # init web client session
        self.srWebSession = srSession()

        # google api
        self.googleAuth = googleAuth()

        # name cache
        self.NAMECACHE = srNameCache()

        # queues
        self.SHOWQUEUE = srShowQueue()
        self.SEARCHQUEUE = srSearchQueue()

        # updaters
        self.VERSIONUPDATER = srVersionUpdater()
        self.SHOWUPDATER = srShowUpdater()

        # searchers
        self.DAILYSEARCHER = srDailySearcher()
        self.BACKLOGSEARCHER = srBacklogSearcher()
        self.PROPERSEARCHER = srProperSearcher()
        self.TRAKTSEARCHER = srTraktSearcher()
        self.SUBTITLESEARCHER = srSubtitleSearcher()

        # auto postprocessor
        self.AUTOPOSTPROCESSOR = srPostProcessor()

        # sickrage version
        self.NEWEST_VERSION = None
        self.NEWEST_VERSION_STRING = None

        # anidb connection
        self.ADBA_CONNECTION = None

        # show list
        self.SHOWLIST = []

        self.USER_AGENT = 'SiCKRAGE.CE.1/({};{};{})'.format(
            platform.system(), platform.release(), str(uuid.uuid1()))

        self.SYS_ENCODING = get_sys_encoding()

        # patch modules with encoding kludge
        patch_modules()
Example #58
from django_apscheduler.jobstores import DjangoJobStore, register_events, register_job
from proxy.models import *
from main.models import *
from lazy_balancer.views import is_auth
from nginx.views import reload_config
from .models import system_settings, sync_status
from datetime import datetime
from nginx.views import *
import logging
import uuid
import json
import time
import hashlib

logger = logging.getLogger('django')
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), 'default')
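# Jobs registered on this scheduler are persisted in Django's database through
# django_apscheduler's DjangoJobStore, so they survive process restarts.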

@login_required(login_url="/login/")
def view(request):
    user = {
        'name':request.user,
        'date':time.time()
    }

    _system_settings = system_settings.objects.all()
    _sync_status = sync_status.objects.all()
    if len(_system_settings) == 0:
        system_settings.objects.create(config_sync_type=0)
        _system_settings = system_settings.objects.all()
    
Example #59
    db.drop()  #Drop data before uploading again
    records_ = df.to_dict(orient="records")
    result = db.insert_many(records_)
    x = db.count_documents({})
    print("number of records",
          x)  #Print the number of records to see how many are added
    headData = db.find()
    row_list = []
    for i in headData:
        row_list.append(i)


#Using apscheduler to refresh data every 24 hours

sched = BackgroundScheduler(daemon=True)
sched.add_job(get_data, 'interval', hours=24)
sched.start()


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/casesbydate')
def casesbydate():
    return render_template('casesbydate.html')


@app.route('/activecasesbystates')
Example #60
        trigger_method = "manual"
    else:
        trigger_method = "scheduled"

    logger.info(f'APscheduler {trigger_method} phonescrape update triggered')

    from api.scheduler.update_from_phonescraper import rq_scrape_phones

    # call phone scrape script to do the actual work
    rq_scrape_phones()

    scheduler.resume()  # resume scheduler


# scheduler init
scheduler = BackgroundScheduler()

# get settings values from database and convert values to dict
settings = settings_management.get_all_settings()
settings_dict = {}
for setting in settings:
    settings_dict[setting.name] = setting.value

# schedule CUCM API phone sync job
scheduler_phone_sync_job = scheduler.add_job(
    scheduler_phone_sync,
    'cron',
    hour='*',
    minute=settings_dict['cucm_update_minute'])

# schedule Phonescrape API sync job