Example #1
def main():

	config_file = "config.xml"
	xml = ET.parse(config_file)
	HOST_NAME = xml.find('host_name').text
	DB_NAME = xml.find('db_name').text
	DISPLAY_ADDR = xml.find("display_addr").text
	DISPLAY_PORT = int(xml.find('display_port').text)
	ENDPOINT = xml.find("endpoint").text
	SECONDS = int(xml.find("seconds").text)
	USERNAME = xml.find("username").text
	PASSWORD = xml.find("password").text

	HOST = 'http://' + USERNAME + ':' + PASSWORD + '@' + HOST_NAME

	print "starting ..."
	
	#print HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT
	#readData(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT)
	display = LedDisplay(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT, ENDPOINT)
	# Start the scheduler
	sched = Scheduler()

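	# APScheduler 2.x legacy API: run display.query every SECONDS seconds in the background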
	sched.add_interval_job(display.query, seconds=SECONDS )
	sched.start()

	try: 
		while True:
			time.sleep(0.1)

	except KeyboardInterrupt:
		print "terminating"
		sched.shutdown()
Example #2
class Timer:
    def __init__(self, fieldCanvas, field):
        #self.sched = BackgroundScheduler()
        self.sched = Scheduler()
        self.sched.start()
        self.timerValue = 0
        self.fieldCanvas = fieldCanvas
        self.field = field


    def updateTimerValue(self):
        self.timerValue += 1
        minutes = "0" + str(int(self.timerValue/60)) if int(self.timerValue/60) < 9 else str(int(self.timerValue/60))
        secondes = "0" + str(self.timerValue%60) if self.timerValue%60 < 9 else str(self.timerValue%60) 
        timerString = minutes + ":" + secondes
        self.fieldCanvas.itemconfigure(self.field ,text=timerString)

    def getTimer(self):
        return self.timerValue

    def startTimer(self):
        #job = self.sched.add_job(self.updateTimerValue, 'interval', seconds = 1)
        job = self.sched.add_interval_job(self.updateTimerValue, seconds = 1)
        pass
        
    def stopTimer(self):
        try:
            model.user.setScore(self.getTimer())
            self.sched.shutdown(wait=False)
        except Exception as e:
            #print(e)
            pass
Example #3
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):

        addEvent("schedule.cron", self.cron)
        addEvent("schedule.interval", self.interval)
        addEvent("schedule.remove", self.remove)

        self.sched = Sched(misfire_grace_time=60)
        self.sched.start()
        self.started = True

    def remove(self, identifier):
        for cron_type in ["intervals", "crons"]:
            try:
                self.sched.unschedule_job(getattr(self, cron_type)[identifier]["job"])
                log.debug("%s unscheduled %s", (cron_type.capitalize(), identifier))
            except:
                pass

    def doShutdown(self):
        self.stop()
        return super(Scheduler, self).doShutdown()

    def stop(self):
        if self.started:
            log.debug("Stopping scheduler")
            self.sched.shutdown()
            log.debug("Scheduler stopped")
        self.started = False

    def cron(self, identifier="", handle=None, day="*", hour="*", minute="*"):
        log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))

        self.remove(identifier)
        self.crons[identifier] = {
            "handle": handle,
            "day": day,
            "hour": hour,
            "minute": minute,
            "job": self.sched.add_cron_job(handle, day=day, hour=hour, minute=minute),
        }

    def interval(self, identifier="", handle=None, hours=0, minutes=0, seconds=0):
        log.info(
            "Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s", (identifier, hours, minutes, seconds)
        )

        self.remove(identifier)
        self.intervals[identifier] = {
            "handle": handle,
            "hours": hours,
            "minutes": minutes,
            "seconds": seconds,
            "job": self.sched.add_interval_job(handle, hours=hours, minutes=minutes, seconds=seconds),
        }
Example #4
def schedule(file_name, n_jobs, frequency):
    '''Schedule the scraper to execute every hour and shut it down after a
       certain number of jobs have been run'''

    # Create a default logger
    basicConfig()

    # Run the first job
    scrape(file_name)

    # Instantiate the scheduler
    sched = Scheduler()

    # Start it
    sched.start()

    # Schedule the function
    sched.add_interval_job(scrape,
                           args=[file_name],
                           minutes=frequency,
                           misfire_grace_time=60)

    # Wait to run n_jobs (assuming 1 job per hour, which is 3600 seconds)
    sleep(n_jobs * 3600)

    # Shutdown the scheduler
    sched.shutdown()
Example #5
def schedule(file_name, n_jobs, frequency):

    '''Schedule the scraper to execute every hour and shut it down after a
       certain number of jobs have been run'''

    # Create a default logger
    basicConfig()

    # Run the first job
    scrape(file_name)

    # Instantiate the scheduler
    sched = Scheduler()
    
    # Start it
    sched.start()

    # Schedule the function
    sched.add_interval_job(scrape, args=[file_name], minutes=frequency,
                           misfire_grace_time=60)
    
    # Wait to run n_jobs (assuming 1 job per hour, which is 3600 seconds)
    sleep(n_jobs * 3600)
    
    # Shutdown the scheduler
    sched.shutdown()
Example #6
def main(config_file):
    info = load_config(config_file)

    day_of_week = info['days']
    hour = info['hour']
    minute = info['minute']

    config = {
        'apscheduler.jobstores.file.class': info['scheduler-config']['class'],
        'apscheduler.jobstores.file.path': info['scheduler-config']['path']
    }
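    # APScheduler 2.x takes a dict of 'apscheduler.*' options; here they configure a persistent job store from the config file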
    sched = Scheduler(config)

    sched.add_cron_job(run_scraper,
                       day_of_week=day_of_week,
                       hour=hour,
                       minute=minute)
    sched.add_cron_job(check_archive, day='first')

    sched.start()

    print('Press Ctrl+{0} to exit'.format('C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()  # Not strictly necessary if daemonic mode is enabled, but should be done if possible
Example #7
class SnortScheduler(SimplePlugin):
    """ Enables Schduling for Snortmanager """
    
    scheduler = None # The APS instance
    
    def __init__(self, bus):
        """ Initiates scheduler. """
        SimplePlugin.__init__(self, bus)
        self.scheduler = Scheduler()
        
    def __initiate_jobs(self):
        """ Adds schedueled tasks if database is empty. """
        sched = self.scheduler
        sched.add_cron_job(update_snort_rules, hour = 7, jobstore='sql')
        sched.add_cron_job(produce_configuration_files, hour = 9, jobstore='sql')
        
    def start(self):
        """ Intitates scheduler when Snortmanager starts """
        sched = self.scheduler
        sched.add_jobstore(ScheduleStore(), 'sql')
        if len(sched.get_jobs()) == 0:
            self.__initiate_jobs()
        sched.start()
    
    def stop(self):
        """ Stops Scheduler service when thread dies. """
        self.scheduler.shutdown(wait=False)
        
    def restart(self):
        """ Restarts the service if necassary. """
        self.stop()
        self.start()
    
    def get_jobs(self):
        return self.scheduler.get_jobs()
Example #8
class BlueprintHandler:
    setter_blacklist = []
    getter_blacklist = []

    def __init__(self, blueprint, testing=False, testing_count=10):
        self.blueprint = blueprint
        self.testing = testing
        self.testing_count = testing_count
        self.scheduler = Scheduler()

    def do_step(self):
        print "stepping"
        try:
            # Fetch any outstanding events from the engine process and execute in simulator
            while not self.local_queue.empty():
                action = self.local_queue.get()
                try:
                    self.blueprint.interface.set(action[0], float(action[1]))
                    print "Received action:", action
                except exceptions.ValueError:
                    print "Value '" + str(action[1]) + "' is not convertable to float"

            points = self.blueprint.interface.get_getters()

            self.blueprint.step(stepcount=int(1 / 0.1))

            g = {}
            for point in points:
                if point in BlueprintHandler.getter_blacklist:
                    continue
                g[point] = self.blueprint.interface.get(point)

            for k in g.keys():
                m = Measurement()
                m.bid = self.blueprint.building.buildingID
                m.timestamp = datetime.utcnow().replace(tzinfo=utc)
                m.uuid = k
                m.val = g[k]
                m.save()
        except:
            # print 'error: ', sys.exc_info()
            print "trace: ", traceback.print_exc()

    def init_scheduler(self):
        schedule_store = RAMJobStore()

        # Write data every 15 seconds.
        job_second = self.scheduler.add_interval_job(self.do_step, 0, 0, 0, 0, 15)

        schedule_store.add_job(job_second)

        self.scheduler.add_jobstore(schedule_store, "Simulator scheduler", quiet=False)

    def start(self, queue=None):
        self.local_queue = queue
        self.init_scheduler()
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()
Example #9
class Ddate():
    ''' Scheduling class to post ddate each morning. '''

    def __init__(self, connection, target):
        ''' Set irc details, initialise scheduler, set daily task,
        and grab the current date. '''
        self.con = connection
        self.target = target
        self.sched = Scheduler()
        self.sched.start()
        self.fetch_ddate()
        # scheduler for 0915 (server is gmt+1)
        self.sched.add_cron_job(self.change_ddate, hour=10, minute=15)

    def change_ddate(self):
        ''' update ddate and announce to channel. '''
        self.fetch_ddate()
        self.post_ddate()

    def fetch_ddate(self):
        ''' fetch ddate from shell.'''
        date = check_output(['ddate'])
        # trim `today is ` and `\n`
        self.ddate = date[9:-2]

    def post_ddate(self):
        ''' post date to connection/channel supplied on startup. '''
        self.con.send_msg(self.target, self.ddate)

    def __del__(self):
        ''' Kill scheduler on close. '''
        self.sched.shutdown()
Example #10
def main(config_file):
    info = load_config(config_file)

    day_of_week = info['days']
    hour = info['hour']
    minute = info['minute']

    config = {
        'apscheduler.jobstores.file.class': info['scheduler-config']['class'],
        'apscheduler.jobstores.file.path': info['scheduler-config']['path']
    }
    sched = Scheduler(config)

    sched.add_cron_job(run_scraper, day_of_week=day_of_week, hour=hour, minute=minute)
    sched.add_cron_job(check_archive, day='first')

    sched.start()

    print('Press Ctrl+{0} to exit'.format('C'))

    try:
        # This is here to simulate application activity (which keeps the main thread alive).
        while True:
            time.sleep(2)
    except (KeyboardInterrupt, SystemExit):
        sched.shutdown()  # Not strictly necessary if daemonic mode is enabled, but should be done if possible
Example #11
def main():
    sched = Scheduler()
    sched.start()

    c_date = datetime.today()
    date_ = '2013-11-30'
    delay = timedelta(minutes=2)

    all_portfolios = combine_lists(PROGRESS_PORTFOLIOS,FIS_GROUP_PORTFOLIOS)

    # end of week jobs
    #-----------------
    if c_date.weekday() == 4:
        # runs at 6pm Friday evening
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 18, 0, 0)
        sched.add_date_job(
            axys_job,
            sched_date,
            [MODEL_PORTFOLIOS,all_portfolios,date_])

    # monthly jobs
    #-------------
    if c_date.day == 1:
        # runs at 10 am
        sched_date = datetime(c_date.year, c_date.month, c_date.day, 10, 0, 0)
        sched.add_date_job(
            axys_job,
            sched_date,
            [MODEL_PORTFOLIOS,all_portfolios,date_])
        sched_date = sched_date + delay

    # keep script 'running' in order to allow the scheduler to stay open and run
    # the jobs added
    time.sleep(60)
    sched.shutdown()
Example #12
def main():
    from apscheduler.scheduler import Scheduler

    if len(sys.argv) != 2:
        sys.exit("usage: %s <config-file>" % sys.argv[0])

    try:
        from ConfigParser import ConfigParser
    except ImportError: # python3
        from configparser import ConfigParser

    try:
        config = ConfigParser(inline_comment_prefixes=(';',))
    except TypeError: # not python3
        config = ConfigParser()

    config.readfp(open(sys.argv[1]))
    global logfile
    logfile = config.get("global", "logfile")
    FORMAT = "%(asctime)-15s: %(message)s"
    logging.basicConfig(level=logging.INFO, filename=logfile, filemode='w',
                        format=FORMAT)

    # Set time on WDLXTV systems
    rdate = "/usr/sbin/rdate"
    if os.path.exists(rdate) and os.access(rdate, os.X_OK):
        cmd = [rdate, "ntp.internode.on.net"]
        subprocess.Popen(cmd).wait()

    logging.info("Main process PID: %d, use this for sending SIGHUP "
                 "for re-reading the schedule-file", os.getpid())

    global tuners
    tuners = TUNERS(config.get("global", "tuners"))

    global hdhomerun_config
    hdhomerun_config = config.get("global", "hdhomerun_config")

    schedule_file = config.get("global", "schedule_file")
    media_dir = config.get("global", "media_dir")

    channelmap = {}
    for opt in config.options("channelmap"):
        channelmap[opt] = config.get("channelmap", opt).split(",")

    while True:
        global reload_jobs, shutdown
        reload_jobs = False
        shutdown = False
        sched = Scheduler(misfire_grace_time=60, daemonic=False)
        sched.start()
        signal.signal(signal.SIGHUP, sighup_handler)
        signal.signal(signal.SIGTERM, sigterm_handler)
        schedule_jobs(sched, schedule_file, channelmap, media_dir)
        while not (reload_jobs or shutdown):
            signal.pause()
        sched.shutdown()
        if shutdown:
            sys.exit(0)
Example #13
class Scheduler(Plugin):

    crons = {}
    intervals = {}
    started = False

    def __init__(self):

        addEvent('schedule.cron', self.cron)
        addEvent('schedule.interval', self.interval)
        addEvent('schedule.remove', self.remove)

        self.sched = Sched(misfire_grace_time = 60)
        self.sched.start()
        self.started = True

    def remove(self, identifier):
        for cron_type in ['intervals', 'crons']:
            try:
                self.sched.unschedule_job(getattr(self, cron_type)[identifier]['job'])
                log.debug('%s unscheduled %s', (cron_type.capitalize(), identifier))
            except:
                pass

    def doShutdown(self):
        super(Scheduler, self).doShutdown()
        self.stop()

    def stop(self):
        if self.started:
            log.debug('Stopping scheduler')
            self.sched.shutdown()
            log.debug('Scheduler stopped')
        self.started = False

    def cron(self, identifier = '', handle = None, day = '*', hour = '*', minute = '*'):
        log.info('Scheduling "%s", cron: day = %s, hour = %s, minute = %s', (identifier, day, hour, minute))

        self.remove(identifier)
        self.crons[identifier] = {
            'handle': handle,
            'day': day,
            'hour': hour,
            'minute': minute,
            'job': self.sched.add_cron_job(handle, day = day, hour = hour, minute = minute)
        }

    def interval(self, identifier = '', handle = None, hours = 0, minutes = 0, seconds = 0):
        log.info('Scheduling %s, interval: hours = %s, minutes = %s, seconds = %s', (identifier, hours, minutes, seconds))

        self.remove(identifier)
        self.intervals[identifier] = {
            'handle': handle,
            'hours': hours,
            'minutes': minutes,
            'seconds': seconds,
            'job': self.sched.add_interval_job(handle, hours = hours, minutes = minutes, seconds = seconds)
        }
Example #14
class Main(Daemon):
    """
    do some things
    """
    def __init__(self, pidfile, cfgfile):
        Daemon.__init__(self, pidfile)
        self.jobs = {}
        self.immediately = False
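        # daemonic=False keeps the scheduler thread non-daemonic, so the process stays alive until shutdown()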
        self.scheduler = Scheduler(daemonic=False)
        self.logger = logging.getLogger(self.__class__.__name__)
        if os.path.exists(cfgfile):
            with open(cfgfile, 'rt') as f:
                config = yaml.load(f.read())
            for k1 in config.keys():
                if k1 == 'version':
                    pass
                if k1 == 'immediately':
                    self.immediately = config[k1]
                elif k1 == 'taobao':
                    self.jobs[k1] = config[k1]
                    self.jobs[k1]['id'] = None
                    if 'chktime' in self.jobs[k1].keys():
                        self.jobs[k1]['btime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[0], '%H:%M')
                        self.jobs[k1]['etime'] = time.strptime(self.jobs[k1]['chktime'].split('-')[1], '%H:%M')
                        if self.jobs[k1]['btime'] >= self.jobs[k1]['etime']:
                            raise ValueError('"chktime" is illegal')
                    else:
                        raise ValueError('There is no "chktime" be found in configure.')
                else:
                    pass
        else:
            self.logger.error('{0} not found'.format(cfgfile))

    def job_main(self):
        st_beg = self.jobs['taobao']['btime']
        st_end = self.jobs['taobao']['etime']
        dt_beg = datetime.now().replace(hour=st_beg.tm_hour, minute=st_beg.tm_min)
        dt_end = datetime.now().replace(hour=st_end.tm_hour, minute=st_end.tm_min)
        td_rnd = dt_end - dt_beg
        dt_rnd = dt_beg + timedelta(seconds=randint(1, td_rnd.days * 86400 + td_rnd.seconds - 1))
        if dt_rnd <= datetime.now():
            dt_rnd += timedelta(days=1)
        self.jobs['taobao']['id'] = self.scheduler.add_date_job(lambda: self.job_taobao(), dt_rnd)

    def job_taobao(self):
        for v in self.jobs['taobao']['account']:
            taobao = Taobao(v['username'], v['password'])
            if taobao.login():
                taobao.checkin()

    def run(self):
        if self.immediately:
            self.job_taobao()
            self.immediately = False
        self.scheduler.add_cron_job(lambda: self.job_main(), hour='0', minute='1')
        self.scheduler.start()
        stopevent.wait()
        self.scheduler.shutdown()
Example #15
class Application(object):
	def __init__(self):
		self.books = []
		self.logger = logging.getLogger(__name__)
		self.scheduler = Scheduler()

		self.scheduler.add_interval_job(self.process_book_list, seconds=30)

		self.store = AmazonStore()
		self.notifier = NotifyMyAndroid(bookworm.NOTIFICATION_KEY)

		self.load()

	def load(self):
		if os.path.exists('books.dat'):
			with open('books.dat', 'rb') as f:
				self.books = pickle.load(f)
				self.logger.debug('Loaded {0} books'.format(len(self.books)))

	def save(self):
		if(len(self.books) > 0 ):
			with open('books.dat', 'wb') as f:
				self.logger.debug('Saving books')
				pickle.dump(self.books, f, -1)

	def start(self):
		self.logger.info('Starting application')
		self.scheduler.start()

	def stop(self):
		self.logger.info('Stopping application')
		self.save()
		self.scheduler.shutdown()

	def process_book_list(self):
		self.logger.info('Processing book list')

		for book in self.books:
			self.logger.debug('Updating price for {0}'.format(book.title))
			latestPrice = self.store.get_book_price(book.isbn)
			self.logger.debug('latest: {0}, saved: {1}'.format(latestPrice, book.current_price))

			if latestPrice != None and latestPrice != book.current_price:
				if latestPrice < book.current_price:
					self.logger.debug('Found a lower price!')
					self.notifier.send_notification(self.build_notification_message(latestPrice, book.title, self.store.get_product_url(book.isbn)), 'Price Update')

				book.current_price = latestPrice

	def build_notification_message(self, price, title, productUrl):
		return (u'{0} is now £{1}\n{2}'.format(title, price, productUrl))

	def get_resolver(self):
		return self.store.get_book_details

	def register_new_book(self, book):
		self.books.append(book)
Example #16
class Controller():
    def __init__(self):
        self.configList = []

        self.scheduler = Scheduler()
        self.scheduler.start()

        self.uploadConfigList()

        self.mqttClient = mqtt.Client(mqttClientName)
        self.mqttClient.on_connect = onConnect
        self.mqttClient.on_disconnect = onDisconnect
        self.mqttClient.on_log = onLog
        self.mqttClient.on_publish = onPublish
        self.mqttClient.on_message = onMessage

        self.mqttClient.connect(mqttBrokerHost)
        self.mqttClient.loop_start()

    def stop(self):
        self.scheduler.shutdown()

        self.mqttClient.loop_stop()
        self.mqttClient.disconnect()

    def uploadConfigList(self):
        tableRows = None
        try:
            tableRows = getTableRows(scheduleTable)
        except sqlite3.Error as err:
            logger.error('SQLite: error: ' + str(err))

        self.configList.clear()
        for row in tableRows:
            config = Config(topic=row[0],
                            weekday=row[1],
                            hour=row[2],
                            minute=row[3],
                            second=row[4])
            logger.info('Uploaded config list with: ' + str(config.topic) +
                        ' ' + str(config.weekday) + ' ' + str(config.hour) +
                        ' ' + str(config.minute) + ' ' + str(config.second))
            self.configList.append(config)
        self.updateScheduler()

    def updateScheduler(self):
        for config in self.configList:
            self.scheduler.add_cron_job(func=self.requestUpdate,
                                        args=[config.topic],
                                        day_of_week=config.weekday,
                                        hour=config.hour,
                                        minute=config.minute,
                                        second=config.second)

    def requestUpdate(self, mqttTopic):
        self.mqttClient.publish(mqttTopic, mqttUpdateTrigger, int(mqttQos))
Example #17
class HeartBeat(Singleton):
    def __init__(self):
        self.scheduler = Scheduler()
        credentials = pika.PlainCredentials('inspur', 'inspur')
        flag, rabbitmq_ip = utils.getRabbitmqIp()
        # A remote IP can be used here; remember to open the remote port
        parameters = pika.ConnectionParameters(rabbitmq_ip, 5672, '/',
                                               credentials)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()

    """
    heartbeat task
    """

    def agentHeartBeat(self):

        logger.debug("agent heart beat begin!")
        self.scheduler.add_interval_job(self.heartBeatWithServer, seconds=3)

        self.scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

        try:
            while True:
                if not utils.httpdstate:
                    sys.exit(0)
        except KeyboardInterrupt:
            self.scheduler.shutdown()
        finally:
            self.connection.close()

    def heartBeatWithServer(self):
        agentInfo = {'type': constr.NODE_STATUS, 'token': constr.NODE_TOKEN}
        flag, agentid = utils.getAgentId()
        if not (flag and agentid):
            return
        agentInfo['id'] = agentid
        logger.debug("node hb info : {}".format(str(agentInfo)))
        try:
            self.channel.basic_publish(exchange='exchangeTest',
                                       routing_key='heartBeatKey',
                                       body=json.dumps(agentInfo))
        except Exception, e:
            logger.error("heartbeat exception: {}".format(e.message))
            credentials = pika.PlainCredentials('inspur', 'inspur')
            flag, rabbitmq_ip = utils.getRabbitmqIp()
            # A remote IP can be used here; remember to open the remote port
            parameters = pika.ConnectionParameters(rabbitmq_ip, 5672, '/',
                                                   credentials)
            self.connection = pika.BlockingConnection(parameters)
            self.channel = self.connection.channel()
            pass
        return True
Example #18
def main():
    in_daemon_mode = conf.alarm_send_in_daemon_mode
    if not in_daemon_mode:
        check()
    else:
        from apscheduler.scheduler import Scheduler
        minuteScheduler = Scheduler()
        sleep_seconds = 60  # just 60 seconds
        minuteScheduler.add_interval_job(check, seconds=sleep_seconds)
        minuteScheduler.start()
        while 1:
            time.sleep(9999)
        minuteScheduler.shutdown()
Example #19
def main():
    in_daemon_mode = conf.alarm_send_in_daemon_mode
    if not in_daemon_mode:
        check()
    else:
        from apscheduler.scheduler import Scheduler
        minuteScheduler = Scheduler()
        sleep_seconds = 60  # just 60 seconds
        minuteScheduler.add_interval_job(check, seconds=sleep_seconds)
        minuteScheduler.start()
        while 1:
            time.sleep(9999)
        minuteScheduler.shutdown()
Example #20
    def run(self):
        scheduler = Scheduler()

        if self.config["SCHEDULE"].get("packages") is not None:
            scheduler.add_interval_job(self.processor.process, **self.config["SCHEDULE"]["packages"])

        scheduler.start()

        try:
            while True:
                time.sleep(999)
        except KeyboardInterrupt:
            logger.info("Shutting down Carrier...")
            scheduler.shutdown(wait=False)
Example #21
class TestStandaloneScheduler(object):
    def setup(self):
        self.scheduler = Scheduler(standalone=True)
        self.scheduler.add_date_job(lambda: None,
                                    datetime.now() + timedelta(1))
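        # standalone=True makes start() block in the calling thread, hence it is run in a separate Thread below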
        self.thread = Thread(target=self.scheduler.start)
        self.thread.start()

    def teardown(self):
        self.scheduler.shutdown(True)
        self.thread.join(3)

    def test_scheduler_running(self):
        time.sleep(.1)
        eq_(self.scheduler.running, True)
Example #22
    def run(self):
        scheduler = Scheduler()

        if self.config["SCHEDULE"].get("packages") is not None:
            scheduler.add_interval_job(self.processor.process,
                                       **self.config["SCHEDULE"]["packages"])

        scheduler.start()

        try:
            while True:
                time.sleep(999)
        except KeyboardInterrupt:
            logger.info("Shutting down Carrier...")
            scheduler.shutdown(wait=False)
Example #23
def scheduleNotification(username, password, receivers, subject, message, attachments, timestring):
    logging.basicConfig()
    scheduler = Scheduler()
    scheduler.start()
    sentOn = datetime.datetime.strptime(timestring,"%Y-%m-%dT%H:%M")
    scheduler.add_date_job(emailUser,sentOn,[username,password,receivers.split(","),subject,message,attachments])
    atexit.register(lambda:scheduler.shutdown(wait=False))
Example #24
def last_tag_update():
    cron = Scheduler(daemon=True)
    cron.start()

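    # APScheduler 2.x decorator equivalent of add_interval_job: run job_function every 2 minutes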
    @cron.interval_schedule(minutes=2)
    def job_function():
        last_tag_get_update()

    atexit.register(lambda: cron.shutdown(wait=False))
Example #25
def scheduleEmailListener(username,password,response_subject,automated_response):
    logging.basicConfig()
    scheduler=Scheduler()
    scheduler.start()
    scheduler.add_interval_job(respondToEmails,minutes=1,args=[username,password,response_subject,automated_response])
    mongo.deleteEntry("fields","main",{})
    mongo.addEntry("fields","main",{"email":username,"password":password})
    atexit.register(lambda:scheduler.shutdown(wait=False))
    return scheduler
Example #26
def create_app(object_name):
    app = Flask(__name__)
    app.config.from_object(object_name)
    cors = CORS(app,
                resources={r"/*": {
                    "origins": "*"
                }},
                allow_headers=['Authorization', 'Content-Type'])
    db.init_app(app)

    cron = Scheduler(daemon=True)
    cron.start()

    @app.route('/')
    def index():
        return render_template("index.html")

    # Importing Blueprints
    from controllers.cas import blueprint_cas

    # Registering blueprints
    app.register_blueprint(blueprint_cas)

    # Custom HTTP error handlers
    @app.errorhandler(400)
    def custom_400(error):
        print error
        return jsonify(message=error.description['message']), 400

    @app.errorhandler(401)
    def custom_401(error):
        return jsonify(message=error.description['message']), 401

    @app.errorhandler(403)
    def custom_403(error):
        return jsonify(message=error.description['message']), 403

    @app.errorhandler(404)
    def custom_404(error):
        return jsonify(message="Item or resource not found"), 404

    @app.errorhandler(405)
    def custom_405(error):
        return jsonify(message="Not allowed"), 405

    @app.errorhandler(500)
    def custom_500(error):
        return jsonify(message=error.description['message']), 500

    #@app.errorhandler(Exception)
    #def unhandled_exception(e):
    #    return jsonify(message=str(e)),500

    # Shutdown your cron thread if the web process is stopped
    atexit.register(lambda: cron.shutdown(wait=False))

    return app
Example #27
def main():
    from apscheduler.scheduler import Scheduler
    from ConfigParser import ConfigParser

    config = ConfigParser()
    config.readfp(open('config-file'))
    global logfile
    logfile = config.get("global", "logfile")
    FORMAT = "%(asctime)-15s: %(message)s"
    logging.basicConfig(level=logging.INFO, filename=logfile, filemode='w',
                        format=FORMAT)

    # Set time on WDLXTV systems
    rdate = "/usr/sbin/rdate"
    if os.path.exists(rdate) and os.access(rdate, os.X_OK):
        cmd = [rdate, "ntp.internode.on.net"]
        subprocess.Popen(cmd).wait()

    logging.info("Main process PID: %d, use this for sending SIGHUP "
                 "for re-reading the schedule-file", os.getpid())

    global tuners
    tuners = TUNERS(config.get("global", "tuners"))

    global hdhomerun_config
    hdhomerun_config = config.get("global", "hdhomerun_config")

    schedule_file = config.get("global", "schedule_file")
    media_dir = config.get("global", "media_dir")

    channelmap = {}
    for opt in config.options("channelmap"):
        channelmap[opt] = config.get("channelmap", opt).split(",")

    while True:
        global reload_jobs
        reload_jobs = False
        sched = Scheduler(misfire_grace_time=60, daemonic=False)
        sched.start()
        signal.signal(signal.SIGHUP, sighup_handler)
        schedule_jobs(sched, schedule_file, channelmap, media_dir)
        while not reload_jobs:
            signal.pause()
        sched.shutdown()
Example #28
def go():

	sched = Scheduler()

	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*60)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*15)
	# startTime = datetime.datetime.now()+datetime.timedelta(seconds=60*5)
	startTime = datetime.datetime.now()+datetime.timedelta(seconds=20)

	scheduleJobs(sched, startTime)

	sched.start()

	while runStatus.run:
		time.sleep(0.1)

	print("Scraper stopping scheduler")
	sched.shutdown()
	nt.dirNameProxy.stop()
Example #29
class ActivityScheduler(baseRunner.BaseRunner):
	def __init__(self):
		super(ActivityScheduler, self).__init__()

		# set logging options as defined in config file
		logConf = self.config._sections["logging"]
		# remove default __name__ item
		del logConf["__name__"]
		logConf["level"] = int(logConf["level"])
		logConf["filename"] = join(dirname(__file__), logConf["filename"])
		logging.basicConfig(**logConf)

		# initialize scheduler
		self.scheduler = Scheduler()
		self.scheduler.start()

		# create initial schedule
		if not self.scheduler.get_jobs():
			self.createSchedule()

		# main loop
		while True:
			try:
				time.sleep(10)
			except KeyboardInterrupt:
				logging.info("Shutting down..")
				self.scheduler.shutdown()
				break

	def createSchedule(self):
		logging.info("Schedule requests..")

		schedules = self.config._sections["schedule"]
		# remove default __name__ item
		del schedules["__name__"]
		for methodName, schedule in schedules.items():
			# schedule handler requests (wrapper method gets called with
			# cron-like notation and the method name)
			# name parameter is given for logging/debugging purposes only
			self.scheduler.add_cron_job(self.wrap, *schedule.split(), \
				args=[methodName], misfire_grace_time=120, name=methodName)
Example #30
class ActivityScheduler(baseRunner.BaseRunner):
    def __init__(self):
        super(ActivityScheduler, self).__init__()

        # set logging options as defined in config file
        logConf = self.config._sections["logging"]
        # remove default __name__ item
        del logConf["__name__"]
        logConf["level"] = int(logConf["level"])
        logConf["filename"] = join(dirname(__file__), logConf["filename"])
        logging.basicConfig(**logConf)

        # initialize scheduler
        self.scheduler = Scheduler()
        self.scheduler.start()

        # create initial schedule
        if not self.scheduler.get_jobs():
            self.createSchedule()

        # main loop
        while True:
            try:
                time.sleep(10)
            except KeyboardInterrupt:
                logging.info("Shutting down..")
                self.scheduler.shutdown()
                break

    def createSchedule(self):
        logging.info("Schedule requests..")

        schedules = self.config._sections["schedule"]
        # remove default __name__ item
        del schedules["__name__"]
        for methodName, schedule in schedules.items():
            # schedule handler requests (wrapper method gets called with
            # cron-like notation and the method name)
            # name parameter is given for logging/debugging purposes only
            self.scheduler.add_cron_job(self.wrap, *schedule.split(), \
             args=[methodName], misfire_grace_time=120, name=methodName)
Example #31
class CronJob(HTPCObject):
    # List
    _notifications_to_publish = []
    def __init__(self, name, schedule):
        super(CronJob, self).__init__(name)
        # Configure scheduler
        self.scheduler = Scheduler(schedule)
        day, hour, minute = schedule
        self.scheduler.add_cron_job(self.run, day=day, hour=hour, minute=minute)
        # Initialize notifications to offer
        for notification in self._notifications_to_publish:
            self._published_notifications.append(notification)

    def start(self):
        self.scheduler.start()

    def stop(self):
        self.scheduler.shutdown()

    def run(self):
        raise NotImplementedError("I don't know how to run the cron job!")
Example #32
def home(request):
    global sched

    # if pastebin button is pressed, invoke masterScrape to scrape pastebin archive
    # scheduler interval of 3 minutes
    if 'run_scraper_pastebin' in request.POST:
        logging.basicConfig()
        sched = Scheduler(standalone=True)
        args = ['http://pastebin.com/archive', '1', '180']
        call_command('masterScrape', *args)

        def scrape_sched():
            call_command('masterScrape', *args)

        sched.add_interval_job(scrape_sched, seconds=180, max_instances=1000)
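        # standalone=True: start() blocks and runs the interval job in this thread until shutdown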
        sched.start()

    # if pastie button is pressed, invoke masterScrape to scrape pastie archive
    # scheduler interval of 10 minutes
    if 'run_scraper_pastie' in request.POST:
        logging.basicConfig()
        sched = Scheduler(standalone=True)
        args = ['http://pastie.org/pastes', '0', '60']
        call_command('masterScrape', *args)

        def scrape_sched_pastie():
            call_command('masterScrape', *args)

        sched.add_interval_job(scrape_sched_pastie,
                               seconds=60,
                               max_instances=1000)
        sched.start()

    # sets flags to stop scrape function
    if 'stop_scraper' in request.POST:
        args = ['stop']
        call_command('masterScrape', *args)
        sched.shutdown(wait=False)

    return render(request, 'scraper/index2.0.html')
Example #33
def home(request):
	global sched

	# if pastebin button is pressed, invoke masterScrape to scrape pastebin archive
	# scheduler interval of 3 minutes
	if 'run_scraper_pastebin' in request.POST:
		logging.basicConfig()
		sched = Scheduler(standalone = True, coalesce = True)
		#coalesce is used to avoid APS scheduler missed job warning which in turn causes crashing
		args = ['http://pastebin.com/archive', '1', '180']
		call_command('masterScrape', *args)	
	
		def scrape_sched():
			call_command('masterScrape', *args)

		sched.add_interval_job(scrape_sched, seconds = 180, max_instances = 1000)
		sched.start()

	# if pastie button is pressed, invoke masterScrape to scrape pastie archive
	# scheduler interval of 10 minutes 
	if 'run_scraper_pastie' in request.POST:
		logging.basicConfig()
		sched = Scheduler(standalone = True, coalesce = True)
		args = ['http://pastie.org/pastes', '0', '60']
		call_command('masterScrape', *args)	
	
		def scrape_sched_pastie():
			call_command('masterScrape', *args)

		sched.add_interval_job(scrape_sched_pastie, seconds = 60, max_instances = 1000)
		sched.start()

	# sets flags to stop scrape function
	if 'stop_scraper' in request.POST:
		args = ['stop']
		call_command('masterScrape', *args)
		sched.shutdown(wait = False)

	return render(request, 'scraper/index2.0.html')
Example #34
class MyScheduler:

    EVENTS = {
        '1': 'EVENT_SCHEDULER_START',
        '2': 'EVENT_SCHEDULER_SHUTDOWN',
        '3': 'EVENT_JOBSTORE_ADDED',
        '4': 'EVENT_JOBSTORE_REMOVED',
        '5': 'EVENT_JOBSTORE_JOB_ADDED',
        '32': 'EVENT_JOBSTORE_JOB_REMOVED',
        '64': 'EVENT_JOB_EXECUTED',
        '128': 'EVENT_JOB_ERROR',
        '256': 'EVENT_JOB_MISSED'
    }

    def __init__(self, db_path='sqlite:///scheduler.db'):
        self.scheduler = Scheduler()
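        # SQLAlchemyJobStore persists scheduled jobs to the given database URL so they survive restarts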
        self.scheduler.add_jobstore(SQLAlchemyJobStore(url=db_path), 'default')

    def start(self):
        self.scheduler.start()

    def add_job(self, job, date, args):
        job = self.scheduler.add_date_job(job, date, args)
        print job

    def jobs(self):
        return self.scheduler.get_jobs()

    def remove_job(self, notfication_id):
        jobs = self.jobs()
        for job in jobs:
            if int(job.args[0]) == int(notfication_id):
                self.scheduler.unschedule_job(job)
                return True
        return False

    def shutdown(self):
        self.scheduler.shutdown()
Example #35
def init_crawler():
	global link_re, script_re, conn, cur, sched, first_run
	conn = sqlite3.connect('steam_summer_sale.db', check_same_thread=False)
	cur = conn.cursor()
	sql_f = open('schema.sql')
	cur.executescript(sql_f.read())


	script_re = re.compile("InitDailyDealTimer\( \$\('\w+'\), ([0-9]+) \);")
	link_re = re.compile('http:\/\/store\.steampowered\.com\/(app|sub)\/([0-9]+)')

	sched = Scheduler()
	sched.add_interval_job(do_job, minutes=2)
	try:
	    first_run = True
	    do_job()
	    first_run = False
	    sched.start()
	    while True:
	        time.sleep(300)
	except:
	    sched.shutdown()
	    conn.close()
Example #36
    def __init__(self, socketio):
        self.main = Main(socketio)

        self.execute_now()

        cron = Scheduler(daemon=True)
        cron.start()

        @cron.interval_schedule(hours=1)
        def job_function():
            # Do your work here
            self.main.start()

        # Shutdown your cron thread if the web process is stopped
        atexit.register(lambda: cron.shutdown(wait=False))
Example #37
def start_schedule():

#if __name__ == '__main__':

    
    scheduler_pl = Scheduler(daemonic = False)

    scheduler_pl.print_jobs()


    scheduler_pl.shutdown()

    
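    # ShelveJobStore (APScheduler 2.x) persists jobs in a shelve file under the 'file' alias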
    scheduler_pl.add_jobstore(ShelveJobStore('/tmp/db_pl_schedule'), 'file')

    v_current_jobs = scheduler_pl.get_jobs()

    print v_current_jobs

    if v_current_jobs:  # if the job already exists, unschedule it first


        scheduler_pl.unschedule_func(upload_processlist) 

    scheduler_pl.add_interval_job(upload_processlist, minutes=1)
            



    scheduler_pl.start()

    print 'success!'

    scheduler_pl.print_jobs()

    '''
Example #38
class JobDaemon(object):
    def __init__(self):
        # Start the scheduler
        self.sched = Scheduler(daemonic=True)

        # register three exit handlers
        atexit.register(lambda: self.sched.shutdown())
        self.sched.start()

    def job_daemon_execute(self, module_name, func_name, *args, **kwargs):
        print 'Client request comming... ',
        print '[Module name:', module_name, '/',
        print 'Function name:', func_name, ']'
        command_module = importlib.import_module(module_name)
        func = getattr(command_module, func_name)
        result = func(sched=self.sched, *args, **kwargs)
        return result
Example #39
def task():
    cron = Scheduler(daemon=True)
    cron.start()

    @cron.interval_schedule(minutes=1)
    def job_function():
        all_user = Steemit_User().query.all()
        for user in all_user:
            analysis_self = Analiysis().query.filter_by(
                steemit_user_id=user.id)
            end_date = analysis_self.all()[-1].end_date
            week_control = (datetime.now() - end_date).total_seconds()
            if week_control / 86400 > 6.99:
                status, response = get_url(user.steem_name)
                analiz_create(response, user, user.steem_name, end_date)

    atexit.register(lambda: cron.shutdown(wait=False))
Example #40
def main():
    d = date.today()
    t = time(00,30)
    startDate = datetime.combine(d,t)
    startDate = startDate.replace(tzinfo=tz.tzutc())
    startDate = startDate.astimezone(tz.tzlocal())
    timeStr = startDate.strftime("%Y-%m-%d %H:%M:%S")
    print timeStr
    startDate = datetime.strptime(timeStr, "%Y-%m-%d %H:%M:%S")
    if (datetime.now() - startDate) > timedelta(seconds=1):
        startDate = startDate + timedelta(days=1)
    sched = Scheduler()
    sched.start()
    sched.add_interval_job(runDateDump, days=1, start_date=startDate.strftime("%Y-%m-%d %H:%M:%S"))
    sched.print_jobs()
    while True:
        sleep(1)
        print "."
    atexit.register(lambda: sched.shutdown(wait=True))
Example #41
class TestRunningScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()
        self.scheduler.start()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    def test_shutdown_timeout(self):
        self.scheduler.shutdown()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_double_start(self):
        self.scheduler.start()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_configure_running(self):
        self.scheduler.configure({})

    def test_scheduler_double_shutdown(self):
        self.scheduler.shutdown()
        self.scheduler.shutdown(False)
Example #42
class TestRunningScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()
        self.scheduler.start()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    def test_shutdown_timeout(self):
        self.scheduler.shutdown()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_double_start(self):
        self.scheduler.start()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_configure_running(self):
        self.scheduler.configure({})

    def test_scheduler_double_shutdown(self):
        self.scheduler.shutdown()
        self.scheduler.shutdown(False)
Example #43
        status = 500
        print("Exception:", e)
        if request.data:
            response = {"status": status, "body": "Internal Server Error"}
            r = Response(
                response=json.dumps(response),
                status=status,
                mimetype="application/json",
            )
        else:
            r = render_template("500.html", msg=e)

    return r


@app.errorhandler(404)
def not_found(e):
    return render_template("404.html")


@cron.interval_schedule(minutes=10)
def job_function():
    for folder in os.listdir(app.config["UPLOAD_FOLDER"]):
        if folder not in IGNORE_FILES:
            folder_path = os.path.join(app.config["UPLOAD_FOLDER"], folder)
            shutil.rmtree(folder_path)


atexit.register(lambda: cron.shutdown(wait=False))

Example #44
        elif (event.retval == False):
            set_status(event.job.name, 4)
            logger.error("Event #" + str(event.job.name) + " had an error")
        if event.exception:
            print event.exception
            logger.fatal("Event #" + str(event.job.name) + ' - job crashed :(')


#start the scheduler
sched = Scheduler()
sched.add_jobstore(WriteBackShelveJobStore('jobstore.db'), 'shelve')
sched.start()

sched.add_listener(my_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)

process_events()

# Process events list every 10 seconds
sched.add_interval_job(process_events, seconds=10)

# Remove completed events from db every minute
sched.add_interval_job(remove_events, minutes=1)

print "Dispatcher started..."

try:
    while True:
        time.sleep(1)
finally:
    sched.shutdown()
Example #45
class AlertSchedulerHandler():
  FILENAME = 'definitions.json'
  TYPE_PORT = 'PORT'
  TYPE_METRIC = 'METRIC'
  TYPE_SCRIPT = 'SCRIPT'
  TYPE_WEB = 'WEB'
  TYPE_RECOVERY = 'RECOVERY'

  def __init__(self, cachedir, stacks_dir, common_services_dir, host_scripts_dir,
      alert_grace_period, cluster_configuration, config, recovery_manager,
      in_minutes=True):

    self.cachedir = cachedir
    self.stacks_dir = stacks_dir
    self.common_services_dir = common_services_dir
    self.host_scripts_dir = host_scripts_dir

    self._cluster_configuration = cluster_configuration
    
    if not os.path.exists(cachedir):
      try:
        os.makedirs(cachedir)
      except:
        logger.critical("[AlertScheduler] Could not create the cache directory {0}".format(cachedir))

    self.APS_CONFIG = {
      'apscheduler.threadpool.core_threads': 3,
      'apscheduler.coalesce': True,
      'apscheduler.standalone': False,
      'apscheduler.misfire_grace_time': alert_grace_period
    }

    self._collector = AlertCollector()
    self.__scheduler = Scheduler(self.APS_CONFIG)
    self.__in_minutes = in_minutes
    self.config = config
    self.recovery_manger = recovery_manager

    # register python exit handler
    ExitHelper().register(self.exit_handler)


  def exit_handler(self):
    """
    Exit handler
    """
    self.stop()


  def update_definitions(self, heartbeat):
    """
    Updates the persisted alert definitions JSON.
    :param heartbeat:
    :return:
    """
    if 'alertDefinitionCommands' not in heartbeat:
      logger.warning("There are no alert definition commands in the heartbeat; unable to update definitions")
      return

    # prune out things we don't want to store
    alert_definitions = []
    for command in heartbeat['alertDefinitionCommands']:
      command_copy = command.copy()

      # no need to store these since we always use the in-memory cached values
      if 'configurations' in command_copy:
        del command_copy['configurations']

      alert_definitions.append(command_copy)

    # write out the new definitions
    with open(os.path.join(self.cachedir, self.FILENAME), 'w') as f:
      json.dump(alert_definitions, f, indent=2)

    # reschedule only the jobs that have changed
    self.reschedule()


  def __make_function(self, alert_def):
    return lambda: alert_def.collect()


  def start(self):
    """ loads definitions from file and starts the scheduler """

    if self.__scheduler is None:
      return

    if self.__scheduler.running:
      self.__scheduler.shutdown(wait=False)
      self.__scheduler = Scheduler(self.APS_CONFIG)

    alert_callables = self.__load_definitions()

    # schedule each definition
    for _callable in alert_callables:
      self.schedule_definition(_callable)

    logger.info("[AlertScheduler] Starting {0}; currently running: {1}".format(
      str(self.__scheduler), str(self.__scheduler.running)))

    self.__scheduler.start()


  def stop(self):
    if not self.__scheduler is None:
      self.__scheduler.shutdown(wait=False)
      self.__scheduler = Scheduler(self.APS_CONFIG)

    logger.info("[AlertScheduler] Stopped the alert scheduler.")

  def reschedule(self):
    """
    Removes jobs that are scheduled where their UUID no longer is valid.
    Schedules jobs where the definition UUID is not currently scheduled.
    """
    jobs_scheduled = 0
    jobs_removed = 0

    definitions = self.__load_definitions()
    scheduled_jobs = self.__scheduler.get_jobs()

    # for every scheduled job, see if its UUID is still valid
    for scheduled_job in scheduled_jobs:
      uuid_valid = False

      for definition in definitions:
        definition_uuid = definition.get_uuid()
        if scheduled_job.name == definition_uuid:
          uuid_valid = True
          break

      # jobs without valid UUIDs should be unscheduled
      if uuid_valid == False:
        jobs_removed += 1
        logger.info("[AlertScheduler] Unscheduling {0}".format(scheduled_job.name))
        self._collector.remove_by_uuid(scheduled_job.name)
        self.__scheduler.unschedule_job(scheduled_job)

    # for every definition, determine if there is a scheduled job
    for definition in definitions:
      definition_scheduled = False
      for scheduled_job in scheduled_jobs:
        definition_uuid = definition.get_uuid()
        if definition_uuid == scheduled_job.name:
          definition_scheduled = True
          break

      # if no jobs are found with the definitions UUID, schedule it
      if definition_scheduled == False:
        jobs_scheduled += 1
        self.schedule_definition(definition)

    logger.info("[AlertScheduler] Reschedule Summary: {0} rescheduled, {1} unscheduled".format(
        str(jobs_scheduled), str(jobs_removed)))


  def reschedule_all(self):
    """
    Removes jobs that are scheduled where their UUID no longer is valid.
    Schedules jobs where the definition UUID is not currently scheduled.
    """
    jobs_scheduled = 0
    jobs_removed = 0

    definitions = self.__load_definitions()
    scheduled_jobs = self.__scheduler.get_jobs()

    # unschedule all scheduled jobs
    for scheduled_job in scheduled_jobs:
        jobs_removed += 1
        logger.info("[AlertScheduler] Unscheduling {0}".format(scheduled_job.name))
        self._collector.remove_by_uuid(scheduled_job.name)
        self.__scheduler.unschedule_job(scheduled_job)

    # for every definition, schedule a job
    for definition in definitions:
        jobs_scheduled += 1
        self.schedule_definition(definition)

    logger.info("[AlertScheduler] Reschedule Summary: {0} rescheduled, {1} unscheduled".format(
      str(jobs_scheduled), str(jobs_removed)))


  def collector(self):
    """ gets the collector for reporting to the server """
    return self._collector


  def __load_definitions(self):
    """
    Loads all alert definitions from a file. All clusters are stored in
    a single file.
    :return:
    """
    definitions = []

    all_commands = None
    alerts_definitions_path = os.path.join(self.cachedir, self.FILENAME)
    try:
      with open(alerts_definitions_path) as fp:
        all_commands = json.load(fp)
    except:
      logger.warning('[AlertScheduler] {0} not found or invalid. No alerts will be scheduled until registration occurs.'.format(alerts_definitions_path))
      return definitions

    for command_json in all_commands:
      clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
      hostName = '' if not 'hostName' in command_json else command_json['hostName']

      for definition in command_json['alertDefinitions']:
        alert = self.__json_to_callable(clusterName, hostName, definition)

        if alert is None:
          continue

        alert.set_helpers(self._collector, self._cluster_configuration)

        definitions.append(alert)

    return definitions


  def __json_to_callable(self, clusterName, hostName, json_definition):
    """
    converts the json that represents all aspects of a definition
    and makes an object that extends BaseAlert that is used for individual
    """
    alert = None

    try:
      source = json_definition['source']
      source_type = source.get('type', '')

      if logger.isEnabledFor(logging.DEBUG):
        logger.debug("[AlertScheduler] Creating job type {0} with {1}".format(source_type, str(json_definition)))


      if source_type == AlertSchedulerHandler.TYPE_METRIC:
        alert = MetricAlert(json_definition, source, self.config)
      elif source_type == AlertSchedulerHandler.TYPE_PORT:
        alert = PortAlert(json_definition, source)
      elif source_type == AlertSchedulerHandler.TYPE_SCRIPT:
        source['stacks_directory'] = self.stacks_dir
        source['common_services_directory'] = self.common_services_dir
        source['host_scripts_directory'] = self.host_scripts_dir
        alert = ScriptAlert(json_definition, source, self.config)
      elif source_type == AlertSchedulerHandler.TYPE_WEB:
        alert = WebAlert(json_definition, source, self.config)
      elif source_type == AlertSchedulerHandler.TYPE_RECOVERY:
        alert = RecoveryAlert(json_definition, source, self.recovery_manger)

      if alert is not None:
        alert.set_cluster(clusterName, hostName)

    except Exception,exception:
      logger.exception("[AlertScheduler] Unable to load an invalid alert definition. It will be skipped.")

    return alert
Example #46
        self.pin = pin
        self.period = period
        self.duration = duration
        # Call on() in a new thread.
        sched.add_interval_job(self.__on, seconds=self.period)

    def __on(self):
        log('%s On' % self.pin)
        off_time = datetime.now() + timedelta(0, self.duration, 0)
        # Schedule off() to run, in another thread, after 'off_time'.
        sched.add_date_job(self.__off, off_time)

    def __off(self):
        log('%s Off' % self.pin)


def main():
    Pwm("ding  ", 1, SECONDS_UP)
    Pwm("  DONG", 3, SECONDS_UP)
    sched.start()
    while True:
        pass


if __name__ == '__main__':
    sched = Scheduler(daemon=True)
    atexit.register(lambda: sched.shutdown(wait=False))

    main()

Example #47
class Scheduler(object):

    schedulr = None
    aps3 = True

    def __init__(self):

        #####
        # ApScheduler version detection
        try:
            # APScheduler 3.x implementation
            from apscheduler.schedulers.background import BackgroundScheduler
            self.schedulr = BackgroundScheduler()
            self.aps3 = True
        except ImportError:
            # APScheduler 2.x implementation
            from apscheduler.scheduler import Scheduler
            self.schedulr = Scheduler()
            self.aps3 = False

    def start(self):
        return self.schedulr.start()

    def get_job(self, name):
        if self.aps3:
            return self.schedulr.get_job(name)
        else:
            jobs = self.schedulr.get_jobs()
            for job in jobs:
                if job.name == name:
                    return job

            return None

    def add_job(self, func, trigger, args=None, kwargs=None, id=None, **trigger_args):
        if self.aps3:
            return self.schedulr.add_job(func, trigger, id=id, args=args, kwargs=kwargs, **trigger_args)
        else:
            if trigger == 'date':
                run_date = trigger_args['run_date']   # by intention: to raise if not set!
                del trigger_args['run_date']
                return self.schedulr.add_date_job(func, run_date, name=id, args=args, kwargs=kwargs)
            elif trigger == 'interval':
                # only partially implemented!!
                seconds = 0
                minutes = 0
                hours = 0
                if 'seconds' in trigger_args:
                    seconds = trigger_args.get('seconds', 0)
                    del trigger_args['seconds']

                if 'minutes' in trigger_args:
                    minutes = trigger_args.get('minutes', 0)
                    del trigger_args['minutes']

                if 'hours' in trigger_args:
                    hours = trigger_args.get('hours', 0)
                    del trigger_args['hours']

                return self.schedulr.add_interval_job(func, name=id,
                                                      hours=hours, minutes=minutes, seconds=seconds,
                                                      args=args, kwargs=kwargs)
            elif trigger == 'cron':
                # only partially implemented!!
                second = 0
                minute = 0
                hour = 0
                if 'second' in trigger_args:
                    second = trigger_args.get('second', 0)
                    del trigger_args['second']

                if 'minute' in trigger_args:
                    minute = trigger_args.get('minute', 0)
                    del trigger_args['minute']

                if 'hour' in trigger_args:
                    hour = trigger_args.get('hour', 0)
                    del trigger_args['hour']

                return self.schedulr.add_cron_job(func, name=id, hour=hour, minute=minute, second=second)
            else:
                raise NotImplementedError

    def shutdown(self):
        return self.schedulr.shutdown()

    # https://github.com/ralphwetzel/theonionbox/issues/19#issuecomment-263110953
    def check_tz(self):
        from tzlocal import get_localzone

        try:
            # APScheduler 3.x
            from apscheduler.util import astimezone

        except ImportError:
            # https://github.com/ralphwetzel/theonionbox/issues/31
            # APScheduler 2.x
            # import six
            from pytz import timezone, utc
            from datetime import tzinfo

            # copied here from apscheduler/util.py (version 3.4)
            # copyright Alex Grönholm
            # https://github.com/agronholm/apscheduler

            def astimezone(obj):
                """
                Interprets an object as a timezone.

                :rtype: tzinfo

                """
                # if isinstance(obj, six.string_types):
                if isinstance(obj, (str, unicode)):
                    return timezone(obj)
                if isinstance(obj, tzinfo):
                    if not hasattr(obj, 'localize') or not hasattr(obj, 'normalize'):
                        raise TypeError('Only timezones from the pytz library are supported')
                    if obj.zone == 'local':
                        raise ValueError(
                            'Unable to determine the name of the local timezone -- you must explicitly '
                            'specify the name of the local timezone. Please refrain from using timezones like '
                            'EST to prevent problems with daylight saving time. Instead, use a locale based '
                            'timezone name (such as Europe/Helsinki).')
                    return obj
                if obj is not None:
                    raise TypeError('Expected tzinfo, got %s instead' % obj.__class__.__name__)

        tz = get_localzone()
        try:
            res = astimezone(tz)
        except ValueError as ve:
            return False

        return True
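
A minimal usage sketch for the version-detecting wrapper above. This is not part of the original example: the callback, job ids and intervals are illustrative, and Scheduler here refers to the wrapper class defined above rather than APScheduler's own class.

from datetime import datetime, timedelta

def ping():
    print('ping')

s = Scheduler()   # the wrapper defined above
s.start()

# The same two calls work whether APScheduler 2.x or 3.x is installed:
s.add_job(ping, 'interval', id='ping_interval', seconds=10)
s.add_job(ping, 'date', id='ping_once',
          run_date=datetime.now() + timedelta(minutes=1))

job = s.get_job('ping_interval')
s.shutdown()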
Exemple #48
0
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in an :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Abinit Errors.

    Python exceptions are easy to detect and are usually due to a bug in abinitio or random errors such as IOError.
    The set of Abinit Errors is much much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. Abinitio tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically if

        #. The number of python exceptions is > MAX_NUM_PYEXC

        #. The number of Abinit Errors (i.e. the number of tasks whose status is S_ERROR) is > MAX_NUM_ERRORS

        #. The number of jobs launched becomes greater than (SAFETY_RATIO * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by mailto) every REMINDME_S seconds.
           If the mail cannot be sent, it will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.getenv("HOME"), ".abinit", "abipy")
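
    # Illustrative scheduler.yml sketch (the values below are assumptions for
    # demonstration, not defaults of this class) that from_user_config() would
    # pick up from the working directory or from USER_CONFIG_DIR:
    #
    #   seconds: 30
    #   mailto: user@example.com
    #   max_njobs_inqueue: 100
    #   max_nlaunch: 10
    #   verbose: 1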

    DEBUG = 0

    Error = PyFlowSchedulerError

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait
            days: number of days to wait
            hours: number of hours to wait
            minutes: number of minutes to wait
            seconds: number of seconds to wait
            verbose: (int) verbosity level
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue
            use_dynamic_manager: True if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. Default: False
            max_nlaunch: Maximum number of tasks launched by rapidfire (default -1 i.e. no limit)
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )

        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = kwargs.pop("use_dynamic_manager", False)
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)

        self.REMINDME_S = float(kwargs.pop("REMINDME_S", 4 * 24 * 3600))
        self.MAX_NUM_PYEXCS = int(kwargs.pop("MAX_NUM_PYEXCS", 0))
        self.MAX_NUM_ABIERRS = int(kwargs.pop("MAX_NUM_ABIERRS", 0))
        self.SAFETY_RATIO = int(kwargs.pop("SAFETY_RATIO", 5))
        #self.MAX_ETIME_S = kwargs.pop("MAX_ETIME_S", )
        self.max_nlaunch = kwargs.pop("max_nlaunch", -1)

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if has_sched_v3:
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = collections.deque(maxlen=self.MAX_NUM_PYEXCS + 10)

        # Used to push additional info during the execution.
        self.history = collections.deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "r") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an instance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)

        return cls(**yaml.safe_load(stream))

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            RuntimeError if file is not found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        err_msg = "Cannot locate %s neither in current directory nor in %s" % (
            cls.YAML_FILE, path)
        raise cls.Error(err_msg)

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append

        app("Scheduler options: %s" % str(self.sched_options))
        app(80 * "=")
        app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid

        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        return self._flow

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """Add an :class:`Flow` flow to the scheduler."""
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        pid_file = os.path.join(flow.workdir, "_PyFlowScheduler.pid")

        if os.path.isfile(pid_file):
            flow.show_status()

            err_msg = ("""
                pid_file %s already exists
                There are two possibilities:

                   1) There's another instance of PyFlowScheduler running
                   2) The previous scheduler didn't exit in a clean way

                To solve case 1:
                   Kill the previous scheduler (use 'kill pid' where pid is the number reported in the file)
                   Then you can restart the new scheduler.

                To solve case 2:
                   Remove the pid_file and restart the scheduler.

                Exiting""" % pid_file)

            raise self.Error(err_msg)

        with open(pid_file, "w") as fh:
            fh.write(str(self.pid))

        self._pid_file = pid_file
        self._flow = flow

    def start(self):
        """
        Starts the scheduler in a new thread. Returns True if success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            print(errors)
            self.exceptions.append(errors)
            return False

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(
                msg=
                "Error while trying to run the flow for the first time!\n %s" %
                self.exceptions)
            return False

        self.sched.start()
        return True

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow to change the manager at run-time
        if self.use_dynamic_manager:
            from pymatgen.io.abinitio.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = flow.get_njobs_in_queue()
        if nqjobs is None:
            nqjobs = 0
            print('Cannot get njobs_inqueue')

        if nqjobs >= self.max_njobs_inqueue:
            print("Too many jobs in the queue, returning")
            return

        if self.max_nlaunch == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs,
                              self.max_nlaunch)

        # check status
        flow.check_status()
        flow.show_status()

        # fix problems
        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        print("Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return
            except Exception:
                excs.append(straceback())

        # moved here from within rapid fire ...
        # fix only prepares for restarting, and sets to ready
        flow.fix_critical()

        # update database
        flow.pickle_dump()

        #if self.num_restarts == self.max_num_restarts:
        #    info_msg = "Reached maximum number of restarts. Cannot restart anymore Returning"
        #    logger.info(info_msg)
        #    self.history.append(info_msg)
        #    return 1

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch,
                                                 sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" %
                      (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" %
                            "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            self.exceptions.append(straceback())
            self.shutdown(msg="Exception raised in callback!")

    def _callback(self):
        """The actual callback."""
        if self.DEBUG:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" %
                  get_open_fds())
        #print('before _runem_all in _callback')

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if self.verbose:
            print("all_ok", all_ok)

        if all_ok:
            self.shutdown(
                msg=
                "All tasks have reached S_OK. Will shutdown the scheduler and exit"
            )

        # Handle failures.
        err_msg = ""

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.REMINDME_S:
            self.num_reminders += 1
            msg = (
                "Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s "
                % (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += (
                    "\nThe scheduler tried to send an e-mail to remind the user\n"
                    + " but send_email returned %d. Aborting now" % retcode)
                err_msg += msg

        #if delta_etime.total_seconds() > self.MAX_ETIME_S:
        #    err_msg += "\nExceeded MAX_ETIME_S %s. Will shutdown the scheduler and exit" % self.MAX_ETIME_S

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.MAX_NUM_PYEXCS:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.MAX_NUM_PYEXCS)
            err_msg += boxed(msg)

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.SAFETY_RATIO * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.MAX_NUM_ABIERRS:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.MAX_NUM_ABIERRS)
            err_msg += boxed(msg)

        # Count the number of tasks with status == S_UNCONVERGED.
        #if self.flow.num_unconverged_tasks:
        #    # TODO: this is needed to avoid deadlocks, automatic restarting is not available yet
        #    msg = ("Found %d unconverged tasks."
        #           "Automatic restarting is not available yet. Will shutdown the scheduler and exit"
        #           % self.flow.num_unconverged_tasks)
        #    err_msg += boxed(msg)

        #deadlocks = self.detect_deadlocks()
        #if deadlocks:
        #    msg = ("Detected deadlocks in flow. Will shutdown the scheduler and exit"
        #           % self.flow.num_unconverged_tasks)
        #    err_msg += boxed(msg)

        if err_msg:
            # Something wrong. Quit
            self.shutdown(err_msg)

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError:
            logger.critical("Could not remove pid_file")
            pass

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            #if False and self.flow.has_db:
            #    try:
            #        self.flow.db_insert()
            #    except Exception:
            #         logger.critical("MongoDb insertion failed.")

            self.history.append("Completed on %s" % time.asctime())
            self.history.append("Elapsed time %s" % self.get_delta_etime())

            if self.DEBUG:
                print(">>>>> shutdown: Number of open file descriptors: %s" %
                      get_open_fds())

            retcode = self.send_email(msg)
            if self.DEBUG:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "w") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            print('this should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            self.sched.print_jobs()
            for job in self.sched.get_jobs():
                self.sched.unschedule_job(job)
            self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 if success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on %s" % time.ctime(self.start_time))
        app("Completed on %s" % time.asctime())
        app("Elapsed time %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" %
            self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag,
                        text=strio.getvalue(),
                        mailto=self.mailto)
class AlertSchedulerHandler():
  FILENAME = 'definitions.json'
  TYPE_PORT = 'PORT'
  TYPE_METRIC = 'METRIC'
  TYPE_SCRIPT = 'SCRIPT'
  TYPE_WEB = 'WEB'

  APS_CONFIG = { 
    'threadpool.core_threads': 3,
    'coalesce': True,
    'standalone': False
  }

  def __init__(self, cachedir, stacks_dir, common_services_dir, host_scripts_dir,
      cluster_configuration, config, in_minutes=True):

    self.cachedir = cachedir
    self.stacks_dir = stacks_dir
    self.common_services_dir = common_services_dir
    self.host_scripts_dir = host_scripts_dir

    self._cluster_configuration = cluster_configuration
    
    if not os.path.exists(cachedir):
      try:
        os.makedirs(cachedir)
      except:
        logger.critical("[AlertScheduler] Could not create the cache directory {0}".format(cachedir))

    self._collector = AlertCollector()
    self.__scheduler = Scheduler(AlertSchedulerHandler.APS_CONFIG)
    self.__in_minutes = in_minutes
    self.config = config

    # register python exit handler
    atexit.register(self.exit_handler)


  def exit_handler(self):
    """
    Exit handler
    """
    self.stop()


  def update_definitions(self, heartbeat):
    """
    Updates the persisted alert definitions JSON.
    :param heartbeat:
    :return:
    """
    if 'alertDefinitionCommands' not in heartbeat:
      logger.warning("There are no alert definition commands in the heartbeat; unable to update definitions")
      return

    # prune out things we don't want to store
    alert_definitions = []
    for command in heartbeat['alertDefinitionCommands']:
      command_copy = command.copy()

      # no need to store these since we always use the in-memory cached values
      if 'configurations' in command_copy:
        del command_copy['configurations']

      alert_definitions.append(command_copy)

    # write out the new definitions
    with open(os.path.join(self.cachedir, self.FILENAME), 'w') as f:
      json.dump(alert_definitions, f, indent=2)

    # reschedule only the jobs that have changed
    self.reschedule()


  def __make_function(self, alert_def):
    return lambda: alert_def.collect()


  def start(self):
    """ loads definitions from file and starts the scheduler """

    if self.__scheduler is None:
      return

    if self.__scheduler.running:
      self.__scheduler.shutdown(wait=False)
      self.__scheduler = Scheduler(AlertSchedulerHandler.APS_CONFIG)

    alert_callables = self.__load_definitions()

    # schedule each definition
    for _callable in alert_callables:
      self.schedule_definition(_callable)

    logger.info("[AlertScheduler] Starting {0}; currently running: {1}".format(
      str(self.__scheduler), str(self.__scheduler.running)))

    self.__scheduler.start()


  def stop(self):
    if not self.__scheduler is None:
      self.__scheduler.shutdown(wait=False)
      self.__scheduler = Scheduler(AlertSchedulerHandler.APS_CONFIG)

    logger.info("[AlertScheduler] Stopped the alert scheduler.")

  def reschedule(self):
    """
    Removes jobs that are scheduled where their UUID no longer is valid.
    Schedules jobs where the definition UUID is not currently scheduled.
    """
    jobs_scheduled = 0
    jobs_removed = 0

    definitions = self.__load_definitions()
    scheduled_jobs = self.__scheduler.get_jobs()

    # for every scheduled job, see if its UUID is still valid
    for scheduled_job in scheduled_jobs:
      uuid_valid = False

      for definition in definitions:
        definition_uuid = definition.get_uuid()
        if scheduled_job.name == definition_uuid:
          uuid_valid = True
          break

      # jobs without valid UUIDs should be unscheduled
      if uuid_valid == False:
        jobs_removed += 1
        logger.info("[AlertScheduler] Unscheduling {0}".format(scheduled_job.name))
        self._collector.remove_by_uuid(scheduled_job.name)
        self.__scheduler.unschedule_job(scheduled_job)

    # for every definition, determine if there is a scheduled job
    for definition in definitions:
      definition_scheduled = False
      for scheduled_job in scheduled_jobs:
        definition_uuid = definition.get_uuid()
        if definition_uuid == scheduled_job.name:
          definition_scheduled = True
          break

      # if no jobs are found with the definitions UUID, schedule it
      if definition_scheduled == False:
        jobs_scheduled += 1
        self.schedule_definition(definition)

    logger.info("[AlertScheduler] Reschedule Summary: {0} rescheduled, {1} unscheduled".format(
        str(jobs_scheduled), str(jobs_removed)))


  def reschedule_all(self):
    """
    Unschedules every currently scheduled job, then schedules a job for every
    loaded definition.
    """
    jobs_scheduled = 0
    jobs_removed = 0

    definitions = self.__load_definitions()
    scheduled_jobs = self.__scheduler.get_jobs()

    # unschedule all scheduled jobs
    for scheduled_job in scheduled_jobs:
        jobs_removed += 1
        logger.info("[AlertScheduler] Unscheduling {0}".format(scheduled_job.name))
        self._collector.remove_by_uuid(scheduled_job.name)
        self.__scheduler.unschedule_job(scheduled_job)

    # for every definition, schedule a job
    for definition in definitions:
        jobs_scheduled += 1
        self.schedule_definition(definition)

    logger.info("[AlertScheduler] Reschedule Summary: {0} rescheduled, {1} unscheduled".format(
      str(jobs_scheduled), str(jobs_removed)))


  def collector(self):
    """ gets the collector for reporting to the server """
    return self._collector


  def __load_definitions(self):
    """
    Loads all alert definitions from a file. All clusters are stored in
    a single file.
    :return:
    """
    definitions = []

    all_commands = None
    alerts_definitions_path = os.path.join(self.cachedir, self.FILENAME)
    try:
      with open(alerts_definitions_path) as fp:
        all_commands = json.load(fp)
    except:
      logger.warning('[AlertScheduler] {0} not found or invalid. No alerts will be scheduled until registration occurs.'.format(alerts_definitions_path))
      return definitions

    for command_json in all_commands:
      clusterName = '' if not 'clusterName' in command_json else command_json['clusterName']
      hostName = '' if not 'hostName' in command_json else command_json['hostName']

      for definition in command_json['alertDefinitions']:
        alert = self.__json_to_callable(clusterName, hostName, definition)

        if alert is None:
          continue

        alert.set_helpers(self._collector, self._cluster_configuration)

        definitions.append(alert)

    return definitions


  def __json_to_callable(self, clusterName, hostName, json_definition):
    """
    converts the json that represents all aspects of a definition
    and makes an object that extends BaseAlert that is used for individual alerts.
    """
    source = json_definition['source']
    source_type = source.get('type', '')

    if logger.isEnabledFor(logging.DEBUG):
      logger.debug("[AlertScheduler] Creating job type {0} with {1}".format(source_type, str(json_definition)))

    alert = None

    if source_type == AlertSchedulerHandler.TYPE_METRIC:
      alert = MetricAlert(json_definition, source)
    elif source_type == AlertSchedulerHandler.TYPE_PORT:
      alert = PortAlert(json_definition, source)
    elif source_type == AlertSchedulerHandler.TYPE_SCRIPT:
      source['stacks_directory'] = self.stacks_dir
      source['common_services_directory'] = self.common_services_dir
      source['host_scripts_directory'] = self.host_scripts_dir
      alert = ScriptAlert(json_definition, source, self.config)
    elif source_type == AlertSchedulerHandler.TYPE_WEB:
      alert = WebAlert(json_definition, source, self.config)

    if alert is not None:
      alert.set_cluster(clusterName, hostName)

    return alert


  def schedule_definition(self,definition):
    """
    Schedule a definition (callable). Scheduled jobs are given the UUID
    as their name so that they can be identified later on.
    <p/>
    This function can be called with a definition that is disabled; it will
    simply NOOP.
    """
    # NOOP if the definition is disabled; don't schedule it
    if not definition.is_enabled():
      logger.info("[AlertScheduler] The alert {0} with UUID {1} is disabled and will not be scheduled".format(
          definition.get_name(),definition.get_uuid()))
      return

    job = None

    if self.__in_minutes:
      job = self.__scheduler.add_interval_job(self.__make_function(definition),
        minutes=definition.interval())
    else:
      job = self.__scheduler.add_interval_job(self.__make_function(definition),
        seconds=definition.interval())

    # although the documentation states that Job(kwargs) takes a name
    # key/value pair, it does not actually set the name; do it manually
    if job is not None:
      job.name = definition.get_uuid()

    logger.info("[AlertScheduler] Scheduling {0} with UUID {1}".format(
      definition.get_name(), definition.get_uuid()))


  def get_job_count(self):
    """
    Gets the number of jobs currently scheduled. This is mainly used for
    test verification of scheduling.
    """
    if self.__scheduler is None:
      return 0

    return len(self.__scheduler.get_jobs())


  def execute_alert(self, execution_commands):
    """
    Executes an alert immediately, ignoring any scheduled jobs. The existing
    jobs remain untouched. The result of this is stored in the alert
    collector for transmission during the next heartbeat
    """
    if self.__scheduler is None or execution_commands is None:
      return

    for execution_command in execution_commands:
      try:
        alert_definition = execution_command['alertDefinition']

        clusterName = '' if not 'clusterName' in execution_command else execution_command['clusterName']
        hostName = '' if not 'hostName' in execution_command else execution_command['hostName']

        alert = self.__json_to_callable(clusterName, hostName, alert_definition)

        if alert is None:
          continue

        logger.info("[AlertScheduler] Executing on-demand alert {0} ({1})".format(alert.get_name(),
            alert.get_uuid()))

        alert.set_helpers(self._collector, self._cluster_configuration)
        alert.collect()
      except:
        logger.exception("[AlertScheduler] Unable to execute the alert outside of the job scheduler")
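
For reference, a hedged sketch of the structures the methods above consume. Only the keys actually read in this code ('alertDefinitionCommands', 'alertDefinitions', 'alertDefinition', 'clusterName', 'hostName' and the nested 'source'/'type') are implied by it; every other field is a made-up illustration, and a real definition carries much more data (uuid, name, interval, enabled flag, and so on).

heartbeat = {
    'alertDefinitionCommands': [{
        'clusterName': 'c1',
        'hostName': 'host1.example.com',
        'configurations': {},                 # stripped out by update_definitions()
        'alertDefinitions': [
            {'source': {'type': 'PORT'}}      # minimal, illustrative definition
        ]
    }]
}

execution_commands = [{
    'clusterName': 'c1',
    'hostName': 'host1.example.com',
    'alertDefinition': {'source': {'type': 'PORT'}}
}]

# handler.update_definitions(heartbeat)
# handler.execute_alert(execution_commands)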
Exemple #50
0
class TestOfflineScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')

    def test_add_tentative_job(self):
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore='dummy')
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_configure_jobstore(self):
        conf = {
            'apscheduler.jobstore.ramstore.class':
            'apscheduler.jobstores.ram_store:RAMJobStore'
        }
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore('ramstore')

    def test_shutdown_offline(self):
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        global_options = {'misfire_grace_time': '2', 'daemonic': 'false'}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        global_options = {
            'apscheduler.misfire_grace_time': 2,
            'apscheduler.daemonic': False
        }
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
Exemple #51
0
def tag_check_task(tag, min=1):
    cron = Scheduler(daemon=True)
    cron.start()

    @cron.interval_schedule(minutes=min)
    def job_function():
        now_day = datetime.now().date()
        tag_db = Tags().get_or_create(tag, now_day)

        tag_list = tag_filter(tag, 100)
        tag_list.reverse()
        new_tag = {}

        for tags in tag_list:
            _create_time = datetime.strptime(
                tags['created'], '%Y-%m-%dT%H:%M:%S') + timedelta(hours=3)
            if now_day.day == _create_time.day:
                new_tag[tags['id']] = tags

        post_list = list(new_tag.keys())
        post_list.sort()

        if not tag_db.last:
            tag_db.last = post_list[-1]
            post_ids = tag_db.last
            db.session.add(tag_db)
            db.session.commit()
        else:
            try:
                post_list = post_list[post_list.index(tag_db.last) + 1:]
            except:
                last_post = Posts().query.order_by('-id').first()
                tag_db.last = None
                db.session.delete(last_post)
                db.session.add(tag_db)
                db.session.commit()
                last_tag_get_update()
                return

            if not len(post_list):
                return
        for post_db in post_list:

            if not Posts.query.filter_by(post_id=post_db,
                                         tag=tag_db).first() or False:
                _post = Posts()
                _post.post_id = post_db
                _post.author = new_tag[post_db]['author']
                _post.title = new_tag[post_db]['title']
                _post.url = new_tag[post_db]['url']
                _post.tag = tag_db
                _post.date = datetime.strptime(
                    new_tag[post_db]['created'],
                    '%Y-%m-%dT%H:%M:%S') + timedelta(hours=3)
                db.session.add(_post)
                db.session.commit()

        tag_db.last = post_db
        db.session.add(tag_db)
        db.session.commit()

    atexit.register(lambda: cron.shutdown(wait=False))
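
The @cron.interval_schedule(...) decorator used above is the APScheduler 2.x decorator form of add_interval_job(). A sketch of the equivalent explicit registration (the job body is elided; same APScheduler 2.x API assumed):

import atexit
from apscheduler.scheduler import Scheduler   # APScheduler 2.x

cron = Scheduler(daemon=True)
cron.start()

def job_function():
    pass  # same body as the decorated function above

# explicit registration instead of the @cron.interval_schedule decorator
cron.add_interval_job(job_function, minutes=1)
atexit.register(lambda: cron.shutdown(wait=False))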
Exemple #52
0
class PeetsMediaTranslator(DatagramProtocol):
  ''' A translator protocol to relay local udp traffic to NDN and remote NDN traffic to local udp.
  This class also implements the strategy for fetching remote data.
  If the remote seq is unknown, use a short prefix without seq to probe;
  otherwise use a naive leaking-bucket like method to fetch the remote data

  We separate the fetching of the media stream and the fetching of the control stream (RTCP, STUN, etc).
  '''
  __logger = Logger.get_logger('PeetsMediaTranslator')
  def __init__(self, factory, pipe_size):
    '''
    Args:
      factory (PeetsServerFactory) : the factory that stores necessary information about the local user
      pipe_size (int) : the pipeline size for fetching the remote media stream. Pipelining allows us to minimize impact of the interest-data roundtrip delay.
    '''
    self.factory = factory
    self.pipe_size = pipe_size
    self.factory.set_local_status_callback(self.toggle_scheduler)
    # here we use two sockets, because the pending interests sent by a socket cannot be satisfied
    # by the content published later by the same socket
    self.ccnx_int_socket = CcnxSocket()
    self.ccnx_int_socket.start()
    self.ccnx_con_socket = CcnxSocket()
    self.ccnx_con_socket.start()
    self.stream_closure = PeetsClosure(msg_callback = self.stream_callback, timeout_callback = self.stream_timeout_callback)
    self.probe_closure = PeetsClosure(msg_callback = self.probe_callback, timeout_callback = self.probe_timeout_callback)
    self.ctrl_probe_closure = PeetsClosure(msg_callback = self.ctrl_probe_callback, timeout_callback = self.ctrl_probe_timeout_callback)
    self.scheduler = None
    self.peets_status = None
    
  def toggle_scheduler(self, status):
    '''Start or stop the scheduler for periodic jobs.

    Args:
      status (str): either 'Running' or 'Stopped'
    '''
    if status == 'Running':
      self.peets_status = 'Running'
      self.scheduler = Scheduler()
      self.scheduler.start()
      self.scheduler.add_interval_job(self.fetch_media, seconds = 0.01, max_instances = 2)
    elif status == 'Stopped':
      self.peets_status = 'Stopped'
      for job in self.scheduler.get_jobs():
        self.scheduler.unschedule_job(job)
      self.scheduler.shutdown(wait = True)
      self.scheduler = None
       
  def datagramReceived(self, data, (host, port)):
    '''Intercept the WebRTC traffic from the local front end and relay it to NDN

    Args:
      data (bytes) : the UDP data
      host (str) : the IP of the source
      port (int) : the port of the source

    1. Differentiate RTP vs RTCP
    RTCP: packet type (PT) = 200 - 208
    SR (sender report)        200
    RR (receiver report)      201
    SDES (source description) 202
    BYE (goodbye)             203
    App (application-defined) 204
    other types go until      208
    RFC 5761 (implemented by WebRTC) makes sure that RTP's PT field
    plus M field (which is equal to the PT field in RTCP) would not conflict

    2. Differentiate STUN vs RTP & RTCP
    STUN: the most significant 2 bits of every STUN msg MUST be zeros (RFC 5389)
    RTP & RTCP: version bits (2 bits) value equals 2

    Note:
    Tried to fake a STUN request and response so that we don't have to relay STUN msgs over NDN, but failed.
    Using the username exchanged in the SDPs for STUN worked for a while (albeit with a significantly higher
    rate of STUN message exchanges) but then mysteriously stopped working, so for now we still send STUN over NDN.

    Note 2:
    We only publish one media stream from the local user (with the default offer SDP). We publish RTCP and STUN for each PeerConnection though.
    '''
    # mask to test most significant 2 bits
    msg = bytearray(data)
    c = self.factory.client

    if msg[0] & 0xC0 == 0 or msg[1] > 199 and msg[1] < 209:
      try:
        ctrl_seq = c.ctrl_seqs[port]
        cid = c.remote_cids[port]
        # RTCP and STUN are per peer connection; the cid of the remote user identifies the peer connection so that the remote user knows which one to fetch
        name = c.local_user.get_ctrl_prefix() + '/' + cid + '/' + str(ctrl_seq)
        c.ctrl_seqs[port] = ctrl_seq + 1
        self.ccnx_con_socket.publish_content(name, data)
      except KeyError:
        pass

    elif c.media_source_port == port:
      # only publish one media stream
      name = c.local_user.get_media_prefix() + '/' + str(c.local_seq)
      c.local_seq += 1
      self.ccnx_con_socket.publish_content(name, data)
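
The first-byte/second-byte tests in datagramReceived() can be sanity-checked in isolation. A small sketch (the sample byte strings below are made up, not captured traffic):

def classify(data):
    msg = bytearray(data)
    if msg[0] & 0xC0 == 0:            # two most significant bits zero -> STUN
        return 'STUN'
    if 199 < msg[1] < 209:            # RTCP packet types 200-208
        return 'RTCP'
    return 'RTP'

# 0x00 0x01 : STUN binding request (leading bits are 00)
# 0x80 0xC8 : version 2, packet type 200 -> RTCP sender report
# 0x80 0x60 : version 2, ordinary payload type -> RTP
for pkt in (b'\x00\x01', b'\x80\xc8', b'\x80\x60'):
    print(classify(pkt))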
Exemple #53
0
class Subject:
    def __init__(self):
        self.gender = 0
        self.mood = []
        self.scheduler = None
        self.name = "(no collaborator)"

    def evaluate(self):
        current = int(time.time())

        timeView = list(
            filter(lambda element: current - CONST_TIME <= element[0],
                   self.mood))
        happyView = list(
            filter(lambda element: element[1] == "happy", timeView))

        if (len(timeView) == 0):
            self.unload()
        elif len(happyView) / len(timeView) * 100 < CONST_TREESHOLD:
            self.unload()

    def unload(self):
        urllib.request.urlopen(CONST_BUZZ).read()

    def start(self):
        self.scheduler = Scheduler()
        self.scheduler.start()
        self.scheduler.add_interval_job(self.evaluate, seconds=CONST_TIME)

    def stop(self):
        self.scheduler.shutdown()

    def add(self, labels, mood, gender):
        labeledMoods = np.array(list(zip(labels, mood)))
        timestamp = int(time.time())

        self.gender = gender

        for labeledMood in labeledMoods:
            self.mood.append((timestamp, labeledMood[0], labeledMood[1]))

    def persist(self, mood, gender, timestamp):
        mongo = MongoClient(MONGO_CONNECTION)
        database = mongo[MONGO_DATABASE]

        post = {
            "collaborator": self.name,
            "mood": mood,
            "gender": gender,
            "timestamp": timestamp
        }

        database[COLLECTION].insert_one(post)

    def addMood(self, mood, gender):
        timestamp = int(time.time())

        self.gender = gender
        self.mood.append((timestamp, mood, 1))

        self.persist(mood, gender, timestamp)
__author__ = 'wenychan'

# For version apscheduler 2.1.2

import time
import atexit
from apscheduler.scheduler import Scheduler


# Start the scheduler
sched = Scheduler(daemonic=True)
# register an exit handler
atexit.register(lambda: sched.shutdown())
sched.start()

def job_function():
    print "Hello World"
    time.sleep(3)
    print "exit Hello World"

# Schedule job_function at several short intervals
sched.add_interval_job(job_function, seconds=1, name='test_job1', max_instances=1)
sched.add_interval_job(job_function, seconds=3, name='test_job2', max_instances=1, max_runs=1)
sched.add_interval_job(job_function, seconds=5, name='test_job3', max_instances=1)
sched.add_interval_job(job_function, seconds=6, name='test_job4', max_instances=1)
sched.add_interval_job(job_function, seconds=3, name='test_job5', max_instances=1)
sched.add_interval_job(job_function, seconds=1, name='test_job6', max_instances=1)
print len(sched.get_jobs())
time.sleep(5)
print len(sched.get_jobs())
time.sleep(50)
Exemple #55
0
def init_scheduler():
    scheduler = Scheduler()
    atexit.register(lambda: scheduler.shutdown(wait=False))
    return scheduler
Exemple #56
0
    mC(auth['account_sid'],auth['auth_token'],targetNumber,ngrokURL)

    print "Call Queued Up"

    time.sleep(60) # Twilio default call timeout is 60 sec
    #kill = raw_input("Kill proceses? (y/n):")
    #while kill in ['n']:
    #    kill = raw_input("Kill proceses? (y/n):")

    print "Killing processes"
    ngrok.kill() # kill ngrok process
    os.system('killall -KILL Python') # kill web server and this thread

# Start the scheduler
sched = Scheduler()
sched.start()

# Convert UTC target to local time
localTarget = utc2local(datetime.strptime(targetTimeUTC,'%Y-%m-%d %H:%M:%S'))

job = sched.add_date_job(main, localTarget, [filename,targetNumber,ngrokURL])

sched.print_jobs()

print 'Current time is %s' % datetime.now()

# Keep scheduler alive until you hit Ctrl+C!
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    sched.shutdown()
Exemple #57
0
class LocalScheduler(object):
    scheduler_registry = {}
    _lockdown = False

    @classmethod
    def get(cls, name):
        return cls.scheduler_registry[name]

    @classmethod
    def get_all(cls):
        return cls.scheduler_registry.values()

    @classmethod
    def shutdown_all(cls):
        for scheduler in cls.scheduler_registry.values():
            scheduler.stop()

    @classmethod
    def lockdown(cls):
        cls._lockdown = True

    @classmethod
    def clear_all(cls):
        for scheduler in cls.scheduler_registry.values():
            scheduler.clear()

    def __init__(self, name, label=None):
        self.scheduled_jobs = {}
        self._scheduler = None
        self.name = name
        self.label = label
        self.__class__.scheduler_registry[self.name] = self

    def start(self):
        logger.info('Starting scheduler: %s' % self.name)
        if not self.__class__._lockdown:
            self._scheduler = OriginalScheduler()
            for job in self.scheduled_jobs.values():
                self._schedule_job(job)

            self._scheduler.start()
        else:
            logger.debug('lockdown in effect')

    def stop(self):
        if self._scheduler:
            self._scheduler.shutdown()
            del self._scheduler
            self._scheduler = None

    @property
    def running(self):
        if self._scheduler:
            return self._scheduler.running
        else:
            return False

    def clear(self):
        for job in self.scheduled_jobs.values():
            self.stop_job(job)

    def stop_job(self, job):
        if self.running:
            self._scheduler.unschedule_job(job._job)

        del self.scheduled_jobs[job.name]
        job.scheduler = None

    def _schedule_job(self, job):
        if isinstance(job, IntervalJob):
            job._job = self._scheduler.add_interval_job(
                job.function, *job.args, **job.kwargs)
        elif isinstance(job, DateJob):
            job._job = self._scheduler.add_date_job(job.function, *job.args,
                                                    **job.kwargs)
        elif isinstance(job, CronJob):
            job._job = self._scheduler.add_cron_job(job.function, *job.args,
                                                    **job.kwargs)
        else:
            raise UnknownJobClass

    def add_job(self, job):
        logger.debug('adding job')
        if job.scheduler or job.name in self.scheduled_jobs.keys():
            raise AlreadyScheduled

        if self._scheduler:
            self._schedule_job(job)

        job.scheduler = self
        self.scheduled_jobs[job.name] = job

    def add_interval_job(self, name, label, function, *args, **kwargs):
        job = IntervalJob(name=name,
                          label=label,
                          function=function,
                          *args,
                          **kwargs)
        self.add_job(job)
        return job

    def add_date_job(self, name, label, function, *args, **kwargs):
        job = DateJob(name=name,
                      label=label,
                      function=function,
                      *args,
                      **kwargs)
        self.add_job(job)
        return job

    def add_cron_job(self, name, label, function, *args, **kwargs):
        job = CronJob(name=name,
                      label=label,
                      function=function,
                      *args,
                      **kwargs)
        self.add_job(job)
        return job

    def get_job_list(self):
        return self.scheduled_jobs.values()

    def get_job_by_name(self, name):
        try:
            return self.scheduled_jobs[name]
        except KeyError:
            raise UnknownJob

    def __unicode__(self):
        return unicode(self.label or self.name)
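
A hedged usage sketch for the registry-based wrapper above. It assumes that IntervalJob stores the given name and forwards its extra keyword arguments (e.g. seconds=...) to APScheduler's add_interval_job; the scheduler and job names are illustrative:

def heartbeat():
    print('heartbeat')

maintenance = LocalScheduler('maintenance', label='Maintenance jobs')
maintenance.add_interval_job('heartbeat', 'Heartbeat job', heartbeat, seconds=30)
maintenance.start()

# any scheduler (and its jobs) can later be looked up through the class registry
same = LocalScheduler.get('maintenance')
job = same.get_job_by_name('heartbeat')

LocalScheduler.shutdown_all()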
Exemple #58
0
        end_time = datetime.strptime(event['event_end_time'], time_format)
        event_duration = (end_time - start_time).seconds
        sched.add_cron_job(led_chain.auto_transition,
                           hour=start_hour,
                           minute=start_minute,
                           second=start_second,
                           name=event['event_name'],
                           kwargs={
                               'state': event['event_state'],
                               'transition_duration':
                               event['transition_duration']
                           },
                           misfire_grace_time=event_duration)

    app.logger.debug("Startup job list contains : %s" % sched.get_jobs())

    try:
        app.run(host='0.0.0.0',
                port=int(app_config.get("general", "web_port")),
                use_reloader=False)
    except KeyboardInterrupt:
        app.logger.warning("Caught keyboard interrupt.  Shutting down ...")

    app.logger.info("Calling shutdown on led chain")
    led_chain.shutdown()
    app.logger.info("Calling shutdown on scheduler")
    sched.shutdown(wait=False)
    app.logger.info("Shutting down logger and exiting ...")
    logging.shutdown()
    exit(0)
Exemple #59
0
class PyFlowScheduler(object):
    """
    This object schedules the submission of the tasks in a :class:`Flow`.
    There are two types of errors that might occur during the execution of the jobs:

        #. Python exceptions
        #. Errors in the ab-initio code

    Python exceptions are easy to detect and are usually due to a bug in the python code or random errors such as IOError.
    The set of errors in the ab-initio is much much broader. It includes wrong input data, segmentation
    faults, problems with the resource manager, etc. The flow tries to handle the most common cases
    but there's still a lot of room for improvement.
    Note, in particular, that `PyFlowScheduler` will shut down automatically in the following cases:

        #. The number of python exceptions is > max_num_pyexcs

        #. The number of task errors (i.e. the number of tasks whose status is S_ERROR) is > max_num_abierrs

        #. The number of jobs launched becomes greater than (`safety_ratio` * total_number_of_tasks).

        #. The scheduler will send an email to the user (specified by `mailto`) every `remindme_s` seconds.
           If the mail cannot be sent, the scheduler will shut down automatically.
           This check prevents the scheduler from being trapped in an infinite loop.
    """
    # Configuration file.
    YAML_FILE = "scheduler.yml"
    USER_CONFIG_DIR = os.path.join(os.path.expanduser("~"), ".abinit", "abipy")

    Error = PyFlowSchedulerError

    @classmethod
    def autodoc(cls):
        i = cls.__init__.__doc__.index("Args:")
        return cls.__init__.__doc__[i + 5:]

    def __init__(self, **kwargs):
        """
        Args:
            weeks: number of weeks to wait (DEFAULT: 0).
            days: number of days to wait (DEFAULT: 0).
            hours: number of hours to wait (DEFAULT: 0).
            minutes: number of minutes to wait (DEFAULT: 0).
            seconds: number of seconds to wait (DEFAULT: 0).
            mailto: The scheduler will send an email to `mailto` every `remindme_s` seconds.
                (DEFAULT: None i.e. not used).
            verbose: (int) verbosity level. (DEFAULT: 0)
            use_dynamic_manager: "yes" if the :class:`TaskManager` must be re-initialized from
                file before launching the jobs. (DEFAULT: "no")
            max_njobs_inqueue: Limit on the number of jobs that can be present in the queue. (DEFAULT: 200)
            remindme_s: The scheduler will send an email to the user specified by `mailto` every `remindme_s` seconds.
                (int, DEFAULT: 1 day).
            max_num_pyexcs: The scheduler will exit if the number of python exceptions is > max_num_pyexcs
                (int, DEFAULT: 0)
            max_num_abierrs: The scheduler will exit if the number of errored tasks is > max_num_abierrs
                (int, DEFAULT: 0)
            safety_ratio: The scheduler will exit if the number of jobs launched becomes greater than
               `safety_ratio` * total_number_of_tasks_in_flow. (int, DEFAULT: 5)
            max_nlaunches: Maximum number of tasks launched in a single iteration of the scheduler.
                (DEFAULT: -1 i.e. no limit)
            debug: Debug level. Use 0 for production (int, DEFAULT: 0)
            fix_qcritical: "yes" if the launcher should try to fix QCritical Errors (DEFAULT: "yes")
            rmflow: If "yes", the scheduler will remove the flow directory if the calculation
                completed successfully. (DEFAULT: "no")
            killjobs_if_errors: "yes" if the scheduler should try to kill all the running jobs
                before exiting due to an error. (DEFAULT: "yes")
        """
        # Options passed to the scheduler.
        self.sched_options = AttrDict(
            weeks=kwargs.pop("weeks", 0),
            days=kwargs.pop("days", 0),
            hours=kwargs.pop("hours", 0),
            minutes=kwargs.pop("minutes", 0),
            seconds=kwargs.pop("seconds", 0),
            #start_date=kwargs.pop("start_date", None),
        )
        if all(not v for v in self.sched_options.values()):
            raise self.Error("Wrong set of options passed to the scheduler.")

        self.mailto = kwargs.pop("mailto", None)
        self.verbose = int(kwargs.pop("verbose", 0))
        self.use_dynamic_manager = as_bool(
            kwargs.pop("use_dynamic_manager", False))
        self.max_njobs_inqueue = kwargs.pop("max_njobs_inqueue", 200)
        self.max_ncores_used = kwargs.pop("max_ncores_used", None)
        self.contact_resource_manager = as_bool(
            kwargs.pop("contact_resource_manager", False))

        self.remindme_s = float(kwargs.pop("remindme_s", 1 * 24 * 3600))
        self.max_num_pyexcs = int(kwargs.pop("max_num_pyexcs", 0))
        self.max_num_abierrs = int(kwargs.pop("max_num_abierrs", 0))
        self.safety_ratio = int(kwargs.pop("safety_ratio", 5))
        #self.max_etime_s = kwargs.pop("max_etime_s", )
        self.max_nlaunches = kwargs.pop("max_nlaunches", -1)
        self.debug = kwargs.pop("debug", 0)
        self.fix_qcritical = as_bool(kwargs.pop("fix_qcritical", True))
        self.rmflow = as_bool(kwargs.pop("rmflow", False))
        self.killjobs_if_errors = as_bool(
            kwargs.pop("killjobs_if_errors", True))

        self.customer_service_dir = kwargs.pop("customer_service_dir", None)
        if self.customer_service_dir is not None:
            self.customer_service_dir = Directory(self.customer_service_dir)
            self._validate_customer_service()

        if kwargs:
            raise self.Error("Unknown arguments %s" % kwargs)

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            logger.warning("Using scheduler v>=3.0.0")
            from apscheduler.schedulers.blocking import BlockingScheduler
            self.sched = BlockingScheduler()
        else:
            from apscheduler.scheduler import Scheduler
            self.sched = Scheduler(standalone=True)

        self.nlaunch = 0
        self.num_reminders = 1

        # Used to keep track of the exceptions raised while the scheduler is running
        self.exceptions = deque(maxlen=self.max_num_pyexcs + 10)

        # Used to push additional info during the execution.
        self.history = deque(maxlen=100)

    @classmethod
    def from_file(cls, filepath):
        """Read the configuration parameters from a Yaml file."""
        with open(filepath, "rt") as fh:
            return cls(**yaml.safe_load(fh))

    @classmethod
    def from_string(cls, s):
        """Create an istance from string s containing a YAML dictionary."""
        stream = cStringIO(s)
        stream.seek(0)
        return cls(**yaml.safe_load(stream))
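
    # Example (a sketch): build a scheduler directly from a YAML string:
    #
    #   sched = PyFlowScheduler.from_string("seconds: 30")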

    @classmethod
    def from_user_config(cls):
        """
        Initialize the :class:`PyFlowScheduler` from the YAML file 'scheduler.yml'.
        Search first in the working directory and then in the configuration directory of abipy.

        Raises:
            `cls.Error` if the file cannot be found.
        """
        # Try in the current directory.
        path = os.path.join(os.getcwd(), cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        # Try in the configuration directory.
        path = os.path.join(cls.USER_CONFIG_DIR, cls.YAML_FILE)

        if os.path.exists(path):
            return cls.from_file(path)

        raise cls.Error(
            "Cannot locate %s neither in current directory nor in %s" %
            (cls.YAML_FILE, path))
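
    # Typical usage (a sketch; assumes a Flow object named `flow` built elsewhere):
    #
    #   sched = PyFlowScheduler.from_user_config()
    #   sched.add_flow(flow)
    #   sched.start()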

    def __str__(self):
        """String representation."""
        lines = [self.__class__.__name__ + ", Pid: %d" % self.pid]
        app = lines.append
        app("Scheduler options: %s" % str(self.sched_options))

        if self.flow is not None:
            app(80 * "=")
            app(str(self.flow))

        return "\n".join(lines)

    @property
    def pid(self):
        """The pid of the process associated to the scheduler."""
        try:
            return self._pid
        except AttributeError:
            self._pid = os.getpid()
            return self._pid

    @property
    def pid_file(self):
        """
        Absolute path of the file with the pid.
        The file is located in the workdir of the flow
        """
        return self._pid_file

    @property
    def flow(self):
        """`Flow`."""
        try:
            return self._flow
        except AttributeError:
            return None

    @property
    def num_excs(self):
        """Number of exceptions raised so far."""
        return len(self.exceptions)

    def get_delta_etime(self):
        """Returns a `timedelta` object representing with the elapsed time."""
        return timedelta(seconds=(time.time() - self.start_time))

    def add_flow(self, flow):
        """
        Add a :class:`Flow` to the scheduler.
        """
        if hasattr(self, "_flow"):
            raise self.Error("Only one flow can be added to the scheduler.")

        # Check if we are already using a scheduler to run this flow
        flow.check_pid_file()
        flow.set_spectator_mode(False)

        # Build dirs and files (if not yet done)
        flow.build()

        with open(flow.pid_file, "wt") as fh:
            fh.write(str(self.pid))

        self._pid_file = flow.pid_file
        self._flow = flow

    def _validate_customer_service(self):
        """
        Validate the input parameters; if customer service is on, then
        create the directory for tarball files with the correct permissions for user and group.
        """
        direc = self.customer_service_dir
        if not direc.exists:
            mode = 0o750
            print("Creating customer_service_dir %s with mode %s" %
                  (direc, mode))
            direc.makedirs()
            os.chmod(direc.path, mode)

        if self.mailto is None:
            raise RuntimeError(
                "customer_service_dir requires mailto option in scheduler.yml")

    def _do_customer_service(self):
        """
        This method is called before the shutdown of the scheduler.
        If customer_service is on and the flow didn't complete successfully,
        a lightweight tarball file with inputs and the most important output files
        is created in customer_service_dir.
        """
        if self.customer_service_dir is None: return
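        # Note: doit is forced to True below, so the tarball is generated whenever
        # customer_service_dir is set, regardless of exceptions or the flow status.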
        doit = self.exceptions or not self.flow.all_ok
        doit = True
        if not doit: return

        prefix = os.path.basename(self.flow.workdir) + "_"

        import tempfile, datetime
        suffix = str(datetime.datetime.now()).replace(" ", "-")
        # Remove milliseconds
        i = suffix.index(".")
        if i != -1: suffix = suffix[:i]
        suffix += ".tar.gz"

        #back = os.getcwd()
        #os.chdir(self.customer_service_dir.path)

        _, tmpname = tempfile.mkstemp(suffix="_" + suffix,
                                      prefix=prefix,
                                      dir=self.customer_service_dir.path,
                                      text=False)

        print("Dear customer,\n We are about to generate a tarball in\n  %s" %
              tmpname)
        self.flow.make_light_tarfile(name=tmpname)
        #os.chdir(back)

    def start(self):
        """
        Starts the scheduler in a new thread. Returns 0 on success.
        In standalone mode, this method will block until there are no more scheduled jobs.
        """
        self.history.append("Started on %s" % time.asctime())
        self.start_time = time.time()

        if not has_apscheduler:
            raise RuntimeError("Install apscheduler with pip")

        if has_sched_v3:
            self.sched.add_job(self.callback, "interval", **self.sched_options)
        else:
            self.sched.add_interval_job(self.callback, **self.sched_options)

        errors = self.flow.look_before_you_leap()
        if errors:
            self.exceptions.append(errors)
            return 1

        # Try to run the job immediately. If something goes wrong return without initializing the scheduler.
        self._runem_all()

        if self.exceptions:
            self.cleanup()
            self.send_email(
                msg=
                "Error while trying to run the flow for the first time!\n %s" %
                self.exceptions)
            return 1

        try:
            self.sched.start()
            return 0

        except KeyboardInterrupt:
            self.shutdown(msg="KeyboardInterrupt from user")
            if ask_yesno(
                    "Do you want to cancel all the jobs in the queue? [Y/n]"):
                print("Number of jobs cancelled:", self.flow.cancel())

            self.flow.pickle_dump()
            return -1

    def _runem_all(self):
        """
        This function checks the status of all tasks,
        tries to fix tasks that went unconverged, abicritical, or queuecritical
        and tries to run all the tasks that can be submitted.
        """
        excs = []
        flow = self.flow

        # Allow the manager to be changed at run-time.
        if self.use_dynamic_manager:
            from pymatgen.io.abinit.tasks import TaskManager
            new_manager = TaskManager.from_user_config()
            for work in flow:
                work.set_manager(new_manager)

        nqjobs = 0
        if self.contact_resource_manager:
            # This call is expensive and therefore it's optional
            nqjobs = flow.get_njobs_in_queue()
            if nqjobs is None:
                nqjobs = 0
                if flow.manager.has_queue:
                    logger.warning('Cannot get njobs_inqueue')

            if nqjobs >= self.max_njobs_inqueue:
                print("Too many jobs in the queue: %s, returning" % nqjobs)
                return

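        # Launch budget for this iteration: the remaining queue slots, optionally
        # capped by max_nlaunches if it is not -1.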
        if self.max_nlaunches == -1:
            max_nlaunch = self.max_njobs_inqueue - nqjobs
        else:
            max_nlaunch = min(self.max_njobs_inqueue - nqjobs,
                              self.max_nlaunches)

        # check status.
        flow.check_status(show=False)

        # This check is not perfect: we should build the list of tasks to submit
        # and select only the subset so that we don't exceed max_ncores_used.
        # Many sections of this code should be rewritten.
        #if self.max_ncores_used is not None and flow.ncores_used > self.max_ncores_used:
        if self.max_ncores_used is not None and flow.ncores_allocated > self.max_ncores_used:
            print("Cannot exceed max_ncores_use:d %s" % self.max_ncores_used)
            return

        # Try to restart the unconverged tasks
        # TODO: do not fire here but prepare for firing in rapidfire.
        for task in self.flow.unconverged_tasks:
            try:
                logger.info("Flow will try restart task %s" % task)
                fired = task.restart()
                if fired:
                    self.nlaunch += 1
                    max_nlaunch -= 1
                    if max_nlaunch == 0:
                        logger.info(
                            "Restart: too many jobs in the queue, returning")
                        flow.pickle_dump()
                        return

            except task.RestartError:
                excs.append(straceback())

        # Temporarily disabled by MG because I don't know if fix_critical works after the
        # introduction of the new qadapters.
        # Re-enabled by MsS: disable things that do not work at a low level;
        # fix only prepares for restarting, and sets the status to ready.
        if self.fix_qcritical:
            nfixed = flow.fix_queue_critical()
            if nfixed: print("Fixed %d QCritical error(s)" % nfixed)

        nfixed = flow.fix_abicritical()
        if nfixed: print("Fixed %d AbiCritical error(s)" % nfixed)

        # update database
        flow.pickle_dump()

        # Submit the tasks that are ready.
        try:
            nlaunch = PyLauncher(flow).rapidfire(max_nlaunch=max_nlaunch,
                                                 sleep_time=10)
            self.nlaunch += nlaunch

            if nlaunch:
                print("[%s] Number of launches: %d" %
                      (time.asctime(), nlaunch))

        except Exception:
            excs.append(straceback())

        # check status.
        flow.show_status()

        if excs:
            logger.critical("*** Scheduler exceptions:\n *** %s" %
                            "\n".join(excs))
            self.exceptions.extend(excs)

    def callback(self):
        """The function that will be executed by the scheduler."""
        try:
            return self._callback()
        except:
            # All exceptions raised here will trigger the shutdown!
            s = straceback()
            self.exceptions.append(s)

            # This is useful when debugging
            #try:
            #    print("Exception in callback, will cancel all tasks")
            #    for task in self.flow.iflat_tasks():
            #        task.cancel()
            #except Exception:
            #    pass

            self.shutdown(msg="Exception raised in callback!\n" + s)

    def _callback(self):
        """The actual callback."""
        if self.debug:
            # Show the number of open file descriptors
            print(">>>>> _callback: Number of open file descriptors: %s" %
                  get_open_fds())

        self._runem_all()

        # Mission accomplished. Shutdown the scheduler.
        all_ok = self.flow.all_ok
        if all_ok:
            return self.shutdown(
                msg=
                "All tasks have reached S_OK. Will shutdown the scheduler and exit"
            )

        # Handle failures.
        err_lines = []

        # Shall we send a reminder to the user?
        delta_etime = self.get_delta_etime()

        if delta_etime.total_seconds() > self.num_reminders * self.remindme_s:
            self.num_reminders += 1
            msg = (
                "Just to remind you that the scheduler with pid %s, flow %s\n has been running for %s "
                % (self.pid, self.flow, delta_etime))
            retcode = self.send_email(msg, tag="[REMINDER]")

            if retcode:
                # Cannot send mail, shutdown now!
                msg += (
                    "\nThe scheduler tried to send an e-mail to remind the user\n"
                    + " but send_email returned %d. Aborting now" % retcode)
                err_lines.append(msg)

        #if delta_etime.total_seconds() > self.max_etime_s:
        #    err_lines.append("\nExceeded max_etime_s %s. Will shutdown the scheduler and exit" % self.max_etime_s)

        # Too many exceptions. Shutdown the scheduler.
        if self.num_excs > self.max_num_pyexcs:
            msg = "Number of exceptions %s > %s. Will shutdown the scheduler and exit" % (
                self.num_excs, self.max_num_pyexcs)
            err_lines.append(boxed(msg))

        # Paranoid check: disable the scheduler if we have submitted
        # too many jobs (it might be due to some bug or other external reasons
        # such as race conditions between different callbacks!)
        if self.nlaunch > self.safety_ratio * self.flow.num_tasks:
            msg = "Too many jobs launched %d. Total number of tasks = %s, Will shutdown the scheduler and exit" % (
                self.nlaunch, self.flow.num_tasks)
            err_lines.append(boxed(msg))

        # Count the number of tasks with status == S_ERROR.
        if self.flow.num_errored_tasks > self.max_num_abierrs:
            msg = "Number of tasks with ERROR status %s > %s. Will shutdown the scheduler and exit" % (
                self.flow.num_errored_tasks, self.max_num_abierrs)
            err_lines.append(boxed(msg))

        # Test for the presence of deadlocks.
        g = self.flow.find_deadlocks()
        if g.deadlocked:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()

            g = self.flow.find_deadlocks()
            print("deadlocked:\n", g.deadlocked, "\nrunnables:\n", g.runnables,
                  "\nrunning\n", g.running)
            if g.deadlocked and not g.runnables and not g.running:
                err_lines.append(
                    "No runnable job with deadlocked tasks:\n%s." %
                    str(g.deadlocked))

        if not g.runnables and not g.running:
            # Check the flow again so that statuses are updated.
            self.flow.check_status()
            g = self.flow.find_deadlocks()
            if not g.runnables and not g.running:
                err_lines.append(
                    "No task is running and cannot find other tasks to submit."
                )

        # Something wrong. Quit
        if err_lines:
            # Cancel all jobs.
            if self.killjobs_if_errors:
                cprint(
                    "killjobs_if_errors set to 'yes' in scheduler file. Will kill jobs before exiting.",
                    "yellow")
                try:
                    num_cancelled = 0
                    for task in self.flow.iflat_tasks():
                        num_cancelled += task.cancel()
                    cprint("Killed %d tasks" % num_cancelled, "yellow")
                except Exception as exc:
                    cprint(
                        "Exception while trying to kill jobs:\n%s" % str(exc),
                        "red")

            self.shutdown("\n".join(err_lines))

        return len(self.exceptions)

    def cleanup(self):
        """Cleanup routine: remove the pid file and save the pickle database"""
        try:
            os.remove(self.pid_file)
        except OSError as exc:
            logger.critical("Could not remove pid_file: %s", exc)

        # Save the final status of the flow.
        self.flow.pickle_dump()

    def shutdown(self, msg):
        """Shutdown the scheduler."""
        try:
            self.cleanup()

            self.history.append("Completed on: %s" % time.asctime())
            self.history.append("Elapsed time: %s" % self.get_delta_etime())

            if self.debug:
                print(">>>>> shutdown: Number of open file descriptors: %s" %
                      get_open_fds())

            retcode = self.send_email(msg)
            if self.debug:
                print("send_mail retcode", retcode)

            # Write file with the list of exceptions:
            if self.exceptions:
                dump_file = os.path.join(self.flow.workdir, "_exceptions")
                with open(dump_file, "wt") as fh:
                    fh.writelines(self.exceptions)
                    fh.write("Shutdown message:\n%s" % msg)

            lines = []
            app = lines.append
            app("Submitted on: %s" % time.ctime(self.start_time))
            app("Completed on: %s" % time.asctime())
            app("Elapsed time: %s" % str(self.get_delta_etime()))

            if self.flow.all_ok:
                app("Flow completed successfully")
            else:
                app("Flow %s didn't complete successfully" %
                    repr(self.flow.workdir))
                app("use `abirun.py FLOWDIR debug` to analyze the problem.")
                app("Shutdown message:\n%s" % msg)

            print("")
            print("\n".join(lines))
            print("")

            self._do_customer_service()

            if self.flow.all_ok:
                print("Calling flow.finalize()...")
                self.flow.finalize()
                #print("finalized:", self.flow.finalized)
                if self.rmflow:
                    app("Flow directory will be removed...")
                    try:
                        self.flow.rmtree()
                    except Exception:
                        logger.warning(
                            "Ignoring exception while trying to remove flow dir."
                        )

        finally:
            # Shutdown the scheduler thus allowing the process to exit.
            logger.debug('This should be the shutdown of the scheduler')

            # Unschedule all the jobs before calling shutdown
            #self.sched.print_jobs()
            if not has_sched_v3:
                for job in self.sched.get_jobs():
                    self.sched.unschedule_job(job)
            #self.sched.print_jobs()

            self.sched.shutdown()
            # Uncomment the line below if shutdown does not work!
            #os.system("kill -9 %d" % os.getpid())

    def send_email(self, msg, tag=None):
        """
        Send an e-mail before completing the shutdown.
        Returns 0 on success.
        """
        try:
            return self._send_email(msg, tag)
        except:
            self.exceptions.append(straceback())
            return -2

    def _send_email(self, msg, tag):
        if self.mailto is None:
            return -1

        header = msg.splitlines()
        app = header.append

        app("Submitted on: %s" % time.ctime(self.start_time))
        app("Completed on: %s" % time.asctime())
        app("Elapsed time: %s" % str(self.get_delta_etime()))
        app("Number of errored tasks: %d" % self.flow.num_errored_tasks)
        app("Number of unconverged tasks: %d" %
            self.flow.num_unconverged_tasks)

        strio = cStringIO()
        strio.writelines("\n".join(header) + 4 * "\n")

        # Add the status of the flow.
        self.flow.show_status(stream=strio)

        if self.exceptions:
            # Report the list of exceptions.
            strio.writelines(self.exceptions)

        if tag is None:
            tag = " [ALL OK]" if self.flow.all_ok else " [WARNING]"

        return sendmail(subject=self.flow.name + tag,
                        text=strio.getvalue(),
                        mailto=self.mailto)