Example 1
	def handle(self, *args, **options):
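		# daemonic=True runs the scheduler thread as a daemon, so it won't keep the process alive by itself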
		sched = Scheduler(daemonic=True)
		sched.add_cron_job(job_function, minute='*')
		sched.configure()
		try:
		    sched.start()
		except (KeyboardInterrupt, SystemExit):
		    pass
		sched.print_jobs()  # print_jobs() already writes to stdout and returns None
Example 2
    def testOne(self):

      sched = Scheduler()

      #@sched.interval_schedule(seconds=10)
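      # day_of_week=0 is Monday in APScheduler's cron trigger: run every Monday at 05:30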
      @sched.cron_schedule(day_of_week=0, hour=5, minute=30)
      def testSched():
	      print "job invoked!"
        

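      # standalone=True makes sched.start() run the scheduler in this thread and block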
      config = {'apscheduler.standalone': True}
      sched.configure(config)
      sched.start()
Example 3
class TestRunningScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()
        self.scheduler.start()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    def test_shutdown_timeout(self):
        self.scheduler.shutdown()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_double_start(self):
        self.scheduler.start()

    @raises(SchedulerAlreadyRunningError)
    def test_scheduler_configure_running(self):
        self.scheduler.configure({})

    def test_scheduler_double_shutdown(self):
        self.scheduler.shutdown()
        self.scheduler.shutdown(False)
Example 5
    all_items = {}
    def process_items_from_day(items):
        data = []
        for item in filter(lambda item: item['link'] is not None, items):
            item = process(item)
            if item['link'] not in all_items:
                all_items[item['link']] = True
                data.append(item)
        return data

    collections = ['AND observed > date("now", "start of day", "-1 day") AND observed < date("now", "start of day")',
                   'AND observed > date("now", "start of day", "-2 day") AND observed < date("now", "start of day", "-1 day")',
                   'AND observed > date("now", "start of day", "-3 day") AND observed < date("now", "start of day", "-2 day")']
    collections = [process_items_from_day(get_items_from_day(date)) for date in collections]

    with open('./templates/newsletter.html', 'r') as newspaper:
        template = Template(newspaper.read())
        html = template.render(title="Reddit News Agency", edition=len(os.listdir('./deploy')),
                               collections=collections).encode('utf-8')
        with open('./deploy/' + str(int(time())) + '.html', 'w') as f:
            f.write(html)
        requests.post('http://reddit-snews-agency.herokuapp.com/', data=html, headers={
            'Authorization': '9f9fa431c64a86da8324bb370d05377bbf49dbf9'
        })

if __name__ == '__main__':
    main()
    sched.configure(standalone=True)
    sched.start()
Example 6
import datetime
import logging
import time

from apscheduler.scheduler import Scheduler

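# Keys with the 'apscheduler.' prefix are global scheduler options: thread pool
# sizing, plus misfire_grace_time/coalesce behaviour for jobs that fire late.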
gconfig = {'apscheduler.threadpool.core_threads': 2,
           'apscheduler.threadpool.max_threads': 50,
           'apscheduler.threadpool.keepalive': 4,
           'apscheduler.misfire_grace_time': 1,
           'apscheduler.coalesce': True}

logging.basicConfig()
sched = Scheduler()
sched.daemonic = True
sched.configure(gconfig)
sched.start()

def do_job(a):
    print "This is " + str(a)
    print repr(sched._threadpool), str(sched._threadpool._queue.qsize())
    print time.strftime('%Y-%m-%d %H:%M:%S')

current_date = datetime.datetime.now()
for i in range(0,10):
    start_date = current_date+datetime.timedelta(seconds=i)
    sched.add_interval_job(do_job, seconds=10, args=('number ' + str(i),), start_date=start_date)

while True:
    #print repr(pool)
    time.sleep(1)
Example 7
	de_quien = 'Xendra info Cambio de ip'
	try:
		myip = urllib2.urlopen('http://www.curlmyip.com').read()
	except urllib2.HTTPError, e:
		myip = ''
	if myip:
		myip = myip.split(' ')[0].split('\n')[0]
		try:
			ip_history = IpServer.objects.get(pk=1)
		except ObjectDoesNotExist, e:
			ip_history = IpServer()
			ip_history.ip = myip
			ip_history.save()
			correo = send_mail(asunto, myip, de_quien, para, fail_silently=True)
		if myip != ip_history.ip:
			correo = send_mail(asunto, myip, de_quien, para, fail_silently=True)
			ip_history.ip = myip
			ip_history.save()

def borrar_antiguos():
	pass

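# Run job_function every five minutes (minute='*/5') on a daemonic scheduler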
sched = Scheduler(daemonic=True)
sched.add_cron_job(job_function, minute='*/5')
sched.configure()
try:
    sched.start()
except (KeyboardInterrupt, SystemExit):
    pass
#print sched.print_jobs()
Example 8
class TestOfflineScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')
        self.scheduler.add_jobstore(RAMJobStore(), 'dummy')

    def test_add_tentative_job(self):
        job = self.scheduler.add_date_job(lambda: None,
                                          datetime(2200, 7, 24),
                                          jobstore='dummy')
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_configure_jobstore(self):
        conf = {
            'apscheduler.jobstore.ramstore.class':
            'apscheduler.jobstores.ram_store:RAMJobStore'
        }
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore('ramstore')

    def test_shutdown_offline(self):
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        global_options = {'misfire_grace_time': '2', 'daemonic': 'false'}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        global_options = {
            'apscheduler.misfire_grace_time': 2,
            'apscheduler.daemonic': False
        }
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
Example 9
        count_of_data_requests_pending = (count_of_data_requests_received
                                          - count_of_data_requests_responded
                                          - count_of_data_requests_declined)
        reciprocity = (count_of_data_requests_responded * 1.0 / count_of_data_requests_received
                       if count_of_data_requests_received > 0 else 0)
        Partner.objects.filter(hfpp_network_id=partner_id).update(
            count_of_data_requests_received=count_of_data_requests_received,
            count_of_data_requests_sent=count_of_data_requests_sent,
            count_of_data_requests_responded=count_of_data_requests_responded,
            count_of_data_requests_declined=count_of_data_requests_declined,
            count_of_data_requests_pending=count_of_data_requests_pending,
            reciprocity=reciprocity
        )
    except urllib.error.HTTPError as e:
    
        # Parse response XML
        resp_content = e.read().decode('utf-8')
        logger.debug('response: %s', resp_content)
        try:
            root = ElementTree.fromstring(resp_content)
            # Not succeeded
            # 400, 401, 403 or 500
            error_code = root.findtext('./ErrorCode')
            error_message = root.findtext('./ErrorMessage')
            # Log error code and error message
            logger.error('error code: %s', error_code)
            logger.error('error message: %s', error_message)
        except Exception as e:
            logger.exception("")
    logginghelper.method_exit(logger, signature)

sched.configure()
sched.start()
Example 10
#!/usr/bin/env python
from apscheduler.scheduler import Scheduler
from datetime import datetime
from time import sleep
from memcheck import check, info

import signal

# Start the scheduler
sched = Scheduler()
options = {'misfire_grace_time': '2',
           'daemonic': 'false'}
# Pass the options as keyword arguments: a dict passed positionally is treated
# as a global config whose keys need the 'apscheduler.' prefix, so un-prefixed
# keys in it are silently ignored (compare test_configure_no_prefix above).
sched.configure(**options)

stop = 0
# KeyboardInterrupt handler
def shutdown(signl, frme):
    global stop
    global sched
    stop = 1
    sched.shutdown(10)
    #log.info('Caught signal %r. Processing will stop.', signl)
    return 0

def get_info(signl, frme):
    info()
    
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGUSR1, get_info)

Example 11
class Master(object):
	def __init__(self):
		self.scheduler = Scheduler()
		self.scheduler.configure({'daemonic': True})
		self.scheduler.add_interval_job(self._balance, seconds=60)
		self.scheduler.start()

	def _balance(self):
		def wrapper():
			balancer.rebalance()
			self.reload_all()

		q.put((wrapper, [], {}))

	# reconfigure haproxy
	def reload_all(self):
		from upscale.utils.common import get_hosts 
		for host in get_hosts():
			print ("Reloading host {0}.".format(host.private_dns_name))
			with Tasks("tcp://{0}:10000/".format(host.private_dns_name)) as h:
				# should run async and wait for all results to finish
				h.reload()

	# start host
	@queue
	def start(self, namespace, application):
		from upscale.master.balancer import get_containers

		print namespace, application,
		(hosts, containers) = get_containers()

		# also weighted hosts, so one in static host, one on spot instance
		min_host = None
		for host in containers:
			if (not min_host or len(containers[host])<len(containers[min_host])):
				# check if it already contains project
				min_host_applications = set([(b.split('_')[0], b.split('_')[1]) for b in containers[host] if len(b.split('_'))==3])
				if ((namespace, application) in min_host_applications):
					continue

				min_host = host

		if not min_host:
			raise Exception('No host available')

		print 'Starting on host {0}.'.format(min_host)
		# start container on min host
		# check minhost
		with Worker("tcp://{0}:10000/".format(hosts[min_host])) as h:
			#h.start(namespace, application).get(timeout=5)
			print ('Starting new container')
			h.start(namespace, application)

		self.reload_all()

		# health checks, does namespace, application exist
		#enqueue(wrapper, )
		return True

	@queue
	def destroy(self, namespace, website):
		# get all containers for project and destroy them
		# (get_containers imported locally, mirroring start)
		from upscale.master.balancer import get_containers
		print namespace, website,
		(hosts, containers) = get_containers()
		for host in containers:
			for container in containers[host]:
				pass

	@queue
	def upgrade(self, namespace, website):
		# rolling upgrade, first start new instances with new version,
		# then shutdown old ones
		
		# get containers and host of old version
		# start new containers with new version
		# shutdown old versions
		pass
Example 12
def schedule_job(sched, function, periodicity, start_time, args):
    sched.add_interval_job(function, seconds=periodicity, start_date=start_time, args=args)




##############################################################

if __name__ == "__main__":
	
	
	#########################################################
	# ArOmino schedule
	
	sched = Scheduler()
	sched.configure({'apscheduler.daemonic': False})
	sched.start()        # start the scheduler
	
	#UPDATE
	schedule_job(sched, update_frasi, 600, '2014-09-19 22:00:00', args=[''])
	schedule_job(sched, update_sensors, 300, '2014-09-19 22:00:00', args=[''])
	schedule_job(sched, update_weather, 3000, '2014-09-19 22:00:00', args=[''])
	schedule_job(sched, update_forecast, 86400, '2014-09-20 08:00:00', args=[''])

	#PERIODIC
	schedule_job(sched, send_message_p, 1800, '2014-09-20 08:10:00', args=['realtime'])
	schedule_job(sched, send_message_p, 1800, '2014-09-20 08:10:00', args=['history'])
	schedule_job(sched, send_message_p, 1800, '2014-09-20 08:10:00', args=['weather'])
	schedule_job(sched, send_message_p, 1800, '2014-09-20 08:10:00', args=['biometeo'])
Example 13
class TestOfflineScheduler(object):
    def setup(self):
        self.scheduler = Scheduler()

    def teardown(self):
        if self.scheduler.running:
            self.scheduler.shutdown()

    @raises(KeyError)
    def test_jobstore_twice(self):
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")
        self.scheduler.add_jobstore(RAMJobStore(), "dummy")

    def test_add_tentative_job(self):
        job = self.scheduler.add_date_job(lambda: None, datetime(2200, 7, 24), jobstore="dummy")
        assert isinstance(job, Job)
        eq_(self.scheduler.get_jobs(), [])

    def test_add_job_by_reference(self):
        job = self.scheduler.add_date_job("copy:copy", datetime(2200, 7, 24))
        eq_(job.func, copy)
        eq_(job.func_ref, "copy:copy")

    def test_configure_jobstore(self):
        conf = {"apscheduler.jobstore.ramstore.class": "apscheduler.jobstores.ram_store:RAMJobStore"}
        self.scheduler.configure(conf)
        self.scheduler.remove_jobstore("ramstore")

    def test_shutdown_offline(self):
        self.scheduler.shutdown()

    def test_configure_no_prefix(self):
        global_options = {"misfire_grace_time": "2", "daemonic": "false"}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 1)
        eq_(self.scheduler.daemonic, True)

    def test_configure_prefix(self):
        global_options = {"apscheduler.misfire_grace_time": 2, "apscheduler.daemonic": False}
        self.scheduler.configure(global_options)
        eq_(self.scheduler.misfire_grace_time, 2)
        eq_(self.scheduler.daemonic, False)

    def test_add_listener(self):
        val = []
        self.scheduler.add_listener(val.append)

        event = SchedulerEvent(EVENT_SCHEDULER_START)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 1)
        eq_(val[0], event)

        event = SchedulerEvent(EVENT_SCHEDULER_SHUTDOWN)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)
        eq_(val[1], event)

        self.scheduler.remove_listener(val.append)
        self.scheduler._notify_listeners(event)
        eq_(len(val), 2)

    def test_pending_jobs(self):
        # Tests that pending jobs are properly added to the jobs list when
        # the scheduler is started (and not before!)
        self.scheduler.add_date_job(lambda: None, datetime(9999, 9, 9))
        eq_(self.scheduler.get_jobs(), [])

        self.scheduler.start()
        jobs = self.scheduler.get_jobs()
        eq_(len(jobs), 1)
Example 14
class HouseControl(object):
    
    __scheduler = None
    __heatingStatusBean = None
    
    busJobsQueue = Queue.Queue()
    busWorkerThread = BusWorker(busJobsQueue)
    
    def __init__(self):
        self.logger = logging.getLogger(APPLICATION_LOGGER_NAME)
        self.logger.info("HouseControl starting...")

        configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)
        
        #Initialize HeatingStatusBean
        self.__initializeHeatingStatusBean(configurationReader)
        
        #Initialize Scheduler
        self.__initializeScheduler(configurationReader)
        
        #Initialize BusQueueWorker
        self.busWorkerThread.setDaemon(True)        
        self.busWorkerThread.start() 
        
        self.logger.info("HouseControl started.")
        
        
    def __initializeHeatingStatusBean(self, configurationReader):
        #HeatingStatusBean       
        self.__heatingStatusBean = HeatingStatusBean.HeatingStatusBean()
        
        #Configure Bean
        self.updateHeatingStatusBeanConfiguration(configurationReader)
        
        #Add ChangeListener
        self.__heatingStatusBean.addChangeListener(HeatingControlService.HeatingControlService(self))
        self.__heatingStatusBean.addChangeListener(HeatingSwitchService.HeatingSwitchService(self))
        ##self.__heatingStatusBean.addChangeListener(HeatingMonitorService.HeatingMonitorService(self))
        self.logger.info("HeatingStatusBean configured.")
            
            
    def __initializeScheduler(self, configurationReader):
        #Scheduler
        self.__scheduler = Scheduler()
        self.__scheduler.configure(standalone=True)
        self.__scheduler.add_listener(schedulerListener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)
        
        #SchedulerTasks
        #TemperatureFeedService, TemperatureLogService, MixerControlService
        self.__loadBaseSchedulerTasks()
        
        self.__scheduler.start()
        
        #User-defined switching times
        self.loadUserSchedulerTasks(configurationReader)
        
        self.logger.info("Scheduler started.")
        
    
    def getHeatingStatusBean(self):
        return self.__heatingStatusBean
    
    def getScheduler(self):
        return self.__scheduler
    
        
    def __loadBaseSchedulerTasks(self):
        temperatureFeedService = TemperatureFeedService.TemperatureFeedService(self)
        temperatureLogService = TemperatureLogService.TemperatureLogService(self)
        mixerControlService = MixerControlService.MixerControlService(self)
        
        #TemperatureFeedService
        job = self.__scheduler.add_interval_job(temperatureFeedService.run, seconds=INTERVALL_UPDATE_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_UPDATER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")

        #TemperatureLogService
        job = self.__scheduler.add_interval_job(temperatureLogService.run, seconds=INTERVALL_LOG_TEMPERATURE)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_LOGGER
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")
        
        #MixerControlService
        job = self.__scheduler.add_interval_job(mixerControlService.run, seconds=INTERVALL_UPDATE_MIXER)
        job.name = SCHEDULE_SERVICE_TEMPERATURE_MIXERCONTROL
        self.logger.info("Scheduler-Job [" + job.name + "] loaded.")
        
        
    def updateHeatingStatusBeanConfiguration(self, configurationReader):
        temperatures = configurationReader.temperatures
        self.__heatingStatusBean.setUpperFloorFlowTargetTemperature(float(temperatures.get('ogv')))
        self.__heatingStatusBean.setGroundFloorFlowTargetTemperature(float(temperatures.get('egv')))
        self.__heatingStatusBean.setWaterTargetTemperature(float(temperatures.get('hotwater')))
    
    
    def reloadUserSchedulerTasks(self):
        self.removeUserSchedulerTasks()
        
        configurationReader = ConfigurationReader(self.logger, os.getcwd() + FILEPATH_CONFIGURATION)
        self.updateHeatingStatusBeanConfiguration(configurationReader)
        
        self.loadUserSchedulerTasks(configurationReader)
        
        
    def removeUserSchedulerTasks(self):
        jobList = self.__scheduler.get_jobs()
        for job in jobList:
            if job.name.startswith(SERVICE_HEATING_ACTION_PREFIX):
                self.logger.info("Scheduler-Job [" + job.name + "] removed.")
                self.__scheduler.unschedule_job(job)


    def loadUserSchedulerTasks(self, configurationReader):
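        #Start with every cron field unset; the entries from the task's configured schedule overwrite them below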
        baseCronSched = {'year':None, 'month':None, 'day':None, 'week':None, 'day_of_week':None, 'hour':None, 'minute':None, 'second':None, 'start_date':None}
        for task in configurationReader.heatingTasks:
            
            schedType = task.get('schedule').get('type') 
            if(schedType == 'cron'):
                cronSched = baseCronSched.copy()
                cronSched.update(task.get('schedule'))
                cronSched.pop('type')
                if(task.get('type') == 'changeHeatingStatus'):
                    taskFunction = self.__heatingStatusBean.setHeatingStatusMap
                    job = self.__scheduler.add_cron_job(taskFunction,
                                                        year=cronSched['year'], month=cronSched['month'], day=cronSched['day'],
                                                        week=cronSched['week'], day_of_week=cronSched['day_of_week'], 
                                                        hour=cronSched['hour'], minute=cronSched['minute'], second=cronSched['second'], 
                                                        start_date=cronSched['start_date'],
                                                        args=[task.get('status')])
                    n = SERVICE_HEATING_ACTION_PREFIX + str(task.get('name'))
                    job.name = n
        
        jobList = self.__scheduler.get_jobs()
        for job in jobList:
            if job.name.startswith(SERVICE_HEATING_ACTION_PREFIX):
                self.logger.info("Scheduler-Job [" + job.name + "] loaded.")
Example 15
class MessageScheduler(object):
    def __init__(self, jobstore, url):
        self.logger = setup(__name__)
        self.logger.debug("Creating MessageScheduler")
        self.logger.debug("id = {}".format(id(self)))
        config = read_env('config.cfg')
        self._scheduler = Scheduler(daemonic=True)
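        # Assumes 'jobstore' arrives as a module:class suffix such as
        # '.shelve_store:ShelveJobStore', which the %s below appends to
        # 'apscheduler.jobstores' (an assumption; the value isn't shown here)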
        config_scheduler = {'apscheduler.jobstores.file.class': 'apscheduler.jobstores%s' % jobstore,
                            'apscheduler.jobstores.file.url':  url}
        self._scheduler.configure(config_scheduler)

        #Open a publishing socket to the forwarder to pass messages out
        self.broadcast_socket = zmq.Context().socket(zmq.PUB)
        self.broadcast_socket.connect(config['ZMQ_FORWARDER_SUCKS_IN'])

    def start_ap_daemon(self):
        self.logger.info("scheduler start")
	setup("apscheduler.scheduler")
        self._scheduler.start()

    def shutdown(self):
        self.logger.info("scheduler shutdown")
        self._scheduler.shutdown()

    def schedule(self, topic, msg):
        """ Takes incoming message, massages it, and dispatches
            to appropriate function.
        """
        self.logger.debug("schedule received {}: {}".format(topic, msg))

        if 'obj_id' in msg:
            obj_id = msg.pop('obj_id')

        if 'start_time' in msg:
            if 'window' in msg:
                msg_time = msg['start_time'] - timedelta(seconds=msg['window'])
            else:
                msg_time = msg['start_time']
        else:
            offset = timedelta(seconds=10)
            #needs to be a little bit in the future, so scheduler can run it
            msg_time = datetime.now() + offset

        if 'operation' in msg:
            if msg['operation'] == 'insert':
                self.schedule_message(topic, msg, msg_time, obj_id)
            elif msg['operation'] == 'update':
                self.reschedule_message(obj_id, topic, msg, msg_time)
            elif msg['operation'] == 'delete':
                self.cancel_message(obj_id)
            else:
                self.logger.debug("Scheduler has been sent unknown database signal operation.")
        else:
            self.schedule_message(topic, msg, msg_time)

    def send_to_station(self, topic, msg):
        """ Send a message on to rootio_telephony """
        topic = "station.{}.db".format(msg['station_id'])
        # reserialize any datetime elements for zmq -- unpack again at ts
        for key, value in msg.items():
            if isinstance(value, datetime):
                msg[key] = isodate.datetime_isoformat(value)
        msg = json.dumps(msg)
        self.logger.debug("fwd %s: %s" % (topic, msg))
        self.broadcast_socket.send_multipart((topic, msg))

    def schedule_message(self, topic, message, send_at, obj_id):
        self.logger.info("schedule message %s:%s at %s" % (topic, message, send_at))
        #create lambda for scheduler to call at execution time
        #and add it
        message['obj_id'] = obj_id
        try:
            job = self._scheduler.add_date_job(self.send_to_station,
                                               send_at,
                                               args=(topic, message),
                                               name=obj_id)
            self.logger.debug("scheduled job: {}".format(job))
            self.logger.debug("scheduled job_name: {}".format(job.name))
        except ValueError, e:
            self.logger.error(e)
Example 16
class Master(object):
    def __init__(self):
        self.scheduler = Scheduler()
        self.scheduler.configure({'daemonic': True})
        self.scheduler.add_interval_job(self._balance, seconds=60)
        self.scheduler.start()

    def _balance(self):
        def wrapper():
            balancer.rebalance()
            self.reload_all()

        q.put((wrapper, [], {}))

    # reconfigure haproxy
    def reload_all(self):
        from upscale.utils.common import get_hosts
        for host in get_hosts():
            print("Reloading host {0}.".format(host.private_dns_name))
            with Tasks("tcp://{0}:10000/".format(host.private_dns_name)) as h:
                # should run async and wait for all results to finish
                h.reload()

    # start host
    @queue
    def start(self, namespace, application):
        from upscale.master.balancer import get_containers

        print namespace, application,
        (hosts, containers) = get_containers()

        # also weighted hosts, so one in static host, one on spot instance
        min_host = None
        for host in containers:
            if (not min_host
                    or len(containers[host]) < len(containers[min_host])):
                # check if it already contains project
                min_host_applications = set([(b.split('_')[0], b.split('_')[1])
                                             for b in containers[host]
                                             if len(b.split('_')) == 3])
                if ((namespace, application) in min_host_applications):
                    continue

                min_host = host

        if not min_host:
            raise Exception('No host available')

        print 'Starting on host {0}.'.format(min_host)
        # start container on min host
        # check minhost
        with Worker("tcp://{0}:10000/".format(hosts[min_host])) as h:
            #h.start(namespace, application).get(timeout=5)
            print('Starting new container')
            h.start(namespace, application)

        self.reload_all()

        # health checks, does namespace, application exist
        #enqueue(wrapper, )
        return True

    @queue
    def destroy(self, namespace, website):
        # get all containers for project and destroy them
        # (get_containers imported locally, mirroring start)
        from upscale.master.balancer import get_containers
        print namespace, website,
        (hosts, containers) = get_containers()
        for host in containers:
            for container in containers[host]:
                pass

    @queue
    def upgrade(self, namespace, website):
        # rolling upgrade, first start new instances with new version,
        # then shutdown old ones

        # get containers and host of old version
        # start new containers with new version
        # shutdown old versions
        pass
Example 17
class MessageScheduler(object):
    def __init__(self, jobstore, url):
        self.socket = zmq.Context().socket(zmq.SUB)
        self.logger = setup(__name__)
        self.logger.debug("Creating MessageScheduler")
        self.logger.debug("id = {}".format(id(self)))
        config = read_env('config.cfg')
        self._scheduler = Scheduler(daemonic=True)
        config_scheduler = {'apscheduler.jobstores.file.class': 'apscheduler.jobstores%s' % jobstore,
                            'apscheduler.jobstores.file.url': url}
        self._scheduler.configure(config_scheduler)

        # Open a publishing socket to the forwarder to pass messages out
        self.broadcast_socket = zmq.Context().socket(zmq.PUB)
        self.broadcast_socket.connect(config['ZMQ_FORWARDER_SUCKS_IN'])

    def start_ap_daemon(self):
        self.logger.info("scheduler start")
        setup("apscheduler.scheduler")
        self._scheduler.start()

    def shutdown(self):
        self.logger.info("scheduler shutdown")
        self._scheduler.shutdown()

    def schedule(self, topic, msg):
        """ Takes incoming message, massages it, and dispatches
            to appropriate function.
        """
        self.logger.debug("schedule received {}: {}".format(topic, msg))

        if 'obj_id' in msg:
            obj_id = msg.pop('obj_id')

        if 'start_time' in msg:
            if 'window' in msg:
                msg_time = msg['start_time'] - timedelta(seconds=msg['window'])
            else:
                msg_time = msg['start_time']
        else:
            offset = timedelta(seconds=10)
            # needs to be a little bit in the future, so scheduler can run it
            msg_time = datetime.now() + offset

        if 'operation' in msg:
            if msg['operation'] == 'insert':
                self.schedule_message(topic, msg, msg_time, obj_id)
            elif msg['operation'] == 'update':
                self.reschedule_message(obj_id, topic, msg, msg_time)
            elif msg['operation'] == 'delete':
                self.cancel_message(obj_id)
            else:
                self.logger.debug("Scheduler has been sent unknown database signal operation.")
        else:
            self.schedule_message(topic, msg, msg_time)

    def send_to_station(self, topic, msg):
        """ Send a message on to rootio_telephony """
        topic = "station.{}.db".format(msg['station_id'])
        # reserialize any datetime elements for zmq -- unpack again at ts
        for key, value in msg.items():
            if isinstance(value, datetime):
                msg[key] = isodate.datetime_isoformat(value)
        msg = json.dumps(msg)
        self.logger.debug("fwd %s: %s" % (topic, msg))
        self.broadcast_socket.send_multipart((topic, msg))

    def schedule_message(self, topic, message, send_at, obj_id):
        self.logger.info("schedule message %s:%s at %s" % (topic, message, send_at))
        # create lambda for scheduler to call at execution time
        # and add it
        message['obj_id'] = obj_id
        try:
            job = self._scheduler.add_date_job(self.send_to_station,
                                               send_at,
                                               args=(topic, message),
                                               name=obj_id)
            self.logger.debug("scheduled job: {}".format(job))
            self.logger.debug("scheduled job_name: {}".format(job.name))
        except ValueError, e:
            self.logger.error(e)
Example 18
	db.connect()
	
	if not AO_Login.table_exists():
		init_db(db)
		print('OK SQLite database '+DATABASE+' installed!')
	else:
		print('OK SQLite database '+DATABASE+' previously installed!')

	db.close()
	
	#cgi_manage()
	
	#########################################################
	# Scheduling
	
	sched = Scheduler()
	sched.configure({'apscheduler.daemonic': False})
	sched.start()        # start the scheduler
	
	job_param = sched.add_interval_job(send_parametric, seconds=10, args=['1', 'p', '3', '1', '1', '2'])
	job_inform = sched.add_interval_job(send_informative, seconds=20, args=['1', 'i', '1', '1'])
	
	###############################################################################################################################
	# Example of scheduling
	#job = sched.add_date_job(my_job, '2013-08-05 23:47:05', ['text'])
	#job = sched.add_interval_job(my_job, seconds=3,args=)
	#job = sched.add_cron_job(my_job, minute="*/15", args=['text'])
	
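	# Busy-loop keeps the main thread alive while the scheduler threads run the jobs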
	while True:
		pass
Example 19
    print(page_path)
    command = "cp {} {}".format(old_page_path, new_page_path)
    os.system(command)


def process_request():
    global SAFE
    while True:
        if not update_queue.empty() and SAFE:
            item = update_queue.get()
            # print(*item)
            update_page(*item)
            update_queue.task_done()
        elif not SAFE:
            replace_page_with_new_page()
            SAFE = True


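# The scheduled job only clears the SAFE flag; the worker thread notices,
# swaps in the regenerated pages, then sets SAFE back to True.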
@sched.interval_schedule(minutes=DELAY)
def scheduled_update():
    global SAFE
    SAFE = False


# Register APScheduler to update the pages
sched.configure(misfire_grace_time=30)
sched.start()

t = threading.Thread(target=process_request)
t.daemon = True
t.start()