Example #1
def watchlist(request):
	# Handle file upload
	global location
	if 'import' in request.POST:
		form = DocumentForm(request.POST, request.FILES)
		if form.is_valid():
			newdoc = Document(docfile = request.FILES['docfile'])
			newdoc.save()
			location = newdoc.path()
			# Redirect to the document list after POST
			return HttpResponseRedirect(reverse('scraper.views.watchlist'))
	else:
		form = DocumentForm() # An empty, unbound form

	if 'match' in request.POST:
		call_command('readfile', location)

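		# Note: standalone=True means sched.start() runs the scheduler in this thread,
		# so the view blocks here and never returns a response (APScheduler 2.x behavior)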
		sched = Scheduler(standalone=True)

		def match_sched():
			call_command('readfile', location)

		sched.add_interval_job(match_sched, seconds=20, max_instances=1000)
		sched.start()

	# Load documents for the list page
	documents = Document.objects.all()

	# Render list page with the documents and the form
	return render_to_response(
		'scraper/index2.0.html',
		{'documents': documents, 'form': form},
		context_instance=RequestContext(request)
	)
Example #2
def main(standalone=True):
    
    env.configure("conf.ini")
    
    '''
    Transfer the history detail the first time
    the program is executed.
    '''
    if env.config.get("transfer_hist_detail_on_loading") == "1":
        transferHistDetails(env.config.get("DB_FILE"), env.config.get("STOREID"), 
                         env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),
                         env.config.get("FTP_PORT"), env.config.get("FTP_USERNAME"),
                         env.config.get("FTP_PASSWORD"))
     
    '''
    Create the scheduler with two jobs:
    1. check that the client is alive
    2. transfer details to the server
    '''
    scheduler = Scheduler(standalone=standalone)
      
    scheduler.add_interval_job(sync, 
                               minutes=int(env.config.get("sync_interval")), 
                               args=(env.config.get("DB_FILE"), env.config.get("STOREID"),
                                     env.config.get("PROGNAME"), env.config.get("PROG"),
                                     env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),env.config.get("FTP_PORT"), 
                                     env.config.get("FTP_USERNAME"),env.config.get("FTP_PASSWORD")))
       
    scheduler.add_cron_job(transferFluxDetailsByStatus, 
                           day_of_week=env.config.get("upload_day_of_week"), hour=env.config.get("upload_hour"), minute=env.config.get("upload_minute"), 
                           args=(env.config.get("DB_FILE"), env.config.get("STOREID"),
                                 env.config.get("FTP_HOME"), env.config.get("FTP_HOST"),env.config.get("FTP_PORT"), 
                                 env.config.get("FTP_USERNAME"),env.config.get("FTP_PASSWORD"), 
                                 0))
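    # standalone=True: start() runs the scheduler in the current thread and blocks until shutdown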
    scheduler.start()
Example #3
def schedule():
    logging.info("[+]shivascheduler.py: INSIDE SCHEDULER")
    sched = Scheduler()
    duration = server.shivaconf.getint('analyzer', 'schedulertime')
    sched.add_interval_job(resetcounter, minutes=duration)
    sched.start()
    logging.info(
        "Shiva scheduler, which dumps data into maindb, resets the global counter and sends data on hpfeeds, started at %s and will execute every %d minutes"
        % (datetime.datetime.now(), duration))
Example #4
 def pre_eva_start(self, conf):
     # Load all jobs
     self.invoke('pre_scheduler_load_jobs')
     sched = APScheduler()
     conf['scheduler']['scheduler'] = sched
     for job_name in conf['scheduler']['jobs']:
         job = conf['scheduler']['jobs'][job_name]
         if job.get('type') == 'date':
             # datetime is a datetime string in this case
             # ie: '2012-11-06 14:25:10.8880'
             sched.add_date_job(job['func'], job['datetime'], args=[conf])
         elif job.get('type') == 'interval':
             sched.add_interval_job(job['func'],
                                    seconds=job['interval'].get('seconds', 0),
                                    minutes=job['interval'].get('minutes', 0),
                                    hours=job['interval'].get('hours', 0),
                                    days=job['interval'].get('days', 0),
                                    weeks=job['interval'].get('weeks', 0),
                                    start_date=job['interval'].get('start_date'),
                                    args=[conf])
         elif job.get('type') == 'cron':
             sched.add_cron_job(job['func'],
                                second=job['interval'].get('second'),
                                minute=job['interval'].get('minute'),
                                hour=job['interval'].get('hour'),
                                day=job['interval'].get('day'),
                                week=job['interval'].get('week'),
                                month=job['interval'].get('month'),
                                year=job['interval'].get('year'),
                                day_of_week=job['interval'].get('day_of_week'),
                                args=[conf])
     sched.start()
     self.invoke('post_scheduler_load_jobs')
Example #5
class TrackerManager(object):
    '''Manages process information collection for multiple processes'''
    LOG = logging.getLogger('pt.tracker_manager')

    def __init__(self, interval):
        TrackerManager.LOG.debug(
            "Initializing TrackerManager with interval = %s",
            interval)
        self.listeners = []
        self.probes = []
        self.scheduler = Scheduler()
        self.scheduler.add_interval_job(self.tracking_job, seconds=interval)
        self.scheduler.start()

    def add_listener(self, listener):
        '''Add listener that will receive metrics'''
        self.listeners.append(listener)

    def add_probes(self, probes):
        '''Add probe that will collect metrics'''
        self.probes.extend(probes)

    def tracking_job(self):
        '''a job that monitors'''
        results = []
        for probe in self.probes:
            results.extend(probe())
        self.submit(results)

    def submit(self, results):
        '''publish results to listeners'''
        for listener in self.listeners:
            listener.submit(results)
Example #6
def start_scheduler(event):
    sched = Scheduler()
    sched.start()

    sched.add_interval_job(manage_queue,
                           kwargs={'settings': event.app.registry.settings},
                           seconds=2)
Example #7
def schedule(file_name, n_jobs, frequency):

    '''Schedule the scraper to execute every `frequency` minutes and shut it
       down after a certain number of jobs have been run'''

    # Create a default logger
    basicConfig()

    # Run the first job
    scrape(file_name)

    # Instantiate the scheduler
    sched = Scheduler()
    
    # Start it
    sched.start()

    # Schedule the function
    sched.add_interval_job(scrape, args=[file_name], minutes=frequency,
                           misfire_grace_time=60)
    
    # Wait to run n_jobs (assuming 1 job per hour, which is 3600 seconds)
    sleep(n_jobs * 3600)
    
    # Shutdown the scheduler
    sched.shutdown()
Example #8
def main():

    log.info("BLI Monitor starting...")

    #check_pid()

    bli = BLIMonitor()

    spot = SpotMonitor()

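    # daemonic=False makes the scheduler thread non-daemonic, so the process stays alive after main() returns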
    sched = Scheduler(daemonic=False)

    #sched.add_listener(err_listener, events.EVENT_ALL)

    sched.add_interval_job(lambda: bli.check(), seconds=3)

    sched.add_interval_job(lambda: spot.check(), seconds=3)

    sched.add_listener(
        err_listener, events.EVENT_JOB_ERROR | events.EVENT_JOB_EXECUTED
        | events.EVENT_JOB_MISSED)

    sched.start()

    log.info("started")
    """
    while 1:
        time.sleep(2)
        
        monitor.check()
    """
    pass
Example #9
class IntegrationTestBase(object):
    def setup(self):
        self.jobstore = self.make_jobstore()
        self.scheduler = Scheduler()
        self.scheduler.add_jobstore(self.jobstore, 'persistent')
        self.scheduler.start()

    def test_overlapping_runs(self):
        # Make sure that "increment" runs only once, since it will still be
        # running (increment presumably sleeps) when the next appointed time hits.

        vals = [0]
        self.scheduler.add_interval_job(increment, jobstore='persistent',
                                        seconds=1, args=[vals, 2])
        sleep(2.5)
        eq_(vals, [1])

    def test_max_instances(self):
        vals = [0]
        events = []
        self.scheduler.add_listener(events.append,
                                    EVENT_JOB_EXECUTED | EVENT_JOB_MISSED)
        self.scheduler.add_interval_job(increment, jobstore='persistent',
            seconds=0.3, max_instances=2, max_runs=4, args=[vals, 1])
        sleep(2.4)
        eq_(vals, [2])
        eq_(len(events), 4)
        eq_(events[0].code, EVENT_JOB_MISSED)
        eq_(events[1].code, EVENT_JOB_MISSED)
        eq_(events[2].code, EVENT_JOB_EXECUTED)
        eq_(events[3].code, EVENT_JOB_EXECUTED)
Example #10
def app(global_config, **settings):
    """ This function returns a WSGI application.
    
    It is usually called by the PasteDeploy framework during 
    ``paster serve``.
    """
    zcml_file = settings.get('configure_zcml', 'configure.zcml')
    db_string = settings.get('db_string')
    if db_string is None:
        raise ValueError("No 'db_string' value in application configuration.")
    initialize_sql(db_string)
    config = Configurator(root_factory=get_root, settings=settings)
    config.begin()
    config.load_zcml(zcml_file)
    config.end()
    # Ugly hack to configure the MapperExtension with the settings.
    removal_extension.path = settings.get('upload_directory')
    
    scheduler = Scheduler()
    # Send out queued mails
    from eportfolio.utilities.mail_delivery import trigger_queued_delivery
    scheduler.add_interval_job(trigger_queued_delivery, seconds=30)
    scheduler.start()
    
    return config.make_wsgi_app()
Example #11
class IsItWar(Plugin):
    def __init__(self, skype):
        super(IsItWar, self).__init__(skype)
        self.falseMessages =["The world is still safe, Russia has not declared war yet", "http://suptg.thisisnotatrueending.com/archive/29138254/images/1388285593271.jpg", "http://www.meh.ro/original/2010_03/meh.ro3771.jpg", "http://d24w6bsrhbeh9d.cloudfront.net/photo/arpBmWp_700b_v1.jpg", "http://d24w6bsrhbeh9d.cloudfront.net/photo/aXb2VAv_700b.jpg", "http://d24w6bsrhbeh9d.cloudfront.net/photo/aLKXd6v_700b.jpg"]
        self.sched = Scheduler()
        self.sched.start()
        self.sched.add_interval_job(self.is_it_war, minutes=10)
        self.command = "isitwaryet"

    def message_received(self, args, status, msg):

        res = urllib.urlopen("http://www.bbc.co.uk/news")
        text = res.read()
        if "declares war" in text.lower():
            msg.Chat.SendMessage("Brace yourselves, mother Russia is coming")
        else:
            msg.Chat.SendMessage(choice(self.falseMessages))

    def is_it_war(self):
        print "checking if war"
        res = urllib.urlopen("http://www.bbc.co.uk")
        text = res.read()
        if "declares war" in text.lower():
            chat = self.skype.Chat("#stigrk85/$jvlomax;b43a0c90a2592b9b")
            chat.SendMessage("Brace yourself, Mother Russia is coming")
        


    def help(self, msg):
        msg.Chat.SendMessage("usage: @isitwaryet\nWill tell you if Russia has declared war")
Example #14
 def schedule(self):
     sched = Scheduler()
     sched.add_interval_job(self.resetcounter, minutes=self.duration)
     sched.start()
     logging.info(
         "Artemis scheduler, which resets global counter and sends data on hpfeeds, started at %s, executes every %d minutes "
         % (datetime.datetime.now(), self.duration))
Example #16
def main():

	config_file = "config.xml"
	xml = ET.parse(config_file)
	HOST_NAME = xml.find('host_name').text
	DB_NAME = xml.find('db_name').text
	DISPLAY_ADDR = xml.find("display_addr").text
	DISPLAY_PORT = int(xml.find('display_port').text)
	ENDPOINT = xml.find("endpoint").text
	SECONDS = int(xml.find("seconds").text)
	USERNAME = xml.find("username").text
	PASSWORD = xml.find("password").text

	HOST = 'http://' + USERNAME + ':' + PASSWORD + '@' + HOST_NAME

	print "starting ..."
	
	#print HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT
	#readData(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT)
	display = LedDisplay(HOST, DB_NAME, DISPLAY_ADDR, DISPLAY_PORT, ENDPOINT)
	# Create and start the scheduler
	sched = Scheduler()

	sched.add_interval_job(display.query, seconds=SECONDS)
	sched.start()

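	# keep the main thread alive; a non-standalone scheduler runs jobs in a background thread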
	try: 
		while True:
			time.sleep(0.1)

	except KeyboardInterrupt:
		print "terminating"
		sched.shutdown()
Example #17
def startScheduler():
	schedule = Scheduler()
	schedule.add_interval_job(doCleanUp, days=1)
	schedule.start()
	print 'Scheduler has started.'
	while True:
		time.sleep(5)
Example #20
class ClientScheduler(Observable):
    def __init__(self):
        global DeviceClientInstance
        
        self.scheduler = Scheduler()
        self.scheduler.start()
        
        self.wifi_job = WifiActiveCampaignCron.run
        self.bt_job = BluetoothActiveCampaignCron.run
        self.log_job = LogRotation.run
        self.status_job = StatusAPI.run
        
        # Schedule Status API
        self.addSecondSchedule(self.status_job, newschedule_secs=15)
        
        # Schedule Wi-Fi Job
        self.addMinuteSchedule(self.wifi_job, newschedule_mins=1)
        
        # Schedule BT Job
        self.addMinuteSchedule(self.bt_job, newschedule_mins=1)
        
        # Schedule Log Rotation Job, rotation doesn't always happen when this job runs
        self.addMinuteSchedule(self.log_job, newschedule_mins=1)
    
    def addMinuteSchedule(self, job, newschedule_mins):
        self.scheduler.add_interval_job(job, minutes=newschedule_mins)
    
    def addSecondSchedule(self, job, newschedule_secs):
        self.scheduler.add_interval_job(job, seconds=newschedule_secs)
Example #21
 def __init__(self):
     logging.basicConfig(filename=str(app.config['CONFIG']['logfile']), level=logging.INFO)
     sched = Scheduler(daemonic=True)
     sched.start()
     sched.add_interval_job(lambda: self.fetch_report_by_interval(), hours=int(app.config['CONFIG']['scheduler']['hourly_interval']))
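     # daemonic thread: busy-wait keeps the process alive, though time.sleep() in the loop would avoid pegging a CPU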
     while True:
         pass
Example #22
def initialize():
    apsched = Scheduler()
    apsched.start()

    apsched.add_interval_job(syncFacebookEvents, seconds=10)
    print "added job 1"
    apsched.add_interval_job(syncSharedEvents, seconds=10)
    print "added job 2"
Example #23
def schedule():
    sched = Scheduler()
    duration = server.shivaconf.getint('analyzer', 'schedulertime')
    sched.add_interval_job(resetcounter, minutes=duration)
    sched.start()
    logging.info(
        "Shiva scheduler, which dumps data into maindb, resets the global counter and sends data on hpfeeds, started at %s and will execute every %d minutes"
        % (datetime.datetime.now(), duration))
Example #24
class ScheduledLoad():
    def __init__(self, store):
        self.loader = Loader(store)
        self.sched = Scheduler()
        self.sched.start()

    def load(self):
        self.sched.add_interval_job(self.loader.load_all, minutes=5)
Example #25
def run_scheduler(app):
    scheduler = Scheduler()

    expire_wrapper = lambda: expire_sessions(app)
    scheduler.add_interval_job(expire_wrapper, seconds=EXPIRE_SESSION_EVERY)

    scheduler.start()
    print "APScheduler started successfully"
Example #26
def start_scheduled_jobs():
    sched = Scheduler()
    sched.start()

    # add scheduler jobs below

    if app.config.get('IS_PRODUCTION'):
        sched.add_interval_job(sqldump_job, days=1)
Example #27
class AdvancedPythonScheduler(object):
    
    def __init__(self):
        self.scheduler = Scheduler()
        self.scheduler.start()

    def schedule_interval_task(self, action, weeks=0, days=0, hours=0, minutes=0, seconds=0):
        self.scheduler.add_interval_job(action, weeks=weeks, days=days, hours=hours,
                                        minutes=minutes, seconds=seconds)
Example #28
 def runScheduler(self, interval, periodicMovingWindow):
     self.currentPlatform.logger.info("Started Scheduler.")
     sched = Scheduler()
     sched.add_interval_job(self.scheduledJob1, minutes=interval)
     sched.start() 
     
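     # busy-wait keeps the process alive; note the "Finished Scheduler." log below is never reached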
     while True:
         pass
     self.logger.info("Finished Scheduler.")
Example #29
def dev_server():
    trainer = Process(target=train_classifier, args=[classifier])
    trainer.start()

    sched = Scheduler()
    sched.start()
    sched.add_interval_job(dump_classifier, args=[classifier], hours=1)

    app.run(debug=True)
Example #30
 def bootstrap(self):
     """Bootstraps the static inventory writer background job"""
     self.write_static_inventory()
     logging.basicConfig()
     interval = self.config['interval']
     sched = Scheduler()
     sched.start()
     sched.add_interval_job(self.write_static_inventory,
                            seconds=interval)
Example #32
 def bootstrap(self):
     """Bootstraps the static inventory writer background job"""
     self.rm_sitemap_files(Source.STATIC_FILE_PATH)
     self.write_static_inventory()
     logging.basicConfig()
     interval = self.config['interval']
     sched = Scheduler()
     sched.start()
     sched.add_interval_job(self.write_static_inventory, seconds=interval)
Example #33
def scheduleEmailListener(username, password, response_subject, automated_response):
    logging.basicConfig()
    scheduler = Scheduler()
    scheduler.start()
    scheduler.add_interval_job(respondToEmails, minutes=1,
                               args=[username, password, response_subject, automated_response])
    mongo.deleteEntry("fields", "main", {})
    mongo.addEntry("fields", "main", {"email": username, "password": password})
    atexit.register(lambda: scheduler.shutdown(wait=False))
    return scheduler
Example #34
class Application(object):
	def __init__(self):
		self.books = []
		self.logger = logging.getLogger(__name__)
		self.scheduler = Scheduler()

		self.scheduler.add_interval_job(self.process_book_list, seconds=30)

		self.store = AmazonStore()
		self.notifier = NotifyMyAndroid(bookworm.NOTIFICATION_KEY)

		self.load()

	def load(self):
		if os.path.exists('books.dat'):
			with open('books.dat', 'rb') as f:
				self.books = pickle.load(f)
				self.logger.debug('Loaded {0} books'.format(len(self.books)))

	def save(self):
		if len(self.books) > 0:
			with open('books.dat', 'wb') as f:
				self.logger.debug('Saving books')
				pickle.dump(self.books, f, -1)

	def start(self):
		self.logger.info('Starting application')
		self.scheduler.start()

	def stop(self):
		self.logger.info('Stopping application')
		self.save()
		self.scheduler.shutdown()

	def process_book_list(self):
		self.logger.info('Processing book list')

		for book in self.books:
			self.logger.debug('Updating price for {0}'.format(book.title))
			latestPrice = self.store.get_book_price(book.isbn)
			self.logger.debug('latest: {0}, saved: {1}'.format(latestPrice, book.current_price))

			if latestPrice is not None and latestPrice != book.current_price:
				if latestPrice < book.current_price:
					self.logger.debug('Found a lower price!')
					self.notifier.send_notification(self.build_notification_message(latestPrice, book.title, self.store.get_product_url(book.isbn)), 'Price Update')

				book.current_price = latestPrice

	def build_notification_message(self, price, title, productUrl):
		return (u'{0} is now £{1}\n{2}'.format(title, price, productUrl))

	def get_resolver(self):
		return self.store.get_book_details

	def register_new_book(self, book):
		self.books.append(book)
Example #35
 def watch_remote_git(self):
     '''
     Monitor a remote git repository by polling at a set interval.
     '''
     sched = Scheduler()
     sched.start()
     sched.add_interval_job(sync_remote_git, seconds=30, args=[self.repo, self.config])
     
     self._watch()
Example #37
def schedule():
    """Schedule a timer before the counter of relayed emails is reset."""
    sched = Scheduler()
    duration = utils.settings.data["relay"]["schedulertime"]
    sched.add_interval_job(resetcounter, minutes=duration)
    sched.start()
    logging.info(
        "[+] (salmonscheduler.py) - Salmon scheduler started at %s and will execute every %d minutes "
        % (datetime.datetime.now(), duration)
    )
Example #38
def main():
    global exit_flag

    sched = Scheduler()
    sched.start()
    sched.add_interval_job(run_once, minutes=FREQUENCE_MINUTES)
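    # exit_flag is presumably set elsewhere (e.g. by run_once or a signal handler); poll it to end this keep-alive loop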
    while True:
        if exit_flag:
            exit()
        time.sleep(10)
Example #39
    def __init__(self, profile):
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = [
            self.NotificationClient(self.handleEmailNotifications, None),
        ]

        sched = Scheduler()
        sched.start()
        sched.add_interval_job(self.gather, seconds=30)
Example #41
class HeartBeat(Singleton):
    def __init__(self):
        self.scheduler = Scheduler()
        credentials = pika.PlainCredentials('inspur', 'inspur')
        flag, rabbitmq_ip = utils.getRabbitmqIp()
        # a remote IP also works here; remember to open the port on the remote host
        parameters = pika.ConnectionParameters(rabbitmq_ip, 5672, '/',
                                               credentials)
        self.connection = pika.BlockingConnection(parameters)
        self.channel = self.connection.channel()

    # heartbeat task

    def agentHeartBeat(self):

        logger.debug("agent heart beat begin!")
        self.scheduler.add_interval_job(self.heartBeatWithServer, seconds=3)

        self.scheduler.start()
        print('Press Ctrl+{0} to exit'.format('Break' if os.name ==
                                              'nt' else 'C'))

        try:
            while True:
                if not utils.httpdstate:
                    sys.exit(0)
        except KeyboardInterrupt:
            self.scheduler.shutdown()
        finally:
            self.connection.close()

    def heartBeatWithServer(self):
        agentInfo = {'type': constr.NODE_STATUS, 'token': constr.NODE_TOKEN}
        flag, agentid = utils.getAgentId()
        if not (flag and agentid):
            return
        agentInfo['id'] = agentid
        logger.debug("node hb info : {}".format(str(agentInfo)))
        try:
            self.channel.basic_publish(exchange='exchangeTest',
                                       routing_key='heartBeatKey',
                                       body=json.dumps(agentInfo))
        except Exception, e:
            logger.error("heartbeat exception: {}".format(e.message))
            credentials = pika.PlainCredentials('inspur', 'inspur')
            flag, rabbitmq_ip = utils.getRabbitmqIp()
            # a remote IP also works here; remember to open the port on the remote host
            parameters = pika.ConnectionParameters(rabbitmq_ip, 5672, '/',
                                                   credentials)
            self.connection = pika.BlockingConnection(parameters)
            self.channel = self.connection.channel()
        return True
Example #42
def main():
    logging.basicConfig()
    sched = Scheduler(standalone=True)
    latitude = 39.49
    longitude = -87.31
    w = ContentWeather(latitude, longitude)
    c = ContentCountdown(31, 5, 2014, 11, 00)
    contentToDisplay = [w, c]
    contentProvider = ContentProvider(contentToDisplay)
    sched.add_interval_job(updateContent, args=[contentProvider], seconds=30)
    sched.start()
Example #43
def server():
    trainer = Process(target=train_classifier, args=[classifier])
    trainer.start()

    sched = Scheduler()
    sched.start()
    sched.add_interval_job(dump_classifier, args=[classifier], hours=1)

    http_server = HTTPServer(WSGIContainer(app))
    http_server.listen(5000)
    IOLoop.instance().start()
Example #45
    def __init__(self, profile):
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = []

        if 'gmail_address' in profile and 'gmail_password' in profile:
            self.notifiers.append(self.NotificationClient(self.handleEmailNotifications, None))

        sched = Scheduler()
        sched.start()
        sched.add_interval_job(self.gather, seconds=30)
Example #46
def main():
    in_daemon_mode = conf.alarm_send_in_daemon_mode
    if not in_daemon_mode:
        check()
    else:
        from apscheduler.scheduler import Scheduler
        minuteScheduler = Scheduler()
        sleep_seconds = 60  # just 60 seconds
        minuteScheduler.add_interval_job(check, seconds=sleep_seconds)
        minuteScheduler.start()
        while 1:
            time.sleep(9999)
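        # never reached: the loop above only exits via exit()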
        minuteScheduler.shutdown()
Example #47
    def run(self):
        scheduler = Scheduler()

        if self.config["SCHEDULE"].get("packages") is not None:
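            # the "packages" mapping supplies interval kwargs (e.g. minutes=...) straight to add_interval_job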
            scheduler.add_interval_job(self.processor.process,
                                       **self.config["SCHEDULE"]["packages"])

        scheduler.start()

        try:
            while True:
                time.sleep(999)
        except KeyboardInterrupt:
            logger.info("Shutting down Carrier...")
            scheduler.shutdown(wait=False)
Example #48
def createTask( listTask ):
    """
    Create and start the file-extraction tasks
    """

    for dic in listTask:
        pfID = str(dic["pfid"])
        groupID = str(dic["groupid"])
        configPath = dic["configpath"]
        logSource = dic["logsource"]
        Protocol = dic["protocol"]
        Port = dic["port"]
        userName = dic["username"]
        userPass = dic["userpass"]
        fPath = dic["fpath"]
        Files = dic["files"]
        oneTime = dic["onetime"]
        schedStart = dic["schedstart"]
        schedEnd = dic["schedend"]
        schedTime = dic["schedtime"]
        schedCron = dic["schedcron"]

        argus = [ int(pfID), int(groupID), configPath, logSource, Protocol, Port, userName, userPass, fPath, Files ]
        
        sched = Scheduler()
        if oneTime.upper() == 'Y':  # run only once
            if schedStart == None:
                schedStart = datetime.datetime.now() + datetime.timedelta(seconds=2)  # delay by two seconds
            sched.add_date_job( taskFunc, schedStart, name='Job'+pfID, args=argus )
        elif schedTime != None:
            ( sWeeks, sDays, sHours, sMinutes, sSeconds ) = scheduletime.fmtSchedTime( schedTime )
            if schedStart == None:    # with start_date=None an interval job first fires one full interval from now, so back-date the start
                schedStart = datetime.datetime.now() + datetime.timedelta( seconds = 2 ) - datetime.timedelta( seconds = sSeconds, \
                        minutes = sMinutes, hours = sHours, days = sDays, weeks = sWeeks )
            sched.add_interval_job( taskFunc, weeks=sWeeks, days=sDays, hours=sHours, minutes=sMinutes, seconds=sSeconds, \
                    start_date=schedStart, name='Job'+pfID, args=argus )
        elif schedCron != None:
            ( cSecond, cMinute, cHour, cDay, cDayofWeek, cWeek, cMonth, cYear ) = scheduletime.frmSchedCron( schedCron )
            sched.add_cron_job( taskFunc, year=cYear, month=cMonth, week=cWeek, day_of_week=cDayofWeek, day=cDay, \
                    hour=cHour, minute=cMinute, second=cSecond, start_date=schedStart, name='Job'+pfID, args=argus )
        sched.add_listener( taskListener, events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR )

        # record each scheduled task's end time
        dicJob['T'+pfID] = oneTime.upper()
        dicJob['Job'+pfID] = schedEnd

        dicTask['Task'+pfID] = sched
        dicTask['Task'+pfID].start()
Example #49
    def __init__(self, profile):
        self._logger = logging.getLogger(__name__)
        self.q = Queue.Queue()
        self.profile = profile
        self.notifiers = []

        if 'gmail_address' in profile and 'gmail_password' in profile:
            self.notifiers.append(
                self.NotificationClient(self.handleEmailNotifications, None))
        else:
            self._logger.warning('gmail_address or gmail_password not set ' +
                                 'in profile, Gmail notifier will not be used')

        sched = Scheduler()
        sched.start()
        sched.add_interval_job(self.gather, seconds=30)
Example #50
class Timer:
    def __init__(self, fieldCanvas, field):
        #self.sched = BackgroundScheduler()
        self.sched = Scheduler()
        self.sched.start()
        self.timerValue = 0
        self.fieldCanvas = fieldCanvas
        self.field = field


    def updateTimerValue(self):
        self.timerValue += 1
        minutes = "0" + str(int(self.timerValue/60)) if int(self.timerValue/60) < 10 else str(int(self.timerValue/60))
        secondes = "0" + str(self.timerValue%60) if self.timerValue%60 < 10 else str(self.timerValue%60)
        timerString = minutes + ":" + secondes
        self.fieldCanvas.itemconfigure(self.field, text=timerString)

    def getTimer(self):
        return self.timerValue

    def startTimer(self):
        #job = self.sched.add_job(self.updateTimerValue, 'interval', seconds = 1)
        job = self.sched.add_interval_job(self.updateTimerValue, seconds = 1)
        pass
        
    def stopTimer(self):
        try:
            model.user.setScore(self.getTimer())
            self.sched.shutdown(wait=False)
        except Exception as e:
            #print(e)
            pass
Example #51
def createTask(listTask):
    """
    Create and start the scheduled tasks
    """

    for dic in listTask:
        taskID = str(dic["schedid"])
        searchCond = dic["searchcond"].replace(DELIMITER, " ")
        searchStart = dic["searchstart"]
        searchEnd = dic["searchend"]
        schedStart = dic["schedstart"]
        schedEnd = dic["schedend"]
        schedTime = dic["schedtime"]
        schedCron = dic["schedcron"]
        warnOrNot = dic["warnornot"]
        warnCondOp = dic["warncondop"]
        warnCondVal = dic["warncondval"]
        warnLevel = dic["warnlevel"]
        saveResult = dic["saveresult"]

        argus = [
            int(taskID), searchCond, searchStart, searchEnd, warnOrNot,
            warnCondOp, warnCondVal, warnLevel, saveResult
        ]

        sched = Scheduler()
        if schedTime != None:
            (sWeeks, sDays, sHours, sMinutes,
             sSeconds) = scheduletime.fmtSchedTime(schedTime)
            if schedStart == None:  # with start_date=None an interval job first fires one full interval from now, so back-date the start
                schedStart = datetime.datetime.now() + datetime.timedelta( seconds = 2 ) - datetime.timedelta( seconds = sSeconds, \
                        minutes = sMinutes, hours = sHours, days = sDays, weeks = sWeeks )
            sched.add_interval_job( taskFunc, weeks=sWeeks, days=sDays, hours=sHours, minutes=sMinutes, seconds=sSeconds, \
                    start_date=schedStart, name='Job'+taskID, args=argus )
        elif schedCron != None:
            (cSecond, cMinute, cHour, cDay, cDayofWeek, cWeek, cMonth,
             cYear) = scheduletime.frmSchedCron(schedCron)
            sched.add_cron_job( taskFunc, year=cYear, month=cMonth, week=cWeek, day_of_week=cDayofWeek, day=cDay, \
                    hour=cHour, minute=cMinute, second=cSecond, start_date=schedStart, name='Job'+taskID, args=argus )
        sched.add_listener(taskListener,
                           events.EVENT_JOB_EXECUTED | events.EVENT_JOB_ERROR)

        # record each scheduled task's end time
        dicJob['Job' + taskID] = schedEnd

        dicTask['Task' + taskID] = sched
        dicTask['Task' + taskID].start()
Example #52
def run(start_type=None):
    global postdata

    conf = sys_config.SysConfig(configFile)
    # name of the PID file
    pidFile = conf.getConfig("datastats", "pidFile")

    if start_type == None:
        if os.path.exists(pidFile):
            os.remove(pidFile)
        pFile = open(pidFile, "w")
        pFile.write(str(os.getpid()))
        pFile.close()

    # create the postdata object
    postdata = postgrestats.PostgresData(configFile)
    argus = [ postdata ]

    sched = Scheduler(standalone=True)

    sched.add_cron_job(funcNoon, year=cron_noon[0], month=cron_noon[1], \
            week=cron_noon[2], day_of_week=cron_noon[3], day=cron_noon[4], \
            hour=cron_noon[5], minute=cron_noon[6], second=cron_noon[7], args=argus)
    sched.add_cron_job(funcHour, year=cron_hour[0], month=cron_hour[1], \
            week=cron_hour[2], day_of_week=cron_hour[3], day=cron_hour[4], \
            hour=cron_hour[5], minute=cron_hour[6], second=cron_hour[7], args=argus)
    sched.add_cron_job(funcMidnight, year=cron_midnight[0], month=cron_midnight[1], \
            week=cron_midnight[2], day_of_week=cron_midnight[3], day=cron_midnight[4], \
            hour=cron_midnight[5], minute=cron_midnight[6], second=cron_midnight[7], args=argus)
    # custom dashboard statistics job
    sched.add_cron_job(stats_dashboard.stats_dashboard, year=cron_dashboard[0], month=cron_dashboard[1], \
            week=cron_dashboard[2], day_of_week=cron_dashboard[3], day=cron_dashboard[4], \
            hour=cron_dashboard[5], minute=cron_dashboard[6], second=cron_dashboard[7], args=argus)

    # every few minutes (default: 5) check whether the raw pcap files need to be deleted
    interval_chkdevice = conf.getConfig("datastats", "intervalCheckDevice")
    if interval_chkdevice == None:
        interval_chkdevice = 5
    else:
        interval_chkdevice = int(interval_chkdevice)

    sched.add_interval_job(chkdevice.checkDevice, weeks=0, days=0, hours=0, minutes=interval_chkdevice, seconds=0, args=argus)

    try:
        sched.start()
    except (KeyboardInterrupt, SystemExit):
        pass
Example #53
class MetaDataGenerationScheduler():
    def __init__(self, updateIntervalSeconds=30):
        self.interval = updateIntervalSeconds
        config = {'apscheduler.daemonic': False}
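        # 'apscheduler.daemonic': False makes the scheduler thread non-daemonic, so it keeps the process alive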
        self.sched = Scheduler(config)
        # initialize these per instance.
        self.repo_timestamps = {}
        self.jobs = {}


    repo_timestamps = {}  # dictionary of jobName (=reponame) : last scheduler modification timestamp (float)
    jobs = {}  # dictionary of jobName (=reponame) : jobHandle

    configService = RepoConfigService()
    static_root_dir = configService.getStaticRepoDir()
    sched = None
    interval = None

    def start(self):
        self.update_program_config() #read configs, schedule jobs

        # schedule an update as a job
        self.sched.add_interval_job(self.update_program_config, seconds=self.interval)
        
        # schedule cleanup cache
        self.sched.add_cron_job(self.cleanupCacheDir, hour=23, minute=17, second=20)

        self.sched.start()
        
    def createrepo_with_optional_cleanup_job(self, *argList):
        monitor = JobMonitorer()
        monitor.job_starts()
        repoDir = argList[0]
        reponame = argList[1]
        rpm_max_keep = argList[2]
        didCleanUp=False
        try:
            if rpm_max_keep != None:
                didCleanUp=True
                self.configService.doCleanup(repoDir, rpm_max_keep)
                logging.info("job RpmCleanup on "+reponame+" took "+str(monitor.get_execution_time_until_now_seconds())+" seconds")
            self.configService.doCreateRepo(repoDir, reponame)
            monitor.job_finishes()
            logging.info(monitor.get_pretty_job_summary("createrepo on "+reponame+" (cleanup included : "+str(didCleanUp)+")"))
        except Exception, ex:
            logging.error(traceback.format_exc())
Example #54
class WebCheckScheduler(object):

    def __init__(self, app, dbservice):
        self.dbservice = dbservice		
        self.sched = None
        self.url_check = URLCheck()
        
        notificator_email = app.config['NOTIFICATOR_EMAIL']
        notificator_user = app.config['NOTIFICATOR_USER']
        notificator_password = app.config['NOTIFICATOR_PASSWORD']
        notificator_mail_server = app.config['NOTIFICATOR_MAIL_SERVER']
        
        self.notificator_email_to = app.config['NOTIFICATOR_EMAIL_TO']
        
        self.mail_sender = MailSender(notificator_email, notificator_user, notificator_password, notificator_mail_server)  

    def main_job(self):
        websites = self.dbservice.query_db('SELECT * FROM sites')

        if len(websites) != 0:

            for website in websites:
                url = website['url']
                webid = website['id']
                status_code = self.url_check.get_url_status(url)
                last_check = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

                self.dbservice.execute_db('UPDATE sites SET last_check=?, status_code=? WHERE id=?', [last_check, status_code, webid])

                if status_code != '200':
                    title = "Notification : " + url
                    message = "Web site is offline..."

                    self.mail_sender.send_message(self.notificator_email_to, title, message)

            self.dbservice.execute_db('UPDATE dbutil SET db_code=? WHERE id=?', [1, 1])

    def change_interval(self, interval_seconds):
        self.sched.shutdown()
        self.start(interval_seconds)

    def start(self, interval_seconds):
        self.sched = Scheduler()
        self.sched.start()
        self.sched.add_interval_job(self.main_job, seconds=interval_seconds)