Example 1
def run_or_schedule(job, schedule=False, exception_handler=None):
    """Runs a job and optionally schedules it to run later

    Args:
        job (func): The func to run
        schedule (bool): Schedule `func` to run in the future (default: False)
        exception_handler (func): The exception handler to wrap the function in
            (default: None)

    Examples:
        >>> job = partial(pprint, 'hello world')
        >>> run_or_schedule(job)
        u'hello world'
        >>> exception_handler = ExceptionHandler('*****@*****.**').handler
        >>> run_or_schedule(job, False, exception_handler)
        u'hello world'
    """
    if exception_handler and schedule:
        job = exception_handler(job)

    job()

    if schedule:
        sch.every(1).day.at(SCHEDULE_TIME).do(job)

        while True:
            sch.run_pending()
            time.sleep(1)
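
This example assumes several module-level names that the listing does not show; a plausible setup (assumed, not taken from the original source):

import time
from functools import partial
from pprint import pprint

import schedule as sch

SCHEDULE_TIME = '00:00'  # placeholder: the time of day for the daily run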
Example 2
def main_loop():
	global tweets

	# Uncomment for BBB
	# global led_on
	# global led


	tweets = _get_tweets() # initial loading of the tweets

	schedule.every(15).minutes.do(_trigger_tweet)
	schedule.every(15).minutes.do(random_job)
	schedule.every(2).minutes.do(tweet_job)

	# stream_tweets() # uncomment this line and comment out the schedule lines above to use the streaming api

	while True:
		schedule.run_pending()

		# Uncomment for BBB
		# if led_on == True:
		# 	GPIO.output(led, GPIO.LOW)
		# 	led_on = False
		# else:
		# 	GPIO.output(led, GPIO.HIGH)
		# 	led_on = True

		sleep(1.0)
Example 3
def __main__():
    hus = json.loads(hint_url)
    for (index, url) in enumerate(hus):
        schedule.every(int(frequency_rate[index])).seconds.do(check_request, index, url)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 4
def main():
	
	global thingiverse
	global twitter
	auto_mode = True

	if DEBUG: print 'welcome'
	thingiverse.DEBUG = False
	thingiverse.txt_url_mode = False
	thingiverse.connect()
	print api.VerifyCredentials().name
	print '\n\nThingisimilar\n'

	schedule.every(2).minutes.do(exploring)


	if auto_mode:
		#main_loop()

		exploring()
		while True:
			schedule.run_pending()
			sleep(1.0)

	else:
		#while True:
		#num1 = raw_input('#1 --> ')
		#num2 = raw_input('#2 -->')
		num1 = test_things[0]
		num2 = test_things[4]
		standard_job(int(num1), int(num2))
Example 5
def geotagx_harvestor(media_object):
    image_url = media_object['media_url']
    source_uri = media_object['expanded_url']
    create_time = media_object['tweet_created_at']
    id_str = media_object['id_str']

    try:
        # if the image is not already recorded, the lookup raises KeyError
        # and control moves to the except block
        DATA_DUMP[image_url]
        print "Duplicate image found, ignoring.....", image_url
    except KeyError:
        # Create Object for pushing to geotagx-sourcerer-proxy
        print "Pushing image to geotagx-sourcerer-proxy : ", image_url
        _sourcerer_object = {}
        _sourcerer_object['source'] = GEOTAGX_SOURCERER_TYPE
        _sourcerer_object['type'] = "IMAGE_SOURCE"
        _sourcerer_object['categories'] = CATEGORIES
        _sourcerer_object['image_url'] = image_url
        _sourcerer_object['source_uri'] = source_uri
        _sourcerer_object['create_time'] = create_time
        _sourcerer_object['id'] = id_str

        # Push data via geotagx-sourcerer-proxy
        ARGUMENTS = base64.b64encode(json.dumps(_sourcerer_object))
        GEOTAGX_SOURCERER_PROXY_URL = TARGET_HOST + TARGET_URI + "?sourcerer-data=" + ARGUMENTS
        try:
            urllib2.urlopen(GEOTAGX_SOURCERER_PROXY_URL)
            print "SUCCESSFULLY PUSHED : ", image_url
            DATA_DUMP[image_url] = _sourcerer_object
        except:
            print "FAILURE", image_url

        schedule.run_pending()
Example 6
def otherstuff():
    from sdjson import sddata
    from mint import minttrans, mintaccounts
    from location import locationkml
    # from nesttest import neststart
    # neststart()
    locationkml()
    minttrans()
    mintaccounts()
    sddata()
    # testlocationquery()
    schedule.every(60).seconds.do(jobqueue.put, locationkml)
    schedule.every(2).hours.do(jobqueue.put, minttrans)
    schedule.every(6).hours.do(jobqueue.put, mintaccounts)
    schedule.every(12).hours.do(jobqueue.put, sddata)
    # schedule.every(20).seconds.do(jobqueue.put, grr)
    # start four worker threads draining the job queue
    for _ in range(4):
        worker_thread = Thread(target=worker_main)
        worker_thread.start()
    while True:
        schedule.run_pending()
        time.sleep(1)
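
This example leans on a jobqueue and worker_main that are not shown; a minimal sketch of that queue-and-worker pattern (one plausible implementation, consistent with how jobs are enqueued above):

try:
    import queue           # Python 3
except ImportError:
    import Queue as queue  # Python 2

jobqueue = queue.Queue()

def worker_main():
    while True:
        job_func = jobqueue.get()  # block until the scheduler enqueues a job
        try:
            job_func()
        finally:
            jobqueue.task_done()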
Example 7
def sched():
    """Makes scheduling of download-create-playback cycles."""
    log.debug("[smp][.] Read schedule file: %s" % SCHEDULE_PATH)

    with open(SCHEDULE_PATH) as fp:
        time_table = fp.read().split()

    config = configparser.ConfigParser()
    config.read(PLAYER_CONFIG_PATH)

    minutes_offset = int(config['simple_media_player']['launch_time_offset'])

    now = str(datetime.now().date())

    smp = SimpleMediaPlayer()

    for entry in time_table:
        tp = datetime.strptime(now + ' ' + entry, '%Y-%m-%d %H:%M')
        shifted_time = tp - timedelta(minutes=minutes_offset)

        # start earlier to have time to create the video and handle issues
        actual_start = ':'.join(str(shifted_time.time()).split(':')[:2])

        schedule.every().day.at(actual_start).do(smp.run, tp)
        log.debug("[smp][.] Job scheduled on %s to be ended at %s"
                  % (actual_start, entry))

    log.debug("[smp][.] Start scheduling loop...")
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 8
    def schedule_with_delay(self):
        for task in self.tasks:
            interval = task.get('interval')
            schedule.every(interval).minutes.do(self.schedule_task_with_lock, task)
        while True:
            schedule.run_pending()
            time.sleep(1)
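
The schedule_task_with_lock method is not shown in this example; one plausible shape (assumed), using a non-blocking lock so that a run is skipped rather than stacked while the previous one is still going:

import threading

class TaskScheduler(object):
    def __init__(self, tasks):
        self.tasks = tasks
        self._lock = threading.Lock()

    def schedule_task_with_lock(self, task):
        # hypothetical: the 'func' key is assumed, only 'interval' appears above
        if self._lock.acquire(False):
            try:
                task['func']()
            finally:
                self._lock.release()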
Example 9
    def handle(self, *args, **options):
        schedule.every().week.do(_fetch_mozillians)
        schedule.every().week.do(_fetch_countries)

        while True:
            schedule.run_pending()
            time.sleep(3600)
Example 10
def run_schedule():
    global counter
    log.info('scheduler started')
    while 1:
        schedule.run_pending()
        if counter < parser.number_of_requests:
            # acquiring banks and rates data
            banks = parser.banks
            rates = parser.rates
            log.info('data gathered')
            log.info('banks: %s' % len(banks))
            log.info('rates: %s' % len(rates))

            # saving data into DB
            for logo, bank in banks.iteritems():
                saved_bank = Bank.query.filter_by(uri_logo=bank.uri_logo).first()
                rate = rates[logo]

                if saved_bank:
                    saved_bank.update_time = bank.update_time
                    rate.bank_id = saved_bank.id
                else:
                    db.session.add(bank)
                    rate.bank = bank

                db.session.add(rate)
                try:
                    db.session.commit()
                except SQLAlchemyError as e:
                    log.error(e.message, e)

            # increasing counter
            counter += 1
        time.sleep(1)
Example 11
    def schedule_raw(self, df_rawsources):
        # Iterate through all sources with 'raw' type
        for index, source in df_rawsources.iterrows():
            print "[SCHEDULER] Working with raw source: ", source['name']
            updateFrequency = source['updateFrequency']
            print "[SCHEDULER] Update frequency is <", updateFrequency, ">"
            updates = source['updates']
            if len(updates) > 0:
                # Get the most recent update
                lastUpdate = dp.parse(updates[0]['createdAt'])
                # Get the current time in seconds
                now = int(round(time.time()))
                # Skip this source if the last update is more recent than
                # the update interval (otherwise it would be scheduled twice)
                if now - int(lastUpdate.strftime('%s')) <= updateFrequency:
                    continue
            source_id = source['_id']
            print "[SCHEDULER] Scheduling source <", source['name'], "> with id <", source_id, ">"
            schedule.every(updateFrequency).seconds.do(self.process_raw, source_id)
        # Process all scheduled items
        while True:
            schedule.run_pending()
            time.sleep(1)
Example 12
def minecraftlistener():
    nextlineforlist = False
    numplayers = 0
    logfile = os.path.abspath(mcfolder + "/logs/latest.log")
    f = open(logfile, "r", encoding="utf-8")
    file_len = os.stat(logfile)[stat.ST_SIZE]
    f.seek(file_len)
    pos = f.tell()
    UUID = {}

    while True:
        pos = f.tell()
        line = f.readline().strip()
        schedule.run_pending()
        if not line:
            if os.stat(logfile)[stat.ST_SIZE] < pos:
                f.close()
                time.sleep( 1 )
                f = open(logfile, "r")
                pos = f.tell()
            else:
                time.sleep( 1 )
                f.seek(pos)
        else:
            
            eventData = vanillabean.genEvent(line)
            event = ""
            data = ()
            
            if eventData:
                # print(eventData)
                event, data = eventData
                print(event, data)
            
            if nextlineforlist:
               
                nextlineforlist = False    
                playerlist(numplayers, line)

            if event == "playerList":
                numplayers = data[1]
                nextlineforlist = True

            if event == "achievement":
                eventAchievement(data)

            if event == "command":
                eventCommand(data)

            if event == "UUID":
                eventUUID(data)

            if event == "chat":
                eventChat(data)

            if event == "logged":
                eventLogged(data)

            if event == "lag":
                eventLag(data)
Example 13
    def run(self):
        sendHour = str(self.parameters.get('maillist', 'sendhour'))
        print(sendHour)
        schedule.every().day.at(sendHour).do(self.sendMail)
        while True:
            schedule.run_pending()
            time.sleep(5)
Example 14
def main():
    port = "5918"
    if len(sys.argv) > 1:
        port = sys.argv[1]
    
    socket = initiate_zmq(port)
    logging.basicConfig(filename='./log/ingest_lottery.log', level=logging.INFO)
    tz = pytz.timezone(pytz.country_timezones('cn')[0])
    schedule.every(30).seconds.do(run, socket, tz)
    while True:
        try:
            schedule.run_pending()
            time.sleep(1)
        except KeyboardInterrupt:
            now = datetime.now(tz)
            message = "CTRL-C to quit the program at [%s]" % now.isoformat()
            logging.info(message)
            break
        except Exception as e:
            now = datetime.now(tz)
            message = "Error at time  [%s]" % now.isoformat()
            logging.info(message)
            logging.info(e)
            # reschedule the job
            schedule.clear()
            socket = initiate_zmq(port)
            schedule.every(30).seconds.do(run, socket, tz)
Example 15
def main():
    for job in schedule.jobs:
        project_logger.info(job)

    while True:
        schedule.run_pending()
        time.sleep(10)
Example 16
def process(run_once=False):
    """
    Runs the processing loop for as long as running_event is set.
    :param bool run_once: if True, report stats once instead of looping
    :return: None
    """
    print("Starting qshape processing")

    # handle ctrl+c
    print('Press Ctrl+C to exit')
    running_event = threading.Event()
    running_event.set()
    def signal_handler(signum, frame):
        print('Attempting to close workers')
        running_event.clear()
    signal.signal(signal.SIGINT, signal_handler)

    def report_stats():
        with statsd.pipeline() as pipe:
            for stat, value in get_qshape_stats():
                pipe.incr(stat, value)

    report_stats()  # report current metrics and schedule them to the future
    if not run_once:
        schedule.every(STATSD_DELAY).seconds.do(report_stats)
        while running_event.is_set():
            schedule.run_pending()
            time.sleep(0.1)
    print("Finished qshape processing")
Example 17
  def run(self):
    logging.info('CrawlerDaemon run')
    sqlite_session = get_session( self.config.database )
    orm_engines = sqlite_session.query( ORM_Engine ).all()

    if not self.config.dry_run:
      if len( orm_engines ) == 0:
        logging.debug( 'Crawler has no engines' )
         
      # Start controllers in each thread 
      for orm_engine in orm_engines:
        logging.info('Load orm_engine: %s' % orm_engine.name )
        engine = Engine( orm_engine )
        self.controllers[ engine.name ] = Controller( engine, sqlite_session ) 
        self.controllers[ engine.name ].start()
      
      # Start scheduling searches
      for orm_search in sqlite_session.query( Search ).all():
        for engine in orm_search.engines:
          # Bind loop variables as lambda defaults: a plain lambda would
          # capture `engine` and `orm_search` late, so every scheduled job
          # would use the values from the last loop iteration.
          job = lambda e=engine, s=orm_search: self.controllers[ e.name ].queue.put( s )
          schedule.every( orm_search.periodicity ).seconds.do( job )
          logging.debug('Put %s to schedule with periodicity %i seconds' % ( orm_search.name, orm_search.periodicity ) )

    self.httpd = HTTPD( self.config, self.controllers )
    self.httpd.start()
     
    while True:
      if not self.config.dry_run:
        schedule.run_pending()

      time.sleep(1)
Example 18
    def run(self, path_local_log=None, branch='next', sched='false', launch_pause='false'):
        """
        :param str path_local_log: Path to the local log file copied from the remote server. If ``None``, do not copy
         remote log file.
        :param str branch: Target git branch to test.
        :param str sched: If ``'true'``, run tests daily at 06:00. Otherwise, run tests only once.
        :param str launch_pause: If ``'true'``, pause at a breakpoint after launching the instance and mounting the data
         volume. Continuing from the breakpoint will terminate the instance and destroy the volume.
        """

        import schedule
        from logbook import Logger

        self.log = Logger('nesii-testing')

        self.path_local_log = path_local_log
        self.branch = branch
        self.launch_pause = launch_pause

        if self.launch_pause == 'true':
            self.log.info('launching instance then pausing')
            self._run_tests_(should_email=False)
        else:
            if sched == 'true':
                self.log.info('begin continuous loop')
                schedule.every().day.at("6:00").do(self._run_tests_, should_email=True)
                while True:
                    schedule.run_pending()
                    time.sleep(1)
            else:
                self.log.info('running tests once')
                self._run_tests_(should_email=True)
Example 19
    def background_task(self):
        schedule.run_pending()
        # restart the timer with the proper interval until the next job
        self.bg_task = threading.Timer(schedule.idle_seconds(),
                                       self.background_task)
        self.bg_task.daemon = True  # thread dies with main
        self.bg_task.start()
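
A minimal, self-contained sketch of the same Timer-driven pattern at module level (the job and interval here are illustrative, not from the original class):

import threading
import time

import schedule

def job():
    print('tick')

schedule.every(10).seconds.do(job)

def background_task():
    schedule.run_pending()
    # schedule.idle_seconds() is the number of seconds until the next job is due
    timer = threading.Timer(schedule.idle_seconds(), background_task)
    timer.daemon = True  # thread dies with the main thread
    timer.start()

background_task()
time.sleep(60)  # keep the main thread alive while the timer thread fires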
Example 20
def heartbeat():
    global encoder
    global photo_overlay
    global play_list
    global heartbeat_timer

    setup_core_tasks()
    # run some of the configured core tasks immediately on start up
    # and then on the normal schedule afterward
    registration_task()
    fetch_show_schedule_task()
    report_pi_status_task()


    while True:
        try:
            time.sleep(seconds_until_next_heartbeat())
            logger.debug('heartbeat')

            schedule.run_pending()

            if not encoder or not encoder.is_active():
                try:
                    video_info = transcode_queue.popleft()
                except IndexError:
                    pass
                else:
                    if encoder:
                        encoder.stop()
                    encoder = omx.Encoder(video_info.get('source_file'),
                                          video_info.get('target_file'),
                                          width=video_info.get('width'),
                                          height=video_info.get('height'))
        except:
            logger.exception('Error during heartbeat timer processing:')
Example 21
    def __init__(self):

        # Check weather every 5 minutes for more verbose detailing of conditions
        # during amber level scenarios. Mostly silent.
        schedule.every(5).minutes.do(self.checking)

        # Daily notifications for dawn outings
        schedule.every().day.at('06:00').do(self.outing_notify)

        # Notifies ahead of wednesday afternoon outings
        schedule.every().wednesday.at('12:00').do(self.outing_notify)

        # Notifies ahead of weekend morning outings
        schedule.every().saturday.at('07:00').do(self.outing_notify)
        schedule.every().sunday.at('07:00').do(self.outing_notify)

        # Notifies ahead of weekend afternoon (novice) outings
        schedule.every().saturday.at('11:00').do(self.outing_notify)
        schedule.every().sunday.at('11:00').do(self.outing_notify)

        # Terminates late at night to stay within free Heroku usage limits
        schedule.every().day.at('22:00').do(self.terminate)

        while True:
            schedule.run_pending()
            time.sleep(1)
Example 22
def dynamically_scrape_and_append_sales_data(filename,
                                             interval,
                                             num_retries = 10):
    """
    Dynamically scrapes sales data and appends the data to a file by generating
    a list of links, checking it against an old list and only keeping new links,
    and scraping those links for sales data.
    """

    old_list = []

    def job():
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_and_append_sales_data_from_featured_links(filename,
                                                         clean_links,
                                                         num_retries)

        # update the shared list in place so the next scheduled run sees it;
        # rebinding the name would only create a local variable
        old_list[:] = new_list

    job()
    schedule.every(interval).hours.do(job)

    # runs forever; there is no exit path
    while True:
        schedule.run_pending()
        time.sleep(30)
Example 23
    def routine(self):
        # install schedule
        for entity in self.entities:
            pieces = entity.getschedule().split(" ")
            if re.match(r"^\d+$", pieces[1]):
                every = schedule.every(int(pieces[1]))
                pieces = pieces[2:]
            else:
                every = schedule.every()
                pieces = pieces[1:]

            timedes = getattr(every, pieces[0])
            pieces = pieces[1:]

            if len(pieces) and pieces[0] == "at":
                finish = timedes.at(pieces[1])
            else:
                finish = timedes

            finish.do(self.monitor, entity)

        while True:
            time.sleep(1)
            # one run_pending() call per tick serves every scheduled entity
            schedule.run_pending()
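
From the parsing above, getschedule() evidently returns strings of the form "every [n] unit [at HH:MM]"; a few illustrative values (inferred, not from the original):

# "every 10 minutes"    -> schedule.every(10).minutes.do(...)
# "every day at 06:30"  -> schedule.every().day.at("06:30").do(...)
# "every 2 hours"       -> schedule.every(2).hours.do(...)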
Example 24
def run(cfg, configToExecute):
    global remote, remoteUser
    remote     = cfg.get('remote', 'host')
    remoteUser = cfg.get('remote', 'user')
    
    # get task configs
    configs    = glob.glob(cfg.get('backup', 'task_dir', '/etc/backup/tasks/') + "*.conf")
    tasks      = []

    if configToExecute is None:
        # load all config files
        for config in configs:
            cfg = ConfigParser.ConfigParser()
            cfg.read(config)
            tasks.append(cfg)

        # setup timers for each task
        for task in tasks:
            schedule_task(task)

        # run the scheduler loop
        while True:
            schedule.run_pending()
            time.sleep(60)
    else:
        # just execute the config then exit
        config = cfg.get('backup', 'task_dir', '/etc/backup/tasks/') + configToExecute
        cfg = ConfigParser.ConfigParser()
        cfg.read(config)
        execute(cfg)
    return
Example 25
def scheduler_init (parent):
    '''
        Schedule Init

        Start the main loop for the internal scheduler that
        ticks every second.

        --
        @param  parent:int  The PID of the parent.

        @return void
    '''

    # Define the jobs to run at which intervals
    schedule.every().minute.do(Reminder.run_remind_once)
    schedule.every().minute.do(Reminder.run_remind_recurring)

    # Start the main thread, polling the schedules
    # every second
    while True:

        # Check if the current parent pid matches the original
        # parent that started us. If not, we should end.
        if os.getppid() != parent:
            logger.error(
                'Killing scheduler as it has become detached from parent PID.')

            sys.exit(1)

        # Run the schedule
        schedule.run_pending()
        time.sleep(1)

    return
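
The parent-PID check above implies scheduler_init runs in a separate process; a hedged sketch of one way it might be launched (the original does not show this):

import os
from multiprocessing import Process

# pass the current PID so the child can exit if it gets re-parented
p = Process(target=scheduler_init, args=(os.getpid(),))
p.start()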
Example 26
def SendFrame(send_type=0):
	global acknowledgement_status

	#serial.write(consumption_frame)

	while True:
		if not send_type:
			schedule.run_pending()

		frame = receiver.ReceiveFrame()  # must stay active: `frame` is used below

		if frame:
			frame_data = ConvertDatasToInt(frame[7:-1])

			if frame_data[0]==6 and frame_data[frame_data[1]+3]==frame_id:
				now = datetime.datetime.now()
				date_time = now.strftime("%Y-%m-%d %H:%M:%S")

				acknowledgement_status = 1

				sql.UpdateQuery("frames", [["frames_status", 1]], "frames_id={}".format(frame_id))

				sql.InsertQuery("room_consumptions", "{}, {}, {}".format(rooms_id, frame_data[frame_data[1]+5], date_time))
				sql.Commit()

				break
Example 27
def watch():
    # set up argument parsing
    parser = example.BigFixArgParser()
    parser.add_argument('-a', '--actions', required = False, help = 'List of actions to watch')
    parser.add_argument('-v', '--verbose', default = False, action = "store_true", required = False, help = 'To see the full list of commands that contain watched actions')
    parser.add_argument('-t', '--time', default = 60, required = False, help = 'To set the waiting period')
    parser.base_usage += """
  -a, --actions [ACTIONS/FILENAME]   Specify a list of actions to watch, seperated by comma(,); 
                                     if FILENAME with .wal extension detected, will read the file to get the list. 
  -v, --verbose                      To see the full list of commands that contain watched actions
  -t, --time [MINUTE]                   A number specifing the waiting period between checks"""
    
    parser.description = 'Used to watch certain actions'
    ext = ".wal"
    args = parser.parse_args()
    args_actions = ""
    if ext in args.actions:
        actions_file = open(args.actions, 'r')
        for line in actions_file:
            args_actions += line
    else:
        args_actions = args.actions
    actions_list = args_actions.split(",")

    watched_actions = gen_regex(actions_list)
    action_record = {}
    for a in actions_list:
        action_record[a] = False

    t = int(args.time)
    gen_summary(action_record, watched_actions, args)
    schedule.every(t).minutes.do(gen_summary, action_record, watched_actions, args)
    while True:
        schedule.run_pending()
Example 28
def main():
    args = parser.parse_args()

    log = logging.getLogger()
    log.level = logging.INFO
    stream = logging.StreamHandler()
    file_handler = logging.FileHandler(args.logfile)
    log.addHandler(stream)
    log.addHandler(file_handler)

    with open(args.config) as f:
        config = yaml.safe_load(f)

    log.info('Connecting to database')
    database = connect_to_database(**config['mongodb'])
    log.info('Connection established')

    services = [
        service(auxdir=args.auxdir)
        for service in supported_services.values()
    ]

    schedule.every().day.at('15:00').do(
        fill_last_night, services=services, database=database
    )

    log.info('Schedule started')
    try:
        while True:
            schedule.run_pending()
            sleep(60)
    except (KeyboardInterrupt, SystemExit):
        pass
Example 29
def main():
    logging.info("Starting application")
    capture_nature()
    schedule.every(5).minutes.do(upload_to_gdrive)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 30
def dynamically_scrape_combined_data(data_filename,
                                     sales_filename,
                                     interval,
                                     num_retries = 10):
    """
    Dynamically scrapes a continuously updated list of unique clean links and
    appends the data to their respective files.
    """

    old_list = []

    def job():
        new_list = collect_all_featured_links()
        new_links = remove_old_links(old_list, new_list)
        bad_links = collect_bad_links(new_links)
        clean_links = remove_bad_links_from_link_list(bad_links, new_links)

        scrape_combined_data_from_all_featured_products(data_filename,
                                                        sales_filename,
                                                        clean_links,
                                                        num_retries)

        # update the shared list in place so the next scheduled run sees it;
        # rebinding the name would only create a local variable
        old_list[:] = new_list

    job()
    schedule.every(interval).hours.do(job)

    # runs forever; there is no exit path
    while True:
        schedule.run_pending()
        time.sleep(30)
Example 31
import playsound
import schedule
import time


def song():
    print("song")
    playsound.playsound("OnTheHouseTop.mp3")


schedule.every().day.at("09:00").do(song)

while 1:
    schedule.run_pending()
    time.sleep(1)
Example 32
    def test_run_every_n_days_at_specific_time(self):
        mock_job = make_mock_job()
        with mock_datetime(2010, 1, 6, 11, 29):
            every(2).days.at('11:30').do(mock_job)
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 6, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 7, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 8, 11, 29):
            schedule.run_pending()
            assert mock_job.call_count == 0

        with mock_datetime(2010, 1, 8, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 1

        with mock_datetime(2010, 1, 10, 11, 31):
            schedule.run_pending()
            assert mock_job.call_count == 2
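
The test relies on make_mock_job and mock_datetime helpers that are not shown; minimal assumed versions, in the style of typical test utilities for the schedule library:

import datetime
from contextlib import contextmanager
from unittest import mock

def make_mock_job(name=None):
    job = mock.Mock()
    job.__name__ = name or 'job'
    return job

@contextmanager
def mock_datetime(year, month, day, hour, minute):
    # temporarily replace datetime.datetime so schedule sees a fixed "now"
    class MockDate(datetime.datetime):
        @classmethod
        def today(cls):
            return cls(year, month, day)
        @classmethod
        def now(cls):
            return cls(year, month, day, hour, minute)
    original = datetime.datetime
    datetime.datetime = MockDate
    try:
        yield
    finally:
        datetime.datetime = original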
Example 33
def start_job():
    schedule.every().day.at('08:10').do(send)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 34
def run_schedule():
    while 1:
        schedule.run_pending()
        time.sleep(1)
Example 35
def scheduler():
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 36
def run():
    '''Start the scheduler'''
    while True:
        schedule.run_pending()
        time.sleep(.2)
Example 37
def TimeStamp():
    # (function header reconstructed: the listing begins mid-function, and
    # `rightNow` is assumed to come from datetime.now())
    rightNow = datetime.now()
    current_time = rightNow.strftime("%H:%M:%S")
    current_date = rightNow.strftime("%m-%d-%Y")
    return "\nTime: " + current_time + "\nDate: " + current_date


# - - - Running the App - - -
def RunApp():

    #Run the 3 main methods and pass in the list of item objects
    print("\nApp has been called")

    ReadUserList(itemsList)
    print("App Has read the User's list")

    GetProductInfo(itemsList)
    print("App has collected the new product prices")

    UpdateSpreadSheet(itemsList)
    print("App Has updated the spreadsheet. No Errors occurred at:",
          TimeStamp(), "\n")


print("\n\nStarting App", TimeStamp())
RunApp()

# - - - Scheduling the App - - -
schedule.every().day.at("00:00").do(
    RunApp)  #app will be run every day at midnight
while True:
    schedule.run_pending()  #checks to see if it is midnight yet
    sleep(10)  #keeps the cpu usage percentages down on my server
Example 38
def executor():
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 39
    def main(self):
        self.log.info('Start')
        self.log.debug('Started: ' + os.path.abspath(__file__))
        self.log.debug('Setting SIGTERM, SIGINT handlers')
        signal.signal(signal.SIGTERM, self.exit_handler)
        signal.signal(signal.SIGINT, self.exit_handler)

        # Read cam configs
        cam_cfg_dir = os.path.join(self.cfg_dir, self.cfg['cam_cfg_mask'])
        self.log.debug('Configs search path: ' + cam_cfg_dir)

        cam_cfg_list = glob2.glob(
            os.path.join(self.cfg_dir, self.cfg['cam_cfg_mask']))
        cam_cfg_list.remove(self.cfg_file)
        self.log.debug('Found configs: %s' % cam_cfg_list)

        if len(cam_cfg_list) == 0:
            self.log.critical('No cam config found. Exit')
            sys.exit(0)

        for cur_cam_cfg in cam_cfg_list:
            self.log.debug('Read cam config: ' + cur_cam_cfg)
            tmp_cfg = Config(file(cur_cam_cfg))
            cur_cam_cfg_active_flag = True

            try:
                tmp_cfg['active']
            except AttributeError:
                self.log.debug('active flag not found')
            else:
                cur_cam_cfg_active_flag = tmp_cfg['active']

            if cur_cam_cfg_active_flag:
                self.cam_cfg.append(tmp_cfg)
                self.cam_cfg_resolver_dict.clear()
                merger = ConfigMerger(resolver=self.configs_resolver)
                merger.merge(self.cam_cfg[-1], self.cfg)

                for key in self.cam_cfg_resolver_dict:
                    self.cam_cfg[-1][key] = self.cam_cfg_resolver_dict[key]

                self.log.debug('Loaded settings for: ' +
                               self.cam_cfg[-1]['name'])
            else:
                self.log.debug('Cam config is skipped due active flag: ' +
                               cur_cam_cfg)
        # End Read cam configs

        # Cleaner
        self.cfg['cleaner_max_removes_per_run'] = self.replacer(
            str(self.cfg['cleaner_max_removes_per_run']), 0)
        schedule.every(self.cfg['cleaner_run_every_minutes']).minutes.do(
            self.cleaner)
        # End Cleaner

        # PIDs full path
        for iterator, cam in enumerate(self.cam_cfg):
            try:
                pid_streamer = cam['pid_streamer']
            except AttributeError:
                self.log.debug('pid_streamer not found for cam: ' +
                               cam['name'])
                try:
                    pid_streamer = self.cfg['pid_streamer']
                except AttributeError:
                    self.log.critical("Can't find pid_streamer in config")
                    sys.exit(1)

            try:
                pid_capturer = cam['pid_capturer']
            except AttributeError:
                self.log.debug('pid_capturer not found for cam: ' +
                               cam['name'])
                try:
                    pid_capturer = self.cfg['pid_capturer']
                except AttributeError:
                    self.log.critical("Can't find pid_capturer in config")
                    sys.exit(1)

            self.cam_streamer_pid.append(
                self.replacer(os.path.join(self.cfg['pid_dir'], pid_streamer),
                              iterator))
            self.cam_capturer_pid.append(
                self.replacer(os.path.join(self.cfg['pid_dir'], pid_capturer),
                              iterator))
        # End PIDs full path

        self.kill_cams_process()
        self.write_main_pid()

        while self.main_loop_active_flag:
            for iterator, cam in enumerate(self.cam_cfg):
                if len(self.cam_streamer) == iterator:

                    # Create cam cap dir only if cap_cmd is not False
                    try:
                        cap_cmd = self.cam_cfg[iterator]['cap_cmd']
                    except AttributeError:
                        cap_cmd = None
                        self.log.debug('Capture command not found')

                    if cap_cmd is not False:
                        cap_dir_cam = self.replacer(self.cfg['cap_dir_cam'],
                                                    iterator)
                        if not os.path.exists(cap_dir_cam):
                            try:
                                os.makedirs(cap_dir_cam)
                            except OSError:
                                self.log.critical(
                                    'Failed to create directory: ' +
                                    cap_dir_cam)
                                sys.exit(1)
                    # End Create cam cap dir

                    self.cam_streamer_start_flag.append(True)

                    self.cam_streamer.append(None)
                    self.cam_streamer_start_time.append(0)
                    self.cam_streamer_poll_flag.append(False)

                    self.cam_capturer.append(None)
                    self.cam_capturer_start_flag.append(False)
                    self.cam_capturer_check_flag.append(False)
                else:
                    if self.cam_streamer[iterator].poll() is None:
                        self.log.debug('Streamer "%s" is alive' % cam['name'])
                    else:
                        self.log.warn('Streamer "%s" is dead (exit code: %s)' %
                                      (cam['name'],
                                       self.cam_streamer[iterator].returncode))
                        self.cam_streamer_start_flag[iterator] = True

                # Capturer alive check
                if self.cam_capturer_check_flag[iterator]:
                    if self.cam_capturer[iterator].poll() is None:
                        self.log.debug('Capturer "%s" is alive' % cam['name'])
                    else:
                        self.log.warn('Capturer "%s" is dead (exit code: %s)' %
                                      (cam['name'],
                                       self.cam_capturer[iterator].returncode))
                        self.cam_streamer_poll_flag[iterator] = True
                        self.cam_capturer_check_flag[iterator] = False
                # End Capturer alive check

                # Run streamer
                if self.cam_streamer_start_flag[iterator]:
                    self.log.info('Run "%s" streamer in background' %
                                  cam['name'])
                    self.cam_streamer[iterator] = self.bg_run(
                        cam['cmd'].strip(), self.cam_streamer_pid[iterator])
                    self.cam_streamer_start_time[iterator] = time.time()
                    self.cam_streamer_poll_flag[iterator] = True
                    self.cam_streamer_start_flag[iterator] = False
                # End Run streamer

                # Poll streamer
                if self.cam_streamer_poll_flag[iterator]:
                    cap_url = self.cfg['cap_url']
                    cap_url = self.replacer(cap_url, iterator)

                    self.log.debug('Getting HTTP status: ' + cap_url)
                    http_code = 0

                    try:
                        http_code = requests.head(cap_url,
                                                  timeout=1).status_code
                    except requests.exceptions.RequestException:
                        self.log.warn('Failed to connect: ' + cap_url)

                    if http_code != 0:
                        self.log.info('Checked "%s", status: %s' %
                                      (cam['name'], http_code))

                        if http_code == 200:
                            self.cam_capturer_start_flag[iterator] = True
                            self.cam_streamer_poll_flag[iterator] = False

                    start_time_delta = time.time(
                    ) - self.cam_streamer_start_time[iterator]
                    if self.cam_streamer_poll_flag[iterator]:
                        if start_time_delta > cam['max_start_seconds']:
                            self.log.warn('Timed out waiting for data from: ' +
                                          cam['name'])
                            self.log.info('Kill: ' + cam['name'])
                            self.kill_cam_processes(iterator,
                                                    cam_reset_flag=True)
                            self.cam_streamer_start_flag[iterator] = True
                        else:
                            self.log.info('Attempt "%s": [%i/%i]' %
                                          (cam['name'], start_time_delta,
                                           cam['max_start_seconds']))
                # End Poll streamer

                # Run capturer
                if self.cam_capturer_start_flag[iterator]:
                    if self.cam_capturer[
                            iterator] is not None and self.cam_capturer[
                                iterator].poll() is None:
                        self.log.warn('Capturer "%s" is STILL alive' %
                                      cam['name'])
                    else:
                        cap_cmd = None
                        try:
                            cap_cmd = self.cam_cfg[iterator]['cap_cmd']
                        except AttributeError:
                            self.log.debug(
                                'Capture command not found in cam config. Using global'
                            )
                            try:
                                cap_cmd = self.cfg['cap_cmd']
                            except AttributeError:
                                self.log.critical(
                                    'Capture command not found. Exit')
                                self.exit_handler(None,
                                                  None,
                                                  log_signal=False,
                                                  exit_code=1)

                        if cap_cmd is not False:
                            cap_cmd = self.replacer(cap_cmd, iterator)

                            self.log.info('Run "%s" capturer in background' %
                                          cam['name'])
                            self.cam_capturer[iterator] = self.bg_run(
                                cap_cmd, self.cam_capturer_pid[iterator])
                            self.cam_capturer_check_flag[iterator] = True
                        else:
                            self.log.info('Capturer "%s" is turned off' %
                                          cam['name'])

                    self.cam_capturer_start_flag[iterator] = False
                # End Run capturer

            schedule.run_pending()
            time.sleep(1)

        self.log.info('Finish')
Example 40
def main(schedule_interval_seconds: int = SCHEDULE_INTERVAL_SECONDS) -> None:
    schedule.every(interval=schedule_interval_seconds).seconds.do(
        job_func=synchronise_everything)
    while True:
        schedule.run_pending()
        sleep(SCHEDULE_SLEEP_SECONDS)
Example 41
def tweet_every_three_days():
    """Tweets the end of the Stackathon calculation message every 3 days."""
    # pass the callable itself: calling it here would recurse infinitely,
    # and the unit must be .hours (plural), not .hour
    schedule.every(72).hours.do(tweet_every_three_days)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 42
def tick():
    schedule.run_pending()
Example 43
def create_update_stock_task():
    # `update_stock` is a placeholder job: the original passed no function
    # to .do(), which raises TypeError
    schedule.every().days.at('00:22').do(update_stock)
    while True:
        schedule.run_pending()
Example 44
def pending_daily_thread():
    while True:
        schedule.run_pending()
        sleep(25)
Example 45
# (opening reconstructed: the listing begins mid-statement)
schedule.every().day.at("23:59").do(
    googleCamera.camera,
    drive_service_v3)  # SAM: Set file sync schedule to everyday at 23:59

while True:
    x = GPIO.input(button)

    if x == 0:  #button is pressed
        cc = cc + 1
        sleep(.1)
        print(cc)

    if x == 1:  #button is not pressed
        cc = 0
        GPIO.output(buzz, GPIO.LOW)
        if recording == True:
            cam_stop()
            recording = False
            googleCamera.camera(
                drive_service_v3
            )  # SAM: Run this to sync the file when door closed.

    if cc > 20:
        if recording == False:
            cam_record()
            recording = True

    if cc > 600:
        GPIO.output(buzz, GPIO.HIGH)

    schedule.run_pending()  # SAM: Run schedule
    time.sleep(0.5)
Example 46
def index():
    schedule.every(30).seconds.do(
        open_browser)  # refresh the coin list every 30 seconds

    while True:
        schedule.run_pending()
Example 47
def scheduleTask():
    schedule.every().day.at(taskTime).do(job)
    while True:
        # start the service
        schedule.run_pending()
        time.sleep(5)
Example 48
def main():
    run_date = datetime.now() + timedelta(minutes=1)
    schedule.every().day.at(run_date.strftime("%H:%M")).do(entry)
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 49
def run_jobs():
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 50
    def run(self):
        while True:
            schedule.run_pending()
            time.sleep(1)
Example 51
def main():
    schedule.every(2).hours.do(postToFacebook)
    while True:
        schedule.run_pending()
        time.sleep(60)
Example 52
def main():
    init()
    schedule.every().day.at("01:00").do(job)
    #schedule.every(1).minutes.do(job)
    while True:
        schedule.run_pending()
Example 53
def start_schedule():
    while True:
        schedule.run_pending()
        sleep(1)
Example 54
        current = current.replace('°', '').replace('\n', '').replace('\r', '').strip()
        real_feel = real_feel.replace('RealFeel®', '').replace('°', '').replace('\n', '').replace('\r', '').strip()

        # Save the data to a file
        f = open("./datos_clima_selenium.csv", "a")
        f.write(ciudad + "," + current + "," + real_feel + "\n")
        f.close()
        print(ciudad)
        print(current)
        print(real_feel)
        print()

    # Close the browser
    driver.close()


# schedule logic (see documentation in the resources)
schedule.every(1).minutes.do(extraer_datos)  # run extraer_datos every minute

# Check the job queue every second to see whether a pending job must run
while True:
    schedule.run_pending()  # run any jobs that are due
    time.sleep(1)  # sleep 1 second per iteration so the busy loop does not peg the CPU
Example 55
def main():
    current = current_ticket(crawlSite())

    schedule.every(10).seconds.do(controller)
    while True:
        schedule.run_pending()
Example 56
    def scheduler_parameters(func, time_interval):
        schedule.every(time_interval).minutes.do(func)
        while True:
            schedule.run_pending()
            time.sleep(time_interval)
Example 57
def run_schedule():
    while True:
        schedule.run_pending()
Example 58
def scheduler():
    schedule.every(600).minutes.do(
        send_email)  # sends an email to the server account every 10 hours
    while True:
        schedule.run_pending()
        time.sleep(1)
Example 59
def run_Thread():  # periodically clear the model
    schedule.every(5).seconds.do(clear_model)
    while True:
        schedule.run_pending()
        time.sleep(2)
Example 60
def run():
    show_now_time('Main')

    while True:
        time.sleep(30)
        schedule.run_pending()