def __init__(self):
    if sys.version_info[0] > 2:
        # Nice and easy.
        self._scheduler = sched.scheduler()
        # The documentation says that run() returns the next deadline,
        # but it's not true - it returns the remaining time.
        self._run_scheduler = lambda: self._scheduler.run(blocking=False) + self._scheduler.timefunc()
    else:
        # Nightmare inducing hacks
        class SayNoToBlockingSchedulingException(uavcan.UAVCANException):
            pass

        def delayfunc_impostor(duration):
            if duration > 0:
                raise SayNoToBlockingSchedulingException('No!')

        self._scheduler = sched.scheduler(time.monotonic, delayfunc_impostor)

        def run_scheduler():
            try:
                self._scheduler.run()
            except SayNoToBlockingSchedulingException:
                q = self._scheduler.queue
                return q[0][0] if q else None

        self._run_scheduler = run_scheduler
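# A minimal standalone sketch of the Python 3 branch above (assumptions,
# not part of the original snippet): run(blocking=False) executes all due
# events and returns the time remaining until the next deadline, which a
# polling loop can use as its sleep hint.
import sched
import time

poller = sched.scheduler()
poller.enter(0.5, 1, print, ('tick',))
while not poller.empty():
    remaining = poller.run(blocking=False)
    if remaining is not None:
        time.sleep(min(remaining, 0.1))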
def correctProc(mssqlDict, mongoDict, processDict, queueDict):
    # Run the stored procedure that imports source data into the SQL staging database
    try:
        UpdateExtensionInfo(mssqlDict)
    except:
        print traceback.format_exc()
        timetuple = datetime.now().timetuple()
        if timetuple.tm_hour <= 6:
            updateScheduler = scheduler(time, sleep)
            # schedule run
            updateScheduler.enter(timedelta(minutes=30).total_seconds(), 1,
                                  UpdateExtensionInfo,
                                  (mssqlDict, mongoDict, processDict, queueDict))
            updateScheduler.run()
        else:
            sys.exit()
    # Delete expired data from the Mongo database
    try:
        dropOriginalData(mongoDict)
    except:
        print traceback.format_exc()
        timetuple = datetime.now().timetuple()
        if timetuple.tm_hour <= 6:
            updateScheduler = scheduler(time, sleep)
            # schedule run; the argument must be a one-tuple, not bare parens
            updateScheduler.enter(timedelta(minutes=30).total_seconds(), 1,
                                  dropOriginalData, (mongoDict,))
            updateScheduler.run()
        else:
            sys.exit()
    # Recompute and update historical quotes in the Mongo database
    try:
        CorrectAllSecurityInfo(mssqlDict, mongoDict, processDict, queueDict)
    except:
        print traceback.format_exc()
        timetuple = datetime.now().timetuple()
        if timetuple.tm_hour <= 6:
            updateScheduler = scheduler(time, sleep)
            # schedule run
            updateScheduler.enter(timedelta(minutes=30).total_seconds(), 1,
                                  CorrectAllSecurityInfo,
                                  (mssqlDict, mongoDict, processDict, queueDict))
            updateScheduler.run()
        else:
            sys.exit()
    # Update the sharded-table info in the Mongo database
    try:
        UpdateSplitSecurityIndex(mssqlDict, mongoDict)
    except:
        print traceback.format_exc()
        timetuple = datetime.now().timetuple()
        if timetuple.tm_hour <= 6:
            updateScheduler = scheduler(time, sleep)
            # schedule run
            updateScheduler.enter(timedelta(minutes=30).total_seconds(), 1,
                                  UpdateSplitSecurityIndex, (mssqlDict, mongoDict))
            updateScheduler.run()
        else:
            sys.exit()
def main():
    if (len(sys.argv) - 1) % 3 != 0 or len(sys.argv) < 4:
        yes = raw_input('Would you like to schedule an existing reservation from the database [Y/n]? ').lower()
        if yes == 'y' or yes == '' or yes == 'yes':
            sch = sched.scheduler(time_module.time, time_module.sleep)
            scheduleAllExistingReservations(confirm=True, blocking=True, scheduler=sch)
            sys.exit(1)
        else:
            print 'Please provide name and confirmation code:'
            print '  %s <firstname> <lastname> <confirmation code> [...]' % sys.argv[0]
            sys.exit(1)

    args = sys.argv[1:]
    while len(args):
        (firstname, lastname, code) = args[0:3]
        res = db.findReservation(code)
        if res:
            print 'Reservation %s is already in the system...' % code
        else:
            res = db.addReservation(firstname, lastname, code)
        del args[0:3]

    # global config["SMTP_USER"], config["SMTP_PASSWORD"], config["EMAIL_FROM"], config["EMAIL_TO"], config["SEND_EMAIL"]
    sch = sched.scheduler(time_module.time, time_module.sleep)

    if config["SEND_EMAIL"]:
        if not config["EMAIL_FROM"]:
            config["EMAIL_FROM"] = raw_input('Email from: ')
        if config["EMAIL_FROM"]:
            if not config["EMAIL_TO"]:
                config["EMAIL_TO"] = raw_input('Email to: ')
            if not config["SMTP_USER"]:
                config["SMTP_USER"] = config["EMAIL_FROM"]
            if not config["SMTP_PASSWORD"] and config["SMTP_AUTH"]:
                # The password prompt was redacted in the source; reconstructed
                # as a plain getpass call.
                config["SMTP_PASSWORD"] = getpass.getpass('Email Password: ')

    print 'Current time: %s' % DateTimeToString(datetime.now(utc))
    sch.run()
def run(self):
    mainLogger.debug('Collecting basic system stats')

    # Get some basic system stats to post back for development/testing
    import platform
    systemStats = {'machine': platform.machine(),
                   'platform': sys.platform,
                   'processor': platform.processor(),
                   'pythonV': platform.python_version(),
                   'cpuCores': self.cpuCores()}

    if sys.platform == 'linux2':
        systemStats['nixV'] = platform.dist()
    elif sys.platform == 'darwin':
        systemStats['macV'] = platform.mac_ver()
    elif sys.platform.find('freebsd') != -1:
        version = platform.uname()[2]
        systemStats['fbsdV'] = ('freebsd', version, '')  # no codename for FreeBSD

    mainLogger.info('System: ' + str(systemStats))

    mainLogger.debug('Creating checks instance')

    # Checks instance
    c = checks(agentConfig, rawConfig, mainLogger)

    # Schedule the checks
    mainLogger.info('checkFreq: %s', agentConfig['checkFreq'])
    s = sched.scheduler(time.time, time.sleep)
    c.doChecks(s, True, systemStats)  # start immediately (case 28315)
    s.run()
def rebot(username, passwd):
    # rss_url = 'http://www.sciencenet.cn/xml/news.aspx?di=0'
    rss_url = "http://www.sciencenet.cn/xml/news.aspx?news=0"
    cj_file = "robot-emuch-cookies.txt"
    cj = http.cookiejar.MozillaCookieJar(cj_file)
    try:
        cj.load(ignore_discard=True, ignore_expires=True)
    except Exception:
        """create a cookie file"""
        cj.save(ignore_discard=True, ignore_expires=True)
    opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))

    poster = lambda msg: post_msg(opener, msg)
    login = lambda: login_emuch(opener, username, passwd)
    enter = lambda: enter_emuch(opener, login)
    msger = lambda: get_sciencenet_news(rss_url)

    creditd = lambda: (not enter()) or get_credit(opener)
    newserd = lambda: (not enter()) or poster(msger())
    cjsaved = lambda: cj.save(ignore_discard=True, ignore_expires=True)
    jobs = [creditd, newserd, cjsaved]

    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(1, 0, call_daily, (scheduler, jobs))
    scheduler.run()
def aum_time_task(self):
    scheduler = sched.scheduler(timefunc=time.time, delayfunc=time.sleep)

    def ca_job():
        scheduler.enter(24 * 60 * 60, 0, ca_job)
        print(datetime.datetime.now())
        _time = str(datetime.datetime.now().strftime('%Y-%m-%d'))
        print('call init_balance(%s, %s)' % (_time, BusinessType.CURR_DEP))
        # self.mysqlconn.execute_single('call Calc_balance(%s, %s)', (_time, str(1)))
        self.init_balance(_time, 1)

    def fa_job():
        scheduler.enter(24 * 60 * 60, 0, fa_job)
        print(datetime.datetime.now())
        _time = str(datetime.datetime.now().strftime('%Y-%m-%d'))
        print('call init_balance(%s, %s)' % (_time, BusinessType.FIX_TIME_DEP))
        # self.mysqlconn.execute_single('call Calc_balance(%s)', (_time, str(2)))
        self.init_balance(_time, 2)

    def start():
        now = datetime.datetime.now()
        late = datetime.datetime(now.year, now.month, now.day, 16, 29)
        if late > now:
            time_delay = (late - now).seconds
            print('time delay ' + str(time_delay) + ' s')
            time.sleep(time_delay)
        fa_job()
        ca_job()
        scheduler.run()

    start()
def run_periodically(self):
    self.run()
    scheduler = sched.scheduler(time.time, time.sleep)
    while True:
        if not self.is_downloading:
            # Add to queue
            scheduler.enter(self.delay, 1, self.run, [])
            scheduler.run()
def run(self):
    s = sched.scheduler(time.time, time.sleep)
    logger.info('Hard cron daemon started')
    while not _cron_stopping:
        now = time.time()
        # Wake up exactly on the next minute boundary, like cron.
        s.enter(60 - now % 60, 1, self.launch, ())
        s.run()
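# A standalone sketch of the same minute-alignment idea (hypothetical job
# name, not from the snippet above): sleeping 60 - now % 60 seconds lands
# each wakeup on a :00 second boundary.
import sched
import time

def cron_tick():
    print('minute boundary at', time.strftime('%H:%M:%S'))

aligner = sched.scheduler(time.time, time.sleep)
for _ in range(3):  # run three aligned ticks, then stop
    aligner.enter(60 - time.time() % 60, 1, cron_tick, ())
    aligner.run()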
def scheduletask(ci, nextRuntime):
    s = sched.scheduler(time.time, time.sleep)
    print("schedule time: ", nextRuntime)
    s.enterabs(nextRuntime.timestamp(), 1, scheduleAction)
    print("============== task scheduled")
    s.run()
    print("============== finish running")
def __init__(self, ip):
    self.serverSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.serverSocket.settimeout(20)

    # socket with injected errors (socketError)
    self.downloadSocket = socketError(socket.AF_INET, socket.SOCK_DGRAM)
    self.downloadSocket.settimeout(20)
    self.downloadSocket.settimeout(5.0)
    self.downloadSocket.setErrorProb(0.0)

    self.logFile = "log.txt"
    self.file = None

    # capture state
    self.statusColeta = None
    self.pacotes = {}
    self.contProtocolos = {"unknown": 0, "all": 0, "nonIp": 0}
    self.fluxos = {}
    self.schedule = sched.scheduler(time.time, time.sleep)
    self.startSchedule = False

    # RabbitMQ
    self.ip = ip
    self.connection = pika.BlockingConnection(
        pika.ConnectionParameters(self.getIp(), 5672, '/starwars',
                                  pika.PlainCredentials("skywalker", "luke")))
    self.channel = self.connection.channel()
    self.channel.exchange_declare(exchange='topic_logs', type='topic')
    c3po = threading.Thread(target=self.localizarMonitor)
    c3po.start()
def __init__(self, logger, args, options):
    Thread.__init__(self)
    self.logger = logger
    self.args = args
    self.options = options
    self.tasks = scheduler(time.time, time.sleep)
    self.stop_flag = False
def run(self):
    agentLogger = logging.getLogger('agent')
    agentLogger.debug('Collecting basic system stats')

    # Get some basic system stats to post back for development/testing
    import platform
    systemStats = {'machine': platform.machine(),
                   'platform': sys.platform,
                   'processor': platform.processor(),
                   'pythonV': platform.python_version()}

    if sys.platform == 'linux2':
        systemStats['nixV'] = platform.dist()
    elif sys.platform == 'darwin':
        systemStats['macV'] = platform.mac_ver()

    agentLogger.debug('System: ' + str(systemStats))

    # We use the system stats in the log but user might not want them posted back
    if 'reportAnonStats' in agentConfig and agentConfig['reportAnonStats'] == 'no':
        systemStats = None

    agentLogger.debug('Creating checks instance')

    # Checks instance
    c = checks(agentConfig)

    # Schedule the checks
    agentLogger.debug('Scheduling checks every ' + str(agentConfig['checkFreq']) + ' seconds')
    s = sched.scheduler(time.time, time.sleep)
    c.doChecks(s, True, systemStats)
    s.run()
def __init__(self, **kwds):
    threading.Thread.__init__(self, **kwds)
    self.setDaemon(True)
    # scheduler for timed signals
    self.scheduler = sched.scheduler(time.time, time.sleep)
    self.condition = threading.Condition()
def test_cancel_concurrent(self):
    q = queue.Queue()
    fun = q.put
    timer = Timer()
    scheduler = sched.scheduler(timer.time, timer.sleep)
    now = timer.time()
    event1 = scheduler.enterabs(now + 1, 1, fun, (1,))
    event2 = scheduler.enterabs(now + 2, 1, fun, (2,))
    event4 = scheduler.enterabs(now + 4, 1, fun, (4,))
    event5 = scheduler.enterabs(now + 5, 1, fun, (5,))
    event3 = scheduler.enterabs(now + 3, 1, fun, (3,))
    t = threading.Thread(target=scheduler.run)
    t.start()
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 1)
    self.assertTrue(q.empty())
    scheduler.cancel(event2)
    scheduler.cancel(event5)
    timer.advance(1)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 3)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 4)
    self.assertTrue(q.empty())
    timer.advance(1000)
    support.join_thread(t, timeout=TIMEOUT)
    self.assertTrue(q.empty())
    self.assertEqual(timer.time(), 4)
def test_enter_concurrent(self):
    q = queue.Queue()
    fun = q.put
    timer = Timer()
    scheduler = sched.scheduler(timer.time, timer.sleep)
    scheduler.enter(1, 1, fun, (1,))
    scheduler.enter(3, 1, fun, (3,))
    t = threading.Thread(target=scheduler.run)
    t.start()
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 1)
    self.assertTrue(q.empty())
    for x in [4, 5, 2]:
        z = scheduler.enter(x - 1, 1, fun, (x,))
    timer.advance(2)
    self.assertEqual(q.get(timeout=TIMEOUT), 2)
    self.assertEqual(q.get(timeout=TIMEOUT), 3)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 4)
    self.assertTrue(q.empty())
    timer.advance(1)
    self.assertEqual(q.get(timeout=TIMEOUT), 5)
    self.assertTrue(q.empty())
    timer.advance(1000)
    support.join_thread(t, timeout=TIMEOUT)
    self.assertTrue(q.empty())
    self.assertEqual(timer.time(), 5)
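# The two tests above assume a Timer helper that fakes the clock so the
# scheduler can be driven deterministically. A minimal sketch of such a
# test double (an assumption -- the real helper lives in CPython's test
# suite, not in these snippets):
import threading

class Timer:
    """Fake timefunc/delayfunc pair: time only moves when advance() is called."""
    def __init__(self):
        self._cond = threading.Condition()
        self._time = 0

    def time(self):
        with self._cond:
            return self._time

    def sleep(self, t):
        # Block until advance() has moved the clock past the deadline.
        with self._cond:
            deadline = self._time + t
            while self._time < deadline:
                self._cond.wait()

    def advance(self, t):
        with self._cond:
            self._time += t
            self._cond.notify_all()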
def start_daemon():
    daemon = sched.scheduler()
    main(daemon)
    _logger.info('daemon %s is running', str(daemon))
    daemon.run()
    _logger.info('daemon %s exited', str(daemon))
def open_snap(self, snap):
    dt = datetime.fromtimestamp(snap['sent'] / 1000)
    ext = self.client.media_type(snap['media_type'], binary=False)
    timestamp = str(snap['sent']).replace(':', '-')
    filename = '{}+{}+{}.{}'.format(timestamp, snap['sender'], snap['id'], ext)
    path = PATH + filename
    if not os.path.isfile(path):
        data = self.client.get_snap(snap['id'])
        with open(path, 'wb') as outfile:
            outfile.write(data)
    snap['path'] = path
    file_path = snap['path']

    # open /dev/null or equivalent so we can redirect stdout/stderr to it
    nullfile = open(os.devnull, 'w')
    scheduler = sched.scheduler(time.time, time.sleep)
    if sys.platform.startswith('linux'):
        p = subprocess.Popen(['xdg-open', file_path],
                             stdout=nullfile, stderr=nullfile, preexec_fn=os.setsid)
    elif sys.platform.startswith('darwin'):
        p = subprocess.Popen(['open', file_path], stdout=nullfile, stderr=nullfile)
    elif sys.platform.startswith('win'):
        p = subprocess.Popen(['start /WAIT', file_path], stdout=nullfile, stderr=nullfile)
    else:
        print 'I don\'t recognize your operating system: {0}'.format(sys.platform)
    self.window.after(snap['time'] * 1000 + 1000, lambda: self.mark_read(snap, p))
def __init__(self, address, port, password=str()):
    self.address = address
    self.port = port
    self.password = password
    self.player_list = []
    self.logged_off_player_list = []
    self.player_list_lock = threading.Lock()
    self.world = World()
    self.world_lock = threading.Lock()
    self.force_player_list_update = False

    config = configparser.ConfigParser({"update_rate": "0.1"})
    config.read("server.cfg")
    self.update_rate = config.getfloat("Settings", "update_rate")

    self.scheduler = sched.scheduler(time, sleep)
    self.scheduler.enter(self.update_rate, 1, self.update)
    thread = threading.Thread(target=self.scheduler.run)
    thread.setDaemon(True)
    thread.start()

    server_thread = threading.Thread(target=self.start_service)
    server_thread.setDaemon(True)
    server_thread.start()
def __init__(self):
    assert isinstance(self.requestAssembly, Assembly), 'Invalid request assembly %s' % self.requestAssembly
    assert isinstance(self.nameAuthorization, str), 'Invalid authorization name %s' % self.nameAuthorization
    assert isinstance(self.accessHeaders, dict), 'Invalid access headers %s' % self.accessHeaders
    assert isinstance(self.accessUriRoot, str), 'Invalid access URI root %s' % self.accessUriRoot
    assert isinstance(self.accessUri, str), 'Invalid access URI %s' % self.accessUri
    assert isinstance(self.accessParameters, list), 'Invalid access parameters %s' % self.accessParameters
    assert isinstance(self.accessResponseEncoding, str), \
        'Invalid access response encoding %s' % self.accessResponseEncoding
    assert isinstance(self.cleanupTimeout, int), 'Invalid cleanup time out %s' % self.cleanupTimeout

    requestProcessing = self.requestAssembly.create(NO_VALIDATION, request=Request,
                                                    requestCnt=RequestContent,
                                                    response=Response, responseCnt=ResponseContent)
    assert isinstance(requestProcessing, Processing), 'Invalid processing %s' % requestProcessing
    super().__init__(Function(requestProcessing.contexts, self.process))

    self._requestProcessing = requestProcessing
    self._cache = Cache()
    self._authenticationTimeOut = timedelta(seconds=self.cleanupTimeout)

    schedule = scheduler(time.time, time.sleep)

    def executeCleanup():
        self._cleanInactiveAccesses()
        # Re-enter so the cleanup repeats every cleanupTimeout seconds.
        schedule.enter(self.cleanupTimeout, 1, executeCleanup, ())

    schedule.enter(self.cleanupTimeout, 1, executeCleanup, ())
    scheduleRunner = Thread(name='Cleanup access/sessions thread', target=schedule.run)
    scheduleRunner.daemon = True
    scheduleRunner.start()

    self._contentType = 'text/json'
    self._charSet = 'utf-8'
    self._unauthorizedAccess = b'{"code":"401","message":"Unauthorized access"}'
    self._invalidAccess = b'{"code":"401","message":"Invalid authorization"}'
    self._forbiddenAccess = b'{"code":"403","message":"Forbidden access"}'
    self._notFound = b'{"code":"404","message":"Not Found"}'
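# A minimal self-contained sketch of the same pattern (hypothetical task
# name; assumptions, not the original class): a job that re-enters itself
# keeps a sched.scheduler running forever on a daemon thread.
import sched
import threading
import time

periodic = sched.scheduler(time.time, time.sleep)

def cleanup_tick(interval):
    print('cleanup ran')
    periodic.enter(interval, 1, cleanup_tick, (interval,))  # reschedule

periodic.enter(5, 1, cleanup_tick, (5,))  # seed the first run
worker = threading.Thread(target=periodic.run, daemon=True)
worker.start()
# (in a real program the main thread keeps doing other work here)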
def __init__(self, argv):
    self.gen_ops = {}
    self.once = False
    self.scheduler = sched.scheduler(time.time, time.sleep)
    self.dispatcher = []
    self.config_parser(argv)
    self.generator = noise_generator(self.gen_ops)
    # Initialize markov factory and keywords
def __init__(self):
    self.t = 0
    self.cpu_clock = 1024  # 12*1024
    self.interval = 1.0 / self.cpu_clock
    self.s = sched.scheduler(time.time, time.sleep)
    self.s.enter(self.interval, 1, self.tick, ())
    self.stop = False
def main():
    parser = argparse.ArgumentParser(description='A distributed cron-like daemon')
    parser.add_argument('-f', dest='foreground', action='store_const', const=False,
                        help='run in foreground')
    args = parser.parse_args()

    if os.geteuid() != 0:
        sys.exit('%s must be run as root.' % os.path.basename(sys.argv[0]))

    context = daemon.DaemonContext(
        pidfile=PIDLockFile('/var/run/megacron.pid'),
        detach_process=args.foreground,
    )
    context.signal_map = {
        signal.SIGINT: _signal_handler,
        signal.SIGTERM: _signal_handler
    }

    with context:
        worker.init_worker()
        events = sched.scheduler(time.time, time.sleep)
        scheduler.check_scheduler(events)
        worker.heartbeat(events)
        worker.update_schedules(events)
        events.run()
def startCandle(mssqlDict, mongoList, atOnce=False):
    '''
    Spawn the historical quote update process.
    '''
    if atOnce:
        candle(mssqlDict, mongoList)
    while True:
        try:
            # Get current datetime
            startTime = datetime.datetime.now()
            timetuple = startTime.timetuple()
            executeTime = datetime.datetime(
                year=timetuple.tm_year,
                month=timetuple.tm_mon,
                day=timetuple.tm_mday,
                hour=setup.setupDict['Candle']['Schedule']['Hour'],
                minute=setup.setupDict['Candle']['Schedule']['Minute'],
                second=setup.setupDict['Candle']['Schedule']['Second'])
            # Execute function next day this time
            if (executeTime < startTime):
                executeTime += datetime.timedelta(days=1)
            print 'Candle next execute time: %s' % executeTime.strftime('%Y-%m-%d %X')
            # Get delta time between next execute time and now.
            deltaTime = executeTime - startTime
            scheduleHandle = sched.scheduler(time.time, time.sleep)
            scheduleHandle.enter(deltaTime.total_seconds(), 1, candle, (mssqlDict, mongoList))
            scheduleHandle.run()
        except:
            print traceback.format_exc()
def __init__(self):
    super(Conductor, self).__init__()
    # set performance constants
    self.tic_count = 0
    self.time_interval = 60.0 / (TICS_PER_MINUTE * 1.0)
    # load music
    self.pieces = in_c.music.load()
    self.piece_events = in_c.music.build(self.pieces)
    # interprocess stuff
    self.web_process = None
    self.web_q = multiprocessing.Queue()
    self.inc_q = multiprocessing.Queue()
    # event scheduler
    self.scheduler = sched.scheduler(time.time, time.sleep)
    self.tic_event = None
    # initialize player objects
    self.players = {}
    for i in range(0, NUM_PLAYERS):
        player = in_c.player.Player(conductor=self, channel=(i + 1))
        self.players[player.uuid] = player
    # create MIDI bridge
    self.audio = in_c.audio.OSC2MIDI(host=OSC_SERVERS[0]['host'],
                                     port=OSC_SERVERS[0]['port'])
def __init__(self):
    self.api = wykop.WykopAPI(klucz, sekret)
    self._auth()
    s = sched.scheduler(time.time, time.sleep)

    def check_for_messages(sc):
        try:
            print "Odświeżam wykop o " + strftime("%H:%M:%S", localtime())
            Notifier.notify("Service started", title="Wykop.pl",
                            appIcon="images.png", sound="default")
            get_from_wypok = self.api.request("mywykop", "notifications", "JSON")
            for key in get_from_wypok:
                if key.new == True:
                    if key.type == "entry_comment_directed":
                        print(key.author + " dodal komentarz")
                        Notifier.notify(key.author + "\ndodal komentarz.", title="Wykop.pl",
                                        appIcon="images.png", sound="default", open=key.url)
                    elif key.type == "pm":
                        print("PW od " + key.author)
                        Notifier.notify("PW od\n" + key.author, title="Wykop.pl",
                                        appIcon="images.png", sound="default", open=key.url)
                    elif key.type == "observe":
                        print("#stalkujo !" + key.author)
                        Notifier.notify("#stalkujo !" + key.author, title="Wykop.pl",
                                        appIcon="images.png", sound="default", open=key.url)
        except:
            self._auth()
        sc.enter(300, 1, check_for_messages, (sc,))

    s.enter(1, 1, check_for_messages, (s,))
    s.run()
def __init__(self, ut_time=time.time):
    threading.Thread.__init__(self)
    self.setDaemon(True)
    self.s = sched.scheduler(ut_time, time.sleep)
    self.settings = SchedulerSettings()
    self.settings.Load()
    self.LoadSettings()
def __init__(self, clock=None):
    self.nodes = []
    self.automators = AutomatorPool()
    self.buffer_length = 0.1
    self.sched = scheduler(time.time, time.sleep)
    self.default_clock = clock or TempoClock()
    self._stopped = False
def __init__(self, config_fd):
    """Construct a new Recorder.

    :param config_fd: a readable File-like object for the config file
    """
    # Load the JSON config. We don't do any validation right now
    # but validation will happen as the program runs.
    self.config = json.load(config_fd)

    # Build the list of server objects
    self.server_list = self._build_server_list()

    # Create the SQLite database connection
    self.conn = sqlite3.connect(self.config['database'])
    self._init_database()

    # Create the scheduler
    self.timer = sched.scheduler()

    # Disable collecting data
    self.collecting = False

    # sched task handle so we can cancel it later
    self.collection_task = None

    # Blocking queue used for queueing the servers to be collected
    self.collection_queue = queue.Queue()

    # Queue used to return lists of tuples to be added to the database
    self.sql_insert_queue = queue.Queue()

    # List of threads used to process the collection_queue
    self.collection_threads = None
def __init__(self, serverAddress, RequestHandlerClass, *args, counts, threads, timeout):
    '''
    Constructs the multiprocess server.
    '''
    assert isinstance(counts, int), 'Invalid processes pool size %s' % counts
    super().__init__(serverAddress, None)

    processes, pipes = [], []
    schedule = scheduler(time.time, time.sleep)

    def pingProcesses():
        for pipe in pipes:
            pipe.send(True)
        # Re-enter so the ping repeats every timeout interval.
        schedule.enter(timeout, 1, pingProcesses, ())

    schedule.enter(timeout, 1, pingProcesses, ())
    scheduleRunner = Thread(name='Ping processes thread', target=schedule.run)
    scheduleRunner.daemon = True
    scheduleRunner.start()

    for k in range(0, counts):
        receiver, sender = Pipe(False)
        args = RequestHandlerClass, receiver, threads, 2 * timeout
        process = Process(name='Process %s' % k, target=prepareServer, args=args)
        processes.append(process)
        pipes.append(sender)
        process.start()

    self.processes = processes
    self.pipes = deque(pipes)
def run(self):
    db_session = DBSession()
    try:
        system_option = SystemOption.get(db_session)
        inventory_hour = system_option.inventory_hour
        db_session.close()

        # Build a scheduler object that will look at absolute times
        scheduler = sched.scheduler(time.time, time.sleep)
        current_hour = datetime.datetime.now().hour

        # Put task for today at the designated hour.
        daily_time = datetime.time(inventory_hour)

        # If the scheduled time already passed, schedule it for tomorrow
        if current_hour > inventory_hour:
            first_time = datetime.datetime.combine(
                datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
        else:
            first_time = datetime.datetime.combine(datetime.datetime.now(), daily_time)

        scheduler.enterabs(time.mktime(first_time.timetuple()), 1,
                           self.scheduling, (scheduler, daily_time,))
        scheduler.run()
    except:
        logger.exception('InventoryManagerScheduler hit exception')
        db_session.close()
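# A small sketch of the datetime-to-deadline conversion used above
# (hypothetical job; assumes the wall clock doesn't jump): enterabs()
# takes the same units as timefunc, so a datetime must be converted to
# an epoch timestamp first.
import sched
import time
from datetime import datetime, timedelta

clock = sched.scheduler(time.time, time.sleep)
target = datetime.now() + timedelta(seconds=3)
clock.enterabs(target.timestamp(), 1, lambda: print('fired at', datetime.now()))
clock.run()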
def on_connect_failed(self):
    print("going to retry 30s later")
    s = sched.scheduler(time.time, time.sleep)
    s.enter(30, 1, self.connect, ())
    s.run()
def __init__(self):
    self.s = sched.scheduler(time.time, time.sleep)
try:
    # TO DO: execute this in a loop
    cursor.execute(common.Q_CREATE_SYSTEM_COUNTERS)
    cursor.execute(common.Q_LIST_INSERT_SYSTEM_COUNTERS)
    cursor.execute(common.Q_CREATE_SYSTEM_CONTENT)
    cursor.execute(common.Q_CREATE_DOCKER_COUNTERS)
    cursor.execute(common.Q_LIST_INSERT_DOCKER_COUNTERS)
    cursor.execute(common.Q_CREATE_DOCKER_MASTER)
    cursor.execute(common.Q_CREATE_DOCKER_CONTENT)
    cursor.execute(common.Q_CREATE_DOCKER_OVERVIEW)
except sqlite3.Error as er:
    print('Error', er)

db.commit()

data_collection = 30
s = sched.scheduler(time.time, time.sleep)

# TO DO: make an option to disable all monitoring from UI
# def signal_handler(signal, frame):
#     print('Exiting Monit Script')
#     db.close()
#     sys.exit(0)

def split_at_num(s):
    return re.split(r'(\d+)', s)

def convert_to_bytes(input):
    p = split_at_num(input)
    num = float(p[1])
import sched
import time

from plyer import notification

from calculator import Convert
import api

event_schedule = sched.scheduler(time.time, time.sleep)

valute = 'ევრო'  # Georgian: "euro"
current = 0

def valute_notification():
    global current
    user_valute = valute
    valute_current = Convert.money_find(name=user_valute)
    if valute_current > current:
        notification.notify(
            title='მოიმატა',  # "increased"
            message="მოიმატა და გახდა {}".format(valute_current),  # "increased to {}"
            timeout=5,
        )
        current = valute_current
    if valute_current < current:
        notification.notify(
            title='დაიკლო',  # "decreased"
            message="დაიკლო და გახდა {}".format(valute_current),  # "decreased to {}"
            timeout=5,
def main():
    signal.signal(signal.SIGTERM, signal_handler)

    parser = argparse.ArgumentParser(description='Export ES query results to Prometheus.')
    parser.add_argument('-e', '--es-cluster', default='localhost',
                        help='addresses of nodes in a Elasticsearch cluster to run queries on. '
                             'Nodes should be separated by commas e.g. es1,es2. Ports can be provided '
                             'if non-standard (9200) e.g. es1:9999 (default: localhost)')
    parser.add_argument('--ca-certs',
                        help='path to a CA certificate bundle. Can be absolute, or relative to the '
                             'current working directory. If not specified, SSL certificate '
                             'verification is disabled.')
    parser.add_argument('-p', '--port', type=int, default=9206,
                        help='port to serve the metrics endpoint on. (default: 9206)')
    parser.add_argument('--basic-user',
                        help='User for authentication. (default: no user)')
    parser.add_argument('--basic-password',
                        help='Password for authentication. (default: no password)')
    parser.add_argument('--query-disable', action='store_true',
                        help='disable query monitoring. Config file does not need to be present '
                             'if query monitoring is disabled.')
    parser.add_argument('-c', '--config-file', default='exporter.cfg',
                        help='path to query config file. Can be absolute, or relative to the '
                             'current working directory. (default: exporter.cfg)')
    parser.add_argument('--cluster-health-disable', action='store_true',
                        help='disable cluster health monitoring.')
    parser.add_argument('--cluster-health-timeout', type=float, default=10.0,
                        help='request timeout for cluster health monitoring, in seconds. (default: 10)')
    parser.add_argument('--cluster-health-level', default='indices',
                        choices=['cluster', 'indices', 'shards'],
                        help='level of detail for cluster health monitoring. (default: indices)')
    parser.add_argument('--nodes-stats-disable', action='store_true',
                        help='disable nodes stats monitoring.')
    parser.add_argument('--nodes-stats-timeout', type=float, default=10.0,
                        help='request timeout for nodes stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--nodes-stats-metrics', type=nodes_stats_metrics_parser,
                        help='limit nodes stats to specific metrics. '
                             'Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-disable', action='store_true',
                        help='disable indices stats monitoring.')
    parser.add_argument('--indices-stats-timeout', type=float, default=10.0,
                        help='request timeout for indices stats monitoring, in seconds. (default: 10)')
    parser.add_argument('--indices-stats-mode', default='cluster',
                        choices=['cluster', 'indices'],
                        help='detail mode for indices stats monitoring. (default: cluster)')
    parser.add_argument('--indices-stats-metrics', type=indices_stats_metrics_parser,
                        help='limit indices stats to specific metrics. '
                             'Metrics should be separated by commas e.g. indices,fs.')
    parser.add_argument('--indices-stats-fields', type=indices_stats_fields_parser,
                        help='include fielddata info for specific fields. Fields should be separated '
                             'by commas e.g. indices,fs. Use \'*\' for all.')
    parser.add_argument('-j', '--json-logging', action='store_true',
                        help='turn on json logging.')
    parser.add_argument('--log-level', default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
                        help='detail level to log. (default: INFO)')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='turn on verbose (DEBUG) logging. Overrides --log-level.')
    args = parser.parse_args()

    if args.basic_user and args.basic_password is None:
        parser.error('Username provided with no password.')
    elif args.basic_user is None and args.basic_password:
        parser.error('Password provided with no username.')
    elif args.basic_user:
        http_auth = (args.basic_user, args.basic_password)
    else:
        http_auth = None

    log_handler = logging.StreamHandler()
    log_format = '[%(asctime)s] %(name)s.%(levelname)s %(threadName)s %(message)s'
    formatter = JogFormatter(log_format) if args.json_logging else logging.Formatter(log_format)
    log_handler.setFormatter(formatter)

    log_level = getattr(logging, args.log_level)
    logging.basicConfig(handlers=[log_handler],
                        level=logging.DEBUG if args.verbose else log_level)
    logging.captureWarnings(True)

    port = args.port
    es_cluster = args.es_cluster.split(',')

    if args.ca_certs:
        es_client = Elasticsearch(es_cluster, verify_certs=True,
                                  ca_certs=args.ca_certs, http_auth=http_auth)
    else:
        es_client = Elasticsearch(es_cluster, verify_certs=False, http_auth=http_auth)

    scheduler = None

    if not args.query_disable:
        scheduler = sched.scheduler()

        config = configparser.ConfigParser()
        config.read_file(open(args.config_file))

        query_prefix = 'query_'
        queries = {}
        for section in config.sections():
            if section.startswith(query_prefix):
                query_name = section[len(query_prefix):]
                query_interval = config.getfloat(section, 'QueryIntervalSecs', fallback=15)
                query_timeout = config.getfloat(section, 'QueryTimeoutSecs', fallback=10)
                query_indices = config.get(section, 'QueryIndices', fallback='_all')
                query = json.loads(config.get(section, 'QueryJson'))
                queries[query_name] = (query_interval, query_timeout, query_indices, query)

        if queries:
            for name, (interval, timeout, indices, query) in queries.items():
                func = partial(run_query, es_client, name, indices, query, timeout)
                run_scheduler(scheduler, interval, func)
        else:
            logging.warn('No queries found in config file %s', args.config_file)

    if not args.cluster_health_disable:
        REGISTRY.register(
            ClusterHealthCollector(es_client, args.cluster_health_timeout,
                                   args.cluster_health_level))

    if not args.nodes_stats_disable:
        REGISTRY.register(
            NodesStatsCollector(es_client, args.nodes_stats_timeout,
                                metrics=args.nodes_stats_metrics))

    if not args.indices_stats_disable:
        parse_indices = args.indices_stats_mode == 'indices'
        REGISTRY.register(
            IndicesStatsCollector(es_client, args.indices_stats_timeout,
                                  parse_indices=parse_indices,
                                  metrics=args.indices_stats_metrics,
                                  fields=args.indices_stats_fields))

    logging.info('Starting server...')
    start_http_server(port)
    logging.info('Server started on port %s', port)

    try:
        if scheduler:
            scheduler.run()
        else:
            while True:
                time.sleep(5)
    except KeyboardInterrupt:
        pass

    shutdown()
def __init__(self, settings):
    # local variables
    self.settings = settings
    self.queue_tx = Queue.Queue()
    self.f_start_signal_LED = False
    self.f_reset_button = False
    self.f_exit = False
    self.f_cancel_exp = False
    self.hours = 0
    self.minutes = 0
    self.scheduler = sched.scheduler(time.time, time.sleep)
    self.list_events_sched = [None for i in range(len(self.settings["test_settings"]))]
    self.schedule_time = ['time' for i in range(len(self.settings["test_settings"]))]
    self.led_array_pins = [29, 31, 33, 35, 37]
    self.TRX_frame_pin = [36]
    self.radio_isr_pin = 11
    self.push_button_pin = 13
    self.scheduler_aux = None
    self.time_to_start = None
    self.started_time = None
    self.experiment_tx_led_start = None
    self.experiment_scheduled = None
    self.experiment_tx_thread = None
    self.experiment_counter = 0
    self.modem_base_band_state = MODEM_SUB_GHZ
    self.dataLock = threading.RLock()

    # start the threads
    self.f_reset = threading.Event()
    self.start_experiment = threading.Event()
    self.end_experiment = threading.Event()
    self.f_schedule = threading.Event()
    self.f_reset.clear()
    self.start_experiment.clear()
    self.end_experiment.clear()
    self.f_schedule.clear()

    self.radio_driver = None
    self.LoggerTx = None
    self.gps = None
    self.gpio_handler = None

    # start all the drivers
    # self._gps_init()
    logging.info('radio setup')
    self._radio_setup()
    logging.info('logger init')
    self._logger_init()
    logging.info('gpio handler init')
    self._gpio_handler_init()
    logging.info('radio init')
    self._radio_init()
    logging.debug('INIT COMPLETE')

    # start the thread
    threading.Thread.__init__(self)
    self.name = 'ExperimentTx_'
    self.daemon = True
    self.start()
from builtins import any as b_any
import sched
import PySimpleGUI as sg

layout = [[sg.Text('Some text on Row 1')],
          [sg.Text('Send message'), sg.InputText(key='global_message'), sg.Button('Send')],
          [sg.Button('Ok'), sg.Button('Cancel')],
          [sg.Checkbox('On', change_submits=True, enable_events=True, default='0', key='spam')],
          [sg.Multiline("", disabled=True, key="log")]]

is_thead_active = False

# Change the working directory to the folder this script is in.
# Doing this because I'll be putting the files from each video in their own folder on GitHub
s = sched.scheduler(time, sleep)

rules_list = ["\n cheating\n spawnganking / running in the enemy grey zone\n fla flaming admin for keeping order"]
# helper to extract player name who is reporting another player
welcoming = "Welcome to the OFC community server"
server_info = "Visit our page at ofcug.org Discord: https://discord.gg/SA2Eht"
rules_empty = "No A nor C until 12 VS 12"
no_shotgun_rule = "No Shotgun TODAY!"
rules = "No Profanity, No Racism, No Whining, No Grey zone, No Spawn killing!"
join_ours = "Join our community servers we have OFCInfantery #81725 and Underground24-7 #32192"

def callback():
    if (GetWindowText(GetForegroundWindow()) == 'Battlefield™ V'):
        rect = win32gui.GetWindowRect(GetForegroundWindow())
        x = rect[0]
        y = rect[1]
        w = rect[2] - x
import asyncio
import datetime
import discord
import functools
import io
import os
import sched
import threading
import time

import tokenFubar

serverID = tokenFubar.serverID
outputFile = "memberLogger"
localScheduler = sched.scheduler(time.time, time.sleep)

def fixedIntervalScheduler(server, interval, action, actionargs):
    # DON'T CHANGE THE ORDER OF THESE OPERATIONS
    threading.Timer(interval, fixedIntervalScheduler,
                    (server, interval, action, actionargs)).start()
    action(server, actionargs)

def dateTimeStamp(member):
    # returns the string, "yyyy-mmm-dd HH:MM:SS"
    return '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.utcnow())

def memberGame(member):
def __init__(self, **kwargs):
    self._config = self._read_config(kwargs.get('config', CONFIG_PATH))
    self._scheduler = sched.scheduler(time.time, time.sleep)
    self._template_resources = self._parse_template_configs(kwargs.get('confd', CONFD_PATH))
        try:
            rnd_string = ''.join(
                random.choice(string.ascii_uppercase + string.ascii_lowercase)
                for _ in range(7))
            webbrowser.open(f"{url}?={rnd_string}")
        except Exception:
            print("Couldn't open web browser. This is all your fault")
        try:
            playsound(self.sound_file, False)
        except Exception:
            print("Couldn't play alarm. This is all your fault.")

if __name__ == "__main__":
    hb = HulkBuster()
    s = sched.scheduler(time.time, time.sleep)
    item_reset = sched.scheduler(time.time, time.sleep)
    try:
        s.enter(1, 1, hb.start, (s,))
        s.run()
        # Reset the item list so it doesn't perpetually refuse to open previously unopened urls.
        # item_reset.enter(28800, 1, hb.reset_items, (item_reset,))
    except urllib3.exceptions.ProtocolError:
        print("damn")
        pass
    except requests.exceptions.ConnectionError:
        print("wham")
        pass
    # When you switch VPNs.
    except requests.exceptions.ChunkedEncodingError:
        print("in")
def init_app(first_run=False):
    DATABASE = config.MYSQL_URI
    ELASTIC = config.ELASTIC
    SLAVE_SERVER = config.SLAVE_SERVER
    db = DB(DATABASE)
    es = ES(ELASTIC)

    key_statement = "SHOW KEYS FROM {table} WHERE Key_name = 'PRIMARY'"
    records = "SELECT COUNT({key_name}) FROM {table}"

    def query(statement):
        cursor = db.query(statement)
        return cursor.fetchall()

    data = query("SHOW MASTER STATUS")[0]
    last_log = data['File']
    last_pos = data['Position']
    resume = True
    del data

    cursor = db.query("SHOW TABLES")
    tables = [value for data in cursor.fetchall() for value in data.values()]
    detail_tables = []
    key_check = True
    for table in tables[:]:
        if (ALLOW_TABLES):
            if (table in ALLOW_TABLES and table not in IGNORE_TABLES):
                key_check = True
            else:
                key_check = False
        elif table in IGNORE_TABLES:
            key_check = False
        if key_check:
            key_name = query(key_statement.format(table=table))[0]['Column_name']
            detail = {'table_name': table, 'primary_key': key_name}
            log.info(detail)
            detail['total_records'] = query(
                records.format(table=table, key_name=key_name)
            )[0]['COUNT({key_name})'.format(key_name=key_name)]
            detail_tables.append(detail)
        key_check = True

    if first_run:
        for detail in detail_tables:
            init_worker(DATABASE=DATABASE, es=es,
                        number=detail['total_records'],
                        table=detail['table_name'],
                        key_name=detail['primary_key'])
        first_run = False

    scheduler = sched.scheduler(time.time, time.sleep)
    log.info('START')
    log.info(last_log)
    log.info(last_pos)

    def sync_from_log(DATABASE, SLAVE_SERVER, resume, last_log, last_pos):
        actions = list()
        try:
            stream = binlog_streaming(DATABASE, SLAVE_SERVER, resume=resume,
                                      log_file=last_log, log_pos=last_pos)
            for binlogevent in stream:
                if (binlogevent.table in IGNORE_TABLES):
                    continue
                for row in binlogevent.rows:
                    action = {
                        "_index": DATABASE['db'],
                        "_type": binlogevent.table
                    }
                    log.info('-----------------------------')
                    for detail in detail_tables:
                        if detail['table_name'] == binlogevent.table:
                            primary_key = detail['primary_key']
                    if isinstance(binlogevent, DeleteRowsEvent):
                        action["_op_type"] = "delete"
                        action["_id"] = row["values"].pop(primary_key)
                    elif isinstance(binlogevent, UpdateRowsEvent):
                        action["_op_type"] = "index"
                        action["_id"] = row["after_values"].pop(primary_key)
                        action["_source"] = row["after_values"]
                    elif isinstance(binlogevent, WriteRowsEvent):
                        action["_op_type"] = "index"
                        action["_id"] = row["values"].pop(primary_key)
                        action["_source"] = row["values"]
                    else:
                        continue
                    actions.append(action)
            stream.close()
            log.info(actions)
            if len(actions) > LARGEST_SIZE:
                log.info(actions)
                parts = int(ceil(float(len(actions) / LARGEST_SIZE)))
                for part in range(parts - 1):
                    log.info(es.bulk(actions[part * LARGEST_SIZE:(part + 1) * LARGEST_SIZE]))
            else:
                log.info(actions)
                success, error = es.bulk(actions)
                if error:
                    raise Warning(error)
            cursor = db.query("SHOW MASTER STATUS")
            data = cursor.fetchall()[0]
            last_log = data['File']
            last_pos = data['Position']
            log.info('RENEW LOG')
            log.info('last log: %s', last_log)
            log.info('last pos: %s', last_pos)
        except pymysql.err.OperationalError as e:
            log.error("Connection ERROR: %s", e)
            log.info("LAST_LOG: %s", last_log)
            log.info("LAST_POS: %s", last_pos)
        except elasticsearch.BulkIndexError as e:
            log.error("Elasticsearch error: %s", e)
            log.info("LAST_LOG: %s", last_log)
            log.info("LAST_POS: %s", last_pos)
        except Warning as e:
            log.error("Elasticsearch error: %s", e)
            log.info("LAST_LOG: %s", last_log)
            log.info("LAST_POS: %s", last_pos)
        scheduler.enter(config.FREQUENCY, 1, sync_from_log,
                        (DATABASE, SLAVE_SERVER, resume, last_log, last_pos))

    scheduler.enter(1, 1, sync_from_log,
                    (DATABASE, SLAVE_SERVER, resume, last_log, last_pos))
    scheduler.run()
def run_housekeeper(interval, action, actionargs=()):
    s = scheduler(time.time, time.sleep)
    housekeeper(s, interval, action, actionargs)
    s.run()
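# A possible companion for the helper above (an assumption -- the original
# housekeeper() isn't shown): the usual shape re-enters itself before
# invoking the action, giving a fixed-interval loop.
import time
from sched import scheduler

def housekeeper(s, interval, action, actionargs=()):
    s.enter(interval, 1, housekeeper, (s, interval, action, actionargs))
    action(*actionargs)

# run_housekeeper(10, print, ('housekeeping pass',)) would then print
# "housekeeping pass" every 10 seconds.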
def __init__(self, weather):
    self.sched = sched.scheduler(time.time, time.sleep)
    self.weather = weather
    super(UpdateThread, self).__init__()
}

# API end points
config['ww_url'] = os.getenv('WW_URL', config['ww_url'])
config['ww_msg_url'] = os.getenv(
    'WW_MSG_URL', config['ww_msg_url']) + "?api_key=" + os.getenv(
        'WW_API_KEY', config['ww_api_key'])
config['ww_msg_update_url'] = os.getenv(
    'WW_MSG_UPDATE_URL', config['ww_msg_update_url']) + "?api_key=" + os.getenv(
        'WW_API_KEY', config['ww_api_key'])

print("Env")
print config['ww_url']

message_scheduler = sched.scheduler(time.time, time.sleep)
last_printed_msg_id = 0
last_thread_name = ''

# colors in console
class bcolors:
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def load(self, ctx):
    super(AutopickPlugin, self).load(ctx)
    self.scheduler = sched.scheduler(time.time, time.sleep)
def __init__(self, file, host=None):
    self.file = file
    self.host = host
    self.scheduler = sched.scheduler(time.time, time.sleep)
def TellRandomJoke(self, bTxt2spch, bTweet, bTweetConfirmation):
    """
    TellRandomJoke(bTxt2spch, bTweet, bTweetConfirmation)
    Tells first part of joke then pauses then completes joke.
    If bTxt2spch = True then will use TXT2Speech
    If bTweet = True then tweet joke
    If bTweetConfirmation = True then print if success or fail
    """
    # Load the JOKES! :P
    jokes = ["What's the best thing about Switzerland?|Not sure, but the flag is a big plus."]
    jokes += ["MapQuest really needs to start its directions on number five. Pretty sure I know how to get out of my neighborhood."]
    jokes += ["For Christmas, I gave my kid a BB gun. He gave me a sweater with a bullseye on the back."]
    jokes += ["I bet cats have a secret website where they upload clips of cute humans trying to open DVD packaging and jump-start cars."]

    # Pick the random joke now, just get it over with.
    # random.sample returns a list, so take its single element; the original
    # then indexed jokes[0], which always told the first joke.
    JOKE = random.sample(jokes, 1)[0].decode('utf8')

    # A pipe in the text marks a two-part joke with external interaction;
    # otherwise it is a one-liner.
    if JOKE.find("|") == -1:
        if bTxt2spch == False:
            # No text to speech, so just print the joke
            print JOKE.strip()
        else:
            mouth = TXT2SPEECH.txttospeech(100, 0)
            # Tell joke
            text1 = JOKE.strip()
            mouth.Say(text1, True)
            # Are we tweeting this joke?
            if bTweet == True:
                self.TweetJoke(text1, bTweet, bTweetConfirmation)
    else:
        # A pipe was found: two-part joke
        if bTxt2spch == False:
            # Tell first part of joke: What's the best thing about Switzerland?
            print JOKE.split('|')[0].strip()
            # Prompt the user for any input, just ignore it
            s = sched.scheduler(time.time, time.sleep)
            raw_input("ANY RESPONSE? > ")
            time.sleep(2)
            # Tell rest of joke: Not sure, but the flag is a big plus.
            print JOKE.split('|')[1].strip()
            # Play random laugh
        else:
            mouth = TXT2SPEECH.txttospeech(100, 0)
            # Tell first part of joke: What's the best thing about Switzerland?
            text1 = JOKE.split('|')[0].strip()
            mouth.Say(text1, True)
            # Prompt the user for any input, just ignore it
            s = sched.scheduler(time.time, time.sleep)
            raw_input("ANY RESPONSE? > ")
            time.sleep(2)
            # Tell rest of joke: Not sure, but the flag is a big plus.
            text2 = JOKE.split('|')[1].strip()
            mouth.Say(text2, True)
            # Play random laugh
            mouth.Say("Ha HAA ha. Woooha", True)
            # Are we tweeting this joke?
            if bTweet == True:
                self.TweetJoke(text1 + "|" + text2, bTweet, bTweetConfirmation)
def __init__(self, options: list, DB):
    super(Alert, self).__init__()

    # DB Connection
    self.db = DB

    # Setting custom params
    if 'custom' in options:
        print('Using the defaults of alert_settings.json')
        with open('alert_settings.json', 'r') as f:
            params = load(f)
        self.metrics = params['params']
        self.coins = params['coins']
        self.myValuation = params['valuation']
        self.period = params['period']
        self.actions = params['actions']
        self.forecast = params['forecast']
        metrics = [DataFrame(params[metric]) for metric in self.metrics]
        growth = DataFrame(params['growth'])
        dif = DataFrame(params['dif'])
        day = DataFrame(params['day'])
        custom = DataFrame(params['custom'])
        self.actions = params['actions']
        self.upperLimit = DataFrame([S.loc['over'] for S in metrics], index=self.metrics)
        self.lowerLimit = DataFrame([S.loc['under'] for S in metrics], index=self.metrics)
    # Default params
    else:
        self.metrics = settings['alert']['params']
        self.coins = default['coins']
        self.myValuation = default['valuation']
        self.period = default['period']
        self.forecast = default['forecast']
        self.actions = default['actions']
        growth = DataFrame(default['growth'])
        dif = DataFrame(default['dif'])
        day = DataFrame(default['day'])
        custom = DataFrame(default['custom'])
        self.upperLimit = DataFrame(
            [S.loc['over'] for S in [growth, dif, day, custom]],
            index=['growth', 'dif', 'day', 'custom'])
        self.lowerLimit = DataFrame(
            [S.loc['under'] for S in [growth, dif, day, custom]],
            index=['growth', 'dif', 'day', 'custom'])
        self.actions = default['actions']

    try:
        # Create the collector
        thunder = Thunder()
        # Create the Fire objects
        self.fires = {coin: Fire(coin, thunder.getWindow(coin)) for coin in self.coins}
        # Helper DataFrame
        self.history = DataFrame(
            [Series({coin: 0 for coin in self.coins}, name=param) for param in self.metrics],
            index=self.metrics)
        # Configuring the event
        self.canSend = False
        self.looper = scheduler(time, sleep)
        self.checkStocks()
    except UnboundLocalError:
        print('There was an error, try again')  # originally: 'Hubo un error, intentalo de nuevo'
from src.TempChecker import TempChecker
from src.RainChecker import RainChecker
import sched, time

rainScheduler = sched.scheduler(time.time, time.sleep)
tempScheduler = sched.scheduler(time.time, time.sleep)

def rainChecker(sc):
    rainCheck = RainChecker()
    rainCheck.checkRain()
    rainScheduler.enter(300, 1, rainChecker, (sc,))

rainScheduler.enter(10, 1, rainChecker, (rainScheduler,))
rainScheduler.run()

def tempCheck(tempSched):
    tempChecker = TempChecker()
    tempChecker.check()
    tempScheduler.enter(14400, 1, tempCheck, (tempSched,))

tempScheduler.enter(11, 1, tempCheck, (tempScheduler,))
tempScheduler.run()
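# A sketch of an alternative arrangement (hypothetical job bodies, same
# intervals as above): because run() blocks until its queue empties, a
# single scheduler can interleave both periodic checks instead of two
# run() calls where the first never returns.
import sched
import time

jobs = sched.scheduler(time.time, time.sleep)

def rain_job():
    print('rain check')
    jobs.enter(300, 1, rain_job)

def temp_job():
    print('temp check')
    jobs.enter(14400, 2, temp_job)

jobs.enter(10, 1, rain_job)
jobs.enter(11, 2, temp_job)
jobs.run()  # runs both jobs from one blocking loop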
def run_updater():
    delay = 7200
    db_updater = sched.scheduler(time.time, time.sleep)
    db_updater.enter(0, 2, update_db, argument=(db_updater, delay))
    db_updater.run()
params["database"]["port"]) db_name = params["database"]["name"] db = client[db_name] main(db, params["branches"]) delay = params["updater"]["frequency"] if delay is not None: errors = 0 while True: try: # the schedule delay starts only when the branch updates are finished log("Scheduling the next update (in {} seconds)...".format( delay)) s = sched.scheduler() s.enter(delay, 1, main, (db, params["branches"])) try: s.run(blocking=True) except DownloadError as e: m = "Couldn't download a file. Something seems wrong, maybe better luck next time ?" log(m, LOG_WARNING) except UpdaterError as e: errors += 1 m = "An error happened in this updater instance ({}/{} in a row)".format( errors, params["updater"]["error_tolerance"]) log(m, LOG_WARNING) if errors == params["updater"]["error_tolerance"]: m = "Reached the maximum number of errors tolered in a row. The script will totally stop." log(m, LOG_WARNING) break
def startVoting():
    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(VOTING_DURATION_IN_SECS, 0, finishVoting)
    scheduler.run()
import os
import time
import threading
import json
import sched

from mqtt import *
from helper import *
from timestamp import timestamp
from AWSIoTPythonSDK.MQTTLib import AWSIoTMQTTClient

topic = "picloud/test"
TIME_INTERVAL = 5.0
SLEEP_INTERVAL = 1.0

function_scheduler = sched.scheduler(time.time, time.sleep)
packet = {}

def measure_cpu():
    temp = os.popen("cat /sys/class/thermal/thermal_zone0/temp").readline()
    return (float(temp) / 1000.0)

def measure_gpu():
    temp = os.popen("vcgencmd measure_temp").readline()
    return float((temp.replace("temp=", "").replace("'C", "")))

def fahrenheit_to_celsius(value):
    return ((value - 32) * 5.0 / 9.0)
def __init__(self, remote_ip, topic="beaglebone/led"):
    self.client = mqtt.Client()
    self.client.connect(remote_ip)
    self.scheduler_prio = 1
    self.scheduler = sched.scheduler(time.time, time.sleep)
    self.topic = topic
def timing_exe(delay=300):
    schedule = sched.scheduler(time.time, time.sleep)
    schedule.enter(0, 0, perform_command, (schedule, delay))
    schedule.run()
def __init__(self):
    super().__init__()
    self.subject = Subject()
    self.events = {}
    self.counter = -1
    self.scheduler = sched.scheduler(time.time, time.sleep)
import threading
import colorsys
import sched
from queue import Queue

# DW = 10
# WINDOW = 60
# DESK = 74
# PC = 36
pixelCount = 74
pixels = neopixel.NeoPixel(board.D18, pixelCount, auto_write=False)
animate = False
animationSpeed = 25
pixelLock = threading.Lock()
rainbowSched = sched.scheduler()
state = "pride"  # Animation on launch
param1 = 50
param2 = 0
param3 = 0
STATIC = -1  # Used for mode setting in setColor

gamma8 = [
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,
    1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2,
    2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 5, 5, 5,
    5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10,
    10, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15, 15, 16, 16,
    17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22, 23, 24, 24, 25,
    25, 26, 27, 27, 28, 29, 29, 30, 31, 32, 32, 33, 34, 35, 35, 36,
    37, 38, 39, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 50,
    51, 52, 54, 55, 56, 57, 58, 59,
# prettyTable1.add_row(["","","","","","","","",""])

## Write to txt file
def writeToFile():
    file = open(filename, "w+")
    file.write(str(prettyTable1))
    file.close()  # the original had a dangling `file.write` here

## Remove txt file
def removeFile():
    os.remove(filename)

## Start a timer to refresh the prices and calculations at whatever interval you set at the top
startTime = sched.scheduler(time.time, time.sleep)

def do_something(sc):
    prettyTable1.clear_rows()
    addToTable()
    writeToFile()
    print(prettyTable1)
    startTime.enter(refreshTimer, 1, do_something, (sc,))

## Initialise()
startTime.enter(refreshTimer, 1, do_something, (startTime,))
startTime.run()
def run_logger(tc):
    mow = API()
    mow.set_token(tc.token, tc.provider)
    mow.select_robot(args.mower)
    sch = scheduler(timefunc=time)
    status = {'status': None, 'status_changed': None}

    def write_log(*strings, fName=args.file, mode='a'):
        out = open(fName, mode) if fName else stdout
        print(*strings, sep=',', file=out)
        if fName:
            out.close()

    def now():
        return datetime.now().replace(microsecond=0)

    def log_status():
        mow_status = mow.status()
        start = datetime.utcfromtimestamp(
            mow_status['nextStartTimestamp']) if mow_status['nextStartTimestamp'] else None
        if status['status'] != mow_status['mowerStatus']:
            if args.summary and status['status'] is not None:
                # Write the summary. Skip the first iteration
                write_log(now().isoformat(), status['status'],
                          now() - status['status_changed'], fName=args.summary)
            status['status'] = mow_status['mowerStatus']
            status['status_changed'] = now()
        # The latest location has index 0
        location = mow_status['lastLocations'][0]
        currentTime = datetime.now()
        write_log(currentTime.isoformat(),
                  mow_status['mowerStatus'],
                  mow_status['batteryPercent'],
                  start.isoformat() if start else '',
                  now() - status['status_changed'],
                  location['latitude'],
                  location['longitude'])
        if stop_time >= currentTime:
            if mow_status['mowerStatus'] == 'PARKED_TIMER' and mow_status['batteryPercent'] == 100:
                # The mower has a full battery and is waiting for the next timer.
                # Skip until 2 minutes before the next timer start
                nextStart = start - timedelta(0, 2 * 60)
                if nextStart > datetime.now():
                    # fall back to the usual operation if nextStart is not in the future
                    return sch.enterabs(nextStart.timestamp(), 1, log_status)
            sch.enter(args.delay, 1, log_status)
        elif args.summary and status['status'] is not None:
            write_log(now(), status['status'],
                      now() - status['status_changed'], fName=args.summary)

    write_log('time', 'status', 'battery %', 'next start time',
              'status duration', 'latitude', 'longitude', mode='w')
    if args.summary:
        write_log('time', 'status', 'status duration', fName=args.summary, mode='w')
    log_status()
    sch.run()
def insert_data_to_db_timestamp(self, time_stamp):
    s = sched.scheduler(time.time, time.sleep)
    while 1:
        s.enter(time_stamp, 1, self.insert_data_to_db, ())
        s.run()