def schedule_relative(self, duetime, action: typing.ScheduledAction, state: typing.TState = None):
    """Schedule *action* to run after *duetime*.

    Args:
        duetime: Relative time (convertible via to_timedelta) to wait.
        action: Action to be executed.
        state: [Optional] state to be given to the action function.

    Returns:
        The disposable object used to cancel the scheduled action
        (best effort).
    """
    scheduler = self
    timespan = self.to_timedelta(duetime)
    if timespan == DELTA_ZERO:
        # Nothing to wait for: take the immediate scheduling path.
        return scheduler.schedule(action, state)

    sad = SingleAssignmentDisposable()

    def interval():
        sad.disposable = self.invoke_action(action, state)

    seconds = timespan.total_seconds()
    timer = Timer(seconds, interval)
    timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    timer.start()

    def dispose():
        timer.cancel()

    return CompositeDisposable(sad, Disposable(dispose))
class PerpetualTimer(object):
    """Timer with the same interface as `threading.Timer`, but which
    reschedules itself continuously until `cancel` is called.
    """

    def __init__(self, t, hFunction):
        # t: interval in seconds between invocations of hFunction.
        self.t = t
        self.was_cancelled = False
        self.hFunction = hFunction
        self.thread = Timer(self.t, self.handle_function)
        self.thread.daemon = True  # setDaemon() deprecated since 3.10

    def handle_function(self):
        """Run the callback, then schedule the next tick."""
        if self.was_cancelled:
            return
        self.hFunction()
        self.thread = Timer(self.t, self.handle_function)
        # Bug fix: rescheduled timers must also be daemonic; originally only
        # the first timer was, so after one tick the chain could block
        # interpreter exit.
        self.thread.daemon = True
        self.thread.start()

    def start(self):
        # Clear the flag *before* starting so a fast first tick cannot
        # observe a stale cancellation from a previous start/cancel cycle.
        self.was_cancelled = False
        self.thread.start()

    def cancel(self):
        self.thread.cancel()
        self.was_cancelled = True
def program_next_poll(self, interval, method, args, kwargs):
    """Arm a one-shot timer that re-invokes self.poller after *interval*.

    The timer is recorded in self.current_timers so it can be cancelled
    later, and runs as a daemon so it does not block interpreter exit.
    """
    t = Timer(interval=interval, function=self.poller,
              kwargs={'interval': interval, 'method': method,
                      'args': args, 'kwargs': kwargs})
    self.current_timers.append(t)  # save the timer to be able to kill it
    # .name / .daemon replace setName()/setDaemon(), deprecated since 3.10
    t.name = 'Poller thread for %s' % type(method.__self__).__name__
    t.daemon = True  # so it is not locking on exit
    t.start()
class Scheduler:
    '''
    Schedule a task/function every X seconds on a chained daemon Timer.

    :param delay: interval in seconds between task launches
    :param function: task to execute
    :param args: positional arguments for the task
    '''

    def __init__(self, delay, function, *args):
        self.function = function
        self.delay = delay
        self.args = args
        self._timer = None
        self.running = False

    def _run(self):
        # Re-arm first so the cadence is independent of task runtime,
        # then execute the task.
        self.running = False
        self.start()
        self.function(*self.args)

    def start(self):
        """Arm the next tick; no-op if already running."""
        if not self.running:
            self._timer = Timer(self.delay, self._run)
            self._timer.daemon = True  # stop when application stopped
            self._timer.start()
            self.running = True

    def stop(self):
        """Cancel the pending tick (safe to call before first start)."""
        if self._timer is not None:  # bug fix: stop() before start() crashed
            self._timer.cancel()
        self.running = False
def addNewData(): """ Reads parsed data from the MTA feed and batch writes them to DynamoDB. :param delay: How long to sleep in seconds on each iteration :param mtaUpdate: An instance of an mtaUpdates class object :return: None """ # Create a thread that recursively calls this function every 30 seconds try: delay = 30.0 if addNewData: # Recursively call the function again in 30 seconds # with a delayed thread call # Set as a daemon so it dies gracefully with the main thread t = Timer(delay, addNewData) t.setDaemon(True) t.start() else: return except TypeError as e: print "Thread for adding, finishing." return print getCurrentTime(), " Adding new data to Dynamo" insertData = parseData(mtaUpdate) with table.batch_writer(overwrite_by_pkeys=['tripId']) as batch: for dict in insertData: batch.put_item(Item=dict) print "Finished adding data"
class TimeHandler:
    """Base class that runs `handle` after *interval* seconds on a named
    daemon Timer; `get_msg`, when given, produces the message to handle."""

    def __init__(self, name, interval, get_msg=None):
        self.name = name          # timer thread name
        self.interval = interval  # seconds before run() fires
        self.get_msg = get_msg    # optional message producer

    def run(self):
        try:
            msg = self.get_msg() if self.get_msg else None
            # NOTE(review): handle() is declared as handle(topic, msg) below
            # but invoked with a single argument here — confirm the intended
            # signature with the subclasses before relying on either.
            self.handle(msg)
        except Exception as e:
            logger.exception(f'<TimeHandler>-{self.name} exception:{e}')

    def stop(self):
        """Cancel the pending run and reap the timer thread."""
        self.timer.cancel()
        self.timer.join()

    def start(self):
        """Arm the timer (must be called before stop())."""
        self.timer = Timer(self.interval, self.run)
        # .name / .daemon replace setName()/setDaemon(), deprecated in 3.10
        self.timer.name = self.name
        self.timer.daemon = True
        self.timer.start()

    @abstractmethod
    def handle(self, topic, msg):
        ...
def activate(self, conf, glob):
    """Activate the protocol: spawn the worker process, seed the persisted
    value store on disk, fix its ownership (best effort) and start the
    periodic calc thread."""
    protocols.activate(self, conf, glob)
    self.request = Queue()
    self.response = Queue()
    self.p = Process(target=root, args=(self.request, self.response))
    self.p.start()
    self.valuestore = ConfigParser()
    self.valuestore.add_section('values')
    self.valuesfile = path.join(path.dirname(__file__), 'values.conf')
    # Seed defaults first, then let any existing file override them.
    for item in itemList:
        self.valuestore.set('values', item['name'], item['value'])
    self.valuestore.read(self.valuesfile)
    with open(self.valuesfile, 'w') as f:  # bug fix: don't leak the handle
        self.valuestore.write(f)
    try:
        uid = pwd.getpwnam(self.glob['conf'].USER).pw_uid
        gid = grp.getgrnam(self.glob['conf'].GROUP).gr_gid
        os.chown(self.valuesfile, uid, gid)
    except Exception:
        # Best effort only (e.g. unknown user/group or non-root run);
        # narrowed from a bare except so SystemExit etc. propagate.
        pass
    t = Timer(5, self.calc_thread)
    t.daemon = True  # setDaemon() is deprecated since Python 3.10
    t.start()
def start_thread_timer(callback, timer=1):
    '''
    util: start a one-shot daemon Timer

    :param callback: zero-argument callable to run after the delay
    :param timer: delay in seconds (default 1)
    '''
    temp_thread = Timer(timer, callback)
    temp_thread.daemon = True  # setDaemon() deprecated since Python 3.10
    temp_thread.start()
def update_label_show(label):
    """Refresh *label* with the current timestamp once per second.

    Re-arms itself via the global daemon Timer `timer03`, which keeps a
    handle for external cancellation.
    """
    label['text'] = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    label.update()
    global timer03
    timer03 = Timer(1.0, update_label_show, (label,))
    timer03.daemon = True  # setDaemon() deprecated since Python 3.10
    timer03.start()
def fill_text(text, flag):
    """Write the current Unix time into *text* and re-arm a 1 s refresh.

    flag == 1 writes whole seconds (Timer handle kept in global timer01),
    otherwise milliseconds (handle kept in global timer02).
    """
    global timer01, timer02
    text.delete(0, tk.END)
    t = time.time()
    # Seconds vs. milliseconds display, selected by flag.
    value = int(t) if flag == 1 else int(t * 1000)
    text.insert(tk.END, str(value))
    text.update()
    # De-duplicated re-arm: only the global holding the handle differs.
    timer = Timer(1.0, fill_text, (text, flag))
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
    if flag == 1:
        timer01 = timer
    else:
        timer02 = timer
class NotifyingPropertyReader(SimplePropertyReader):
    """Property reader that polls the backing file every second and
    notifies registered listeners when its modification time changes.

    (Python 2 source: print statements / map() side effects.)
    """

    def __init__(self, _file, _separator, startAsDaemon=False):
        super(NotifyingPropertyReader, self).__init__(_file, _separator)
        self.listeners = []  # callables invoked with the properties dict
        self.startAsDaemon = startAsDaemon  # daemon flag for the poll timer
        self.lastModifiedDate = os.path.getmtime(self.file)
        self.startWatcher()

    def startWatcher(self):
        # One-shot 1 s timer; determineIfPropertyHasChanged re-arms it.
        self.timer = Timer(1, self.determineIfPropertyHasChanged)
        self.timer.setDaemon(self.startAsDaemon)
        self.timer.start()

    def addListener(self, propertyChangedListener):
        self.listeners.append(propertyChangedListener)

    def removeListener(self, propertyChangedListener):
        self.listeners.remove(propertyChangedListener)

    def notifyListeners(self):
        print "notifying listeners"
        # NOTE(review): relies on map() eagerly applying the side effect
        # (Python 2 semantics); under Python 3 this would never run.
        map(lambda x: x(self.properties), self.listeners)

    def determineIfPropertyHasChanged(self):
        print "timer has fired"
        if self.lastModifiedDate != os.path.getmtime(self.file):
            self.readProperties()
            self.notifyListeners()
            self.lastModifiedDate = os.path.getmtime(self.file)
        # Unconditionally re-arm so polling continues — TODO confirm this
        # re-arm was outside the if in the original layout.
        self.startWatcher()
def _func():
    """Open the viewer for *im*; when auto_close is set, close the UI after
    two minutes via a background daemon timer.

    NOTE(review): `open_view`, `im` and `auto_close` come from the
    enclosing scope — confirmed only by usage here.
    """
    open_view(im)
    if auto_close:
        minutes = 2
        t = Timer(60 * minutes, im.close_ui)
        t.daemon = True  # setDaemon() deprecated since Python 3.10
        t.start()
class UpdatedURL:
    """URL contents refreshed hourly on a daemon Timer; pickle-friendly.

    __getstate__ strips the Timer (threads cannot be pickled) and
    __setstate__ restarts the refresh cycle.
    """

    def __init__(self, url):
        self.url = url
        self.contents = ""
        self.last_updated = None
        self.update()

    def update(self):
        """Fetch the URL, stamp the time and schedule the next refresh."""
        # Bug fix: close the HTTP response instead of leaking the socket.
        with urlopen(self.url) as response:
            self.contents = response.read()
        self.last_updated = datetime.datetime.now()
        self.schedule()

    def schedule(self):
        self.timer = Timer(3600, self.update)
        self.timer.daemon = True  # setDaemon() deprecated since 3.10
        self.timer.start()

    def __getstate__(self):
        # Threads are not picklable; drop the handle before pickling.
        new_state = self.__dict__.copy()
        if 'timer' in new_state:
            del new_state['timer']
        return new_state

    def __setstate__(self, data):
        self.__dict__ = data
        self.schedule()  # restart the refresh cycle after unpickling
class UpdatedURL:
    """URL contents refreshed hourly on a daemon Timer; pickle-friendly.

    __getstate__ strips the Timer (threads cannot be pickled) and
    __setstate__ restarts the refresh cycle.
    """

    def __init__(self, url):
        self.url = url
        self.contents = ''
        self.last_updated = None
        self.update()

    def update(self):
        """Fetch the URL, stamp the time and schedule the next refresh."""
        # Bug fix: close the HTTP response instead of leaking the socket.
        with urlopen(self.url) as response:
            self.contents = response.read()
        self.last_updated = datetime.datetime.now()
        self.schedule()

    def schedule(self):
        self.timer = Timer(3600, self.update)
        self.timer.daemon = True  # setDaemon() deprecated since 3.10
        self.timer.start()

    def __getstate__(self):
        # Threads are not picklable; drop the handle before pickling.
        new_state = self.__dict__.copy()
        if 'timer' in new_state:
            del new_state['timer']
        return new_state

    def __setstate__(self, data):
        self.__dict__ = data
        self.schedule()  # restart the refresh cycle after unpickling
def _create_dasd_part(dev, size):
    """
    Create a partition on DASD device *dev* via fdasd.

    :param dev: name of DASD device for creation of partition
    :param size: block size
    :return: None
    :raises: OperationFailed, NotFoundError, TimeoutExpired
    """
    devname = '/dev/' + dev
    device = PDevice(devname)
    disk = PDisk(device)
    num_parts = len(disk.partitions)
    if num_parts == 3:
        # DASD supports at most three partitions.
        raise OperationFailed("GINDASDPAR0016E")

    def kill_proc(proc, timeout_flag):
        # Kill the fdasd process tree; mark the flag so callers know the
        # command timed out rather than failed.
        try:
            parent = psutil.Process(proc.pid)
            for child in parent.get_children(recursive=True):
                child.kill()
            # kill the process after no children is left
            proc.kill()
        except OSError:
            pass
        else:
            timeout_flag[0] = True

    dasd_devs = _get_dasd_names()
    if dev not in dasd_devs:
        raise NotFoundError("GINDASDPAR0012E", {'name': dev})
    p_str = _form_part_str(size)
    # Bug fix: bind these before the try block so the finally clause can
    # reference them even when Popen itself raises.
    timer = None
    timeout = 2.0
    timeout_flag = [False]
    try:
        p1_out = subprocess.Popen(["echo", "-e", "\'", p_str, "\'"],
                                  stdout=subprocess.PIPE)
        p2_out = subprocess.Popen(["fdasd", devname], stdin=p1_out.stdout,
                                  stderr=subprocess.PIPE,
                                  stdout=subprocess.PIPE)
        p1_out.stdout.close()
        timer = Timer(timeout, kill_proc, [p2_out, timeout_flag])
        timer.daemon = True  # setDaemon() deprecated since Python 3.10
        timer.start()
        out, err = p2_out.communicate()
        if timeout_flag[0]:
            msg_args = {'cmd': "fdasd " + devname,
                        'seconds': str(timeout)}
            raise TimeoutExpired("WOKUTILS0002E", msg_args)
        if p2_out.returncode != 0:
            if 'error while rereading partition table' in err.lower():
                # Kernel still holds the old table; nudge it.
                run_command(["partprobe", devname, "-s"])
            else:
                raise OperationFailed("GINDASDPAR0007E",
                                      {'name': devname, 'err': err})
    except TimeoutExpired:
        raise
    finally:
        if timer and not timeout_flag[0]:
            timer.cancel()
class RESTAnovaController(AnovaController):
    """
    This version of the Anova Controller will keep a connection open over
    bluetooth until the timeout has been reached.

    NOTE: Only a single BlueTooth connection can be open to the Anova at a
    time.
    """

    TIMEOUT = 5 * 60  # Keep the connection open for this many seconds.
    TIMEOUT_HEARTBEAT = 20  # Seconds between idle-timeout checks.

    def __init__(self, mac_address, connect=True, logger=None):
        self.last_command_at = datetime.datetime.now()
        if logger:
            self.logger = logger
        else:
            self.logger = logging.getLogger()
        super(RESTAnovaController, self).__init__(mac_address, connect=connect)

    def set_timeout(self, timeout):
        """
        Adjust the timeout period (in seconds).
        """
        # Instance attribute shadows the class default for this object only.
        self.TIMEOUT = timeout

    def timeout(self, seconds=None):
        """
        Close the Bluetooth connection if the last executed command is older
        than the timeout; otherwise re-arm the heartbeat check.
        """
        if not seconds:
            seconds = self.TIMEOUT
        timeout_at = self.last_command_at + datetime.timedelta(seconds=seconds)
        if datetime.datetime.now() > timeout_at:
            self.close()
            self.logger.info('Timeout bluetooth connection. Last command ran at {0}'.format(self.last_command_at))
        else:
            self._timeout_timer = Timer(self.TIMEOUT_HEARTBEAT, lambda: self.timeout())
            self._timeout_timer.daemon = True  # setDaemon() deprecated in 3.10
            self._timeout_timer.start()
            self.logger.debug('Start connection timeout monitor. Will idle timeout in {0} seconds.'.format(
                (timeout_at - datetime.datetime.now()).total_seconds()))

    def connect(self):
        super(RESTAnovaController, self).connect()
        self.last_command_at = datetime.datetime.now()
        self.timeout()

    def close(self):
        super(RESTAnovaController, self).close()
        try:
            self._timeout_timer.cancel()
        except AttributeError:
            # No heartbeat timer was ever armed.
            pass

    def _send_command(self, command):
        # Reconnect lazily and stamp activity for the idle-timeout logic.
        if not self.is_connected:
            self.connect()
        self.last_command_at = datetime.datetime.now()
        return super(RESTAnovaController, self)._send_command(command)
def schedule(self,
             action: typing.ScheduledAction,
             state: Optional[typing.TState] = None
             ) -> typing.Disposable:
    """Schedules an action to be executed.

    Args:
        action: Action to be executed.
        state: [Optional] state to be given to the action function.

    Returns:
        The disposable object used to cancel the scheduled action
        (best effort).
    """
    sad = SingleAssignmentDisposable()

    def interval() -> None:
        sad.disposable = self.invoke_action(action, state)

    timer = Timer(0, interval)
    timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    timer.start()

    def dispose() -> None:
        timer.cancel()

    return CompositeDisposable(sad, Disposable(dispose))
def hello():
    """Poll load_data() once per second, updating the counter label and
    the appliance image; re-arms itself on a daemon Timer (handle in the
    global t)."""
    global k, t, root, label1
    k += 1
    r = load_data()
    label["text"] = str(k) + ':' + r
    # bg = ImageTk.PhotoImage(Image.open("images/" + r + ".png"))
    # Show image using label; img_* are module-level images — TODO confirm.
    if r == "microwave_off":
        label1.configure(image=img_microwave_off)
    elif r == "microwave_on":
        label1.configure(image=img_microwave_on)
    elif r == "fridge_on":
        label1.configure(image=img_fridge_on)
    elif r == "fridge_off":
        label1.configure(image=img_fridge_off)
    elif r == "cooker_on":
        label1.configure(image=img_cooker_on)
    elif r == "cooker_off":
        label1.configure(image=img_cooker_off)
    # label1.place(x = 0,y = 0)
    t = Timer(1, hello)
    t.daemon = True  # setDaemon() deprecated since Python 3.10
    t.start()
class ChatRoom(BotPlugin):
    # Bot plugin: joins configured rooms on connect, relays messages for
    # whitelisted users, and pings every 60 s to keep the session alive.
    # (Python 2 source: `except Exception, e` syntax.)

    connected = False  # set once the first connect callback has run

    def keep_alive(self):
        # Periodic no-op groupchat message so the server keeps the session;
        # re-arms itself on a daemon timer stored in self.t.
        logging.debug('Keep alive sent')
        self.send('nobody', ' ', message_type='groupchat')  # hack from hipchat itself
        self.t = Timer(60.0, self.keep_alive)
        self.t.setDaemon(True)  # so it is not locking on exit
        self.t.start()

    def callback_connect(self):
        logging.info('Callback_connect')
        if not self.connected:
            self.connected = True
            for room in CHATROOM_PRESENCE:
                logging.info('Join room ' + room)
                self.join_room(room, CHATROOM_FN)
            logging.info('Start kepp alive')
            self.keep_alive()

    def callback_message(self, conn, mess):
        # if mess.getBody():
        #     logging.debug(u'Received message %s' % mess.getBody())
        if mess.getType() in ('groupchat', 'chat'):
            try:
                username = get_jid_from_message(mess)
                if username in CHATROOM_RELAY:
                    # Relay the message body to every room configured for
                    # this user.
                    logging.debug('Message to relay from %s.' % username)
                    body = mess.getBody()
                    rooms = CHATROOM_RELAY[username]
                    for room in rooms:
                        self.send(room, body, message_type='groupchat')
            except Exception, e:
                logging.exception('crashed in callback_message %s' % e)
def wrapper(*args, **kwargs):
    """Rate-limited call: take a semaphore slot, run fn, and return the
    slot *every* seconds later via a daemon timer.

    Bug fix: the release timer is now armed in a finally block so a slot
    is no longer leaked when fn raises (cf. the try/finally variant of
    this wrapper elsewhere in the file).
    """
    semaphore.acquire()
    try:
        return fn(*args, **kwargs)
    finally:
        timer = Timer(every, semaphore.release)
        timer.daemon = True  # don't block interpreter exit on the release
        timer.start()
class NotifyingPropertyReader(SimplePropertyReader):
    # Property reader that polls the backing file every second and notifies
    # registered listeners when its modification time changes.
    # (Python 2 source: print statements / map() side effects.)

    def __init__(self, _file, _separator, startAsDaemon = False):
        super(NotifyingPropertyReader, self).__init__(_file, _separator)
        self.listeners = []  # callables invoked with the properties dict
        self.startAsDaemon = startAsDaemon  # daemon flag for the poll timer
        self.lastModifiedDate = os.path.getmtime(self.file)
        self.startWatcher()

    def startWatcher(self):
        # One-shot 1 s timer; determineIfPropertyHasChanged re-arms it.
        self.timer = Timer(1, self.determineIfPropertyHasChanged)
        self.timer.setDaemon(self.startAsDaemon)
        self.timer.start()

    def addListener(self, propertyChangedListener):
        self.listeners.append(propertyChangedListener)

    def removeListener(self, propertyChangedListener):
        self.listeners.remove(propertyChangedListener)

    def notifyListeners(self):
        print "notifying listeners"
        # NOTE(review): relies on map() eagerly applying the side effect
        # (Python 2 semantics); under Python 3 this would never run.
        map(lambda x : x(self.properties), self.listeners)

    def determineIfPropertyHasChanged(self):
        print "timer has fired"
        if self.lastModifiedDate != os.path.getmtime(self.file):
            self.readProperties()
            self.notifyListeners()
            self.lastModifiedDate = os.path.getmtime(self.file)
        # Unconditionally re-arm so polling continues — TODO confirm this
        # re-arm was outside the if in the original layout.
        self.startWatcher()
def schedule(self, action: typing.ScheduledAction, state: Optional[typing.TState] = None) -> typing.Disposable:
    """Schedules an action to be executed.

    Args:
        action: Action to be executed.
        state: [Optional] state to be given to the action function.

    Returns:
        The disposable object used to cancel the scheduled action
        (best effort).
    """
    sad = SingleAssignmentDisposable()

    def interval() -> None:
        sad.disposable = self.invoke_action(action, state)

    timer = Timer(0, interval)
    timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    timer.start()

    def dispose() -> None:
        timer.cancel()

    return CompositeDisposable(sad, Disposable(dispose))
def start_dev_server(
    saved_bundle_path: str,
    port: int = Provide[BentoMLContainer.config.api_server.port],
    enable_microbatch: bool = Provide[
        BentoMLContainer.config.api_server.enable_microbatch],
    mb_max_batch_size: int = Provide[
        BentoMLContainer.config.marshal_server.max_batch_size],
    mb_max_latency: int = Provide[
        BentoMLContainer.config.marshal_server.max_latency],
    run_with_ngrok: bool = Provide[
        BentoMLContainer.config.api_server.run_with_ngrok],
    enable_swagger: bool = Provide[
        BentoMLContainer.config.api_server.enable_swagger],
):
    """Start the BentoML API server in development mode.

    Optionally exposes it through an ngrok tunnel and/or fronts it with a
    micro-batching marshal process. Default argument values are
    dependency-injected from the BentoML container configuration.
    """
    logger.info("Starting BentoML API server in development mode..")

    import multiprocessing

    from bentoml.saved_bundle import load_from_dir
    from bentoml.server.api_server import BentoAPIServer
    from bentoml.utils import reserve_free_port

    if run_with_ngrok:
        from threading import Timer

        from bentoml.utils.flask_ngrok import start_ngrok

        # Give the API server a head start before opening the tunnel.
        thread = Timer(1, start_ngrok, args=(port,))
        thread.daemon = True  # setDaemon() deprecated since Python 3.10
        thread.start()
    if enable_microbatch:
        with reserve_free_port() as api_server_port:
            # start server right after port released
            # to reduce potential race
            marshal_proc = multiprocessing.Process(
                target=start_dev_batching_server,
                kwargs=dict(
                    api_server_port=api_server_port,
                    saved_bundle_path=saved_bundle_path,
                    port=port,
                    mb_max_latency=mb_max_latency,
                    mb_max_batch_size=mb_max_batch_size,
                ),
                daemon=True,
            )
        marshal_proc.start()
        bento_service = load_from_dir(saved_bundle_path)
        api_server = BentoAPIServer(bento_service, port=api_server_port,
                                    enable_swagger=enable_swagger)
        api_server.start()
    else:
        bento_service = load_from_dir(saved_bundle_path)
        api_server = BentoAPIServer(bento_service, port=port,
                                    enable_swagger=enable_swagger)
        api_server.start()
def schedule_relative(self,
                      duetime: typing.RelativeTime,
                      action: typing.ScheduledAction,
                      state: Optional[typing.TState] = None
                      ) -> typing.Disposable:
    """Schedules an action to be executed after duetime.

    Args:
        duetime: Relative time after which to execute the action.
        action: Action to be executed.
        state: [Optional] state to be given to the action function.

    Returns:
        The disposable object used to cancel the scheduled action
        (best effort).
    """
    seconds = self.to_seconds(duetime)
    if seconds <= 0.0:
        # Non-positive delay: schedule immediately.
        return self.schedule(action, state)

    sad = SingleAssignmentDisposable()

    def interval() -> None:
        sad.disposable = self.invoke_action(action, state)

    timer = Timer(seconds, interval)
    timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    timer.start()

    def dispose() -> None:
        timer.cancel()

    return CompositeDisposable(sad, Disposable(dispose))
class RepeatedTimer(object):
    """Call *function* every *interval* seconds on a chained daemon Timer.

    Starts immediately on construction; stop() cancels the pending tick.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._timer = None
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.is_running = False
        self.start()

    def _run(self):
        # Re-arm before running the callback so slow callbacks do not
        # postpone the next tick.
        self.is_running = False
        self.start()
        self.function(*self.args, **self.kwargs)

    def start(self):
        """Arm the next tick; no-op if one is already pending."""
        if not self.is_running:
            self._timer = Timer(self.interval, self._run)
            self._timer.daemon = True  # setDaemon() deprecated since 3.10
            self._timer.start()
            self.is_running = True

    def stop(self):
        """Cancel the pending tick."""
        self._timer.cancel()
        self.is_running = False
def start_dev_server(saved_bundle_path: str, port: int,
                     enable_microbatch: bool, run_with_ngrok: bool):
    """Start the BentoML API server in development mode (older CLI path),
    optionally behind an ngrok tunnel and/or a micro-batching marshal
    service."""
    logger.info("Starting BentoML API server in development mode..")

    from bentoml import load
    from bentoml.server.api_server import BentoAPIServer
    from bentoml.marshal.marshal import MarshalService
    from bentoml.utils import reserve_free_port

    bento_service = load(saved_bundle_path)

    if run_with_ngrok:
        from bentoml.utils.flask_ngrok import start_ngrok
        from threading import Timer

        # Give the API server a head start before opening the tunnel.
        thread = Timer(1, start_ngrok, args=(port,))
        thread.daemon = True  # setDaemon() deprecated since Python 3.10
        thread.start()

    if enable_microbatch:
        with reserve_free_port() as api_server_port:
            # start server right after port released
            # to reduce potential race
            marshal_server = MarshalService(
                saved_bundle_path,
                outbound_host="localhost",
                outbound_port=api_server_port,
                outbound_workers=1,
            )
            api_server = BentoAPIServer(bento_service, port=api_server_port)
        marshal_server.async_start(port=port)
        api_server.start()
    else:
        api_server = BentoAPIServer(bento_service, port=port)
        api_server.start()
def _upload_file(filename):
    """
    Upload the file `filename` to the configured sharing service, and
    shortens the URL with the configured URL shortener.

    Also copies the shortened URL to the clipboard, and runs the post
    upload hook (if there is one).

    (Python 2 source: print statements / `except Exception, e`. NOTE:
    `urlprovider` is unused in the visible portion — presumably consumed
    further down; TODO confirm this function is not truncated.)
    """
    configmanager = get_conf_manager()
    sharingservice = get_sharing_service_from_conf(configmanager)
    urlprovider = get_url_shortener_from_conf(configmanager)
    try:
        # Switch the tray icon to "uploading" while the transfer runs.
        StatusIcon().statusicon.set_icon_from_file(os.path.join(PATHS['ICONS_PATH'], 'icon-uploading.png'))
        # Store the file online
        url = sharingservice.store(filename)
        print 'Saved to', url
    except Exception, e:
        import traceback
        traceback.print_exc()
        print type(e)
        if isinstance(e, SharingError):
            try:
                title = e.args[1]
            except IndexError:
                title = e.default_title
            notify(title, e.args[0])
        elif isinstance(e, URLError):
            notify('Connection error', 'You may be disconnected from the internet, or the server you are using may be down.')
        # Show the "failed" icon, then reset it 5 s later on a daemon timer.
        StatusIcon().statusicon.set_icon_from_file(os.path.join(PATHS['ICONS_PATH'], 'icon-uploadfailed.png'))
        timer = Timer(5, StatusIcon().reset_icon)
        timer.setDaemon(True)
        timer.start()
        return None
class UpdateURL:
    """Hourly-refreshed URL contents; safe to pickle (timer is stripped)."""

    def __init__(self, url):
        self.url = url
        self.content = ""
        self.last_update = None
        # Bug fix: initialise the handle *before* update(); update() ->
        # schedule() assigns the real Timer, and the original assigned
        # None afterwards, clobbering the only handle for cancellation.
        self.timer = None
        self.update()

    def update(self):
        """Fetch the URL, stamp the time and schedule the next refresh."""
        self.content = urlopen(self.url).read()
        self.last_update = datetime.now()
        self.schedule()

    def schedule(self):
        self.timer = Timer(3600, self.update)  # background refresh thread
        self.timer.daemon = True  # setDaemon() deprecated since 3.10
        self.timer.start()

    def __getstate__(self):
        # Threads are not picklable; drop the handle before pickling.
        new_state = self.__dict__.copy()
        if "timer" in new_state:
            del new_state["timer"]
        return new_state

    def __setstate__(self, data):
        self.__dict__ = data
        self.schedule()  # restart the refresh cycle after unpickling
def start_timer_start():
    """
    Schedule start_instances after a Gauss(start_interval, deviation)
    delay (in seconds) on a one-shot daemon timer.
    """
    start_time = random.gauss(start_interval, deviation)
    timer = Timer(start_time, start_instances)
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
    print('Scheduled start timer task %d sec' % (start_time))
class TimedClose:
    """Arms a daemon timer that returns the app to its previous full-screen
    box after *timeout* seconds; re-activating resets the countdown."""

    def __init__(self, app: 'WebApp', timeout=TIMEOUT):
        self._timeout = timeout
        self._app = app
        self._timer = None  # pending Timer, or None when idle

    def activateTimer(self):
        """Start the countdown, restarting it if one is already running."""
        if self._timer is None:
            self._startTimer()
        else:
            if self._timer.is_alive():
                # cancel and reschedule
                self.closeTimer()
            # new timer
            self._startTimer()

    def _startTimer(self):
        self._timer = Timer(self._timeout, self._timerFinished)
        self._timer.daemon = True  # setDaemon() deprecated since 3.10
        self._timer.start()

    def closeTimer(self):
        """Cancel and reap the pending timer, if any."""
        if self._timer is not None:
            self._timer.cancel()
            self._timer.join()
            self._timer = None

    def _timerFinished(self):
        # just showing the main screen
        self._timer = None
        self._app.showPrevFSBox()
def activate(self, conf, glob):
    """Activate the plugin: clamp configuration values to documented
    defaults, register items, persist R/W settings and start the calc
    thread when the generic state tracker is selected."""
    protocols.activate(self, conf, glob)
    global itemList
    if not 'timer' in conf:
        itemList += counter_mode_items
        itemTags.update(counter_mode_tags)
    if not 'state_tracker' in self.conf:
        self.conf['state_tracker'] = 'generic'
    # Each setting falls back to its default when missing or out of range;
    # narrowed from bare `except` so SystemExit etc. still propagate.
    try:
        self.power_window = int(self.conf['power_window'])
        if self.power_window < 60 or self.power_window > 1800:
            raise ValueError
    except Exception:
        self.power_window = 300
    try:
        self.running_timeout = int(self.conf['running_timeout'])
        # NOTE(review): re-tests power_window here — looks like a
        # copy/paste slip; preserved as-is, confirm upstream.
        if self.running_timeout < 5 or self.power_window > 300:
            raise ValueError
    except Exception:
        self.running_timeout = 60
    try:
        self.ignition_timeout = int(self.conf['ignition_timeout'])
        if self.ignition_timeout < 60 or self.ignition_timeout > 1200:
            raise ValueError
    except Exception:
        self.ignition_timeout = 600
    try:
        self.starting_power = float(self.conf['starting_power'])
        if self.starting_power < 0.5 or self.starting_power > 10:
            raise ValueError
    except Exception:
        self.starting_power = 5
    try:
        self.startup_feed_wait = float(self.conf['startup_feed_wait'])
        if self.startup_feed_wait < 10 or self.startup_feed_wait > 300:
            raise ValueError
    except Exception:
        self.startup_feed_wait = 60
    try:
        self.log_changes = [
            s.strip() for s in self.conf['log_changes'].split(',')
        ]
    except Exception:
        self.log_changes = ['mode', 'alarm']
    if conf['state_tracker'] == 'generic':
        itemList += state_tracker_items
        itemTags.update(state_tracker_tags)
    for item in itemList:
        if item['type'] == 'R/W':
            self.store_setting(item['name'], confval=str(item['value']))
    self.migrate_settings('pelletcalc')
    if self.conf['state_tracker'] == 'generic':
        t = Timer(5, self.calc_thread)
        t.daemon = True  # setDaemon() deprecated since Python 3.10
        t.start()
def start_timer_delete():
    """
    Schedule delete_instances after a Gauss(delete_interval, deviation)
    delay (in seconds) on a one-shot daemon timer.
    """
    delete_time = random.gauss(delete_interval, deviation)
    timer = Timer(delete_time, delete_instances)
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
    print('Scheduled delete timer task %d sec' % (delete_time))
def on_disconnect(self):
    """Mark the module as disconnected, refresh its status display and
    schedule a reconnect attempt in 10 s on a daemon timer."""
    self.is_connected = False
    self.update_module_status()
    log.warning("Disconnected - scheduling reconnect in 10 s")
    timer = Timer(10, self.connect)
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
def certificate_renewal():
    """Run the external certificate renewal script, then re-arm this check
    after certificate_renewal_interval seconds.

    NOTE(review): os.system runs a fixed sudo command — no untrusted input
    here, but subprocess.run([...]) would be more robust.
    """
    # get next renewal time from config file
    # if the renewal time has come, renew the certificate
    os.system("sudo python2.7 certificate_renewal.py")
    certificate_renewal_timer = Timer(certificate_renewal_interval,
                                      certificate_renewal)
    certificate_renewal_timer.daemon = True  # setDaemon() deprecated
    certificate_renewal_timer.start()
def check_update_timer(server: classmethod) -> bool:
    """Update-check timer callback (used with threading.Timer).

    Polls <rooturl>/ver, triggers update() when the configured version
    differs from the server's, and re-arms itself every 300 s until the
    module-level `udstopflag` is cleared.

    NOTE(review): `update_timer` is a *local* name inside this function,
    so the cancel/None handling below can never affect a previously armed
    timer — each branch only manipulates its own local binding (the
    `.cancel()` raises UnboundLocalError, swallowed by the bare except).
    Confirm whether `global update_timer` was intended. The annotation
    `server: classmethod` also looks wrong — presumably a server handle.
    """
    url = rooturl + '/ver'
    http = urllib3.PoolManager()
    res = http.request('GET', url)
    ver = _read_config()['version']
    ver_res = str(res.data, encoding='UTF-8')
    if ver != ver_res:
        # Version mismatch: push the update to everyone and stop polling.
        update(server, '@a')
        try:
            update_timer = None
        except:
            pass
        return
    if udstopflag == False:
        # Polling disabled: attempt to tear the timer down.
        try:
            update_timer.cancel()
            update_timer = None
        except:
            pass
        return
    try:
        update_timer = Timer(300, check_update_timer, [server])
        update_timer.setDaemon(True)
        update_timer.start()
    except:
        return False
    else:
        return True
    return None
def set_stable2jitter_path(delay, loss, bw):
    """Apply stable or jittery parameters to every peer path, then re-arm
    itself so the network condition keeps alternating.

    :param delay: round-trip delay (halved for each direction)
    :param loss: random loss rate
    :param bw: bandwidth rate
    """
    real_delay_time = delay / 2  # one-way delay
    timer_interval = update_jitter_para()
    if is_jitter:
        # Fixed symmetric delay/loss/bandwidth on every path.
        for path_name in PEER_PATH_DICT:
            setting_path_params(path_id=PEER_PATH_DICT[path_name],
                                path_name=path_name,
                                path_director=PATH_DIRECTOR,
                                pltr_delay_co_devalue=real_delay_time,
                                pltr_loss_random_rate=loss,
                                pltr_bandwidth_rate=bw,
                                prtl_delay_co_devalue=real_delay_time,
                                prtl_loss_random_rate=loss,
                                prtl_bandwidth_rate=bw)
    else:
        # Jitter phase: delay varies within [one-way+50, one-way+150].
        for path_name in PEER_PATH_DICT:
            setting_path_params_jitter(path_id=PEER_PATH_DICT[path_name],
                                       path_name=path_name,
                                       min_value=real_delay_time + 50,
                                       max_value=real_delay_time + 150,
                                       bw=bw,
                                       loss=loss,
                                       pltr_delay=real_delay_time)
    t = Timer(timer_interval, set_stable2jitter_path, args=(delay, loss, bw))
    t.daemon = True  # setDaemon() deprecated since Python 3.10
    t.start()
def ddns_update():
    # Periodic DDNS refresh: discover our external IP (with one fallback
    # host), push it to the DDNS provider, then re-arm this function after
    # ddns_update_interval_sec seconds on a daemon timer.
    h = httplib2.Http()
    my_addr_host = "http://ipecho.net/plain"
    try:
        resp, external_ip = h.request(my_addr_host)
    except:
        print("failed to get address from [" + my_addr_host + "]")
        try:
            my_addr_host = "http://myexternalip.com/raw"  # IPv6
            resp, external_ip = h.request(my_addr_host)
        except:
            print("failed to get address from [" + my_addr_host + "]")
            # Both lookups failed: re-arm and bail out for this round.
            ddns_timer = Timer(ddns_update_interval_sec, ddns_update)
            ddns_timer.setDaemon(True)
            ddns_timer.start()
            return
    print("My IP address is [" + str(external_ip).strip() + "]")
    #h.add_credentials(ddns_username, ddns_password)
    # NOTE(review): the statement below is syntactically invalid — the
    # credential fragments were redacted (******) when this code was
    # published, apparently fusing what were two URL assignments (DYNU and
    # no-ip) into one. Restore the username/password parameters to fix.
    update_dynu_ddns_url = "https://api.dynu.com/nic/update?hostname=" + ddns_hostname + "&username="******"&myip=" + str(external_ip).strip() + "&password="******"http://dynupdate.no-ip.com/nic/update?hostname=" + ddns_hostname + "&myip=" + external_ip.strip() + ""
    print("Update DYNU url [" + update_dynu_ddns_url + "]")
    resp = requests.get(update_dynu_ddns_url)
    print("DDNS response [" + str(resp.content) + "]")
    ddns_timer = Timer(ddns_update_interval_sec, ddns_update)
    ddns_timer.setDaemon(True)
    ddns_timer.start()
def timer_thread(timeout_arg):
    # Distance-vector liveness sweep, re-armed every 3 s: any neighbor
    # silent for longer than 3 * timeout_arg is declared dead, the routing
    # table is rebuilt from direct links, and a 'close' notice is sent to
    # the remaining neighbors.
    for neighbor in deepcopy(neighbors):
        if neighbor in timer_log:
            t_threshold = (3 * timeout_arg)
            if ((int(time.time()) - timer_log[neighbor]) > t_threshold):
                if routing_table[neighbor]['cost'] == INFINITY:
                    # Already marked dead: just re-advertise our table.
                    broadcast_routing_table()
                else:
                    routing_table[neighbor]['cost'] = INFINITY
                    routing_table[neighbor]['link'] = "NULL"
                    del neighbors[neighbor]
                    # Rebuild: direct links for live neighbors, INFINITY
                    # for everything else.
                    for node in routing_table:
                        if node in neighbors:
                            routing_table[node]['cost'] = adjacent_links[node]
                            routing_table[node]['link'] = node
                        else:
                            routing_table[node]['cost'] = INFINITY
                            routing_table[node]['link'] = "NULL"
                    send_dict = {
                        'source': 'close',
                        'target': neighbor
                    }
                    # NOTE(review): this inner loop reuses (shadows) the
                    # outer loop variable `neighbor`, so `send_dict` is
                    # built from the dead neighbor but the name is then
                    # clobbered — rename the inner variable to be safe.
                    for neighbor in neighbors:
                        temp = neighbor.split(':')
                        recvSock.sendto(json.dumps(send_dict), (temp[0], int(temp[1])))
    timer = Timer(3, timer_thread, [timeout_arg])
    timer.setDaemon(True)
    timer.start()
def program_next_poll(
    self,
    interval: float,
    method: Callable[..., None],
    times: Optional[int] = None,  # annotation fixed: None is the default
    args: Optional[Tuple] = None,
    kwargs: Optional[Mapping] = None,
):
    """Arm a one-shot daemon timer that re-invokes self.poller.

    Does nothing once the remaining `times` budget is exhausted. The timer
    is recorded in self.current_timers so it can be cancelled later.
    """
    if times is not None and times <= 0:
        return

    t = Timer(
        interval=interval,
        function=self.poller,
        kwargs={
            "interval": interval,
            "method": method,
            "times": times,
            "args": args,
            "kwargs": kwargs,
        },
    )
    self.current_timers.append(t)  # save the timer to be able to kill it
    # .name / .daemon replace setName()/setDaemon(), deprecated since 3.10
    t.name = f"Poller thread for {type(method.__self__).__name__}"
    t.daemon = True  # so it is not locking on exit
    t.start()
class Repeater():
    """Repeatedly invoke *function* every *interval* seconds.

    A Lock serialises external start/stop against the re-arm performed
    from the timer thread itself.
    """

    def __init__(self, interval, function, *args, **kwargs):
        self._lock = Lock()
        self._timer = None
        self.function = function
        self.interval = interval
        self.args = args
        self.kwargs = kwargs
        self._stopped = True

    def start(self, from_run=False):
        """Arm the next tick; external callers only restart when stopped."""
        if from_run or self._stopped:
            with self._lock:  # idiom: context manager over acquire/release
                self._stopped = False
                self._timer = Timer(self.interval, self._run)
                self._timer.daemon = True  # setDaemon() deprecated in 3.10
                self._timer.start()

    def _run(self):
        # Re-arm first so the cadence is independent of callback runtime.
        self.start(from_run=True)
        self.function(*self.args, **self.kwargs)

    def stop(self):
        """Cancel the pending tick."""
        with self._lock:
            self._stopped = True
            self._timer.cancel()
def trigger():
    """Fire the trigger pin and re-arm after the configured period.

    Stops rescheduling once the global `dead` flag is set; the global `t`
    keeps the Timer handle so it can be cancelled externally.
    """
    global t
    if dead:
        return
    PINS.trigger.set()
    t = Timer(PINS.period.get(), trigger)
    t.daemon = True  # setDaemon() deprecated since Python 3.10
    t.start()
def wrapper(*args, **kwargs):
    """Rate-limited call: consume a semaphore slot, run func, and return
    the slot *every* seconds later via a daemon timer."""
    semaphore.acquire()
    try:
        return func(*args, **kwargs)
    finally:  # don't catch, but guarantee the delayed release is scheduled
        timer = Timer(every, semaphore.release)
        # daemon so a pending release never blocks interpreter exit
        # (comment fixed: daemon status does not make the timer cancelable)
        timer.daemon = True  # setDaemon() deprecated since Python 3.10
        timer.start()
def wait(self):
    """
    Block until an API-call slot is free, then schedule its return.

    Acquires one semaphore slot (blocking) and arms a daemon timer that
    releases it after self.timeFrame seconds, which enforces the API
    rate limit.
    """
    self.semaphore.acquire()  # blocking call
    # delayed release of the slot we just took
    timer = Timer(self.timeFrame, self.semaphore.release)
    # daemon so a pending release never blocks interpreter exit
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
def set_interval(func, sec):
    """Call *func* every *sec* seconds on chained daemon Timers.

    Returns the armed Timer so the caller can cancel the chain before its
    next tick (bug fix: the original returned None, leaving no way to
    stop it).
    """
    def _wrapper():
        # Re-arm first, then run the callback.
        set_interval(func, sec)
        func()

    watchdog = Timer(sec, _wrapper)
    watchdog.daemon = True  # setDaemon() deprecated since Python 3.10
    watchdog.start()
    return watchdog
def activate(self, conf, glob):
    """Activate the plugin: clamp configuration values to documented
    defaults, register items, persist R/W settings and start the calc
    thread when the generic state tracker is selected."""
    protocols.activate(self, conf, glob)
    global itemList
    if not 'timer' in conf:
        itemList += counter_mode_items
        itemTags.update(counter_mode_tags)
    if not 'state_tracker' in self.conf:
        self.conf['state_tracker'] = 'generic'
    # Each setting falls back to its default when missing or out of range;
    # narrowed from bare `except` so SystemExit etc. still propagate.
    try:
        self.power_window = int(self.conf['power_window'])
        if self.power_window < 60 or self.power_window > 1800:
            raise ValueError
    except Exception:
        self.power_window = 300
    try:
        self.running_timeout = int(self.conf['running_timeout'])
        # NOTE(review): re-tests power_window here — looks like a
        # copy/paste slip; preserved as-is, confirm upstream.
        if self.running_timeout < 5 or self.power_window > 300:
            raise ValueError
    except Exception:
        self.running_timeout = 60
    try:
        self.ignition_timeout = int(self.conf['ignition_timeout'])
        if self.ignition_timeout < 60 or self.ignition_timeout > 1200:
            raise ValueError
    except Exception:
        self.ignition_timeout = 600
    try:
        self.starting_power = float(self.conf['starting_power'])
        if self.starting_power < 0.5 or self.starting_power > 10:
            raise ValueError
    except Exception:
        self.starting_power = 5
    try:
        self.startup_feed_wait = float(self.conf['startup_feed_wait'])
        if self.startup_feed_wait < 10 or self.startup_feed_wait > 300:
            raise ValueError
    except Exception:
        self.startup_feed_wait = 60
    try:
        self.log_changes = [s.strip() for s in self.conf['log_changes'].split(',')]
    except Exception:
        self.log_changes = ['mode', 'alarm']
    if conf['state_tracker'] == 'generic':
        itemList += state_tracker_items
        itemTags.update(state_tracker_tags)
    for item in itemList:
        if item['type'] == 'R/W':
            self.store_setting(item['name'], confval=str(item['value']))
    self.migrate_settings('pelletcalc')
    if self.conf['state_tracker'] == 'generic':
        t = Timer(5, self.calc_thread)
        t.daemon = True  # setDaemon() deprecated since Python 3.10
        t.start()
def _timed_backup(self):
    """Perform one numbered backup (if a title exists) and re-arm the next
    run DEFAULT_TIME_TO_BACKUP seconds out on a daemon timer."""
    timer = Timer(DEFAULT_TIME_TO_BACKUP, self._timed_backup)
    timer.daemon = True  # setDaemon() deprecated since Python 3.10
    timer.start()
    if (self.metadata['title'] != ""):
        # Only back up once the document has a title.
        backup = 'backup_' + str(self.backup_number) + '.txt'  # idiom: str()
        self.logger.info('Perform backup: ' + backup)
        self.execute(backup)
        self.backup_number += 1
def program_next_poll(self, interval, method, args, kwargs):
    """Arm a one-shot daemon timer that re-invokes self.poller with the
    same polling arguments after *interval* seconds."""
    t = Timer(
        interval=interval,
        function=self.poller,
        kwargs={"interval": interval, "method": method,
                "args": args, "kwargs": kwargs},
    )
    self.current_timers.append(t)  # save the timer to be able to kill it
    # so it is not locking on exit; setDaemon() deprecated since 3.10
    t.daemon = True
    t.start()
def _create_dasd_part(dev, size):
    """
    Create a partition on DASD device *dev* via fdasd.

    :param dev: name of DASD device for creation of partition
    :param size: block size
    :return: None
    :raises: OperationFailed, NotFoundError, TimeoutExpired
    """
    devname = '/dev/' + dev
    device = PDevice(devname)
    disk = PDisk(device)
    num_parts = len(disk.partitions)
    if num_parts == 3:
        # DASD supports at most three partitions.
        raise OperationFailed("GINDASDPAR0016E")

    def kill_proc(proc, timeout_flag):
        # Kill the fdasd process tree; mark the flag so callers know the
        # command timed out rather than failed.
        try:
            parent = psutil.Process(proc.pid)
            for child in parent.get_children(recursive=True):
                child.kill()
            # kill the process after no children is left
            proc.kill()
        except OSError:
            pass
        else:
            timeout_flag[0] = True

    dasd_devs = _get_dasd_names()
    if dev not in dasd_devs:
        raise NotFoundError("GINDASDPAR0012E", {'name': dev})
    p_str = _form_part_str(size)
    # Bug fix: bind these before the try block so the finally clause can
    # reference them even when Popen itself raises.
    timer = None
    timeout = 2.0
    timeout_flag = [False]
    try:
        p1_out = subprocess.Popen(["echo", "-e", "\'", p_str, "\'"],
                                  stdout=subprocess.PIPE)
        p2_out = subprocess.Popen(["fdasd", devname], stdin=p1_out.stdout,
                                  stderr=subprocess.PIPE,
                                  stdout=subprocess.PIPE)
        p1_out.stdout.close()
        timer = Timer(timeout, kill_proc, [p2_out, timeout_flag])
        timer.daemon = True  # setDaemon() deprecated since Python 3.10
        timer.start()
        out, err = p2_out.communicate()
        if timeout_flag[0]:
            msg_args = {'cmd': "fdasd " + devname, 'seconds': str(timeout)}
            raise TimeoutExpired("WOKUTILS0002E", msg_args)
        if p2_out.returncode != 0:
            if 'error while rereading partition table' in err.lower():
                # Kernel still holds the old table; nudge it.
                run_command(["partprobe", devname, "-s"])
            else:
                raise OperationFailed("GINDASDPAR0007E",
                                      {'name': devname, 'err': err})
    except TimeoutExpired:
        raise
    finally:
        if timer and not timeout_flag[0]:
            timer.cancel()
def upnp_update():
    """Open/refresh the UPnP port mapping on the gateway, then re-arm.

    Re-schedules itself every ``upnp_update_interval`` seconds on a
    daemon timer so it never blocks interpreter exit.
    """
    # first let's find the gateway router
    # NOTE(review): on Python 3 check_output returns bytes, so split(" ")
    # would fail — confirm interpreter version or decode() explicitly.
    gateway = check_output(["ip", "route"]).split(" ")[2]
    # NOTE(review): building a shell command by string concatenation is
    # injection-prone if `gateway` were ever attacker-influenced; prefer
    # subprocess.run([...], shell=False).
    upnpc_cmd = "upnpc -e 'Sensei' -r " + str(port) + " TCP -G " + str(gateway)
    debug_print("upnp command [" + upnpc_cmd + "]")
    os.system(upnpc_cmd)
    upnp_timer = Timer(upnp_update_interval, upnp_update)
    upnp_timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    upnp_timer.start()
def _timed_refresh(self): self.log.info("Timed refresh every %ss" % self._periodic_refresh_interval) self.refresh_all() self.log.info( "Today is a new day! you can request up to %s certificates." % self._certs_issued_for_period["nb"] ) t = Timer(float(self._periodic_refresh_interval), self._timed_refresh) t.setDaemon(True) t.start() self._threaded_jobs["timed"] = t
def enter(self, event_data):
    """ Extends `transitions.core.State.enter` by starting a timeout timer for
    the current model when the state is entered and self.timeout is larger
    than 0.
    """
    if self.timeout > 0:
        timer = Timer(self.timeout, self._process_timeout, args=(event_data,))
        timer.daemon = True  # setDaemon() is deprecated since Python 3.10
        timer.start()
        # One pending timer per model instance, keyed by the model's id.
        self.runner[id(event_data.model)] = timer
    super(Timeout, self).enter(event_data)
def init_config(json):
    """Populate the module-level ``cfg`` from an OAuth token response.

    Also schedules ``prolong_token`` on a daemon timer shortly before the
    token expires (at 90% of its lifetime).

    :param json: decoded token-endpoint response dict with keys
        ``token_type``, ``access_token``, ``refresh_token``, ``expires_in``.
    """
    cfg["token_type"] = json["token_type"]
    cfg["token"] = json["access_token"]
    cfg["refresh_token"] = json["refresh_token"]
    cfg["expires_in"] = json["expires_in"]
    # Refresh at 90% of the token lifetime.  NOTE(review): `/` is float
    # division on Python 3 but integer division on Python 2 for ints.
    interval = cfg["expires_in"] - (cfg["expires_in"] / 10)
    timer = Timer(interval, prolong_token)
    timer.daemon = True  # setDaemon() is deprecated since Python 3.10
    timer.start()
    print("Config initialized.")
def run_command(cmd, timeout=None):
    """
    cmd is a sequence of command arguments.
    timeout is a float number in seconds.
    timeout default value is None, means command run without timeout.

    Returns a tuple (stdout, stderr, returncode); if the process could
    never be started, returns (None, None, None).
    Raises TimeoutExpired when the command is killed by the watchdog.
    """
    def kill_proc(proc, timeout_flag):
        # Watchdog callback: kill the child and record that we timed out.
        try:
            proc.kill()
        except OSError:
            pass
        else:
            timeout_flag[0] = True

    proc = None
    timer = None
    # Bug fix: bind out/error up front.  If communicate() itself raises,
    # the except path below returns them — previously unbound names.
    out = error = None
    timeout_flag = [False]

    try:
        proc = subprocess.Popen(cmd,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        if timeout is not None:
            timer = Timer(timeout, kill_proc, [proc, timeout_flag])
            timer.daemon = True  # setDaemon() is deprecated since Python 3.10
            timer.start()

        out, error = proc.communicate()
        kimchi_log.debug("Run command: '%s'", " ".join(cmd))

        if out or error:
            kimchi_log.debug("out:\n %s\nerror:\n %s", out, error)

        if timeout_flag[0]:
            msg = ("subprocess is killed by signal.SIGKILL for "
                   "timeout %s seconds" % timeout)
            kimchi_log.error(msg)
            msg_args = {'cmd': cmd, 'seconds': timeout}
            raise TimeoutExpired("KCHUTILS0002E", msg_args)

        return out, error, proc.returncode
    except TimeoutExpired:
        raise
    except Exception as e:
        msg = "Failed to run command: %s." % " ".join(cmd)
        # Bug fix: the original never substituted the return code into the
        # "%s" placeholder; format it in when a process exists.
        msg = msg if proc is None else msg + "\n error code: %s." % proc.returncode
        kimchi_log.error("%s\n %s", msg, e)

        if proc:
            return out, error, proc.returncode
        else:
            return None, None, None
    finally:
        if timer and not timeout_flag[0]:
            timer.cancel()
def doAlertChecks():
    """Poll every account's open alerts, handle each, then re-arm the timer.

    Python 2 module (uses the print statement).  Re-schedules itself every
    ``AlertsCheckInterval`` seconds; the timer is kept in the module-level
    ``t`` so it can be cancelled elsewhere.
    """
    global t
    print "checking alerts thread"
    accounts = getAccounts()
    for account in accounts:
        openalerts = getOpenAlerts(account)
        for item in openalerts:
            handleAlert(item)
    # Daemon timer so the poll loop does not block interpreter exit.
    t = Timer(AlertsCheckInterval,doAlertChecks)
    t.setDaemon(True)
    t.start()
def program_next_poll(
    self, interval: float, method: Callable[..., None], args: Tuple = None, kwargs: Mapping = None
):
    """Schedule *method* to be polled again after *interval* seconds.

    The timer is stored in ``self.current_timers`` so it can be cancelled
    on shutdown, named after the method's owner class for debugging, and
    marked daemon so it never blocks interpreter exit.
    """
    t = Timer(
        interval=interval,
        function=self.poller,
        kwargs={"interval": interval, "method": method, "args": args, "kwargs": kwargs},
    )
    self.current_timers.append(t)  # save the timer to be able to kill it
    # setName()/setDaemon() are deprecated since Python 3.10; assign the
    # name/daemon attributes instead.
    t.name = "Poller thread for %s" % type(method.__self__).__name__
    t.daemon = True  # so it is not locking on exit
    t.start()