import os
from atexit import register as atexit_register
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
from threading import Thread


def run_server(port, site_dir, ctx):
    output_dir = ctx["OutputDir"]

    # start server
    class RequestHandler(SimpleHTTPRequestHandler):
        def do_GET(self):
            if hasattr(self, "error") and self.error is not None:
                self.send_response(200, 'OK')
                self.send_header('Content-type', 'text/html')
                self.end_headers()
                self.wfile.write(bytes(self.error, 'UTF-8'))
            else:
                super().do_GET()

        def translate_path(self, path):
            return os.path.join(site_dir, output_dir, path[1:])

    handler = RequestHandler
    httpd = TCPServer(("", port), handler)
    atexit_register(lambda: httpd.shutdown())

    # start update thread; update() is defined elsewhere in this module
    thread = Thread(target=update, args=(site_dir, ctx, handler, port))
    thread.daemon = True
    thread.start()

    print("Serving at http://localhost:" + str(port) + "/")
    httpd.serve_forever()
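A minimal invocation sketch (an assumption, not from the source): run_server only reads the "OutputDir" key from ctx, so a plain dict suffices.

# Hypothetical usage, assuming the site was built into site/_build:
run_server(8000, "site", {"OutputDir": "_build"})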
def __init__(self, *args, **kwargs) -> None:
    super().__init__(*args, **kwargs)
    self.is_intel = "Intel" in self.get_cpuinfo()
    if not self.is_intel and which("sensors") is None:
        self.no_temps = True
    else:
        self.no_temps = False
    self.stat_file = open("/proc/stat", "r")
    atexit_register(self.stat_file.close)
    self.tt0, self.tu0, self.tk0 = self._read_cpu_times()
    self.show_breakdown = False
    # to make sure that cpuinfo is updated at least once on read
    time.sleep(0.01)
    self.q_len = int(2 / self.poll_interval)
    self._usage: dq_t[complex] = deque([], maxlen=self.q_len)
    self._usage_tot: complex = 0.0
    self._times: dq_t[float] = deque([], maxlen=self.q_len)
    self._time_tot: float = 0.0
    self._temps: dq_t[float] = deque([], maxlen=self.q_len)
    self._temp_tot: float = 0.0
def _load_pickle(pickle_path, save=True):
    if isfile(pickle_path):
        with open(pickle_path, 'rb') as pickle_file:
            loaded_pickle = pickle_load(pickle_file)
    else:
        loaded_pickle = IdMapper()
    if save:
        atexit_register(_save_pickle_func(loaded_pickle, pickle_path))
    return loaded_pickle
def __init__(self):
    # _event is used to wakeup the thread when new actions arrive
    self._event = Event()
    self._event_set = self._event.set
    self._event_is_set = self._event.isSet

    # _lock is used to protect variables that are written to on multiple threads
    self._lock = Lock()

    # _thread_ident is used to detect when methods are called from the same thread
    self._thread_ident = 0

    # _state contains the current state of the thread. it is protected by _lock and follows the
    # following states:
    #
    #                                           --> fatal-exception -> STATE_EXCEPTION
    #                                          /
    # STATE_INIT -> start() -> PLEASE_RUN -> STATE_RUNNING
    #          \                              \
    #           --------------> stop() -> PLEASE_STOP -> STATE_FINISHED
    #
    self._state = "STATE_INIT"
    if __debug__: dprint("STATE_INIT")

    # _exception is set to SystemExit, KeyboardInterrupt, GeneratorExit, or AssertionError when
    # any of the registered callbacks raises any of these exceptions. in this case _state will
    # be set to STATE_EXCEPTION. it is protected by _lock
    self._exception = None

    # _exception_handlers contains a list with callable functions or methods. all handlers are
    # called whenever an exception occurs. first parameter is the exception, second parameter
    # is a boolean indicating if the exception is fatal (i.e. True indicates SystemExit,
    # KeyboardInterrupt, GeneratorExit, or AssertionError)
    self._exception_handlers = []

    # _id contains a running counter to ensure that every scheduled callback has its own unique
    # identifier. it is protected by _lock
    self._id = 0

    # requests are ordered by deadline and moved to -expired- when they need to be handled
    # (deadline, priority, root_id, (call, args, kargs), callback)
    self._requests = []

    # expired requests are ordered and handled by priority
    # (priority, root_id, None, (call, args, kargs), callback)
    self._expired = []

    if __debug__:
        def must_close(callback):
            assert callback.is_finished
        atexit_register(must_close, self)
def write_pid(path):
    from os import remove, getpid
    from fcntl import flock, LOCK_EX
    from atexit import register as atexit_register

    def clean_pid():
        with open(path, 'w') as fd:
            flock(fd, LOCK_EX)
            remove(path)
    atexit_register(clean_pid)

    with open(path, 'w') as fd:
        flock(fd, LOCK_EX)
        fd.write(str(getpid()))
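A hedged usage sketch (the path is illustrative, not from the source). Note that each flock is released as soon as its with block closes the file, so the lock only serializes the write or removal itself.

# Hypothetical daemon startup: record our PID; the registered clean_pid
# removes the file again when the interpreter exits normally.
write_pid('/tmp/mydaemon.pid')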
def __init__(self, *args, bat_id=0, **kwargs) -> None:
    """
    Args:
        bat_id: numerical id of the battery to monitor. will be the default
            of 0 in most cases
    """
    super().__init__(*args, **kwargs)
    self.bat_id = bat_id
    self.min_rem_smooth = None
    self.called = 0
    self._clicked = False
    self._cur_status = None
    self.P_hist: dq_t[float] = deque([], maxlen=int(10 / self.poll_interval))
    self.f_uevent = open(f"/sys/class/power_supply/BAT{bat_id}/uevent")
    atexit_register(self.f_uevent.close)
    logger.info(' --sleep-time {}', sleep_time)
    logger.info(' --log-level "{}"', log_level)
    logger.info(' --slack-webhook "{}"', slack_webhook)
    logger.info(' --slack-username "{}"', slack_username)
    logger.info(' --slack-format "{}"', slack_format)

    try:
        makedirs(target, exist_ok=True)
        makedirs(recycle, exist_ok=True)
        h = Harvestr(target, recycle, source,
                     exclude=[exclude] if exclude else None,
                     dry_run=dry_run)
        while True:
            logger.debug(f'Sleeping {sleep_time} seconds')
            sleep(sleep_time)
            h.main()
    finally:
        lwt()


def lwt():
    logger.success('{} Exiting after {}',
                   basename(argv[0]), duration_human(time() - start_time))


if __name__ == "__main__":
    atexit_register(lwt)
    main()
def load_cache():
    try:
        with open(CACHE_PATH, 'rb') as f:
            return load(f)
    except FileNotFoundError:
        return {}


def invalidate_cache(in_url):
    global CHACHE_CHANGE
    lower_url = in_url.lower()
    for k in cache.copy():
        if lower_url in k:
            del cache[k]
    CHACHE_CHANGE = True


@contextmanager
def real_request():
    Session.request = original_request
    yield
    Session.request = fake_request


original_request = Session.request
Session.request = fake_request
cache = load_cache()
# invalidate_cache('shora')
print('len(cache) ==', len(cache))
atexit_register(save_cache, cache)
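save_cache is registered above but not shown in this excerpt. A minimal sketch of what it might look like, assuming the same pickle-backed CACHE_PATH and the CHACHE_CHANGE dirty flag set by invalidate_cache (dump is assumed to be imported from pickle alongside load):

# Hypothetical counterpart to load_cache (assumption, not in the source):
# only rewrite the pickle when the cache was actually mutated.
def save_cache(cache):
    if CHACHE_CHANGE:
        with open(CACHE_PATH, 'wb') as f:
            dump(cache, f)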
def await_result(obj, timeout=None):
    """
    :param obj: Value object.
    :type obj: concurrent.futures.Future | object
    :param timeout: The number of seconds to wait for the result if the future
        isn't done. If None, then there is no limit on the wait time.
    :type timeout: int | None
    :return: Result.
    :rtype: object

    Example::

        >>> from concurrent.futures import Future
        >>> fut = Future()
        >>> fut.set_result(3)
        >>> await_result(fut), await_result(4)
        (3, 4)
    """
    return obj.result(timeout) if isinstance(obj, Future) else obj


try:
    from atexit import register as atexit_register
    atexit_register(shutdown_executors, wait=False)
except ImportError:
    # MicroPython.
    pass
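shutdown_executors is referenced above but not shown. A plausible sketch, assuming the module keeps a registry of concurrent.futures executors (the _EXECUTORS name is hypothetical):

# Hypothetical helper (assumption, not in the source): shut down every
# pooled executor, optionally without waiting for queued work to finish.
_EXECUTORS = []

def shutdown_executors(wait=True):
    for executor in _EXECUTORS:
        executor.shutdown(wait=wait)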
    return new_id


def _load_pickle(pickle_path, save=True):
    if isfile(pickle_path):
        try:
            with open(pickle_path, 'rb') as pickle_file:
                loaded_pickle = pickle_load(pickle_file)
        except EOFError as e:
            print("failed to load pickle %s: %s" % (pickle_path, e), file=stderr)
            raise
    else:
        loaded_pickle = IdMapper()
    if save:
        atexit_register(_save_pickle_func(loaded_pickle, pickle_path))
    return loaded_pickle


def _save_pickle_func(pickle, pickle_path):
    def save_pickle():
        with open(pickle_path, 'wb') as pickle_file:
            pickle_dump(pickle, pickle_file, HIGHEST_PROTOCOL)
    return save_pickle


def _tests():
    from tempfile import NamedTemporaryFile
    from os import remove

    tmp_path = None
    try:
def __init__(self, *args, **kwargs) -> None:
    super().__init__(*args, **kwargs)
    self.f_mem = open("/proc/meminfo", "r")
    atexit_register(self.f_mem.close)
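A sketch of why the handle is kept open and closed via atexit (an assumption; the real read path is not shown): procfs files can be re-read by seeking back to the start instead of reopening on every poll.

# Hypothetical parser (assumption): rewind the persistent handle and
# turn lines like "MemTotal:       16384 kB" into {"MemTotal": 16384, ...}.
def read_meminfo(f_mem):
    f_mem.seek(0)
    return {line.split(":")[0]: int(line.split(":")[1].split()[0])
            for line in f_mem}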
def __init__(self, name="Generic-Callback"):
    assert isinstance(name, str), type(name)

    # _name will be given to the thread when it is started
    self._name = name

    # _event is used to wakeup the thread when new actions arrive
    self._event = Event()
    self._event_set = self._event.set
    self._event_is_set = self._event.isSet

    # _lock is used to protect variables that are written to on multiple threads
    self._lock = Lock()

    # _thread contains the actual Thread object
    self._thread = Thread(target=self.loop, name=self._name)
    self._thread.daemon = True

    # _thread_ident is used to detect when methods are called from the same thread
    self._thread_ident = 0

    # _state contains the current state of the thread. it is protected by _lock and follows the
    # following states:
    #
    #                                           --> fatal-exception
    #                                          /                   \
    # STATE_INIT -> start() -> PLEASE_RUN -> STATE_RUNNING          \
    #          \                              \                      \
    #           --------------> stop() -> PLEASE_STOP -> STATE_FINISHED
    #
    self._state = "STATE_INIT"
    logger.debug("STATE_INIT")

    # _exception is set to SystemExit, KeyboardInterrupt, GeneratorExit, or AssertionError when
    # any of the registered callbacks raises any of these exceptions. in this case _state will
    # be set to STATE_PLEASE_STOP, causing a shutdown. it is protected by _lock
    self._exception = None
    self._exception_traceback = None

    # _exception_handlers contains a list with callable functions or methods. all handlers are
    # called whenever an exception occurs. first parameter is the exception, second parameter
    # is a boolean indicating if the exception is fatal (i.e. True indicates SystemExit,
    # KeyboardInterrupt, GeneratorExit, or AssertionError)
    self._exception_handlers = []

    # _id contains a running counter to ensure that every scheduled callback has its own unique
    # identifier. it is protected by _lock. tasks will get u"dispersy-#<ID>" assigned
    self._id = 0

    # _requests are ordered by deadline and moved to -expired- when they need to be handled
    # (deadline, priority, root_id, (call, args, kargs), callback)
    self._requests = []

    # expired requests are ordered and handled by priority
    # (priority, deadline, root_id, (call, args, kargs), callback)
    self._expired = []

    # _requests_mirror and _expired_mirror contain the same lists as _requests and _expired,
    # respectively. when the callback closes, _requests is set to a new empty list while
    # _requests_mirror continues to point to the existing one. because all task 'deletes' are
    # done on the _requests_mirror list, these actions will still be allowed while no new tasks
    # will be accepted.
    self._requests_mirror = self._requests
    self._expired_mirror = self._expired

    # _final_func is called on the callback thread directly before the state is set to
    # STATE_FINISHED
    self._final_func = None

    if __debug__:
        def must_close(callback):
            assert callback.is_finished, self
        atexit_register(must_close, self)

    self._debug_call_name = None
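A hedged lifecycle sketch showing what the __debug__ hook above checks (start() and stop() come from the state diagram; everything else is assumed, not from the source):

# Hypothetical usage: must_close() fires at interpreter exit and asserts
# that the callback reached STATE_FINISHED, i.e. that stop() was called.
callback = Callback(name="example-worker")
callback.start()
try:
    pass  # schedule tasks here
finally:
    callback.stop()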