def tail(self):
    """Generator: follow self.filename, yielding content as it grows.

    Re-arms the inotify watch when the file is swapped out (e.g. log
    rotation via delete/rename).
    """
    mask = (constants.IN_ALL_EVENTS
            ^ constants.IN_ACCESS
            ^ constants.IN_OPEN
            ^ constants.IN_CLOSE_NOWRITE)
    notifier = Inotify()
    notifier.add_watch(self.filename, mask=mask)
    while True:
        # Drain everything currently readable before blocking.
        yield from self.resume()
        # Block until an event we actually care about occurs.
        action = self.wait_actionable(notifier)
        if action == 'append':
            continue
        elif action == 'swap':
            # File deletion removes the kernel-side watch but not the
            # Python-side bookkeeping, so drop and re-add explicitly.
            try:
                notifier.remove_watch(self.filename)
            except Exception:
                pass
            notifier.add_watch(self.filename, mask=mask)
            # NOTE(review): position resets to 1 (not 0) after a swap —
            # confirm this offset is intentional.
            self.pos = 1
            continue
def inotify(args):
    """Set up and run the inotify event loop over the session directory.

    Connects to the SQL database, repopulates the session table, wires up
    per-event queues/handlers (plus optional user hooks), and loops on
    inotify_loop() until interrupted.
    """
    setup_logging(args)
    inot = Inotify()
    # Flag union: `|` is the idiomatic combinator (the original `^` only
    # worked because the two bits are distinct).
    inot.add_watch(str(args.session), mask=IN_CREATE | IN_DELETE)
    global hooks
    if args.hooks:
        hooks = import_user(args.hooks)
    else:
        hooks = None
    con = None  # guard: finally must not NameError if connect() fails
    try:
        con = sqlite3.connect(str(args.sql_file),
                              detect_types=sqlite3.PARSE_DECLTYPES)
        logging.info("Repopulating database.")
        populate_session_tbl(con, args.session, args.no_action, args=args)
        # Ordered so 'remove' can be forced to run last (see move_to_end).
        queues = OrderedDict((('create', deque()),
                              ('retry_create', deque()),
                              ('remove', deque())))
        qfuncs = {
            'create': qfunc_create,
            'retry_create': qfunc_retry_create,
            'remove': hooks_and_remove_torrent
        }
        inot_funcs = {
            ('torrent', ('IN_CREATE', )): queues['create'].append,
            ('torrent', ('IN_DELETE', )): queues['remove'].append
        }
        # Optional user-supplied completion hook gets its own queue.
        complete_hook = getattr(hooks, 'complete', None)
        if complete_hook:
            queues['complete'] = deque()
            inot_funcs[('complete', ('IN_CREATE', ))] = queues['complete'].append
            qfuncs['complete'] = complete_hook
        logging.info("Entering inotify loop.")
        preloop_hook = getattr(hooks, 'pre_loop', None)
        if preloop_hook:
            preloop_hook(con, inot, args, queues, qfuncs, inot_funcs)
        queues.move_to_end('remove')
        while True:
            try:
                inotify_loop(con, inot, args, queues, qfuncs, inot_funcs)
            except rTorEventException:
                # Expected/recoverable: log and keep looping.
                logging.exception("Something happened.")
            except (KeyboardInterrupt, SystemExit):
                logging.info("Exiting due to interrupt.")
                raise
            except Exception:
                logging.exception("Unhandled exception.")
                raise
    finally:
        postloop_hook = getattr(hooks, 'post_loop', None)
        if postloop_hook:
            postloop_hook(con, inot, args)
        # BUG FIX: the watch was added with str(args.session) but removed
        # with bytes(args.session); use the same key so removal matches.
        inot.remove_watch(str(args.session))
        if con is not None:
            con.close()
def run(self):
    """Watch 'gossip_store' forever, re-arming the watch if the file is deleted."""
    mask = constants.IN_ALL_EVENTS
    print("Starting FsMonitor")
    notifier = Inotify()
    notifier.add_watch('gossip_store', mask=mask)
    for header, type_names, path, filename in notifier.event_gen(yield_nones=False):
        # Deleting the file kills the kernel watch; recreate it so we
        # keep following the replacement file.
        if header.mask & constants.IN_DELETE_SELF:
            notifier.remove_watch('gossip_store')
            notifier.add_watch('gossip_store', mask=mask)
class InotifyRecursive(object):
    """Recursively watch a directory tree with inotify.

    Watches `path` and every subdirectory, and keeps the watch list in
    sync as directories are created and deleted underneath it.
    """

    def __init__(self, path, mask=IN_ALL_EVENTS, block_duration_s=1):
        self.__root_path = path
        # No matter what we actually received as the mask, make sure we have
        # the minimum that we require to curate our list of watches.
        self.__mask = mask | IN_ISDIR | IN_CREATE | IN_DELETE
        self.__i = Inotify(block_duration_s=block_duration_s)
        self.__load_tree(path)

    def __load_tree(self, path):
        # Breadth-first walk adding a watch for every directory under `path`.
        q = [path]
        while q:
            current_path = q.pop(0)
            self.__i.add_watch(current_path, self.__mask)
            for filename in os.listdir(current_path):
                entry_filepath = os.path.join(current_path, filename)
                if not os.path.isdir(entry_filepath):
                    continue
                q.append(entry_filepath)

    def event_gen(self):
        """Yield raw inotify events while maintaining watches on dir churn."""
        for event in self.__i.event_gen():
            if event is not None:
                (header, type_names, path, filename) = event
                if header.mask & IN_ISDIR:
                    full_path = os.path.join(path, filename)
                    if header.mask & IN_CREATE:
                        self.__i.add_watch(full_path, self.__mask)
                        # Catch subdirectories created before the watch on
                        # full_path became active (inotify can miss these).
                        # BUG FIX: pass the curated mask here too — the
                        # original added these watches with the default
                        # mask, inconsistent with every other add_watch.
                        for root, dirs, files in os.walk(full_path):
                            for name in dirs:
                                self.__i.add_watch(os.path.join(root, name),
                                                   self.__mask)
                    elif header.mask & IN_DELETE:
                        # superficial: the kernel already dropped the watch.
                        self.__i.remove_watch(full_path, superficial=True)
            yield event
def watch_prefix_file(file_name):
    """
    Using inotify function is looking for IN_CLOSE_WRITE events, that happens
    when pmacct is pushing new data to _PMACCT_DATA file. write_to_db is called
    to store new data into database. On every iteration main thread status is
    checked.
    """
    watcher = Inotify()
    watcher.add_watch(file_name)
    try:
        for event in watcher.event_gen():
            if event is not None:
                # pmacct finished a write cycle: persist the new data.
                if event[1] == ['IN_CLOSE_WRITE']:
                    logger.debug("Found IN_CLOSE_WRITE event")
                    write_to_db()
            elif not main_thread().is_alive():
                logger.error('Main thread died, stopping all child threads')
                # Canceling Timer thread
                timer_obj.cancel()
                # Breaking watcher thread loop
                break
    finally:
        watcher.remove_watch(file_name)
def main():
    """Serve move/create events over ZeroMQ until interrupted."""
    monitor = Inotify()
    monitor_messaging_manager = MessagingManager(MessagingManagerType.SERVER,
                                                 "tcp://127.0.0.1:5555")
    watch_paths_list = [b'/home/anton/test/']
    pair_events_list = []
    stop_events_processing_flag = Event()

    # Watch for files appearing or moving in/out of the tracked paths.
    watch_mask = (inotify_constants.IN_MOVE
                  | inotify_constants.IN_MOVED_TO
                  | inotify_constants.IN_MOVED_FROM
                  | inotify_constants.IN_CREATE)
    for watched_path in watch_paths_list:
        monitor.add_watch(watched_path, watch_mask)

    try:
        while True:
            incoming = monitor_messaging_manager.get_all_received_messages()
            process_received_messages(monitor, incoming)
            # Give event processing a 10-second budget, then signal it to stop.
            Timer(10, stop_events_processing_flag.set).start()
            process_received_events(monitor, monitor_messaging_manager,
                                    pair_events_list,
                                    stop_events_processing_flag)
    except KeyboardInterrupt:
        for watched_path in watch_paths_list:
            monitor.remove_watch(watched_path)
        raise SystemExit()
    # NOTE(review): these two statements reference `targettime`, `file`, and
    # `do_move`, so they appear to be the tail of a `register(file)` function
    # defined above this chunk — confirm their indentation against the full
    # file before relying on this reconstruction.
    print('file move planned', flush=True)
    s.enterabs(targettime, 1, do_move, (file, ))


def unregister(file):
    # Drop any scheduled move for `file` from the scheduler queue, if one
    # exists.  Queue entries are (time, priority, action, argument, kwargs)
    # named tuples; item[3] is the argument tuple given to s.enterabs above.
    if [item for item in s.queue if item[3] == (file, )]:
        s.cancel([item for item in s.queue if item[3] == (file, )][0])
        print('file "' + file.name + '" not already unregistered, removing from list', flush=True)


print('dirrotate started, scanning directory the first time', flush=True)
# Schedule moves for everything already present at startup.
for f in Path('/mnt/current').iterdir():
    register(f)
print('waiting for changes', flush=True)
try:
    while True:
        for event in i.event_gen():
            if event is not None:
                (header, type_names, watch_path, filename) = event
                # Files appearing (created or moved in) get scheduled;
                # files vanishing (deleted or moved out) get unscheduled.
                if 'IN_CREATE' in type_names or 'IN_MOVED_TO' in type_names:
                    register(Path('/mnt/current') / filename)
                elif 'IN_DELETE' in type_names or 'IN_MOVED_FROM' in type_names:
                    unregister(Path('/mnt/current') / filename)
            # Run any due scheduled moves without blocking the event loop.
            s.run(blocking=False)
finally:
    print('clean exit', flush=True)
    i.remove_watch(b'/mnt/current')
class Monitor(multiprocessing.Process):
    """Child process that inotify-watches library dirs and requests syncs.

    Directories to watch arrive on dir_queue as (lib_name, path); sync
    requests are emitted on sync_queue as (lib_name, path) every
    SYNC_INTERVAL seconds.
    """

    def __init__(self):
        self._inotify = Inotify()
        # Everything except the noisy access/open/close notifications.
        self._inotify_mask = IN_ALL_EVENTS & (
            ~IN_ACCESS & ~IN_OPEN & ~IN_CLOSE_NOWRITE & ~IN_CLOSE_WRITE)
        self._dir_queue = multiprocessing.Queue()
        self._sync_queue = multiprocessing.Queue()
        self._watched_dirs = {}  # {lib_name: set(dirs)}
        super().__init__(target=self._main,
                         args=(self._dir_queue, self._sync_queue))

    def _main(self, dir_queue, sync_queue):
        next_sync_t = time() + SYNC_INTERVAL
        sync_dirs = set()
        try:
            while True:
                # Check for new directories to watch
                while not dir_queue.empty():
                    lib, path = dir_queue.get()
                    watched = (path in set(chain(*self._watched_dirs.values())))
                    if lib not in self._watched_dirs:
                        self._watched_dirs[lib] = set()
                    self._watched_dirs[lib].add(path)
                    if not watched:
                        self._inotify.add_watch(
                            str(path).encode(LOCAL_FS_ENCODING),
                            self._inotify_mask)
                    # BUG FIX: was len(watched) — watched is a bool, so this
                    # raised TypeError.  Report the lib's dir count, matching
                    # the sibling Monitor implementation.
                    print("Watching {} (lib: {}) (total dirs: {})".format(
                        path, lib, len(self._watched_dirs[lib])))

                # Process Inotify
                for event in self._inotify.event_gen():
                    if event is None:
                        break
                    (header, type_names, watch_path, filename) = event
                    watch_path = Path(str(watch_path, LOCAL_FS_ENCODING))
                    filename = Path(str(filename, LOCAL_FS_ENCODING))
                    print("WD=({:d}) MASK=({:d}) "
                          "MASK->NAMES={} WATCH-PATH={} FILENAME={}".format(
                              header.wd, header.mask, type_names, watch_path,
                              filename))
                    if header.mask & (IN_ATTRIB | IN_CREATE | IN_DELETE |
                                      IN_MODIFY | IN_MOVED_TO | IN_MOVED_FROM):
                        if IN_ISDIR & header.mask and header.mask & IN_CREATE:
                            # New subdirectory: sync it under its full path.
                            watch_path = watch_path / filename
                        elif IN_ISDIR & header.mask and header.mask & IN_DELETE:
                            self._inotify.remove_watch(
                                str(watch_path).encode(LOCAL_FS_ENCODING))
                        sync_dirs.add(watch_path)

                def _reqSync(l, d):
                    # Only request a sync for paths that still exist.
                    if d.exists():
                        # BUG FIX: was sync_queue.put((lib, d)) — ignored the
                        # `l` parameter and captured the loop variable instead.
                        sync_queue.put((l, d))
                        print("Requesting sync {} (lib: {})".format(d, l))

                if time() > next_sync_t:
                    for d in sync_dirs:
                        for lib in self._watched_dirs:
                            lib_paths = self._watched_dirs[lib]
                            if d in lib_paths:
                                _reqSync(lib, d)
                                if not d.exists():
                                    self._watched_dirs[lib].remove(d)
                            elif d.parent in lib_paths:
                                _reqSync(lib, d)
                                # New subdir: feed it back so it gets watched.
                                self.dir_queue.put((lib, d))
                    sync_dirs.clear()
                    next_sync_t = time() + SYNC_INTERVAL
        except KeyboardInterrupt:
            pass
        finally:
            for path in set(chain(*self._watched_dirs.values())):
                self._inotify.remove_watch(str(path).encode(LOCAL_FS_ENCODING))

    @property
    def dir_queue(self):
        return self._dir_queue

    @property
    def sync_queue(self):
        return self._sync_queue
class Monitor(multiprocessing.Process):
    """Child process that inotify-watches library dirs and requests syncs.

    Directories to watch arrive on dir_queue as (lib_name, path); sync
    requests are emitted on sync_queue as (lib_name, path) every
    SYNC_INTERVAL seconds.
    """

    def __init__(self):
        self._inotify = Inotify()
        # Everything except the noisy access/open/close notifications.
        self._inotify_mask = IN_ALL_EVENTS & (~IN_ACCESS & ~IN_OPEN &
                                              ~IN_CLOSE_NOWRITE &
                                              ~IN_CLOSE_WRITE)
        self._dir_queue = multiprocessing.Queue()
        self._sync_queue = multiprocessing.Queue()
        self._watched_dirs = {}  # {lib_name: set(dirs)}
        super().__init__(target=self._main,
                         args=(self._dir_queue, self._sync_queue))

    def _main(self, dir_queue, sync_queue):
        next_sync_t = time() + SYNC_INTERVAL
        sync_dirs = set()
        try:
            while True:
                # Check for new directories to watch
                while not dir_queue.empty():
                    lib, path = dir_queue.get()
                    watched = (path in set(chain(*self._watched_dirs.values())))
                    if lib not in self._watched_dirs:
                        self._watched_dirs[lib] = set()
                    self._watched_dirs[lib].add(path)
                    if not watched:
                        self._inotify.add_watch(str(path), self._inotify_mask)
                    # BUG FIX: message was missing its closing parenthesis.
                    log.info(f"Watching {path} (lib: {lib}) "
                             f"(total dirs: {len(self._watched_dirs[lib])})")

                # Process Inotify
                for event in self._inotify.event_gen():
                    if event is None:
                        break
                    (header, type_names, watch_path, filename) = event
                    watch_path = Path(watch_path)
                    filename = Path(filename)
                    # BUG FIX: the filename was not interpolated (logged as
                    # literal "(unknown)"); log it like the sibling Monitor.
                    log.debug(
                        f"WD=({header.wd}) MASK=({header.mask}) "
                        f"MASK->NAMES={type_names} WATCH-PATH={watch_path} "
                        f"FILENAME={filename}")
                    if header.mask & (IN_ATTRIB | IN_CREATE | IN_DELETE |
                                      IN_MODIFY | IN_MOVED_TO | IN_MOVED_FROM):
                        if IN_ISDIR & header.mask and header.mask & IN_CREATE:
                            # New subdirectory: sync it under its full path.
                            watch_path = watch_path / filename
                        elif IN_ISDIR & header.mask and header.mask & IN_DELETE:
                            self._inotify.remove_watch(str(watch_path))
                        sync_dirs.add(watch_path)

                def _reqSync(l, d):
                    # Only request a sync for paths that still exist.
                    if d.exists():
                        # BUG FIX: was sync_queue.put((lib, d)) — ignored the
                        # `l` parameter and captured the loop variable instead.
                        sync_queue.put((l, d))
                        log.info(f"Requesting sync {d} (lib: {l})")

                if time() > next_sync_t:
                    for d in sync_dirs:
                        for lib in self._watched_dirs:
                            lib_paths = self._watched_dirs[lib]
                            if d in lib_paths:
                                _reqSync(lib, d)
                                if not d.exists():
                                    self._watched_dirs[lib].remove(d)
                            elif d.parent in lib_paths:
                                _reqSync(lib, d)
                                # New subdir: feed it back so it gets watched.
                                self.dir_queue.put((lib, d))
                    sync_dirs.clear()
                    next_sync_t = time() + SYNC_INTERVAL
        except KeyboardInterrupt:
            pass
        finally:
            for path in set(chain(*self._watched_dirs.values())):
                self._inotify.remove_watch(str(path))

    @property
    def dir_queue(self):
        return self._dir_queue

    @property
    def sync_queue(self):
        return self._sync_queue