# Entry point: optionally run a one-shot sync, optionally start the mailer
# thread, then watch both the source and storage trees with inotify.
log.info("Starting...")

if options.sync:
    log.info("Starting manual sync...")
    symsyncsource(options.source, options.storage, options)
    symsyncstorage(options.source, options.storage, options)
    log.info("Sync done.")
    if not options.monitor:
        exit()

if options.mail:
    log.info("Starting mailer thread...")
    mailer.start()

log.info("Source: %s" % options.source)
log.info("Storage: %s" % options.storage)
log.info("Permissions: %o" % options.perms)

# Instantiate a new WatchManager (stores the watches).
wm = WatchManager()
# Attach a Notifier that will report and process events for it.
notifier = Notifier(wm, default_proc_fun=DummyHandler())

# Events of interest on both trees.
mask = (IN_CLOSE_WRITE | IN_MOVED_TO | IN_DELETE | IN_CREATE
        | IN_ATTRIB | IN_MOVED_FROM)
wm.add_watch(options.source, mask, rec=True, auto_add=True,
             proc_fun=SourceHandler(options=options))
wm.add_watch(options.storage, mask, rec=True, auto_add=True,
             proc_fun=StorageHandler(options=options))

# Loop forever and handle events.
log.info("Waiting for events...")
try:
    notifier.loop()
except Exception:
    log.exception("EXCEPTION:")
    mailer.addmessage("ALERT: PROGRAM CLOSING:\n%s - %s"
                      % (datetime.now(),
                         format_exception_only(*exc_info()[:2])))
if options.mail:
    mailer.quit()
def main():
    """Watch monitor_dirs recursively for every event, dispatching to MyEvent."""
    manager = WatchManager()
    manager.add_watch(monitor_dirs, ALL_EVENTS, rec=True)
    handler = MyEvent()
    Notifier(manager, handler).loop()
def watch_path(path, add_watch_opt=None, watcher_opt=None):
    """Tail every file selected by *path*.

    path: a directory (all files beneath it are tailed by default), a
        list of paths, or a glob pattern.  The selection behaviour can
        be tuned with add_watch_opt; see pyinotify.WatchManager.add_watch.

    output: defaults to stdout.  Divert it with
        watcher_opt=dict(out=got_log_line) where got_log_line() takes a
        (log_path, log_line) tuple.

    *_opt: passed through to pyinotify.WatchManager.add_watch and
        tailall.Monitor respectively; see those functions for detail.
    """
    manager = WatchManager()
    notifier = Notifier(manager, default_proc_fun=FsEvent())
    # Only content changes matter for tailing, not creation or moves.
    mask = IN_MODIFY | IN_CLOSE_WRITE
    watch_kwargs = dict(rec=True, auto_add=False)
    watch_kwargs.update(add_watch_opt or {})
    manager.add_watch(path, mask, **watch_kwargs)
    monitor = Monitor(watcher_opt=watcher_opt)
    notifier.loop(callback=monitor.got_event)
def test_update_conf(self, default_config, show_notification):
    """Changing config.py on disk must reload the config and run the command."""
    conf_time_1 = path.getmtime(self.tmp.conf.join("config.py"))
    out_file = self.tmp_output.join("out.log")
    command_args = [
        "-c", self.config_file,
        "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
        "-d", unicode(self.tmp.src),
    ]
    # Touch the config twice; each touch should trigger a notification.
    events = [
        (self.tmp.conf, "config.py", "# some new data"),
        (self.tmp.conf, "config.py", "# some new data"),
    ]
    self._copy_default_config(default_config)
    default_config.RUNNER_DELAY = -1
    manager = WatchManager()
    config = Config(watch_manager=manager, command_args=command_args)
    notifier = Notifier(manager, FileChangeHandler(config=config), timeout=1000)
    notifier.loop(callback=partial(self._event_generator, events))
    # There are some stupid race conditions (possibly due to the callbacks)
    # Sleep time allows to execute all needed code
    sleep(0.2)
    conf_time_2 = path.getmtime(self.tmp.conf.join("config.py"))
    self.assertNotEqual(conf_time_1, conf_time_2)
    self.assertTrue(path.exists(out_file))
    self.assertEqual(show_notification.call_count, 2)
def watch(pathes, extensions):
    """Recursively watch every path in *pathes* for modifications."""
    manager = WatchManager()
    notifier = Notifier(manager, default_proc_fun=Handler(extensions=extensions))
    for watched_path in pathes:
        manager.add_watch(watched_path, IN_MODIFY, rec=True, auto_add=True)
    notifier.loop()
def main():
    """Entry point: monitor monitor_dirs for all inotify events."""
    watch_manager = WatchManager()
    watch_manager.add_watch(monitor_dirs, ALL_EVENTS, rec=True)
    event_handler = MyEvent()
    notifier = Notifier(watch_manager, event_handler)
    notifier.loop()
def test_update_filtered(self, default_config, show_notification):
    """Files matching the ignore filters must not trigger the runner."""
    out_file = self.tmp_output.join("out.log")
    command_args = [
        "-c", self.config_file,
        "-r", "bash -c 'echo a | tee -a {}'".format(out_file),
        "-d", unicode(self.tmp.src),
    ]
    # All three paths should be filtered out (compiled, temp, hidden).
    events = [
        (self.tmp.src, "filtered_1.pyc", "some new data"),
        (self.tmp.src, "filtered_2.tmp", "some new data"),
        (self.tmp.src, ".hidden", "some new data"),
    ]
    self._copy_default_config(default_config)
    default_config.RUNNER_DELAY = -1
    manager = WatchManager()
    config = Config(watch_manager=manager, command_args=command_args)
    notifier = Notifier(manager, FileChangeHandler(config=config))
    notifier.loop(callback=partial(self._event_generator, events))
    # There are some stupid race conditions (possibly due to the callbacks)
    # Sleep time allows to execute all needed code
    sleep(0.2)
    self.assertTrue(path.exists(self.tmp.src.join("filtered_1.pyc")))
    self.assertTrue(path.exists(self.tmp.src.join("filtered_2.tmp")))
    self.assertTrue(path.exists(self.tmp.src.join(".hidden")))
    self.assertFalse(path.exists(out_file))
    self.assertFalse(show_notification.called)
class Monitor(object):
    """Drive pyinotify watches for the directories listed in a config."""

    # Convenience aliases for the pyinotify event masks used by callers.
    ALL = ALL_EVENTS
    CREATE = IN_CREATE
    DELETE = IN_DELETE
    MODIFY = IN_MODIFY
    ISDIR = IN_ISDIR

    def __init__(self, config):
        self._config = config
        self._logger = Logger()
        self._watchManager = WatchManager()
        self._notifier = Notifier(self._watchManager)

    def watch_config(self):
        """Register a watch for every configured directory."""
        for configured_dir in self._config.get_directories():
            self.watch(configured_dir)

    def watch(self, directory):
        """Watch one directory; the directory object processes its own events."""
        self._logger.info("watching directory %s" % directory.get_path())
        self._watchManager.add_watch(directory.get_path(),
                                     directory.get_eventmask(),
                                     proc_fun=directory)

    def run(self):
        """Block forever dispatching filesystem events."""
        self._logger.info("start monitor loop")
        self._notifier.loop()
def inotify():
    """Watch the configured file and directory and report their events."""
    setproctitle('event-report')
    print('EventReport %d Process start...' % getpid())

    file_event = FileEvent()
    dir_event = DirEvent()

    wm = WatchManager()
    # NOTE: the watched path must already exist before the watch is added.
    wm.add_watch(dir_event.get_path(), dir_event.get_muti_event(),
                 dir_event.get_event_handler())
    # Different paths/files can be watched for different operations at once.
    wm.add_watch(file_event.get_path(), file_event.get_muti_event(),
                 file_event.get_event_handler())

    notifier = Notifier(wm)
    print('inotify loop...')
    notifier.loop()
def fsMonitor(path="/data"):
    """Recursively watch *path* for delete/modify/create, batching reads."""
    manager = WatchManager()
    watched_events = IN_DELETE | IN_MODIFY | IN_CREATE
    notifier = Notifier(manager, EventHandler(), read_freq=10)
    # Merge duplicate events gathered during each read interval.
    notifier.coalesce_events()
    manager.add_watch(path, watched_events, rec=True, auto_add=True)
    notifier.loop()
def run(self):
    """Watch Settings.monitor_path recursively, processing events with PTmp."""
    handler = PTmp()
    notifier = Notifier(Settings.wm, handler)
    # TODO: Not necessary to watch the directories that already have
    # a procedures.xml
    Settings.wdd = Settings.wm.add_watch(Settings.monitor_path,
                                         Settings.mask, rec=True)
    notifier.loop()
def main_to_ascii():
    """CLI entry: watch a deployments tree and convert new binary files to ASCII.

    Returns 0 on clean exit, 1 if the notifier loop could not start.
    """
    setup_cli_logger(logging.INFO)

    parser = create_ascii_arg_parser()
    args = parser.parse_args()

    if not args.deployments_path:
        # Fixed typo in the user-facing message: "agrument" -> "argument".
        L.error("Please provide a --deployments_path argument or set the "
                "GUTILS_DEPLOYMENTS_DIRECTORY environmental variable")
        sys.exit(parser.print_usage())

    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE
    wm.add_watch(args.deployments_path, mask, rec=True, auto_add=True)

    # Convert binary data to ASCII
    if args.type == 'slocum':
        processor = Slocum2AsciiProcessor(
            deployments_path=args.deployments_path)

    notifier = Notifier(wm, processor, read_freq=10)  # Read every 10 seconds
    # Enable coalescing of events. This merges event types of the same type
    # on the same file together over the `read_freq` specified in the Notifier.
    notifier.coalesce_events()

    try:
        L.info(f"Watching {args.deployments_path} for new binary files")
        notifier.loop(daemonize=args.daemonize)
    except NotifierError:
        L.exception('Unable to start notifier loop')
        return 1

    L.info("GUTILS binary_to_ascii Exited Successfully")
    return 0
def FSMonitor(path='/root/wpf'):
    """Recursively watch *path* for delete/modify/create, reading every 10s."""
    manager = WatchManager()
    events = IN_DELETE | IN_MODIFY | IN_CREATE
    notifier = Notifier(manager, EventHandler(), read_freq=10)
    # Fold duplicate events together within each read interval.
    notifier.coalesce_events()
    # rec=True/auto_add=True make the watch cover new subdirectories too.
    manager.add_watch(path, events, rec=True, auto_add=True)
    notifier.loop()
def tailwatch(dir):
    """Recursively watch *dir* for create/delete/modify events."""
    flags = EventsCodes.ALL_FLAGS
    mask = flags['IN_CREATE'] | flags['IN_DELETE'] | flags['IN_MODIFY']
    manager = WatchManager()
    handler = PTmp()
    notifier = Notifier(manager, handler)
    manager.add_watch(dir, mask, rec=True)
    notifier.loop()
def FSMonitor(path='/root/wpf'):
    """Watch *path* recursively for file deletion, modification and creation."""
    watch_manager = WatchManager()
    mask = IN_DELETE | IN_MODIFY | IN_CREATE
    notifier = Notifier(watch_manager, EventHandler(), read_freq=10)
    notifier.coalesce_events()  # merge duplicate events per read cycle
    # Recursive watch; newly created subdirectories are added automatically.
    watch_manager.add_watch(path, mask, rec=True, auto_add=True)
    notifier.loop()
def watch_dir(self):
    """Watch the LED directory and recolour on every completed write."""
    manager = WatchManager()
    handler = EventHandler(self.set_color)
    notifier = Notifier(manager, handler)
    manager.add_watch(self.path_to_leddir, IN_CLOSE_WRITE, rec=True)
    notifier.loop()
def main_to_netcdf():
    """CLI entry: watch a deployments tree and convert ASCII files to NetCDF."""
    setup_cli_logger(logging.INFO)

    parser = create_netcdf_arg_parser()
    args = parser.parse_args()

    filter_args = vars(args)
    # Remove non-filter args into positional arguments
    deployments_path = filter_args.pop('deployments_path')
    subset = filter_args.pop('subset')
    daemonize = filter_args.pop('daemonize')
    template = filter_args.pop('template')
    profile_id_type = int(filter_args.pop('profile_id_type'))

    # Move reader_class to a class
    reader_class = filter_args.pop('reader_class')
    if reader_class == 'slocum':
        reader_class = SlocumReader

    if not deployments_path:
        L.error("Please provide a --deployments_path argument or set the "
                "GUTILS_DEPLOYMENTS_DIRECTORY environmental variable")
        sys.exit(parser.print_usage())

    # Add inotify watch
    wm = WatchManager()
    wm.add_watch(deployments_path,
                 IN_MOVED_TO | IN_CLOSE_WRITE,
                 rec=True,
                 auto_add=True)

    # Convert ASCII data to NetCDF using a specific reader class
    if reader_class == SlocumReader:
        processor = Slocum2NetcdfProcessor(deployments_path=deployments_path,
                                           subset=subset,
                                           template=template,
                                           profile_id_type=profile_id_type,
                                           prefer_file_filters=True,
                                           **filter_args)

    notifier = Notifier(wm, processor, read_freq=10)
    # Enable coalescing of events. This merges event types of the same type
    # on the same file together over the `read_freq` specified in the Notifier.
    notifier.coalesce_events()

    try:
        L.info(f"Watching {deployments_path} for new ascii files")
        notifier.loop(daemonize=daemonize)
    except NotifierError:
        L.exception('Unable to start notifier loop')
        return 1

    L.info("GUTILS ascii_to_netcdf Exited Successfully")
    return 0
def change_check(): wm = WatchManager() mask = IN_CREATE | IN_MODIFY notifier = Notifier(wm, EventHandler()) wm.add_watch(conf_dir, mask, rec=True) try: notifier.loop() except NotifierError, err: print err
def builder_process():
    """Rebuild static assets whenever a watched file is modified."""
    logger.debug(' - Watched static files for changes to rebuild')
    manager = WatchManager()
    notifier = Notifier(manager, default_proc_fun=_build)
    manager.add_watch(watched_dir,
                      IN_MODIFY,  # | IN_CREATE | IN_DELETE,
                      rec=True,
                      auto_add=True)
    notifier.loop()
def builder_process():
    """Watch the static directory and trigger _build on every modification."""
    logger.debug(' - Watched static files for changes to rebuild')
    watch_manager = WatchManager()
    event_loop = Notifier(watch_manager, default_proc_fun=_build)
    mask = IN_MODIFY  # | IN_CREATE | IN_DELETE,
    watch_manager.add_watch(watched_dir, mask, rec=True, auto_add=True)
    event_loop.loop()
def main():
    """Entry point for the file monitor: watch FILE_DIR for finished writes."""
    check_dir_exist()
    watch_manager = WatchManager()
    notifier = Notifier(watch_manager, EventHandler())
    watch_manager.add_watch(FILE_DIR, IN_CLOSE_WRITE, rec=True, auto_add=True)
    log.info('Now starting monitor %s' % (FILE_DIR))
    notifier.loop()
def monitor(file_name='.'):
    """Tail *file_name*: seek to its current end, then watch for appends.

    The open file object is published through the module-level ``file``
    global so ProcessTransientFile can read newly appended data.
    """
    global file
    file = open(file_name, 'r')
    # Use the named st_size attribute instead of the magic tuple index 6.
    file.seek(os.stat(file_name).st_size)
    wm = WatchManager()
    notifier = Notifier(wm)
    wm.watch_transient_file(file_name, IN_MODIFY, ProcessTransientFile)
    print('now starting monitor %s' % file_name)
    notifier.loop()
def create_monitor(to_watch, name): "Create and start a new directory monitor." messenger = NetworkSender(name) p = Monitor(messenger) wm = WatchManager() # Watch Manager notifier = Notifier(wm, p) # Notifier try: wdd = wm.add_watch(to_watch, IN_DELETE | IN_CREATE | IN_MODIFY) notifier.loop() except WatchManagerError, err: print err, err.wmd
def do_monit(monit_path='./'): wm = WatchManager() #create a watchmanager() mask = pyinotify.IN_DELETE | pyinotify.IN_MODIFY | pyinotify.IN_CREATE # 需要监控的事件 notifier = Notifier(wm, EventHandler()) wdd = wm.add_watch(monit_path, mask, rec=True) # 加入监控,mask,rec递归 try: #防止启动多个的命令 设置进程号文件就可以防止启动多个 #notifier.loop(daemonize=True, pid_file='/tmp/pyinotify2.pid') notifier.loop(daemonize=True) except pyinotify.NotifierError, err: print >> sys.stderr, err
def do_monit(monit_path, log_file_path, source_path, des_ip, des_path): wm = WatchManager() #create a watchmanager() mask = pyinotify.IN_DELETE | pyinotify.IN_MODIFY # 需要监控的事件 notifier = Notifier( wm, EventHandler(log_file_path, source_path, des_ip, des_path)) wdd = wm.add_watch(monit_path, mask, rec=True) # 加入监控,mask,rec递归 #notifier.loop() #开始循环监控 try: #防止启动多个的命令 设置进程号文件就可以防止启动多个 notifier.loop(daemonize=True, pid_file='/tmp/pyinotify.pid') except pyinotify.NotifierError, err: print >> sys.stderr, err
def main():
    """Watch a glider data directory; upload new data to Mongo, announce via ZMQ.

    Returns 1 if the notifier loop could not start, 0 on clean exit.
    """
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())

    parser = argparse.ArgumentParser(
        description="Monitor a directory for new glider data. "
                    "Processes and uploads new data to a Mongo Database. "
                    "Announce changes via ZMQ.")
    parser.add_argument("-d", "--data_path",
                        help="Path to Glider data directory",
                        default=os.environ.get('GDB_DATA_DIR'))
    parser.add_argument("--zmq_url",
                        help='Port to publish ZMQ messages on. '
                             'Default is "tcp://127.0.0.1:44444".',
                        default=os.environ.get('ZMQ_URL',
                                               'tcp://127.0.0.1:44444'))
    parser.add_argument(
        "--mongo_url",
        help='Mongo Database URL. Can include authentication parameters. '
             'Default is "mongodb://localhost:27017".',
        default=os.environ.get('MONGO_URL', 'mongodb://localhost:27017'))
    parser.add_argument("--daemonize",
                        help="To daemonize or not to daemonize",
                        type=bool,
                        default=False)
    args = parser.parse_args()

    if not args.data_path:
        # Fixed typo in the user-facing message: "agrument" -> "argument".
        logger.error("Please provide a --data_path argument or set the "
                     "GDB_DATA_DIR environmental variable")
        sys.exit(parser.print_usage())

    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE
    wm.add_watch(args.data_path, mask, rec=True, auto_add=True)

    processor = GliderFileProcessor(zmq_url=args.zmq_url,
                                    mongo_url=args.mongo_url)
    notifier = Notifier(wm, processor)

    try:
        logger.info("Watching {}\nInserting into {}\nPublishing to {}".format(
            args.data_path, args.mongo_url, args.zmq_url))
        notifier.loop(daemonize=args.daemonize)
    except NotifierError:
        logger.exception('Unable to start notifier loop')
        return 1

    logger.info("GDAM Exited Successfully")
    return 0
def start_queue():
    """Start watching the queue dir, then drain jobs queued before startup."""
    dir_queue = vmcheckerpaths.dir_queue()

    # Register for inotify events *before* scanning for stale jobs so no
    # submission can slip between the scan and the watch registration.
    watch_manager = WatchManager()
    notifier = Notifier(watch_manager, _QueueManager())
    watch_manager.add_watch(dir_queue, EventsCodes.ALL_FLAGS['IN_CLOSE_WRITE'])

    process_stale_jobs(dir_queue)

    # The callback also receives jobs queued after inotify was set up but
    # before stale-job processing finished.
    notifier.loop(callback=_callback)
def monitor(watch_path, callback):
    """Run *callback* via AutoRunner whenever *watch_path* is modified.

    If *watch_path* is a file, its parent directory is watched instead.
    """
    watch_path = os.path.abspath(watch_path)
    if os.path.isfile(watch_path):
        target = os.path.dirname(watch_path)
    else:
        target = watch_path
    manager = WatchManager()
    notifier = Notifier(manager, AutoRunner(watch_path, callback))
    manager.add_watch(target, IN_MODIFY)
    notifier.loop()
def main_to_erddap():
    """CLI entry: watch a netCDF output tree and refresh ERDDAP content/flags.

    Returns 0 on clean exit, 1 on any error.
    """
    setup_cli_logger(logging.WARNING)

    parser = create_erddap_arg_parser()
    args = parser.parse_args()

    if not args.data_path:
        # Fixed typo in the user-facing message: "agrument" -> "argument".
        L.error("Please provide an --data_path argument or set the "
                "GUTILS_NETCDF_DIRECTORY environmental variable")
        sys.exit(parser.print_usage())

    if not args.erddap_content_path:
        L.error("Please provide an --erddap_content_path argument or set the "
                "GUTILS_ERDDAP_CONTENT_PATH environmental variable")
        sys.exit(parser.print_usage())

    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE
    wm.add_watch(
        args.data_path,
        mask,
        rec=True,
        auto_add=True
    )

    processor = Netcdf2ErddapProcessor(
        outputs_path=args.data_path,
        erddap_content_path=args.erddap_content_path,
        erddap_flag_path=args.erddap_flag_path
    )
    notifier = Notifier(wm, processor, read_freq=30)  # Read every 30 seconds
    # Enable coalescing of events. This merges event types of the same type
    # on the same file together over the `read_freq` specified in the Notifier.
    notifier.coalesce_events()

    try:
        L.info("Watching {}, updating content at {} and flags at {}".format(
            args.data_path, args.erddap_content_path, args.erddap_flag_path
        ))
        notifier.loop(daemonize=args.daemonize)
    except NotifierError:
        L.exception('Unable to start notifier loop')
        return 1
    except BaseException as e:
        L.exception(e)
        return 1

    L.info("GUTILS netcdf_to_erddap Exited Successfully")
    return 0
def monitor_photos(directory='/mnt/photos'):
    """Watch a photo directory; email on new uploads and maintain a listing."""
    wm = WatchManager()
    # watched events
    mask = (IN_DELETE | IN_CREATE | IN_MODIFY | IN_CLOSE_WRITE
            | IN_MOVED_FROM | IN_MOVED_TO)
    emails = ["[email protected]@163.com"]

    class PFilePath(ProcessEvent):
        """Per-event handlers: announce uploads, keep the listing in sync."""

        def process_IN_CREATE(self, event):
            print(datetime.now().strftime('%Y%m%d %H%M%S: '))
            print("***Create file: %s" % os.path.join(event.path, event.name))
            print("%s sending email" % event.name)
            subject = "%s uploaded." % event.name
            body_text = "<a href=http://7xrst7.com1.z0.glb.clouddn.com/%s>%s</a>" % (event.name, event.name)
            # The email is sent from a child process
            # (a direct send_email(subject, body_text, emails) also works).
            p = Process(target=send_email, args=(subject, body_text, emails))
            p.start()
            p.join()

        def process_IN_MODIFY(self, event):
            # Modifications are deliberately ignored.
            pass

        def process_IN_CLOSE_WRITE(self, event):
            print(datetime.now().strftime('%Y%m%d %H%M%S: '))
            print("***Close file: %s" % os.path.join(event.path, event.name))
            update_list(directory, 'add', event.name)

        def process_IN_MOVED_TO(self, event):
            print(datetime.now().strftime('%Y%m%d %H%M%S: '))
            print("***Movedto file: %s" % os.path.join(event.path, event.name))
            update_list(directory, 'add', event.name)

        def process_IN_MOVED_FROM(self, event):
            print(datetime.now().strftime('%Y%m%d %H%M%S: '))
            print("***Movedfrom file: %s" % os.path.join(event.path, event.name))
            update_list(directory, 'delete', event.name)

        def process_IN_DELETE(self, event):
            print(datetime.now().strftime('%Y%m%d %H%M%S: '))
            print("***Delete file: %s" % os.path.join(event.path, event.name))
            update_list(directory, 'delete', event.name)

    notifier = Notifier(wm, PFilePath())
    wm.add_watch(directory, mask)
    print(datetime.now().strftime('%Y%m%d %H%M%S: '))
    print('***Start watching***')
    notifier.loop()
def main_to_ftp():
    """CLI entry: watch a netCDF output tree and upload new files over FTP.

    Returns 0 on clean exit, 1 on any error.
    """
    setup_cli_logger(logging.INFO)

    parser = create_ftp_arg_parser()
    args = parser.parse_args()

    if not args.data_path:
        # Fixed typo in the user-facing message: "agrument" -> "argument".
        L.error("Please provide an --data_path argument or set the "
                "GUTILS_NETCDF_DIRECTORY environmental variable")
        sys.exit(parser.print_usage())

    if not args.ftp_url:
        L.error("Please provide an --ftp_url argument or set the "
                "GUTILS_FTP_URL environmental variable")
        sys.exit(parser.print_usage())

    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE
    wm.add_watch(
        args.data_path,
        mask,
        rec=True,
        auto_add=True
    )

    processor = Netcdf2FtpProcessor(
        ftp_url=args.ftp_url,
        ftp_user=args.ftp_user,
        ftp_pass=args.ftp_pass,
    )
    notifier = Notifier(wm, processor, read_freq=10)  # Read every 10 seconds
    # Enable coalescing of events. This merges event types of the same type
    # on the same file together over the `read_freq` specified in the Notifier.
    notifier.coalesce_events()

    try:
        L.info("Watching {} and Uploading to {}".format(
            args.data_path, args.ftp_url)
        )
        notifier.loop(daemonize=args.daemonize)
    except NotifierError:
        L.exception('Unable to start notifier loop')
        return 1
    except BaseException as e:
        L.exception(e)
        return 1

    L.info("GUTILS netcdf_to_ftp Exited Successfully")
    return 0
def watch(watch_file): """Watch the given file for changes.""" wm = WatchManager() with TailHandler(watch_file) as th: notifier = Notifier(wm, th) wm.add_watch(watch_file, TailHandler.MASK) notifier.loop() # flush queue before exiting th.publish_queue.publish() print 'Exiting'
def _monitor_lockfile(self):
    """Watch the OLED lock file and fire the user start/stop callbacks."""
    handler = ProcessEvent()
    mask = 0
    if self.when_user_stops_using_oled:
        # A completed write on the lock file signals the user is done.
        handler.process_IN_CLOSE_WRITE = (
            lambda event: self.when_user_stops_using_oled())
        mask |= IN_CLOSE_WRITE
    if self.when_user_starts_using_oled:
        # An open on the lock file signals the user has started.
        handler.process_IN_OPEN = (
            lambda event: self.when_user_starts_using_oled())
        mask |= IN_OPEN
    manager = WatchManager()
    manager.add_watch(self.lock_path, mask)
    notifier = Notifier(manager, handler)
    notifier.loop()
def main(argv): server_config_file = _parseCommandArguments(argv) [static_path, minify_enabled] = configure_server_and_app(server_config_file) print "server_config_file = " + server_config_file print "static_path = " + static_path print "minify_enabled = " + str(minify_enabled) store = RedisStore(redis.StrictRedis()) jso = jsOptimizer(minify_enabled) jso.watch(static_path, store, force=True) try: wm = WatchManager() notifier = Notifier(wm, StaticsChangesProcessor(jso, store)) wm.add_watch(static_path, IN_CREATE | IN_MODIFY | IN_DELETE | IN_MOVED_TO, rec=True) notifier.loop() finally: pass
def main():
    """Honeydock entry: start the base container, wire iptables, watch kern.log."""
    banner()
    logger.info("Initializing...")
    logger.info("Starting base docker")

    container = create_container()
    if not container:
        return
    host_port = docker_host_port(
        CURRENT_CONTAINER)[HONEYPOT_DOCKER_SERVICE_PORT]
    logger.info("Base docker has started")

    logger.info("Creating initial iptables rules...")
    local_ip = get_local_ip(INTERFACE)

    # Redirect honeypot service traffic to the container's mapped host port.
    out, ok = command("iptables -t nat -A PREROUTING -p tcp "
                      f"-d { local_ip } --dport { HONEYPOT_SERVICE_PORT } "
                      f"-j DNAT --to { local_ip }:{ host_port }")
    if not ok:
        return
    out, ok = command(f"iptables -A INPUT -p tcp -i { INTERFACE } "
                      "--dport 22 -m state --state NEW,ESTABLISHED "
                      "-j ACCEPT")
    if not ok:
        return
    out, ok = command(f"iptables -A OUTPUT -p tcp -o { INTERFACE } "
                      "--sport 22 -m state --state ESTABLISHED "
                      "-j ACCEPT")
    if not ok:
        return
    # Log established connections so the kern.log watcher can react to them.
    out, ok = command(
        f"iptables -A OUTPUT -p tcp --tcp-flags SYN,ACK SYN,ACK",
        ["-j", "LOG", "--log-prefix", "Connection established: "])
    if not ok:
        return
    logger.info("Rules created. Honeydock is ready to go. :)")

    handler = EventHandler(KERN_LOG_PATH)
    watch_manager = WatchManager()
    watch_manager.add_watch(handler.file_path, IN_MODIFY)
    notifier = Notifier(watch_manager, handler)
    notifier.loop()
def monitor_directory(directory_path):
    """Block forever watching *directory_path*; new files start the pipeline.

    sftp/scp modules usually create temporary files during transfer and
    move them to the permanent name once done, so watching IN_MOVED_TO
    alone sees only completely transferred files.

    :param directory_path: The path to the directory
    :return: Never
    """
    manager = WatchManager()
    handler = EventHandler()
    notifier = Notifier(manager, handler)
    manager.add_watch(directory_path, IN_MOVED_TO, rec=True, auto_add=True)
    notifier.loop()
def monitor(watch):
    """Watch the 'series' and 'real_time' subdirectories for new files."""
    gru_path = os.path.join(watch, 'series')
    real_path = os.path.join(watch, 'real_time')
    config_log()
    manager = WatchManager()
    manager.add_watch(gru_path, IN_CREATE,
                      proc_fun=GruEventHandler(), rec=True)
    manager.add_watch(real_path, IN_CREATE,
                      proc_fun=RealTimeEventHandler(), rec=True)
    notifier = Notifier(manager)
    logging.info('now starting monitor %s' % (watch))
    notifier.loop()
def watch(watch_file): ''' Watch the given file for changes ''' wm = WatchManager() with TailHandler(watch_file) as th: notifier = Notifier(wm, th) wdd = wm.add_watch(watch_file, TailHandler.MASK) notifier.loop() # flush queue before exiting th.publish_queue.publish() print 'Exiting'
def loop(self):
    """Loop until done."""
    self.start()
    try:
        # inotify interface
        watch_manager = WatchManager()
        mask = IN_CLOSE_WRITE | IN_MOVED_TO
        # create notifier
        notifier = Notifier(watch_manager, self)
        # add a watch per input directory
        for input_dir in self.input_dirs:
            watch_manager.add_watch(input_dir, mask)
        # loop forever
        notifier.loop()
    finally:
        self.stop()
        self.join()
def start(actual_directories): wm = WatchManager() flags = EventsCodes.ALL_FLAGS mask = flags['IN_MODIFY'] #| flags['IN_CREATE'] p = PTmp() notifier = Notifier(wm, p) for actual_directory in actual_directories: print "DIRECTORY", actual_directory wdd = wm.add_watch(actual_directory, mask, rec=True) # notifier = Notifier(wm, p, timeout=10) try: print "Waiting for stuff to happen..." notifier.loop() except KeyboardInterrupt: pass return 0
def watch(watch_file):
    """Watch the given file for changes, restarting if its inode is lost.

    The previous implementation restarted by calling itself recursively,
    which grows the call stack on every inode loss (e.g. log rotation);
    a loop restarts the watch without unbounded recursion.
    """
    logger.info('Starting watch')
    while True:
        wm = WatchManager()
        try:
            with TailHandler(filename=watch_file) as th:
                logger.debug('adding pyinotify watcher/notifier')
                notifier = Notifier(wm, th,
                                    read_freq=settings.NOTIFIER_READ_FREQ,
                                    timeout=settings.NOTIFIER_POLL_TIMEOUT)
                wm.add_watch(watch_file, TailHandler.MASK)
                notifier.loop()
            return  # notifier loop ended normally
        except NotifierLostINodeException:
            # the watched inode vanished (rotated/replaced): restart watch
            logger.info("stopping notifier and restarting watch")
            notifier.stop()
        finally:
            logger.info('Exiting watch')
def watch(directory):
    """Watch *directory* recursively; reindex queued dists on SIGALRM."""
    logger.info("Watching {0}".format(directory))
    all_flags = EventsCodes.ALL_FLAGS
    mask = (all_flags['IN_CREATE'] | all_flags['IN_MODIFY']
            | all_flags['IN_DELETE'])
    manager = WatchManager()
    manager.add_watch(directory, mask, rec=True)
    process = IndexProcess(manager, mask)
    notifier = Notifier(manager, process)

    def update_index(*args):
        # Drain one element at a time; popping everything at once would be
        # better but that operation needs to stay atomic.
        while process.queue:
            dist_dir = process.queue.pop()
            index(directory, only=[dist_dir])

    signal.signal(signal.SIGALRM, update_index)
    notifier.loop()
def main():
    """CLI entry: set up the runner, then watch the input file's directory."""
    me = os.path.splitext(os.path.basename(sys.argv[0]))[0]
    lgg = logging.getLogger('cli.' + me)
    runner = Runner()

    # Main parser
    parser = argparse.ArgumentParser(description="""Create test data.""")
    runner.add_parser_args(parser, which=[('config', True), ('locale', False)])
    args = parser.parse_args()
    runner.init_app(args, lgg=lgg)

    watcher = Watcher(lgg, runner.in1, runner.out1, runner.in2, runner.out2)
    manager = WatchManager()
    notifier = Notifier(manager, watcher)
    watchdir = os.path.dirname(watcher.in1)
    lgg.info("Watching " + watchdir)
    manager.add_watch(watchdir, EventsCodes.ALL_FLAGS['IN_MODIFY'], rec=False)
    notifier.loop()
def loop(self):
    """The main function: start workers, watch input dirs, clean up on exit."""
    self.start()
    try:
        # Watch every input directory for finished writes and moves.
        manager = WatchManager()
        events = IN_CLOSE_WRITE | IN_MOVED_TO
        notifier = Notifier(manager, self)
        for directory in self.input_dirs:
            manager.add_watch(directory, events)
        # Block forever dispatching events.
        notifier.loop()
    finally:
        self.stop()
        self.join()
def FSMonitor(path):
    """Watch *path* recursively for create/modify events.

    If the directory already has contents, they are handed to
    initialization() before watching starts.  The original had two
    byte-identical watch setups in both branches of the if/else; the
    duplication is removed and the watch is set up once.
    """
    result_str = is_empty(path)
    if result_str:
        initialization(result_str)
    # watch manager
    wm = WatchManager()
    mask = IN_CREATE | IN_MODIFY
    # mask = IN_MODIFY
    # event handler
    handler = EventHandler()
    # notifier
    notifier = Notifier(wm, handler)
    # wm.add_watch(path, mask, auto_add=True, rec=True)
    wm.add_watch(path, mask, rec=True)
    notifier.loop()
class EventWatcherClass(object): FLAGS = EventsCodes.ALL_FLAGS mask = ( FLAGS["IN_DELETE"] | FLAGS["IN_CREATE"] | FLAGS["IN_MOVED_FROM"] | FLAGS["IN_MODIFY"] | FLAGS["IN_MOVED_TO"] | FLAGS["IN_ATTRIB"] | FLAGS["IN_IGNORED"] | FLAGS["IN_MOVE_SELF"] ) def __init__(self, config, fs): self.__config = config self.__listeners = [] self.__fs = fs self.__watchManager = WatchManager() self.configFile = self.__config.configFile self.cp = CustomProcess(self, self.__listener, self.configFile) self.__daemonize = self.__config.daemon self.dirToBeWatched = [] self.wm = None def __getDirToBeWatched(self): self.dirToBeWatched = self.__config.watchDirs if self.dirToBeWatched == []: self.dirToBeWatched.append(os.path.expanduser("/tmp")) # watching /tmp directory by default # also watch the config file self.configFile = self.__config.configFile if self.configFile not in self.dirToBeWatched: self.dirToBeWatched.append(self.configFile) def __processWatchedDir(self, stopWatchDir=[]): self.__getDirToBeWatched() for dir in self.dirToBeWatched: if dir.rstrip("/") != self.configFile.rstrip("/"): if self.__fs.isDirectory(dir): # modifiedDate = os.stat(dir)[ST_MTIME] #when a file start being watched, use the current system time modifiedDate = time.time() detail = ("file://" + dir, int(modifiedDate), "start", True) self.__listener(eventDetail=detail, initialization=True) else: print "fail to add dir to watched, the dir %s might not exist" % dir for dir in stopWatchDir: if dir.rstrip("/") != self.configFile.rstrip("/"): if self.__fs.isDirectory(dir): # modifiedDate = os.stat(dir)[ST_MTIME] #when a file start being watched, use the current system time modifiedDate = time.time() detail = ("file://" + dir, int(modifiedDate), "stop", True) self.__listener(eventDetail=detail, initialization=True) else: print "fail to add dir to watched, the dir %s might not exist" % dir def __startWatcher(self): self.notifier = Notifier(self.__watchManager, default_proc_fun=self.cp) self.resetDirToBeWatched() 
self.notifier.loop( daemonize=self.__daemonize, pid_file="/tmp/pyinotify.pid", force_kill=True, stdout="/tmp/stdout.txt" ) def resetDirToBeWatched(self, configChanged=False): if configChanged: oldDirToBeWatched = self.dirToBeWatched oldDirToBeWatched.sort() # reload the config file self.__config.reload() self.__getDirToBeWatched() stopWatchDir = [] if oldDirToBeWatched != self.dirToBeWatched.sort(): stopWatchDir = [item for item in oldDirToBeWatched if not item in self.dirToBeWatched] self.__processWatchedDir(stopWatchDir) if self.wm and self.wm.values(): self.wm = self.__watchManager.rm_watch(self.wm.values()) self.wm = self.__watchManager.add_watch(self.dirToBeWatched, self.mask, rec=True, auto_add=True) # self.wm = self.__watchManager.add_watch(self.__dirToBeWatched, self.mask, rec=True, # auto_add=True, quiet=False, exclude_filter=self.excl) def addListener(self, listener): self.__listeners.append(listener) self.__processWatchedDir() self.__startWatcher() def removeListener(self, listener): if self.__listeners.count(listener) > 0: self.__listeners.remove(listener) return True return False def __listener(self, *args, **kwargs): for listener in self.__listeners: try: listener(*args, **kwargs) except Exception, e: print str(e) pass
from pyinotify import WatchManager from pyinotify import Notifier from pyinotify import ALL_EVENTS wm = WatchManager() def cb(s): print s def process_event(event, *args, **kwargs): print event wm.add_watch('.', mask=ALL_EVENTS) notifier = Notifier(wm, default_proc_fun=process_event) notifier.loop(callback=cb)
notification_queue.append((event.name, event.pathname,moved_from_loc, "RENAMEDIR")) moved_from_flag = 0 moved_from_name = '' moved_from_loc = '' else: notification_queue.append((event.name, event.pathname,'', "MOVED_TO")) def process_default(self, event): check_moved_from() rootpath='/home/madhu/' foldername='ishani' myp=12350 myftp=12351 myhs='' serp=12345 serhs='' wm = WatchManager() #somethng which creates a manager like thing to look wat all folders are to take care of mask = pyinotify.ALL_EVENTS #wat all events r to be notified wm.add_watch(rootpath+foldername, mask, rec=True,auto_add=True) notifier = Notifier(wm, MyProcessing()) # connecting d manager and methods to call thread1=myThread(1,'snd-'+foldername+'-thread', foldername, rootpath, myhs, myp, myftp, serhs, serp) thread2=myThread(2,'rcv-'+foldername+'-thread', foldername, rootpath, myhs, myp, myftp, serhs, serp) thread3=myThread(3,'pro-'+foldername+'-thread', foldername, rootpath, myhs, myp, myftp, serhs, serp) thread1.start() thread2.start() thread3.start() notifier.loop() # start print "thread started"
def watch(dirs, extensions, callback):
    """Invoke *callback* when matching files appear in any of *dirs*."""
    manager = WatchManager()
    notifier = Notifier(manager, EventHandler(callback, extensions))
    for directory in dirs:
        manager.add_watch(directory, IN_CREATE | IN_MOVED_TO)
    notifier.loop()
def main(): parser = argparse.ArgumentParser( description="Monitor a directory for new glider data. " "Announce changes via ZMQ." ) parser.add_argument( "glider_directory_path", help="Path to configuration file" ) parser.add_argument( "--zmq_port", help="Port to publish ZMQ messages on. 8008 by default.", type=int, default=8008 ) parser.add_argument( "--daemonize", help="To daemonize or not to daemonize", type=bool, default=False ) parser.add_argument( "--pid_file", help="Where to look for and put the PID file", default="./gsps.pid" ) parser.add_argument( "--log_file", help="Full path of file to log to", default="./gsps.log" ) args = parser.parse_args() # Setup logger logger.setLevel(logging.INFO) formatter = logging.Formatter("%(asctime)s - %(name)s " "- %(levelname)s - %(message)s") log_handler = logging.FileHandler(args.log_file) log_handler.setFormatter(formatter) logger.addHandler(log_handler) monitor_path = args.glider_directory_path if monitor_path[-1] == '/': monitor_path = monitor_path[:-1] wm = WatchManager() mask = IN_MOVED_TO | IN_CLOSE_WRITE wdd = wm.add_watch(args.glider_directory_path, mask, rec=True, auto_add=True) processor = GliderFileProcessor(args.zmq_port) notifier = Notifier(wm, processor) def handler(signum, frame): wm.rm_watch(wdd.values()) processor.stop() notifier.stop() signal.signal(signal.SIGTERM, handler) try: logger.info("Starting") notifier.loop(daemonize=args.daemonize, pid_file=args.pid_file) except NotifierError, err: logger.error('Unable to start notifier loop: %s' % err) return 1
'IN_MOVED_TO' : 0x00000080, # File was moved to Y 'IN_CREATE' : 0x00000100, # Subfile was created 'IN_DELETE' : 0x00000200, # Subfile was deleted 'IN_DELETE_SELF' : 0x00000400, # Self (watched item itself) # was deleted 'IN_MOVE_SELF' : 0x00000800, # Self (watched item itself) was moved }, 'EVENT_FLAGS': { 'IN_UNMOUNT' : 0x00002000, # Backing fs was unmounted 'IN_Q_OVERFLOW' : 0x00004000, # Event queued overflowed 'IN_IGNORED' : 0x00008000, # File was ignored }, 'SPECIAL_FLAGS': { 'IN_ONLYDIR' : 0x01000000, # only watch the path if it is a # directory 'IN_DONT_FOLLOW' : 0x02000000, # don't follow a symlink 'IN_MASK_ADD' : 0x20000000, # add to the mask of an already # existing watch 'IN_ISDIR' : 0x40000000, # event occurred against dir 'IN_ONESHOT' : 0x80000000, # only send event once }, } """ # What mask to apply mask = 0x00000002 | 0x00000008 | 0x00000040 | 0x00000080 | 0x00000100 | 0x00000200 wm.add_watch(path, mask, rec=True, auto_add=True) # Loop forever (until sigint signal get caught) notifier.loop()
# CLI entry: watch args.path for files moved in, feed them to a worker thread.
log = logging.getLogger('pag.main')

parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('path')
parser.add_argument('git_path')
parser.add_argument('--debug', dest='debug', action='store_true')
args = parser.parse_args()

if __name__ == '__main__':
    start_logging(FIREHOSE, debug=args.debug)

    # Background worker consumes paths from the queue.
    q = Queue.Queue()
    t = threading.Thread(target=worker, args=(q, args.path, args.git_path))
    t.daemon = True
    t.start()

    watch_manager = WatchManager()
    handler = EventHandler(args.path, q)
    notifier = Notifier(watch_manager, handler)
    # watch this directory, with mask(s)
    wdd = watch_manager.add_watch(args.path, IN_MOVED_TO,
                                  rec=True, auto_add=True)
    # setup options
    notifier.loop(daemonize=False)