def _reloader_inotify(extra_files=None, interval=None):
    # Mutated by inotify loop when changes occur.
    changed = [False]

    # Setup inotify watches
    from pyinotify import WatchManager, Notifier

    # this API changed at one point, support both
    try:
        from pyinotify import EventsCodes as ec
        ec.IN_ATTRIB
    except (ImportError, AttributeError):
        import pyinotify as ec

    wm = WatchManager()
    mask = ec.IN_DELETE_SELF | ec.IN_MOVE_SELF | ec.IN_MODIFY | ec.IN_ATTRIB

    def signal_changed(event):
        if changed[0]:
            return
        _log('info', ' * Detected change in %r, reloading' % event.path)
        changed[:] = [True]

    for fname in extra_files or ():
        wm.add_watch(fname, mask, signal_changed)

    # ... And now we wait...
    notif = Notifier(wm)
    try:
        while not changed[0]:
            # always reiterate through sys.modules, adding them
            for fname in _iter_module_files():
                wm.add_watch(fname, mask, signal_changed)
            notif.process_events()
            if notif.check_events(timeout=interval):
                notif.read_events()
            # TODO Set timeout to something small and check parent liveliness
    finally:
        notif.stop()
        sys.exit(3)
def main():
    upload_queue = Queue.Queue()
    thread.start_new_thread(upload_thread, (upload_queue,))

    #
    # Setup a watcher on the images folder.
    #
    wm = WatchManager()
    notifier = Notifier(wm, PTmp(upload_queue))
    wm.add_watch(BASE_FOLDER, EventsCodes.OP_FLAGS['IN_CREATE'], rec=True)

    #
    # loop forever
    #
    while True:
        try:
            #
            # process the queue of events
            #
            notifier.process_events()
            if notifier.check_events():
                #
                # read notified events and enqueue them
                #
                notifier.read_events()
        except KeyboardInterrupt:
            #
            # destroy the inotify's instance on this
            # interrupt (stop monitoring)
            #
            notifier.stop()

            #
            # Stop the upload queue.
            #
            upload_queue.put((None, None))
            break
def FSMonitor(path='.'):
    wm = WatchManager()
    mask = IN_CREATE | IN_MODIFY | IN_MOVED_TO
    notifier = Notifier(wm, EventHandler())
    path = join(_env.PREFIX, 'coffee')
    for i in os.listdir(path):
        p = join(path, i)
        if not isdir(p):
            continue
        p = link_path(p)
        wm.add_watch(p, mask, rec=True)
    wm.add_watch(path, mask, rec=True)
    print 'Start real-time compilation of COFFEE SCRIPT %s' % (path)
    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break
def Monitor(path):
    class PCreate(ProcessEvent):
        def process_IN_CREATE(self, event):
            f = event.name and os.path.join(event.path, event.name) or event.path
            # remove path element from entity
            item = f[len(path):]
            # start a socat instance
            socat_cmd = "socat "
            socat_cmd += path
            socat_cmd += item
            socat_cmd += " pty:,link="
            socat_cmd += ptypath
            socat_cmd += item
            socat_cmd += " &"
            os.system(socat_cmd)
            # load in a screen session
            screen_cmd = "screen -S vmware -X eval 'screen -t "
            screen_cmd += item
            screen_cmd += " -L "
            screen_cmd += ptypath
            screen_cmd += item
            screen_cmd += " 9600'"
            os.system(screen_cmd)

    wm = WatchManager()
    notifier = Notifier(wm, PCreate())
    wm.add_watch(path, pyinotify.IN_CREATE)
    try:
        while 1:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
    except KeyboardInterrupt:
        notifier.stop()
        return
def start(conf):
    global_conf = conf
    wm = WatchManager()
    mask = EventsCodes.ALL_FLAGS['IN_CREATE'] | EventsCodes.ALL_FLAGS['IN_MOVED_TO']
    notifier = Notifier(wm, PTmp())
    for watch_dir in conf['watch_directories']:
        watch_dir = os.path.expanduser(watch_dir)
        print("Now watching %s" % watch_dir)
        wm.add_watch(watch_dir, mask, rec=True)
    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break
def wait_for_vm_boot(path, timeout=300):
    wait_for_vm_boot.ks_read = False

    def proc(_):
        wait_for_vm_boot.ks_read = True

    wm = WatchManager()
    notifier = Notifier(wm, timeout=1000)
    wm.add_watch(path, mask=IN_CLOSE_NOWRITE, rec=False, proc_fun=proc)
    start_time = int(time.time())
    notifier.process_events()
    while not wait_for_vm_boot.ks_read:
        if int(time.time()) - start_time > timeout:
            break
        if notifier.check_events():
            # loop in case more events appear while we are processing
            notifier.read_events()
            notifier.process_events()
    else:
        return True
    print 'Timeout {} sec occurred while waiting for Master Node reboot'.format(timeout)
def FSMonitor(path='.', deploy=None):
    """
    Monitor the path folder; when a new file is generated, the IN_CREATE event is triggered.
    :param path: the path to be monitored
    :param deploy: the gru-model object
    :return:
    """
    wm = WatchManager()
    mask = IN_CREATE
    if deploy is None:
        logging.error('Model uninitialized!')
        return
    notifier = Notifier(wm, EventHandler(predict=deploy))
    wm.add_watch(path, mask, auto_add=True, rec=True)
    print('now starting monitor %s' % (path))
    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break
def __inotify(self):
    wm = WatchManager()
    mask = pyinotify.IN_CLOSE_WRITE

    class Process_handler(ProcessEvent):
        def __init__(self, main):
            self.main = main

        def process_IN_CLOSE_WRITE(self, event):
            for user in pwd.getpwall():
                if user.pw_name == event.name:
                    f = open(event.path + "/" + event.name)
                    ticket = f.readlines()[0]
                    f.close()
                    self.main.tickets[user.pw_name] = {}
                    self.main.tickets[user.pw_name]["password"] = ticket
                    self.main.tickets[user.pw_name]["date"] = time.time()
                    break

    notifier = Notifier(wm, Process_handler(self))
    wdd = wm.add_watch(TicketsManager.WATCH_DIR, mask, rec=True)
    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except Exception as e:
            print(e)
            notifier.stop()
            return False
def __init__(s, nodename):
    Thread.__init__(s)
    # Init logger
    s.log = logging.getLogger('Connector-{0}'.format(s.name))
    # Init new database session
    s._sess = s.Session()
    # Init firewall tables
    s.f = Firewall(s.log, s._sess)
    # Init DNS daemon
    s.dns = DNSDaemon(s.log, s._sess)
    # Init database connection status table
    s.connections = ConnectionStatus(s.log, s._sess, nodename)
    # Make inotify watcher
    wm = WatchManager()

    ## OpenVPN status update handler
    class PUpdateStatus(ProcessEvent):
        ## Close file event method
        def process_IN_MODIFY(self, event):
            # Update status and firewall rules
            s.updateStatus()

    # Make notificator
    s.notifier = Notifier(wm, PUpdateStatus())
    # Add OpenVPN status file watcher
    wm.watch_transient_file(OPENVPN_STATUS_FILE, IN_MODIFY, PUpdateStatus)
    s._sess.close()
def enabled(self):
    if not self.running:
        wm = WatchManager()
        self.event_handler = LibraryEvent(library=app.library)

        FLAGS = ['IN_DELETE', 'IN_CLOSE_WRITE',  # 'IN_MODIFY',
                 'IN_MOVED_FROM', 'IN_MOVED_TO', 'IN_CREATE']
        masks = [EventsCodes.FLAG_COLLECTIONS['OP_FLAGS'][s] for s in FLAGS]
        mask = reduce(operator.or_, masks, 0)

        if self.USE_THREADS:
            print_d("Using threaded notifier")
            self.notifier = ThreadedNotifier(wm, self.event_handler)
            # Daemonize to ensure thread dies on exit
            self.notifier.daemon = True
            self.notifier.start()
        else:
            self.notifier = Notifier(wm, self.event_handler, timeout=100)
            GLib.timeout_add(1000, self.unthreaded_callback)

        for path in get_scan_dirs():
            real_path = os.path.realpath(path)
            print_d('Watching directory %s for %s (mask: %x)'
                    % (real_path, FLAGS, mask))
            # See https://github.com/seb-m/pyinotify/wiki/
            # Frequently-Asked-Questions
            wm.add_watch(real_path, mask, rec=True, auto_add=True)

        self.running = True
def _reloader_inotify(fnames, interval=None):
    #: Mutated by inotify loop when changes occur.
    changed = [False]

    # Setup inotify watches
    import pyinotify
    from pyinotify import WatchManager, EventsCodes, Notifier
    wm = WatchManager()
    mask = "IN_DELETE_SELF IN_MOVE_SELF IN_MODIFY IN_ATTRIB".split()
    try:
        # pyinotify 0.7
        mask = reduce(lambda m, a: m | getattr(EventsCodes, a), mask, 0)
    except AttributeError:
        # pyinotify 0.8 and up
        mask = reduce(lambda m, a: m | getattr(pyinotify, a), mask, 0)

    def signal_changed(event):
        if changed[0]:
            return
        _log('info', ' * Detected change in %r, reloading' % event.path)
        changed[:] = [True]

    for fname in fnames:
        wm.add_watch(fname, mask, signal_changed)

    # ... And now we wait...
    notif = Notifier(wm)
    try:
        while not changed[0]:
            notif.process_events()
            if notif.check_events(timeout=interval):
                notif.read_events()
            # TODO Set timeout to something small and check parent liveliness
    finally:
        notif.stop()
        sys.exit(3)
print "Recompiling %s" % outName call([ "dot", "-Tpng", os.path.join(event.path, event.name), '-o', os.path.join(event.path, outName) ]) def process_IN_MOVED_TO(self, event): #print "modify {0}, {1}".format(event.path, event.name) if (event.name.endswith('.dot')): outName = event.name.replace('.dot', '.png') print "Recompiling %s" % outName call([ "dot", "-Tpng", os.path.join(event.path, event.name), '-o', os.path.join(event.path, outName) ]) notifier = Notifier(wm, PDir()) wdd = wm.add_watch('.', mask, rec=True) while True: try: notifier.process_events() if notifier.check_events(): notifier.read_events() except KeyboardInterrupt: notifier.stop() break
def main(): logger.setLevel(logging.INFO) logger.addHandler(logging.StreamHandler()) parser = argparse.ArgumentParser( description="Monitor a directory for new netCDF glider data and " "upload the netCDF files to an FTP site." ) parser.add_argument( "--ftp_url", help="Path to the glider data netCDF output directory", default=os.environ.get('NC2FTPURL') ) parser.add_argument( "--ftp_user", help="FTP username, defaults to 'anonymous'", default=os.environ.get('NC2FTPUSER', 'anonymous') ) parser.add_argument( "--ftp_pass", help="FTP password, defaults to an empty string", default=os.environ.get('NC2FTPPASS', '') ) parser.add_argument( "-i", "--input", help="Path to the glider data netCDF output directory", default=os.environ.get('GDAM2NC_OUTPUT') ) parser.add_argument( "--daemonize", help="To daemonize or not to daemonize", type=bool, default=False ) args = parser.parse_args() if not args.input: logger.error("Please provide an --input agrument or set the " "GDAM2NC_OUTPUT environmental variable") sys.exit(parser.print_usage()) if not args.ftp_url: logger.error("Please provide an --ftp_url agrument or set the " "NC2FTPURL environmental variable") sys.exit(parser.print_usage()) wm = WatchManager() mask = IN_MOVED_TO | IN_CLOSE_WRITE wm.add_watch( args.input, mask, rec=True, auto_add=True ) processor = GliderNc2FtpProcessor( ftp_url=args.ftp_url, ftp_user=args.ftp_user, ftp_pass=args.ftp_pass, ) notifier = Notifier(wm, processor) try: logger.info("Watching {}\nUploading to {}".format( args.input, args.ftp_url) ) notifier.loop(daemonize=args.daemonize) except NotifierError: logger.exception('Unable to start notifier loop') return 1 logger.info("NC2FTP Exited Successfully") return 0
def recon_view(pnet, rnet, onet, model, image_path, ip, margin=44, image_size=160):
    print('begin recon_view!!')
    with tf.Graph().as_default():
        config = tf.ConfigProto()
        with tf.Session(config=config) as sess:
            # Load the model
            facenet.load_model(model)
            # Get input and output tensors
            images_placeholder = tf.get_default_graph().get_tensor_by_name("input:0")
            # embeddings: feature vectors for the input images
            embeddings = tf.get_default_graph().get_tensor_by_name("embeddings:0")
            phase_train_placeholder = tf.get_default_graph().get_tensor_by_name("phase_train:0")

            # Load the faces in the library and map them to embeddings
            print('Loading the face library...')
            embding, images_label_list = face_lib.get_face_lib()
            print('Loading complete...')

            path_exp = os.path.expanduser(image_path)
            classes = os.listdir(path_exp)
            if not len(classes) == 0:
                classes.sort()
                nrof_classes = len(classes)
                for i in range(nrof_classes):
                    images_path = os.path.join(path_exp, classes[i])
                    # get_face(sess, images_path, margin, image_size, images_placeholder, embeddings,
                    #          phase_train_placeholder, embding, images_label_list, pnet, rnet, onet)
                    if os.path.isfile(images_path):
                        get_video(sess, images_path, margin, image_size,
                                  images_placeholder, embeddings,
                                  phase_train_placeholder, embding,
                                  images_label_list, pnet, rnet, onet, ip)
                        os.remove(images_path)

            wm = WatchManager()
            mask = IN_DELETE | IN_CREATE | IN_MODIFY | IN_CLOSE_WRITE
            notifier = Notifier(
                wm,
                EventHandler(sess, image_path, margin, image_size,
                             images_placeholder, embeddings,
                             phase_train_placeholder, embding,
                             images_label_list, pnet, rnet, onet, ip))
            wm.add_watch(image_path, mask, auto_add=True, rec=True)
            print('Please input video')
            while True:
                try:
                    notifier.process_events()
                    if notifier.check_events():
                        notifier.read_events()
                except KeyboardInterrupt:
                    notifier.stop()
                    break
def __init__(self, freq=1, timeout=1, channel=channel):
    super(INotifyDriver, self).__init__(channel=channel)
    self._freq = freq
    self._wm = WatchManager()
    self._notifier = Notifier(self._wm, self._process, timeout=timeout)
        if myfile in join(event.path, event.name):
            target.close()
            target = open(myfile, 'r')
            log.debug("Log file created... Catching up!")
            for line in target.readlines():
                luser = is_bad(line.rstrip())
                if (luser):
                    blocker(luser["ip"])
                    now = pendulum.now().to_atom_string()
                    log.info("Blocked {0} at {1}".format(luser["ip"], now))
                    add_block(luser["ip"], now)
            target.seek(0, 2)
            return


notifier = Notifier(WM, EventHandler())
index = myfile.rfind("/")
WM.add_watch(myfile[:index], dirmask)

last = pendulum.parse(last_run)
while True:
    try:
        now = pendulum.now()
        if now.diff(last).in_hours() > 1:
            last = now
            checkup()
        notifier.process_events()
        if notifier.check_events():
            notifier.read_events()
    except KeyboardInterrupt:
        break
def __init__(self, path):
    self.wm = WatchManager()
    mask = IN_DELETE | IN_CLOSE_WRITE
    self.notifier = Notifier(self.wm, MyProcessEvent())
    self.wm.add_watch(path, mask, rec=True)
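# The constructor above only registers the watch; nothing is dispatched until the
# Notifier is driven. A minimal, self-contained sketch of the same pattern with a
# run() method using the manual process/check/read cycle seen in the other examples.
# The class and handler names below are illustrative, not from the original snippet.
from pyinotify import WatchManager, Notifier, ProcessEvent, IN_DELETE, IN_CLOSE_WRITE


class LoggingEvent(ProcessEvent):
    # React to files being closed after writing, and to deletions.
    def process_IN_CLOSE_WRITE(self, event):
        print('written: %s' % event.pathname)

    def process_IN_DELETE(self, event):
        print('deleted: %s' % event.pathname)


class DirWatcher(object):
    def __init__(self, path):
        self.wm = WatchManager()
        mask = IN_DELETE | IN_CLOSE_WRITE
        self.notifier = Notifier(self.wm, LoggingEvent())
        self.wm.add_watch(path, mask, rec=True)

    def run(self):
        # Block, dispatch events, and repeat until interrupted.
        while True:
            try:
                self.notifier.process_events()
                if self.notifier.check_events():
                    self.notifier.read_events()
            except KeyboardInterrupt:
                self.notifier.stop()
                break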
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    package_path = f"{package_dir}/{package_file_name}"
    untar(package_path, model_dir)
    submit(package_name, model_dir)


class EventHandler(ProcessEvent):
    def process_IN_CREATE(self, event):
        print(f"Create file: {event.path} - {event.name}")
        update_package(event.path, event.name)

    def process_IN_DELETE(self, event):
        print(f"Delete file: {event.path} - {event.name}")

    def process_IN_MODIFY(self, event):
        print(f"Modify file: {event.path} - {event.name}")
        update_package(event.path, event.name)

    def process_IN_MOVED_TO(self, event):
        print(f"Move to file: {event.path} - {event.name}")
        update_package(event.path, event.name)

    def process_IN_MOVED_FROM(self, event):
        print(f"Move from file: {event.path} - {event.name}")


print("start loop...")
notifier = Notifier(wm, EventHandler())
notifier.loop()
import os

from pyinotify import WatchManager, Notifier, ProcessEvent, IN_DELETE, IN_CREATE, IN_MODIFY

wm = WatchManager()
mask = IN_DELETE | IN_CREATE | IN_MODIFY  # watched events


class PFilePath(ProcessEvent):
    def process_IN_CREATE(self, event):
        print "Create file: %s " % os.path.join(event.path, event.name)

    def process_IN_DELETE(self, event):
        print "Delete file: %s " % os.path.join(event.path, event.name)

    def process_IN_MODIFY(self, event):
        print "Modify file: %s " % os.path.join(event.path, event.name)


if __name__ == "__main__":
    notifier = Notifier(wm, PFilePath())
    wdd = wm.add_watch('.', mask, rec=True)
    while True:
        try:
            notifier.process_events()
            if notifier.check_events():
                notifier.read_events()
        except KeyboardInterrupt:
            notifier.stop()
            break
default=[], help="Identifier for monitored files") #parser.add_argument("-c", "--configuration_file", dest="configuration_file", # default='noaa15_products.xml', type=str, # help="Name of the xml configuration file") if len(sys.argv) <= 1: parser.print_help() sys.exit() else: args = parser.parse_args() #Event handler observes the operations in defined folder wm = WatchManager() mask = pyinotify.IN_CREATE # monitored events #message_dict = {'publish_port':args.publish_port, 'filetypes':args.filetypes, 'subject':'/Joonas/'} event_handler = EventHandler(args.filetypes, publish_port=args.publish_port) notifier = Notifier(wm, event_handler) for monitored_dir in args.monitored_dirs: wdd = wm.add_watch(monitored_dir, mask, rec=True) # notifier.loop(daemonize=True,\ # pid_file='/tmp/pyinotify.pid',\ # stdout='/tmp/stdout.txt') notifier.loop()
            os.unlink(arg)
        except Exception, e:
            print e
        print "Done: %s" % arg


task_queue = Queue()
done_queue = Queue()

for i in range(PROCS):
    Process(target=worker, args=(task_queue, done_queue)).start()


class CloseEvent(ProcessEvent):
    def process_IN_CLOSE_WRITE(self, event):
        task_queue.put("%s" % os.path.join(event.path, event.name))
        print "Received: %s" % os.path.join(event.path, event.name)


wm = WatchManager()
notifier = Notifier(wm, CloseEvent())
wdd = wm.add_watch('/data2', pyinotify.IN_CLOSE_WRITE, rec=True)

while True:  # loop forever
    try:
        notifier.process_events()
        if notifier.check_events():
            notifier.read_events()
    except KeyboardInterrupt:
        notifier.stop()
        break
    content = file.read()
    os.system("rm " + tmp)  # Remove temporary file.

    # Get the image link.
    start = content.find("<image_link>")
    end = content.find("</image_link>")
    link = content[start + 12:end]

    os.system('zenity --info --text "' + link + '"')


class WatcherEvent(ProcessEvent):
    def process_IN_CREATE(self, event):
        uploadImage(os.path.join(event.path, event.name))


wm = WatchManager()
notifier = Notifier(wm, WatcherEvent())
wdd = wm.add_watch(imageDir, pyinotify.IN_CREATE, auto_add=True)

while True:
    try:
        notifier.process_events()
        if notifier.check_events():
            notifier.read_events()
    except KeyboardInterrupt:
        notifier.stop()
        break
if not options.background:
    if options.verbose:
        print "[d] run once"
    set_status(account, grovstatus_file, options)
    sys.exit(0)

if options.verbose:
    print "[d] run monitoring file %s" % (grovstatus_file,)


class PClose(ProcessEvent):
    def process_IN_CLOSE(self, event):
        # wait for write to file
        time.sleep(5)
        set_status(account, grovstatus_file, options)


wm = WatchManager()
notifier = Notifier(wm, PClose())
wm.add_watch(grovstatus_file, EventsCodes.ALL_FLAGS.get('IN_CLOSE_WRITE'))
# wm.add_watch(grovstatus_file, EventsCodes.ALL_FLAGS.get('IN_CLOSE_WRITE') | EventsCodes.ALL_FLAGS.get('IN_CLOSE_NOWRITE'))

set_status(account, grovstatus_file, options)

try:
    while 1:
        notifier.process_events()
        if notifier.check_events():
            notifier.read_events()
except KeyboardInterrupt:
    notifier.stop()
def watch(directory):
    wm = WatchManager()
    notifier = Notifier(wm, EventHandler())
    wm.add_watch(directory, IN_CREATE)
    notifier.loop()
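# EventHandler is not defined in the snippet above; it is expected to be a
# pyinotify.ProcessEvent subclass. A minimal sketch under that assumption
# (the handler body and the example path are illustrative, not from the original):
from pyinotify import ProcessEvent


class EventHandler(ProcessEvent):
    def process_IN_CREATE(self, event):
        # event.pathname is the full path of the newly created file or directory
        print('created: %s' % event.pathname)


# Usage: watch('/tmp')  # blocks inside notifier.loop() until interrupted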
def __init__(self, channel=channel):
    super(Notify, self).__init__(channel=channel)
    self._poller = None
    self._wm = WatchManager()
    self._notifier = Notifier(self._wm, self._on_process_events)
def __init__(self, path):
    threading.Thread.__init__(self)
    self.path = path
    self.thread_stop = False
    self.wm = WatchManager()
    self.notifier = Notifier(self.wm, EventHandler())
def __init__(self, local_rootpath):
    self.wm = WatchManager()
    mask = IN_CREATE
    self.notifier = Notifier(self.wm, proc_evt())
    self.wm.add_watch(local_rootpath, mask, rec=True)
def main():
    logger.setLevel(logging.INFO)
    logger.addHandler(logging.StreamHandler())
    logging.getLogger('py.warnings').setLevel(logging.ERROR)

    parser = argparse.ArgumentParser(
        description="Monitor a directory for new glider data. "
                    "Announce changes via ZMQ."
    )
    parser.add_argument(
        "-d", "--data_path",
        help="Path to Glider data directory",
        default=os.environ.get('GDB_DATA_DIR')
    )
    parser.add_argument(
        "--zmq_url",
        help='Port to publish ZMQ messages on. '
             'Default is "tcp://127.0.0.1:44444".',
        default=os.environ.get('ZMQ_URL', 'tcp://127.0.0.1:44444')
    )
    parser.add_argument(
        "--daemonize",
        help="To daemonize or not to daemonize",
        type=bool,
        default=False
    )
    args = parser.parse_args()

    if not args.data_path:
        logger.error("Please provide a --data_path argument or set the "
                     "GDB_DATA_DIR environment variable")
        sys.exit(parser.print_usage())

    monitor_path = args.data_path
    if monitor_path[-1] == '/':
        monitor_path = monitor_path[:-1]

    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE
    wm.add_watch(
        args.data_path,
        mask,
        rec=True,
        auto_add=True
    )

    processor = GliderFileProcessor(zmq_url=args.zmq_url)
    notifier = Notifier(wm, processor)

    try:
        logger.info("Watching {}\nPublishing to {}".format(
            args.data_path, args.zmq_url))
        notifier.loop(daemonize=args.daemonize)
    except NotifierError:
        logger.exception('Unable to start notifier loop')
        return 1

    logger.info("GSPS Exited Successfully")
    return 0
        raise Reload()

    def process_IN_CLOSE_WRITE(self, event):
        target = os.path.join(event.path, event.name)
        if self.regex.match(target):
            args = self.script.replace('$f', target).split()
            os.system("clear")
            sys.stdout.write("executing script: " + " ".join(args) + "\n")
            subprocess.call(args)
            sys.stdout.write("------------------------\n")


while True:
    wm = WatchManager()
    process = Process(options)
    notifier = Notifier(wm, process)
    mask = IN_DELETE | IN_CREATE | IN_CLOSE_WRITE
    wdd = wm.add_watch(options.directory, mask, rec=True)
    try:
        while True:
            print '.'
            notifier.process_events()
            print '+'
            if notifier.check_events():
                notifier.read_events()
            print '-'
    except Reload:
        pass
    except KeyboardInterrupt:
        notifier.stop()
        break
def monitor_models(self):
    try:
        now = datetime.datetime.utcnow() - datetime.timedelta(hours=16)
        logger.info("refresh monitor info from database: {}".format(now.strftime("%Y-%m-%d %H:%M:%S")))
        self.__cleanMonitorData()
        models = Model.objects.all()
    except Exception as e:
        info = "get models failed: {}".format(str(e))
        logger.error(info)
        sys.exit(1)

    try:
        for model in models:
            text = model.text
            obj = json.loads(text)
            if not "monitor" in obj:
                continue
            user_id = model.user.uuid
            user_root = os.path.join(setting.UPLOADS_ROOT, str(user_id))
            monitor = obj["monitor"]
            status = monitor["status"]
            if status == "on":
                data = monitor["data"]
                for d in data:
                    d_path = d["path"]
                    d["relative_path"] = d_path
                    path = os.path.join(user_root, d_path[1:])
                    d["path"] = path
                    self.__add_monitor_folder_path(path, str(model.uuid))
                self.__monitorData.append({
                    "id": str(model.uuid),
                    "data": data
                })
    except Exception as e:
        logger.error("get monitor info failed: {}".format(str(e)))
        return

    try:
        mask = IN_CLOSE_WRITE
        paths = []
        if not self.__wm:
            wm = WatchManager()
            self.__wm = wm
            self.__notifier = Notifier(self.__wm, EventHandler())
            for monitorPath in self.__monitorPaths:
                path = monitorPath["path"]
                logger.info('now starting monitor %s' % (path))
                paths.append(path)
            self.__watchPaths = self.__wm.add_watch(paths, mask, rec=False)
            connection.close()
            self.__timer = threading.Timer(self.__interval, self.monitor_models)
            self.__timer.start()
            while True:
                try:
                    self.__notifier.process_events()
                    if self.__notifier.check_events():
                        self.__notifier.read_events()
                except KeyboardInterrupt:
                    self.__notifier.stop()
                    break
        else:
            # On subsequent runs, remove the previously watched folders and
            # add the currently monitored folders.
            rm_paths = []
            for value in self.__watchPaths.values():
                rm_paths.append(value)
            self.__wm.rm_watch(rm_paths)
            for monitorPath in self.__monitorPaths:
                path = monitorPath["path"]
                logger.info('now starting monitor %s' % (path))
                paths.append(path)
            self.__watchPaths = self.__wm.add_watch(paths, mask, rec=False)
            connection.close()
            self.__timer = threading.Timer(self.__interval, self.monitor_models)
            self.__timer.start()
    except Exception as e:
        logger.error("run monitor failed: {}".format(str(e)))