def run(self):
    """Watch ``self.mondir`` recursively for newly created files and remove
    each one from ``self.jobq`` as it appears; return once the queue drains.
    """
    w = watcher.AutoWatcher()
    try:
        # Watch the monitored directory recursively, but only for
        # IN_CREATE (file-creation) events.
        w.add_all(self.mondir, inotify.IN_CREATE)
    except OSError as err:
        # Best-effort: report and continue with whatever watches succeeded.
        # NOTE(review): if *no* watch was added, the poll below can block
        # forever with timeout=None — confirm whether we should bail out.
        print('%s: %s' % (err.filename, err.strerror), file=sys.stderr)
    poll = select.poll()
    poll.register(w, select.POLLIN)
    timeout = None
    # Only drain the watcher once at least 512 bytes of events are queued,
    # amortizing read() syscalls over bursts of events.
    threshold = watcher.Threshold(w, 512)
    while self.jobq.count > 0:
        events = poll.poll(timeout)
        nread = 0
        if threshold() or not events:
            for evt in w.read(0):  # non-blocking drain of queued events
                nread += 1
                print(repr(evt.fullpath), ' | '.join(inotify.decode_mask(evt.mask)))
                # Strip the monitored-dir prefix to get the job-relative name.
                mfilemeta = evt.fullpath.split(self.mondir)
                if len(mfilemeta) <= 1:
                    continue
                modifiedfile = mfilemeta[1]
                print(modifiedfile)
                try:
                    self.jobq.remove(modifiedfile)
                except ValueError:
                    # Created file was not a pending job; ignore it.
                    # (was `except ValueError as err` with err unused)
                    pass
        if nread:
            # Events are flowing: block indefinitely and stay registered.
            timeout = None
            poll.register(w, select.POLLIN)
        else:
            # Quiet period: unplug from poll and retry shortly so the
            # threshold can accumulate (poll timeout is in milliseconds).
            timeout = 10
            poll.unregister(w)
def watch_for_new_images(input_dir):
    """Watch *input_dir* recursively and enqueue image files that are moved
    into it onto the global ``new_pics`` queue.  Runs forever.
    """
    w = watcher.AutoWatcher()
    try:
        # Watch all paths recursively, and all events on them.
        w.add_all(input_dir, inotify.IN_ALL_EVENTS)
    except OSError as err:
        print('%s: %s' % (err.filename, err.strerror), file=sys.stderr)
    poll = select.poll()
    poll.register(w, select.POLLIN)
    timeout = None
    # Drain only once >= 512 bytes of events have accumulated.
    threshold = watcher.Threshold(w, 512)
    while True:
        events = poll.poll(timeout)
        nread = 0
        if threshold() or not events:
            print('reading,', threshold.readable(), 'bytes available')
            for evt in w.read(0):
                nread += 1
                # Renamed from `events`, which shadowed the poll() result.
                mask_names = inotify.decode_mask(evt.mask)
                if 'IN_MOVED_TO' in mask_names:
                    filename = evt.fullpath
                    # BUG FIX: match real extensions ('.jpg'), not bare
                    # suffixes ('jpg') which also matched names like 'myjpg'.
                    if filename.endswith(('.jpg', '.png', '.gif')):
                        print("adding %s to the queue" % filename)
                        new_pics.put(filename)
        if nread:
            timeout = None
            poll.register(w, select.POLLIN)
        else:
            timeout = 1000
            poll.unregister(w)
# Demo script: print every inotify event under the given paths.
w = watcher.AutoWatcher()

# Default to /tmp when no paths were given on the command line.
paths = sys.argv[1:]
if not paths:
    paths = ['/tmp']

for path in paths:
    try:
        # Recursively watch each path for every event type.
        w.add_all(path, inotify.IN_ALL_EVENTS)
    except OSError as err:
        print('%s: %s' % (err.filename, err.strerror), file=sys.stderr)

# Bail out early when nothing could be watched; otherwise the blocking
# read loop below would sit there forever.
if w.num_watches() == 0:
    print("no files to watch", file=sys.stderr)
    sys.exit(1)

try:
    while w.num_watches():
        # Watcher.read blocks until events arrive and returns a batch of
        # event objects; decode_mask turns each bitmask into flag names,
        # which is handy for debugging.
        for evt in w.read():
            print(repr(evt.fullpath), ' | '.join(inotify.decode_mask(evt.mask)))
except KeyboardInterrupt:
    print('interrupted!', file=sys.stderr)
import inotify
import sys

# BUG FIX: this script referenced `watcher` without importing it, so the
# first line below raised NameError.
# NOTE(review): adjust if the project exposes the watcher module under a
# different name/layout.
from inotify import watcher

w = watcher.AutoWatcher()
paths = sys.argv[1:] or ['/tmp']
for path in paths:
    try:
        # Watch all paths recursively, and all events on them.
        w.add_all(path, inotify.IN_ALL_EVENTS)
    except OSError as err:
        print('%s: %s' % (err.filename, err.strerror), file=sys.stderr)

# If we have nothing to watch, don't go into the read loop, or we'll
# sit there forever.
if not len(w):
    # Consistency with the sibling example: say why we're exiting.
    print("no files to watch", file=sys.stderr)
    sys.exit(1)

try:
    while True:
        # The Watcher.read method returns a list of event objects.
        for evt in w.read():
            # The inotify.decode_mask function returns a list of the
            # names of the bits set in an event's mask. This is very
            # handy for debugging.
            print(repr(evt.fullpath), ' | '.join(inotify.decode_mask(evt.mask)))
except KeyboardInterrupt:
    print('interrupted!', file=sys.stderr)
import inotify
import sys

# BUG FIX: `watcher` was referenced without being imported (NameError).
from inotify import watcher

w = watcher.AutoWatcher()
paths = sys.argv[1:] or ['/tmp']
for path in paths:
    try:
        # Watch all paths recursively, and all events on them.
        w.add_all(path, inotify.IN_ALL_EVENTS)
    except OSError as err:
        # BUG FIX: was Python 2 syntax (`except OSError, err` and
        # `print >> sys.stderr`), which does not parse under the Python 3
        # used by the rest of this file.
        print('%s: %s' % (err.filename, err.strerror), file=sys.stderr)

# If we have nothing to watch, don't go into the read loop, or we'll
# sit there forever.
if not len(w):
    sys.exit(1)

try:
    while True:
        # The Watcher.read method returns a list of event objects.
        for evt in w.read():
            # The inotify.decode_mask function returns a list of the
            # names of the bits set in an event's mask. This is very
            # handy for debugging.
            print(repr(evt.fullpath), ' | '.join(inotify.decode_mask(evt.mask)))
except KeyboardInterrupt:
    print('interrupted!', file=sys.stderr)
def run(self):
    """Main monitoring loop.

    On Linux: tails every file under the configured 'Analyzer' paths via
    inotify, feeds complete lines to parseLine(), and periodically ships
    batched JSON docs to ElasticSearch via NodeThread.  On FreeBSD: the
    same batching loop runs on top of a watchdog Observer + BSDHandler
    instead of inotify.
    """
    global timeout, w, tuples, regexes, json_pending, last_push, config
    # fp: log types we have pushed at least once ("first push" flag).
    fp = {}
    if osname == "linux":
        w = watcher.AutoWatcher()
        # Recursively watch every configured path for all inotify events.
        for path in config.get('Analyzer','paths').split(","):
            try:
                print("Recursively monitoring " + path.strip() + "...")
                w.add_all(path.strip(), inotify.IN_ALL_EVENTS)
            except OSError as err:
                # Best-effort: silently skip paths that cannot be watched.
                pass
        if not w.num_watches():
            print("No paths to analyze, nothing to do!")
            sys.exit(1)
        poll = select.poll()
        poll.register(w, select.POLLIN)
        timeout = None
        # Drain the watcher only once >= 256 bytes of events are queued.
        threshold = watcher.Threshold(w, 256)
        # inodes: inode -> path; inodes_path: path -> inode.  Together they
        # let us tell a genuinely new file from a rename of one we watch.
        inodes = {}
        inodes_path = {}
        xes = connect_es(config)
        while True:
            events = poll.poll(timeout)
            nread = 0
            if threshold() or not events:
                #print('reading,', threshold.readable(), 'bytes available')
                for evt in w.read(0):
                    nread += 1
                    # The last thing to do to improve efficiency here would be
                    # to coalesce similar events before passing them up to a
                    # higher level.
                    # For example, it's overwhelmingly common to have a stream
                    # of inotify events contain a creation, followed by
                    # multiple modifications of the created file.
                    # Recognising this pattern (and others) and coalescing
                    # these events into a single creation event would reduce
                    # the number of trips into our app's presumably more
                    # computationally expensive upper layers.
                    masks = inotify.decode_mask(evt.mask)
                    #print(masks)
                    path = evt.fullpath
                    #print(repr(evt.fullpath), ' | '.join(masks))
                    try:
                        # Only plain files matter; directory events are skipped.
                        if not u'IN_ISDIR' in masks:
                            if (u'IN_MOVED_FROM' in masks) and (path in filehandles):
                                # File we were tailing was moved away: close
                                # the handle and forget its inode bookkeeping.
                                print("File moved, closing original handle")
                                try:
                                    filehandles[path].close()
                                except Exception as err:
                                    print(err)
                                del filehandles[path]
                                inode = inodes_path[path]
                                del inodes[inode]
                            elif (not u'IN_DELETE' in masks) and (not path in filehandles) and (path.find(".gz") == -1):
                                # Unknown, non-deleted, non-gzip file: start
                                # tailing it from the end (seek(0,2) = EOF).
                                try:
                                    print("Opening " + path)
                                    idata = os.stat(path)
                                    inode = idata.st_ino
                                    if not inode in inodes:
                                        filehandles[path] = open(path, "r")
                                        print("Started watching " + path)
                                        filehandles[path].seek(0,2)
                                        inodes[inode] = path
                                        inodes_path[path] = inode
                                except Exception as err:
                                    # Opening failed: undo any partial state.
                                    print(err)
                                    try:
                                        filehandles[path].close()
                                    except Exception as err:
                                        print(err)
                                    del filehandles[path]
                                    inode = inodes_path[path]
                                    del inodes[inode]
                            # First time we've discovered this file?
                            if u'IN_CLOSE_NOWRITE' in masks and not path in filehandles:
                                pass
                            # New file created in a folder we're watching??
                            elif u'IN_CREATE' in masks:
                                pass
                            # File truncated?
                            elif u'IN_CLOSE_WRITE' in masks and path in filehandles:
                                # print(path + " truncated!")
                                filehandles[path].seek(0,2)
                            # File contents modified?
                            elif u'IN_MODIFY' in masks and path in filehandles:
                                # print(path + " was modified")
                                rd = 0
                                data = ""
                                #print("Change in " + path)
                                try:
                                    # Read all newly appended complete lines.
                                    while True:
                                        line = filehandles[path].readline()
                                        if not line:
                                            #filehandles[path].seek(0,2)
                                            break
                                        else:
                                            rd += len(line)
                                            data += line
                                    #print("Read %u bytes from %s" % (rd, path))
                                    parseLine(path, data)
                                except Exception as err:
                                    try:
                                        # NOTE(review): str + Exception raises
                                        # TypeError here, so this message is never
                                        # printed and the handle is never closed
                                        # by this line — should be str(err).
                                        print("Could not utilize " + path + ", closing.." + err)
                                        filehandles[path].close()
                                    except Exception as err:
                                        print(err)
                                    del filehandles[path]
                                    inode = inodes_path[path]
                                    del inodes[inode]
                            # File deleted? (close handle)
                            elif u'IN_DELETE' in masks:
                                if path in filehandles:
                                    print("Closed " + path)
                                    try:
                                        filehandles[path].close()
                                    except Exception as err:
                                        print(err)
                                    del filehandles[path]
                                    inode = inodes_path[path]
                                    del inodes[inode]
                                    print("Stopped watching " + path)
                            else:
                                pass
                    except Exception as err:
                        # Catch-all so one bad event cannot kill the loop.
                        print(err)
            # Ship any batch that is older than 15s or has >= 500 docs.
            for x in json_pending:
                # NOTE(review): unlike the FreeBSD branch below, x is not
                # guaranteed to be in last_push here — possible KeyError.
                if (time.time() > (last_push[x] + 15)) or len(json_pending[x]) >= 500:
                    if not x in fp:
                        fp[x] = True
                        print("First push for " + x + "!")
                    t = NodeThread()
                    t.assign(json_pending[x], x, xes)
                    t.start()
                    json_pending[x] = []
                    last_push[x] = time.time()
            if nread:
                #print('plugging back in')
                timeout = None
                poll.register(w, select.POLLIN)
            else:
                #print('unplugging,', threshold.readable(), 'bytes available')
                timeout = 1000
                poll.unregister(w)
    if osname == "freebsd":
        # FreeBSD has no inotify: use watchdog's Observer with BSDHandler
        # doing the per-event work; this loop only handles batch pushes.
        xes = connect_es(config)
        observer = Observer()
        for path in paths:
            observer.schedule(BSDHandler(), path, recursive=True)
            syslog.syslog(syslog.LOG_INFO, "Recursively monitoring " + path.strip() + "...")
        observer.start()
        try:
            while True:
                for x in json_pending:
                    if not x in last_push:
                        last_push[x] = time.time()
                    if len(json_pending[x]) > 0 and ((time.time() > (last_push[x] + 15)) or len(json_pending[x]) >= 500):
                        if not x in fp:
                            fp[x] = True
                            syslog.syslog(syslog.LOG_INFO, "First push for " + x + "!")
                        t = NodeThread()
                        t.assign(json_pending[x], x, xes)
                        t.start()
                        json_pending[x] = []
                        last_push[x] = time.time()
                time.sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
            observer.join()