def main():
    """Watch a single directory for filesystem events.

    The directory is taken from ``sys.argv[1]`` and defaults to the current
    directory.  A ``handler`` instance is attached to a non-recursive
    watchdog Observer; the observer thread is non-daemon, so it keeps the
    process alive after ``main`` returns.
    """
    # Fall back to '.' when no path argument was supplied.
    if len(sys.argv) > 1:
        watch_path = sys.argv[1]
    else:
        watch_path = '.'
    events = handler()
    watcher = Observer()
    watcher.schedule(events, watch_path, recursive=False)
    watcher.daemon = False
    watcher.start()
def startWatchdog():
    """Create and start a daemonized watchdog observer from server config.

    Reads the ``"library-watchdog"`` section of ``Config``, wires the
    module-level ``on_created``/``on_deleted``/``on_modified``/``on_moved``
    callbacks into a pattern-matching handler, and returns the started
    ``Observer`` so the caller can stop/join it later.

    Raises:
        RuntimeError: if the server configuration has not been set up yet.
    """
    if not Config.isSetted():
        raise RuntimeError("Server not configured")
    config = Config.config("library-watchdog")
    print("Starting watchdog with config: " + str(config))

    events = PatternMatchingEventHandler(
        patterns=config["patterns"],
        ignore_patterns=config["ignore-patterns"],
        ignore_directories=config["ignore-directories"],
        case_sensitive=config["case-sensitive"])
    # Route every event type to the module-level callback functions.
    events.on_created = on_created
    events.on_deleted = on_deleted
    events.on_modified = on_modified
    events.on_moved = on_moved

    observer = Observer()
    observer.schedule(event_handler=events,
                      recursive=config["recursively"],
                      path=config["path"])
    observer.daemon = True  # must not keep the process alive at shutdown
    observer.start()
    print("Watchdog started...")
    return observer
def run(scope):
    """Run the awdpwn framework """
    flag_queue = Queue()
    shells = Shells()

    # Worker threads share the flag queue; all are daemonized so the process
    # can exit from confirm_exit() without waiting on them.
    pwner = Pwner(flag_queue, shells, scope)
    submitter = Submitter(flag_queue)
    observer = Observer()
    smserver = ShellManagerServer(flag_queue, shells)
    for worker in (pwner, submitter, observer, smserver):
        worker.daemon = True

    observer.schedule(WatchHandler(pwner), ".", recursive=True)

    observer.start()
    pwner.start()
    smserver.start()
    submitter.start()

    # Park the main thread; Ctrl-C asks for confirmation before exiting.
    while True:
        try:
            sys.stdin.read()
        except KeyboardInterrupt:
            confirm_exit()
def watch_filesystem(ip_queue=None, watch_dir='', notify=False,
                     recursive=False, syncer=None, accountant=None,
                     daemon=False):
    """Start off a watchdog observer thread to watch filesystem event
    changes and take actions accordingly.  Blocks in a sleep loop until
    KeyboardInterrupt, then stops the observer.

    watch_dir:  Directory to watch
    notify:     Whether or not any action should be taken if an event occurs.
    """
    changes = FSChangesHandler(ip_queue=ip_queue, notify=notify,
                               syncer=syncer, accountant=accountant)
    watcher = Observer()
    watcher.schedule(changes, path=watch_dir, recursive=recursive)
    # If run from simplesync, this will be inside the observer_process process
    watcher.daemon = daemon
    watcher.start()
    print("\n>> Started observer\n>> Watching dir: {}".format(watch_dir))
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
def _watch_config_file(self):
    """Watch the directory containing the config file for changes.

    Events are dispatched to ``self`` (this object doubles as the watchdog
    event handler).  The observer is non-daemon and is started immediately.
    """
    config_dir = os.path.dirname(self._config_path)
    watcher = Observer()
    watcher.schedule(self, config_dir, recursive=False)
    watcher.daemon = False
    watcher.start()
def start_watcher(self):
    """Start a daemonized observer over ``self.src_path`` (recursive),
    compiling files via ``FileCompilerEventHandler`` whenever they change."""
    compile_events = FileCompilerEventHandler(self.watchdog_event_handler)
    watcher = Observer()
    watcher.daemon = True
    watcher.schedule(compile_events, self.src_path, recursive=True)
    watcher.start()
    logger.info('File watcher started for {} files at {}'.format(
        self.ext_to_compile, self.src_path))
def setup_observer(builder_instance, port):
    """Setup and start the watchdog observer for the --watch command."""
    update_handler = UpdateHandler(builder_instance)
    observer = Observer()
    # Rebuild whenever templates, static assets or media files change.
    watch_roots = (builder_instance.searchpath,
                   settings.ASSETS,
                   settings.MEDIA_URL.strip('/'))
    for root in watch_roots:
        observer.schedule(update_handler, path=root, recursive=True)
    observer.daemon = True
    observer.start()
    print(
        "Updating website when templates, CSS, or JS are modified. Press Ctrl-C to end."
    )
    server = setup_httpd(port, builder_instance.renderpath)
    # Block until Ctrl-C, then tear down the HTTP server and the observer.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print("Shutting down watcher and server...")
        server.terminate()
        server.join()
        observer.stop()
        observer.join()
def make_observer(file_name, handler):
    """Return a daemonized (not yet started) observer that watches the
    directory containing *file_name*, non-recursively, with *handler*."""
    folder, _filename = splitpath(abspath(file_name))
    watcher = Observer()
    watcher.schedule(handler, folder, False)
    watcher.daemon = True
    return watcher
def run(self):
    """Watch ``self.DIRECTORY_TO_WATCH`` with a non-daemon observer.

    Starts the observer thread, then waits up to two seconds for it.
    Ctrl-C during the wait logs the shutdown and stops the observer.
    """
    observer = Observer()
    observer.schedule(event_handler=Handler('*'), path=self.DIRECTORY_TO_WATCH)
    observer.daemon = False
    observer.start()
    try:
        # BUG FIX: Observer.start() only spawns the thread and returns
        # immediately, so the original try/except around start() could never
        # see a KeyboardInterrupt.  The blocking call is join(), so the
        # interrupt must be caught here — and the observer stopped cleanly.
        observer.join(2)
    except KeyboardInterrupt:
        logger.error('Watcher Stopped.')
        observer.stop()
def setup_observer(self):
    """Start a single daemonized observer over every watched app's
    ``static-src/stylesheets`` directory that actually exists on disk,
    dispatching events to ``self``."""
    watcher = Observer()
    for app_config in self.watched_apps:
        stylesheet_dir = os.path.join(app_config.path, 'static-src',
                                      'stylesheets')
        # Apps without a stylesheet source directory are skipped.
        if os.path.isdir(stylesheet_dir):
            watcher.schedule(self, stylesheet_dir, recursive=True)
    watcher.daemon = True
    watcher.start()
def __init__(self, root, event_queue, ignored_dir_regexes=()):
    """Prepare (but do not start) a file finder backed by a watchdog observer.

    root:                directory tree to watch (stored realpath'd)
    event_queue:         queue that receives file events from the handler
    ignored_dir_regexes: regexes naming directories to skip
    """
    self._dir_queue = queue.Queue()
    self._root = os.path.realpath(root)
    # Add a watch to the root of the dir
    self._handler = FileFinderEventHandler(event_queue=event_queue,
                                           directory_queue=self._dir_queue,
                                           root=self._root)
    self.notifier = Observer()
    self.notifier.name = "[inotify] notifier"
    self.notifier.daemon = True
    self._ignored_dir_res = ignored_dir_regexes
def test_watchdog():
    """Watch every file under the current directory and block until the
    program is stopped with Ctrl-C."""
    watcher = Observer()  # create an Observer instance
    # Watch all files under the current directory.
    watcher.schedule(event_handler=MyHandler("*"), path=".")
    watcher.daemon = False
    watcher.start()
    try:
        # watchdog runs in a separate thread; force the program to block here
        watcher.join()
    except KeyboardInterrupt:
        logger.info("Program stopped by ctrl + c")
        watcher.stop()
        watcher.join()
def observe(ee):
    """Start a daemonized observer over ``../downloads`` and return the pair
    ``(observer, event_handler)`` so the caller can stop it later."""
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    # Resolve ../downloads relative to the current working directory.
    rel = os.path.relpath("../downloads", start=os.curdir)
    path = os.path.realpath(rel)
    print(path)
    event_handler = CmdFileSystemEventHandler(ee)
    watcher = Observer()
    watcher.schedule(event_handler, path, recursive=False)
    watcher.daemon = True
    watcher.start()
    return watcher, event_handler
def generate_on_change(args):
    # Rebuild output whenever anything under args.source or args.templates
    # changes; blocks until KeyboardInterrupt.
    # NOTE(review): uses the Python 2 print statement — this snippet is
    # Python 2 only.
    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            # Log the triggering event, then rebuild everything.
            print '**', event.src_path, event.event_type
            generate_once(args)

    # Initial build before the watchers start.
    generate_once(args)
    observer = Observer()
    observer.daemon = True
    observer.schedule(Handler(), args.source, recursive=True)
    observer.schedule(Handler(), args.templates, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def main():
    """Configure testtube and begins watching for file changes."""
    # Configure the app based on passed arguments
    Settings.configure(*get_arguments())
    renderer = Renderer()

    watcher = Observer()
    watcher.daemon = True
    watcher.schedule(PyChangeHandler(), Settings.SRC_DIR, recursive=True)
    watcher.start()
    # Give the observer thread some time to start up.
    watcher.join(1)

    renderer.notice('testtube is now watching %s for changes...\n'
                    % Settings.SRC_DIR)
    # Block until Ctrl-C; joining with a timeout keeps signals responsive.
    try:
        while True:
            watcher.join(1)
    except KeyboardInterrupt:
        pass
def main():
    """Configure testtube and begins watching for file changes."""
    # Configure the app based on passed arguments
    Settings.configure(*get_arguments())
    renderer = Renderer()

    obs = Observer()
    obs.daemon = True
    obs.schedule(PyChangeHandler(), Settings.SRC_DIR, recursive=True)
    obs.start()
    obs.join(1)  # Give the observer thread some time to start up.

    renderer.notice(
        'testtube is now watching %s for changes...\n' % Settings.SRC_DIR)
    # Park the main thread until Ctrl-C arrives.
    try:
        while True:
            obs.join(1)
    except KeyboardInterrupt:
        pass
def __init__(self):
    """Initialise the log monitor; when a log directory exists, start a
    daemonized observer over it and pick up the newest pre-existing
    netLog file."""
    # futureproofing - not needed for current version of watchdog
    FileSystemEventHandler.__init__(self)
    self.root = None
    self.logdir = self._logdir()
    self.logfile = None
    self.logging_enabled = self._logging_enabled
    self._restart_required = False
    self.thread = None
    self.last_event = None  # for communicating the Jump event

    if not self.logdir:
        return

    # Set up a watchdog observer.  This is low overhead so it is left
    # running irrespective of whether monitoring is desired.
    watcher = Observer()
    watcher.daemon = True
    watcher.schedule(self, self.logdir)
    watcher.start()
    atexit.register(watcher.stop)

    # Latest pre-existing logfile - e.g. if E:D is already running.
    # Assumes logs sort alphabetically.
    existing = sorted(name for name in listdir(self.logdir)
                      if name.startswith('netLog.'))
    self.logfile = join(self.logdir, existing[-1]) if existing else None
def main():
    """Entry point: configure logging, watch the blacklist file's directory
    for changes, and run the blacklist ACL loop."""
    signal.signal(signal.SIGINT, signal_handler)
    global observer
    global bli
    global bl
    global logger

    parser = create_cli()
    if len(sys.argv) == 1:
        # No arguments at all: show usage and bail out.
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()

    # File logger that tolerates external log rotation (WatchedFileHandler).
    logger = logging.getLogger('ext_acl_blacklist')
    hdlr = logging.handlers.WatchedFileHandler(args.log_file)
    hdlr.setFormatter(
        logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logger.addHandler(hdlr)
    logger.setLevel(logging.INFO)

    if not path.isfile(args.blacklist_file):
        print('Squid external acl blacklist file {} not found'.format(
            args.blacklist_file))
        logger.warning('Squid external acl blacklist file {} not found'.format(
            args.blacklist_file))
        sys.exit()

    bli = PySquidBlacklistsImporter(args.blacklist_file)
    bl = PySquidBlacklistsRunner(bli)

    # Reload the blacklist whenever its file changes on disk.
    observer = Observer()
    observer.schedule(BliEventHandler(args.blacklist_file),
                      path=path.dirname(args.blacklist_file))
    observer.daemon = True
    observer.start()
    bl.loop()
def main(input_dir, output_dir, handler):
    """Monitor *input_dir* with *handler* until SIGTERM/SIGINT arrives.

    The observer is non-daemon; the parent thread polls the observer's
    stopped event once a second so signals are delivered promptly.
    """
    monitor = Observer()
    monitor.daemon = False
    log.info("Input directory: {}".format(input_dir))
    log.info("Output directory: {}".format(output_dir))
    log.info("Setting up ArchiveAdder handler")
    monitor.schedule(handler, input_dir, recursive=False)

    def shutdown(sig, func):
        # Stop the watcher cleanly, then terminate the process.
        log.info("Signal handler called on signal: {}".format(sig))
        monitor.stop()
        monitor.join()
        sys.exit()

    log.info("Setting SIGTERM and SIGINT handler")
    signal.signal(signal.SIGTERM, shutdown)
    signal.signal(signal.SIGINT, shutdown)

    log.info("Starting directory monitor")
    monitor.start()
    log.info("Parent thread entering 1 second polling loop")
    while not monitor.stopped_event.wait(1):
        pass
def generate_on_change(args):
    """Rebuild once, then keep rebuilding whenever the source or template
    trees change; runs until interrupted with Ctrl-C."""
    class RebuildHandler(FileSystemEventHandler):
        def on_any_event(self, event):
            # ignore some duplicate events: compiled files and bare
            # directory events do not warrant a rebuild
            if event.src_path.endswith('.pyc'):
                return
            if os.path.isdir(event.src_path):
                return
            print('**', event.src_path, event.event_type)
            generate_once(args)

    # Initial build before the watchers start.
    generate_once(args)
    watcher = Observer()
    watcher.daemon = True
    watcher.schedule(RebuildHandler(), args.source, recursive=True)
    watcher.schedule(RebuildHandler(), args.templates, recursive=True)
    watcher.start()
    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        watcher.stop()
        watcher.join()
def monitor(path, recursive=True):
    """Start a daemonized observer thread watching *path* with a
    ``MonitorHandler``; returns immediately without blocking."""
    watcher = Observer()
    watcher.schedule(MonitorHandler(), path=path, recursive=recursive)
    watcher.daemon = True
    watcher.start()
# logging.basicConfig(level=logging.DEBUG, format=DEFAULT_LOG_FORMAT) # logger.setLevel(logging.CRITICAL) # logger.setLevel(logging.DEBUG) return if not RUNNING_UNITTEST: handler = LoggingConfigEventHandler(log_name, LOG_CONFIG_PATH, LOG_PATH, DEFAULT_LOG_FORMAT) handler.do_reconfigure_logging() if os.path.exists(LOG_CONFIG_PATH): observer = Observer() # observer.schedule(LoggingEventHandler(), LOG_CONFIG_PATH, recursive=False) observer.schedule(handler, LOG_CONFIG_PATH, recursive=False) print "log watcher to '%s'" % LOG_CONFIG_PATH observer.daemon = True # should not block the program ending observer.start() class LoggingConfigEventHandler(FileSystemEventHandler): """Check for logging config changes.""" def __init__(self, log_name, LOG_CONFIG_PATH, LOG_PATH, DEFAULT_LOG_FORMAT): self.log_name = log_name self.config_path = LOG_CONFIG_PATH self.log_path = LOG_PATH self.default_log_format = DEFAULT_LOG_FORMAT self.base_filename = 'logging-%s' % self.log_name def on_any_event(self, event): print("Event %s" % repr(event))
# NOTE(review): this chunk begins mid-method — the enclosing ``def`` and the
# ``flow_name == "flow1"`` guard are outside this view; the fragment is kept
# verbatim with its indentation reconstructed.
            os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"killall iperf\"")
            self.flow1_start = False
        if not self.flow1_start and action == "start":
            # Launch the flow-1 convergence demo scripts on both hosts.
            os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_pcc_convergence.sh 3 &\"")
            os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_tcp_convergence.sh 3 &\"")
            self.flow1_start = True
        if flow_name == "flow2":
            if self.flow2_start and action == "stop":
                # Kill the flow-2 traffic generators on both hosts.
                os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"killall appclient\"")
                os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"killall iperf\"")
                self.flow2_start = False
            if not self.flow2_start and action == "start":
                # Launch the flow-2 convergence demo scripts on both hosts.
                os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_pcc_convergence.sh 4 &\"")
                os.system("ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_tcp_convergence.sh 4 &\"")
                self.flow2_start = True


if __name__ == "__main__":
    # Watch the current directory for new snapshot files with a daemonized
    # observer; block in a sleep loop until Ctrl-C.
    path = '.'
    event_handler = NewSnapshotEventHandler()
    observer = Observer()
    observer.daemon = True
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def main():
    """Watch a configured list of directories and periodically email/log a
    summary of filesystem changes gathered by the per-directory handlers.

    Flow: read settings.cfg, start one daemonized observer per watched
    directory, then loop forever — every `time.period` seconds, collect the
    change notes each handler wrote into temp files, email and log them,
    and clear the temp files.  Ctrl-C stops all observers and cleans temp.
    """
    logging.basicConfig(format='%(message)s', level=logging.INFO)
    logging.info('File Checker v.1.0')
    logging.info('Initialising...')
    base_path = os.path.dirname(os.path.realpath(__file__))
    config = configparser.RawConfigParser()
    config.read(os.path.join(base_path, 'config', 'settings.cfg'))
    # Cleaning the temp directory
    file_utils.clean_temp(base_path)
    # Initialising email sending
    mail_sender = MailSender(config.get('email-login', 'login'),
                             config.get('email-login', 'pass'),
                             config.get('email-server', 'server'),
                             int(config.get('email-server', 'port')))
    # Reading files for directories to watch and email addresses to send reports to
    dirs = file_utils.get_dir_list(base_path)
    emails = file_utils.get_emails(base_path)
    # Initial timestamp
    timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    # Creating observer for every directory in a list and recording them in a pool
    observer_pool = {}
    # Iterate over a copy of the keys: dirs is mutated inside the loop.
    for dir_name in list(dirs):
        event_handler = FileLoggingEventHandler(dir_name)
        observer = Observer()
        try:
            observer.schedule(event_handler, dirs[dir_name], recursive=True)
        except FileNotFoundError:
            logging.warning('Directory %s cannot be found, skipping...',
                            dir_name)
            dirs.pop(dir_name, None)
            continue
        observer_pool[dir_name] = observer
        observer.daemon = True
        observer.start()
        # Creating dir to store temp data for every directory
        temp_dir = os.path.join(base_path, 'temp', dir_name)
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
    if not len(dirs):
        logging.info('No directories to watch, exiting.')
        sys.exit()
    logging.info('Watching %i directories.', len(dirs))
    # Initialising logger
    folder_logger = FolderLogger(os.path.join(base_path, 'log'))
    folder_logger.initialise(list(dirs))
    logging.info('Started successfully.')
    # Looping forever until interrupted
    try:
        while True:
            # Time between checks is retrieved from config
            time.sleep(int(config.get('time', 'period')))
            new_timestamp = datetime.datetime.now().strftime(
                '%Y-%m-%d %H:%M:%S')
            if not len(dirs):
                logging.info('No directories to watch, exiting.')
                sys.exit()
            # Logging and emailing changes for every watched directory
            for dir_name in list(dirs):
                temp_dir = os.path.join(base_path, 'temp', dir_name)
                # If watched directory does not exists anymore, it must've been deleted
                if not os.path.exists(dirs[dir_name]):
                    logging.warning('Watched directory %s has been deleted.',
                                    dir_name)
                    for email in emails:
                        mail_sender.send_message(
                            email,
                            'Changes occurred in directory ' + dir_name,
                            'Directory {0} has been deleted.'.format(dir_name))
                    folder_logger.write(
                        dir_name,
                        'Directory {0} has been deleted.'.format(dir_name))
                    dirs.pop(dir_name, None)
                    observer_pool[dir_name].stop()
                    # Removing temp dir for this directory
                    shutil.rmtree(temp_dir)
                    continue
                # Composing email text from data gathered in temp files
                composer = email_composer.EmailComposer(
                    timestamp, new_timestamp)
                for filename in os.listdir(temp_dir):
                    with open(os.path.join(temp_dir, filename), 'r+') as file:
                        file_contents = file.read()
                        if file_contents:
                            composer.add_block(filename, file_contents)
                            # Clear the temp file once its notes are consumed.
                            # NOTE(review): original indentation is ambiguous —
                            # presumably seek/truncate belong inside this
                            # branch; confirm against the original source.
                            file.seek(0)
                            file.truncate()
                # Logging and sending if composer managed to compose text (i.e. something was changed)
                if not composer.is_empty():
                    logging.info(
                        'Changes occurred in directory %s, sending emails',
                        dir_name)
                    text = composer.compose_email()
                    folder_logger.write(dir_name, text)
                    for email in emails:
                        mail_sender.send_message(
                            email,
                            'Changes occurred in directory ' + dir_name,
                            text)
            # Updating timestamp
            timestamp = new_timestamp
    except KeyboardInterrupt:
        # Stop all observers first, then join them and clean the temp dirs.
        for key in observer_pool:
            observer_pool[key].stop()
        logging.info('Operation ended by user.')
        for key in observer_pool:
            observer_pool[key].join()
        file_utils.clean_temp(base_path)
from watchdog.observers import Observer
from watchdog.events import (PatternMatchingEventHandler, FileModifiedEvent,
                             FileCreatedEvent)

observer = Observer()  # create an observer instance


class Handler(PatternMatchingEventHandler):
    """Print a line for every file created or modified under the watch path.

    Subclass one of the handler classes and override the methods for the
    events you want to process.
    """

    def on_created(self, event: FileCreatedEvent):
        # Fixed user-facing typo: 'FIle created' -> 'File created'.
        print('File created: ', event.src_path)

    def on_modified(self, event: FileModifiedEvent):
        print('File modified: %s [%s]' % (event.src_path, event.event_type))


# Schedule the event handler and tell watchdog what it should be watching:
# patterns=['*'] matches all files, path='.' is the current directory.
# (PatternMatchingEventHandler documents `patterns` as a list; the original
# passed a bare '*' string, which only worked by accident because iterating
# the one-character string yields the same single '*' pattern.)
observer.schedule(event_handler=Handler(patterns=['*']), path='.')
observer.daemon = False
observer.start()
try:
    observer.join()  # watchdog runs in a separate thread
except KeyboardInterrupt:
    print('Stopped')
    observer.stop()
    observer.join()
# NOTE(review): this chunk begins mid-method — the enclosing ``def`` and the
# flow-1 handling are outside this view; the fragment is kept verbatim with
# its indentation reconstructed.
                os.system(
                    "ssh -t -t -o StrictHostKeyChecking=no [email protected] \"killall appclient\""
                )
                os.system(
                    "ssh -t -t -o StrictHostKeyChecking=no [email protected] \"killall iperf\""
                )
                self.flow2_start = False
            if not self.flow2_start and action == "start":
                # Launch the flow-2 convergence demo scripts on both hosts.
                os.system(
                    "ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_pcc_convergence.sh 4 &\""
                )
                os.system(
                    "ssh -t -t -o StrictHostKeyChecking=no [email protected] \"~/demo_run_tcp_convergence.sh 4 &\""
                )
                self.flow2_start = True


if __name__ == "__main__":
    # Watch the current directory for new snapshot files with a daemonized
    # observer; block in a sleep loop until Ctrl-C.
    path = '.'
    event_handler = NewSnapshotEventHandler()
    observer = Observer()
    observer.daemon = True
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
observer = Observer()


class Handler(PatternMatchingEventHandler):
    """Alert the operator when the watched episode-data directory changes
    (presumably a new patient record arriving — TODO confirm)."""

    def on_modified(self, event: FileModifiedEvent):
        global lab, root, but
        # Flash the Tk label as a visual alarm.
        lab.configure(text='New Patient!')
        lab.configure(background='red')
        # root.attributes("-topmost", True)
        # NOTE(review): sleeping inside the watchdog callback blocks the
        # observer's event dispatch for 10 s — presumably intentional here.
        time.sleep(10)
        root.deiconify()
        root.attributes("-topmost", True)


observer.schedule(event_handler=Handler('*'),
                  path='d:\\john tillet\\episode_data\\watched\\')
observer.daemon = False
observer.start()


class EpFullException(Exception):
    pass


def episode_discharge(intime, outtime, anaesthetist, endoscopist):
    # Drives the billing UI by replaying keystrokes with pyautogui.
    pya.hotkey('alt', 'i')
    pya.typewrite(['enter'] * 4)
    pya.typewrite(intime)
    pya.typewrite(['enter'] * 2)
    pya.typewrite(outtime)
    pya.typewrite(['enter'] * 3)
    if anaesthetist != 'locum':
        # NOTE(review): the chunk ends here mid-function — the remainder of
        # episode_discharge is outside this view.