def watch(self):
    """Start watching ``self.directory`` for newly created files.

    Blocks until KeyboardInterrupt; each new (non-directory) path is
    passed to ``self.callback``.
    """
    logger.info('Watching directory %s' % self.directory)
    # Set up handler for when we see new files
    callback = self.callback

    class NewFileEventHandler(FileSystemEventHandler):
        def on_created(self, event):
            # Ignore directory-creation events; only files are reported.
            if not event.is_directory:
                logger.info('Detected new file: %s' % event.src_path)
                callback(event.src_path)

    event_handler = NewFileEventHandler()
    # Use polling observer (rather than filesystem-specific observers),
    # because it is more reliable.
    observer = PollingObserver(timeout=self.sleep_time)
    # Start the observer
    observer.schedule(event_handler, self.directory, recursive=False)
    observer.start()
    # Wait while the observer is running; the observer does the work on
    # its own thread, so this loop only keeps the process alive.
    try:
        while True:
            sleep(self.sleep_time)
    # Exit gracefully
    except KeyboardInterrupt:
        logger.info('Detected interrupt. Stopping observer.')
        observer.stop()
    observer.join()
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, poll=False, extensions=None):
    """Run the change handler once, then watch *directory* for changes.

    Raises ValueError when *directory* is given but does not exist.
    Blocks until KeyboardInterrupt.

    Fix: ``extensions=[]`` was a mutable default argument shared across
    calls; replaced with ``None`` (behavior unchanged for callers).
    """
    if extensions is None:
        extensions = []
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog; polling is slower but works on all filesystems.
    if poll:
        observer = PollingObserver()
    else:
        observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
class Crafter(object):
    """Wire up and control a watchdog observer over a project's src tree."""

    def __init__(self, base_dir):
        # Resolve the watched and output locations once, up front.
        source = os.path.join(base_dir, 'src')
        preview = os.path.join(base_dir, 'preview')
        self.handler = ObserverHandler(source, preview)
        self.observer = Observer()
        self.observer.schedule(self.handler, path=source, recursive=True)

    def craft(self):
        """Begin watching the src directory."""
        self.observer.start()

    def shutdown(self):
        """Stop the watchdog daemon and wait for its thread to finish."""
        self.observer.stop()
        self.observer.join()
class LivereloadWatchdogWatcher(object):
    """ File system watch dog. """

    def __init__(self):
        super(LivereloadWatchdogWatcher, self).__init__()
        # True once any watched file changed; consumed by reload logic.
        self._changed = False
        # Allows the LivereloadWatchdogWatcher
        # instance to set the file which was
        # modified. Used for output purposes only.
        self._action_file = None
        # NOTE(review): the observer is started before any watch is
        # scheduled; watchdog supports scheduling on a running observer.
        self._observer = PollingObserver()
        self._observer.start()

        # Compatibility with livereload's builtin watcher

        # Accessed by LiveReloadHandler's on_message method to decide if a task
        # has to be added to watch the cwd.
        self._tasks = True

        # Accessed by LiveReloadHandler's watch_task method. When set to a
        # boolean false value, everything is reloaded in the browser ('*').
        self.filepath = None

        # Accessed by Server's serve method to set reload time to 0 in
        # LiveReloadHandler's poll_tasks method.
        self._changes = []

    #pylint: disable=unused-argument
    def watch(self, path, *args, **kwargs):
        # Extra args are accepted for livereload API compatibility and
        # ignored; a recursive watch is scheduled on ``path``.
        event_handler = _WatchdogHandler(self)
        self._observer.schedule(event_handler, path=path, recursive=True)
def start_watch():
    """Watch the cwd for changes while running uwsgi, relaying its stderr.

    Returns the uwsgi process's exit code.  Uses module globals ``p``
    (the subprocess handle) and ``job_name`` (uwsgi json config name).
    """
    # NOTE(review): ``when_file_chanage`` looks like a typo'd handler
    # class defined elsewhere in this module — confirm.
    event_handler = when_file_chanage(kill_progress)
    observer = Observer(timeout=1)
    observer.schedule(event_handler, path=os.getcwd(), recursive=True)
    observer.start()
    global p, job_name
    cmd = ["uwsgi", "--json", job_name]
    p = subprocess.Popen(cmd, stderr=subprocess.PIPE)
    return_code = p.poll()
    # Pump stderr while uwsgi runs; bail out if the observer thread died.
    while return_code is None:
        if not observer.is_alive():
            kill_progress()
            break
        return_code = p.poll()
        line = p.stderr.readline().strip().decode("utf-8")
        if len(line) != 0:
            print(line)
            sys.stderr.flush()
        time.sleep(0.01)
    # Drain any remaining stderr after the process exits.
    # NOTE(review): ``line`` is unbound here if the loop above never ran a
    # full iteration (process exited immediately) — potential NameError.
    while len(line) != 0:
        line = p.stderr.readline().strip().decode("utf-8")
        print(line)
        sys.stderr.flush()
    observer.stop()
    return return_code
class DirWatcher(threading.Thread):
    """Thread that watches ``dirpath`` and feeds events to ``scanq``
    via a WatchdogHandler.  Call ``stop()`` to shut it down."""

    def __init__(self, dirpath, scanq, options):
        super().__init__()
        self.__dirpath = dirpath
        self.__handler = WatchdogHandler(scanq, options)
        # Fix: build only the observer we need instead of constructing an
        # Observer and then discarding it when polling is requested.
        if options.poll:
            self.__observer = PollingObserver()
        else:
            self.__observer = Observer()
        self.__options = options
        self.__scanq = scanq
        self.__stop = False

    def run(self):
        self.__observer.schedule(self.__handler, self.__dirpath, recursive=True)
        self.__observer.start()
        try:
            # Fix: the old ``while True: pass`` busy-wait pinned a CPU
            # core.  join(timeout) blocks ~0.1s per check instead.
            while not self.__stop:
                self.__observer.join(0.1)
        except KeyboardInterrupt:
            return

    def stop(self):
        # Signal run() first so its loop can exit promptly, then shut the
        # observer down and wait for its thread.
        self.__stop = True
        self.__observer.stop()
        self.__observer.join()
class Restarter(FileSystemEventHandler):
    """Watches the given source trees and reports when a ``.py`` file
    has changed since the last ``restart_required()`` call."""

    __slots__ = ("_observer", "_changed")

    PATHS = Token("RESTARTER_PATHS")

    def __init__(self, paths: PATHS = None):
        self._changed = False
        self._observer = PollingObserver()
        # An empty/None paths value schedules nothing.
        for p in paths or ():  # type: ignore
            print("watching for changes %r" % p)
            self._observer.schedule(self, str(p), recursive=True)
        self._observer.start()

    def restart_required(self) -> bool:
        """Return True at most once per batch of detected changes."""
        was_changed, self._changed = self._changed, False
        return was_changed

    def on_any_event(self, event: FileSystemEvent):
        # Only non-directory .py changes flip the flag; once set it stays
        # set until consumed by restart_required().
        if not event.is_directory and event.src_path.endswith(".py"):
            self._changed = True

    def stop(self):
        """Detach all watches (the observer thread keeps running)."""
        self._observer.unschedule_all()
class Watcher:
    """Polls ``config.src_path`` and dispatches file events, replaying
    pre-existing files as synthetic creation events on startup."""

    def __init__(self):
        logging.basicConfig(level=logging.DEBUG)
        self.src_path = config.src_path
        self.event_observer = PollingObserver()

    def run(self):
        handler = FileSystemHandler()
        self.event_observer.schedule(handler, self.src_path)
        self.event_observer.start()
        logging.info('Initialized watcher on folder %s', self.src_path)
        # Files already present in the source folder would never trigger a
        # creation event, so feed them to the handler manually.
        for entry in os.listdir(self.src_path):
            synthetic = FileCreatedEvent(os.path.join(self.src_path, entry))
            handler.on_any_event(synthetic)
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.stop()

    def stop(self):
        self.event_observer.stop()
        self.event_observer.join()
def watch(self):
    """Start polling ``self.path`` and begin tailing Power.log.

    Opens the log file, replays one on_modified pass, and marks the
    instance as watching.  The observer runs on its own thread.
    """
    import os  # local: this module section did not previously need os
    observer = PollingObserver(0.1)
    observer.schedule(self, self.path)
    observer.start()
    # Fix: the old 'self.path + "\Power.log"' relied on a literal
    # backslash that is both Windows-only and an invalid escape
    # sequence in a string literal; os.path.join is portable.
    self.file = open(os.path.join(self.path, "Power.log"), "r")
    self.on_modified(None)
    self.watching = True
class Watcher:
    """Recursively watches the configured queue directory with a polling
    observer, handling events via Handler until interrupted."""

    DIRECTORY_TO_WATCH = os.path.join(Config.BASE_DIR, Config.QUEUE_LOCATION)

    def __init__(self):
        self.observer = PollingObserver()

    def run(self):
        event_handler = Handler()
        self.observer.schedule(event_handler, self.DIRECTORY_TO_WATCH, recursive=True)
        self.observer.start()
        print(self.DIRECTORY_TO_WATCH)
        logging.debug(self.DIRECTORY_TO_WATCH)
        try:
            while True:
                time.sleep(5)
                logging.debug("Watcher: Sleeping")
                print("Watcher: Sleeping")
        # Fix: narrowed from a bare ``except:`` which swallowed every
        # error (including SystemExit); Ctrl-C is how the loop exits.
        except KeyboardInterrupt:
            self.observer.stop()
            logging.error("-1 - Error")
            print("-1 - Error")
        self.observer.join()
def backend_monitor(API, config, logger):
    """Watch every configured project root and periodically reconcile the
    last-crawl file lists.  Blocks until KeyboardInterrupt.
    """
    logger.info("Start backend monitor")
    # Native observers are unreliable on Windows for this use; poll there.
    if platform.system() == 'Windows':
        observer = PollingObserver()
    else:
        observer = Observer()
    for project_key in config['projects']['project_list']:
        # NOTE(review): event_handler and list_last_crawl are rebound on
        # every iteration, so after this loop they refer only to the LAST
        # project; the consistency check below compares all projects
        # against that one handler — confirm whether a per-project map
        # was intended.
        event_handler = FileSystemMonitor(
            API, config, project_key, logger,
            load_list_last_crawl(config, project_key))
        list_last_crawl = load_list_last_crawl(config, project_key)
        observer.schedule(event_handler, config[project_key]['rootdir'], recursive=True)
    observer.start()
    try:
        while True:
            # check the consistency between list_last_crawl and the current list in event_handler every 30s
            time.sleep(30)
            for project_key in config['projects']['project_list']:
                if set(list_last_crawl) != event_handler.set_last_crawl:
                    # NOTE(review): ``dirs`` is not defined anywhere in this
                    # scope — this branch would raise NameError when hit.
                    log_full_run_filelist(dirs, list(event_handler.set_last_crawl), config[project_key]['name'])
                    list_last_crawl = load_list_last_crawl(config, project_key)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    return
def watch_record(indexer, use_polling=False):
    """
    Start watching `cfstore.record_path`.

    :type indexer: rash.indexer.Indexer

    Blocks until KeyboardInterrupt, then shuts the observer down cleanly.
    """
    if use_polling:
        # Polling works where inotify-style backends do not (e.g. NFS).
        from watchdog.observers.polling import PollingObserver as Observer
        Observer  # fool pyflakes
    else:
        from watchdog.observers import Observer
    event_handler = RecordHandler(indexer)
    observer = Observer()
    observer.schedule(event_handler, path=indexer.record_path, recursive=True)
    indexer.logger.debug('Start observer.')
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.')
        observer.stop()
    indexer.logger.debug('Joining observer.')
    observer.join()
    indexer.logger.debug('Finish watching record.')
def start(self):
    """Build a PatternMatchingEventHandler from the [watchdog] config
    section, watch the configured path, and block until the observer
    dies or the user interrupts.
    """
    path = self.config.get('watchdog', 'path')
    patterns = self.config.get('watchdog', 'patterns').split(';')
    ignore_directories = self.config.getboolean('watchdog', 'ignore_directories')
    ignore_patterns = self.config.get('watchdog', 'ignore_patterns').split(';')
    case_sensitive = self.config.getboolean('watchdog', 'case_sensitive')
    recursive = self.config.getboolean('watchdog', 'recursive')
    event_handler = PatternMatchingEventHandler(
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=ignore_directories,
        case_sensitive=case_sensitive)
    event_handler.on_created = self.on_created
    # event_handler.on_modified = self.on_modified
    observer = Observer()
    observer.schedule(path=path, event_handler=event_handler, recursive=recursive)
    observer.start()
    # Fix: log-message typos ("startting" -> "starting",
    # "stoped" -> "stopped").
    self.logger.info('WatchDog Observer for HCS/AFS/AAS is starting.....')
    self.logger.info('patterns=%s' % patterns)
    self.logger.info('path=%s' % path)
    try:
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:  # parentheses around the type were redundant
        observer.stop()
        self.logger.debug('WatchDog Observer is stopped.')
    observer.join()
def watch_record(indexer, use_polling=False):
    """Watch ``cfstore.record_path`` until interrupted.

    :type indexer: rash.indexer.Indexer
    """
    if use_polling:
        from watchdog.observers.polling import PollingObserver as Observer
        Observer  # fool pyflakes
    else:
        from watchdog.observers import Observer
    handler = RecordHandler(indexer)
    watcher = Observer()
    watcher.schedule(handler, path=indexer.record_path, recursive=True)
    log = indexer.logger.debug
    log("Start observer.")
    watcher.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        log("Got KeyboardInterrupt. Stopping observer.")
        watcher.stop()
    log("Joining observer.")
    watcher.join()
    log("Finish watching record.")
def notify():
    """Watch the Unifi images folder and run ``on_file_created`` for each
    new ``*full.jpg``.  The observer is started and left running; this
    function does not block.
    """
    #------------------------
    # Setup watchdog patterns
    #------------------------
    #patterns = "*"
    patterns = ["*full.jpg"]
    ignore_patterns = ""
    ignore_directories = False
    case_sensitive = False
    #path = "c://self//UniWatch2//static//images"
    path = watchConfig.config['fileWatch']['pathToImagesUnifi']
    go_recursively = True
    #------------------------
    # Setup watchdog handlers
    #------------------------
    event_handler = PatternMatchingEventHandler(patterns, ignore_patterns, ignore_directories, case_sensitive)
    event_handler.on_created = on_file_created
    #event_handler.on_modified = on_file_created
    # Polling observer: works even when the image folder is a network mount.
    #observer = Observer()
    observer = PollingObserver()
    observer.schedule(event_handler, path, recursive=go_recursively)
    #observer.schedule(event_handler, path, recursive=False)
    observer.start()
class AIOWatchdog(object):
    """Thin wrapper around a watchdog observer that accepts either a single
    path or a list of paths and an optional injected observer/handler."""

    def __init__(self, path=".", recursive=True, event_handler=None, observer=None):
        self._observer = observer if observer is not None else Observer()
        handler = event_handler or AIOEventHandler()
        # Normalize to a list so one code path schedules everything.
        targets = path if isinstance(path, list) else [path]
        for target in targets:
            self._observer.schedule(handler, target, recursive)

    def start(self):
        """Start the underlying observer thread."""
        self._observer.start()

    def stop(self):
        """Stop the observer and wait for its thread to exit."""
        self._observer.stop()
        self._observer.join()
def get_observer(config, handler):
    """Return a polling observer watching ``config.path`` recursively
    (not started)."""
    watcher = PollingObserver()
    watcher.schedule(handler, config.path, recursive=True)
    return watcher
def get_contracts_code_observer(project_dir):
    """Return a (not started) polling observer for compiled-contract
    changes in the project's build directory."""
    handler = ContractCodeChangedEventHandler(project_dir=project_dir)
    watcher = PollingObserver()
    watcher.schedule(handler, utils.get_build_dir(project_dir), recursive=False)
    return watcher
def get_active_dir_observer(project_dir, event_handler):
    """
    Setup a polling observer on the project's blockchains directory.

    This directory contains the .active-chain symlink which is watched for.
    """
    watcher = PollingObserver()
    watcher.schedule(event_handler, get_blockchains_dir(project_dir), recursive=False)
    return watcher
def watch_server(root_dir):
    """Return a polling observer (not started) that watches *root_dir*
    recursively with a OneDirServerEventHandler."""
    watcher = PollingObserver()
    watcher.schedule(OneDirServerEventHandler(), path=root_dir, recursive=True)
    return watcher
def run(self):
    """Watch the radio queue directory and dispatch file events until
    interrupted.

    NOTE: the watched path is hard-coded to /opt/parent-radio-hk/queue.
    """
    observer = PollingObserver()
    file_event_handler = FileEventHandler(self.event_handler)
    observer.schedule(file_event_handler, "/opt/parent-radio-hk/queue", False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    # Fix: join() was dead code after the infinite loop; it now runs on
    # shutdown so the observer thread is actually waited for.
    observer.join()
def _start_watch_mode(self, args: ConvertModeArguments) -> None: """Starts and runs the watch mode until canceled Arguments: args {ConvertModeArguments} -- The arguments for convert mode """ # Use custom event handler event_handler = self._create_watch_handler( args['in_path'], args['out_path'], ) # Init the observer observer = Observer() observer.schedule(event_handler, args['in_path'], recursive=True) self._logger.debug(f"Starting watch mode for: {args['in_path']}") if self._visual: print( colored('Starting watcher for:', 'blue', attrs=['bold']), colored(f"{os.path.abspath(args['in_path'])}", 'blue'), ) else: self._logger.info(f"Starting watch mode for: {args['in_path']}") # Start observer.start() # Keep the process running while # the watcher watches (until KeyboardInterrupt) try: while True: # Pretty spinner =) spinner_text = colored( 'Watching files', 'blue', ) + colored(' (use Ctrl+c to exit)', 'red') with yaspin( Spinners.bouncingBar, text=spinner_text, color='blue', ): time.sleep(1) except KeyboardInterrupt: self._logger.debug('Got a KeyboardInterrupt, stopping watcher.') observer.stop() observer.join() self._logger.debug(f"Stoped watching {args['in_path']}") if self._visual: print( colored('Stoped watcher for:', 'blue', attrs=['bold']), colored(f"{os.path.abspath(args['in_path'])}", 'blue'), ) else: self._logger.info(f"Stoped watching {args['in_path']}")
def main(argv):
    """Watch ``args.path`` and transfer new files via Globus until Ctrl-C.

    Installs SIGUSR1/SIGUSR2 handlers that dump the handler's status
    (SIGUSR2 with details).
    """
    # Fix: honor the ``argv`` parameter instead of always re-reading
    # sys.argv[1:], so callers can pass their own argument vector.
    args = Args(argv)
    # now that logging is setup log our settings
    args.log_settings()
    src_path = args.path
    # Did we ask for debug?
    if args.debug:
        set_debug()
    if args.globus_debug:
        set_gt_debug()
    # using globus, init to prompt for endpoiont activation etc
    GlobusTransfer(args.source, args.destination, args.destination_dir, src_path)
    event_handler = Handler(args)
    if args.prepopulate:
        event_handler.prepopulate()
    # Polling observer chosen deliberately over the native one.
    observer = PollingObserver()
    observer.schedule(event_handler, path=src_path, recursive=True)

    # setup signal handler
    def dump_status(event_handler, signalNumber, frame, details=False):
        """Dump the Handler() status when USR1 is recived."""
        event_handler.status(details=details)

    signal.signal(signal.SIGUSR1, partial(dump_status, event_handler))
    signal.signal(signal.SIGUSR2, partial(dump_status, event_handler, details=True))
    observer.start()
    s = sched.scheduler(time.time, time.sleep)
    logger.info("Starting Main Event Loop")
    try:
        while True:
            logger.info(
                f"Starting iteration {event_handler.iteration} will sleep {args.sleep} seconds"
            )
            s.enter(
                args.sleep,
                1,
                event_handler.new_iteration,
                argument=(args.dwell_time, ),
            )
            s.run()
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def __init__(self, filename: str, signal: NamedSignal) -> None:
    """Begin polling the directory containing *filename*, emitting
    *signal* (via ``self._changed``) when the file changes."""
    self._filename = filename
    self._changed_signal: NamedSignal = signal
    watcher = PollingObserver()
    watcher.schedule(
        WatcherEventHandler(filename, self._changed),
        path.dirname(filename),
    )
    watcher.start()
    self._observer = watcher
class Watcher:
    """Watches SS_DIR (possibly a remote mount) and dispatches change events."""

    def __init__(self):
        # self.observer = Observer() # Use this if SS_DIR is local
        self.observer = PollingObserver() # Use this if SS_DIR is remote mount

    def run(self):
        """Schedule the handler on SS_DIR, start the observer, and hand
        control to ``loop1()``."""
        event_handler = file_changed()
        self.observer.schedule(event_handler, SS_DIR, recursive=True)
        self.observer.start()
        # loop1() is expected to block; the observer keeps running on its
        # own thread.  NOTE(review): nothing here ever stops/joins it.
        loop1()
def watch_file(directory, filename):
    """Poll *directory* for changes until the observer thread exits.

    *filename* is accepted for interface compatibility but unused here;
    the handler is given the observer so it can stop it.
    """
    observer = PollingObserver()
    observer.schedule(MyHandler(observer), path=directory)
    observer.start()
    print('\nwatching for changes to {}'.format(directory))
    # Fix: Thread.isAlive() was removed in Python 3.9 — use is_alive(),
    # and drop the redundant "== True" comparison.
    while observer.is_alive():
        time.sleep(1)
    observer.join()
def watch(script, callback):
    """Invoke *callback* whenever *script*'s file is modified.

    Raises RuntimeError if the script is already being observed; the
    observer is recorded in the module-level ``_observers`` map.
    """
    if script in _observers:
        raise RuntimeError("Script already observed")
    watched_dir = os.path.dirname(os.path.abspath(script.filename))
    handler = _ScriptModificationHandler(
        callback,
        filename=os.path.basename(script.filename),
    )
    watcher = Observer()
    watcher.schedule(handler, watched_dir)
    watcher.start()
    _observers[script] = watcher
def watch_folder(folder, queue):
    """Watch *folder* recursively, pushing events onto *queue* via
    MyHandler, until interrupted.

    Fixes: the old loop called join() on a live observer every second and
    a bare ``except: pass`` swallowed all errors while leaving the
    observer thread running forever.
    """
    observer = Observer()
    observer.schedule(MyHandler(queue), path=folder, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        pass
    finally:
        # Always shut the observer thread down on the way out.
        observer.stop()
        observer.join()
def __init__(self, index, interface='wlan0'):
    """Load the wlan configurations for *interface* and watch its
    ifstate directory for changes (``self`` is the event handler)."""
    self.interface = interface
    self.run_state = '/run/network/ifstate.{interface}'.format(interface=interface)
    self.configurations = ['', interface] + rospy.get_param("~wlan_interfaces", [])
    rospy.loginfo("Loaded wlan configs %s", self.configurations)
    super(WifiUI, self).__init__(index, len(self.configurations))
    self.config = get_configuration(self.run_state, self.configurations)
    # Fix: log typo "observingchanges" -> "observing changes".
    rospy.loginfo("Start observing changes in %s", os.path.dirname(self.run_state))
    observer = Observer()
    observer.schedule(self, os.path.dirname(self.run_state), recursive=True)
    observer.start()
class PosixObserver(BaseObserver):
    """ Use the Watchdog module to observe filesystem events. """

    def monitor(self):
        """Schedule every configured directory and block until interrupted."""
        # Set up the event handler
        event_handler = MyEventHandler(patterns=['*'],
                                       ignore_patterns=['version.py'],
                                       ignore_directories=True)
        event_handler.setup(self)
        # Extract the set of directories to listen to
        listen_dirs = self.get_dirs_to_monitor()
        logger.debug("Starting observer: %s" % RippleConfig().monitor)
        # Fix: build only the observer we need instead of always creating
        # an Observer and discarding it when "poll" is configured.
        if RippleConfig().monitor == "poll":
            self.observer = PollingObserver()
        else:
            self.observer = Observer()
        for d in listen_dirs:
            # Guard so a missing directory doesn't crash scheduling.
            if os.path.isdir(d):
                logger.info("Monitoring: %s" % d)
                self.observer.schedule(event_handler, d, recursive=True)
            else:
                logger.error("Directory does not exist: %s" % d)
        try:
            self.observer.start()
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.stop_monitoring()
        self.observer.join()

    def get_dirs_to_monitor(self):
        """Collect the unique directories referenced by filesystem triggers."""
        rules = RippleConfig().rules
        listen_dirs = []
        for rule in rules:
            if rule['trigger']['monitor'] == 'filesystem':
                listen_dirs.append(rule['trigger']['parameters']['directory'])
        listen_dirs = list(set(listen_dirs))
        logger.debug("Monitoring dirs: %s" % listen_dirs)
        return listen_dirs

    def stop_monitoring(self):
        """ Terminate the monitor """
        logger.debug("Terminating POSIX monitor.")
        self.observer.stop()
def watch(self):
    """Poll the pickup directory until ``self.keep_running`` goes false
    or the user interrupts."""
    watcher = PollingObserver()
    watcher.schedule(self.pickup_event_processor, path=self.pickup_dir)
    watcher.start()
    try:
        while self.keep_running:
            sleep(3)
    except KeyboardInterrupt:
        watcher.stop()
    watcher.join()
def watch(
    directories=None,
    ignore=None,
    auto_clear=False,
    beep_on_failure=True,
    onpass=None,
    onfail=None,
    runner=None,
    beforerun=None,
    onexit=None,
    poll=False,
    extensions=None,
    args=None,
    spool=True,
    verbose=False,
    quiet=False,
):
    """Watch *directories* and re-run the test suite on changes.

    Raises ValueError for a missing directory.  Blocks until Ctrl-C;
    runs *onexit* (shell command) afterwards if given.

    Fixes: four mutable default arguments ([]) replaced with None, and
    the observer join() — previously unreachable inside the infinite
    loop's try body — now runs on shutdown.
    """
    directories = list(directories) if directories else ["."]
    ignore = ignore or []
    extensions = extensions or []
    args = args or []
    directories = [os.path.abspath(directory) for directory in directories]
    for directory in directories:
        if not os.path.isdir(directory):
            raise ValueError("Directory not found: " + directory)

    # Split into recursive / non-recursive watch sets when ignores exist.
    if ignore:
        recursive_dirs, non_recursive_dirs = split_recursive(directories, ignore)
    else:
        recursive_dirs = directories
        non_recursive_dirs = []

    # Initial run
    event_handler = ChangeHandler(
        auto_clear, beep_on_failure, onpass, onfail, runner, beforerun,
        extensions, args, spool, verbose, quiet
    )
    event_handler.run()

    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    for directory in recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=True)
    for directory in non_recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=False)

    # Watch and run tests until interrupted by user
    try:
        observer.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    if onexit:
        os.system(onexit)
def watch(directories=None, ignore=None, auto_clear=False,
          beep_on_failure=True, onpass=None, onfail=None, runner=None,
          beforerun=None, onexit=None, poll=False, extensions=None,
          args=None, spool=True, verbose=False, quiet=False):
    """Watch *directories* and re-run the test suite on changes.

    Raises ValueError for a missing directory.  Blocks until Ctrl-C;
    runs *onexit* (shell command) afterwards if given.

    Fixes: four mutable default arguments ([]) replaced with None, and
    the observer join() — previously unreachable inside the infinite
    loop's try body — now runs on shutdown.
    """
    directories = list(directories) if directories else ['.']
    ignore = ignore or []
    extensions = extensions or []
    args = args or []
    directories = [os.path.abspath(directory) for directory in directories]
    for directory in directories:
        if not os.path.isdir(directory):
            raise ValueError('Directory not found: ' + directory)
    # Split into recursive / non-recursive watch sets when ignores exist.
    if ignore:
        recursive_dirs, non_recursive_dirs = split_recursive(
            directories, ignore)
    else:
        recursive_dirs = directories
        non_recursive_dirs = []

    # Initial run
    event_handler = ChangeHandler(auto_clear, beep_on_failure,
                                  onpass, onfail, runner, beforerun,
                                  extensions, args, spool, verbose, quiet)
    event_handler.run()

    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    for directory in recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=True)
    for directory in non_recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=False)

    # Watch and run tests until interrupted by user
    try:
        observer.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    if onexit:
        os.system(onexit)
class ImageWatcher:
    """ Responsible for managing events in images on a particular path. """

    def __init__(self, src_path):
        """ Initializes attributes and start thread for the monitoring

        Arguments:
            src_path {str} -- directory path to be monitored
        """
        self.__src_path = src_path
        self.__event_handler = ImageHandler()
        # Polling observer: reliable even on network/virtual mounts.
        self.__event_observer = PollingObserver()
        # Flag checked by the worker loop; set via close().
        self.__stop_thread = False
        self.__process = Thread(target=self.__run)
        self.__process.start()

    def __run(self):
        """ Sets the background monitoring. """
        self.__start()
        try:
            # Sleep-poll the stop flag; observer events fire on the
            # observer's own thread.
            while True:
                if self.__stop_thread:
                    break
                time.sleep(1)
        except Exception:
            logger.exception("Image watcher interrupted")
        finally:
            # Always shut the observer down, even on error.
            self.__stop()

    def __start(self):
        """ Starts monitoring. """
        self.__schedule()
        self.__event_observer.start()
        logger.debug('Image watcher started!')

    def __stop(self):
        """ Breaks monitoring. """
        self.__event_observer.stop()
        self.__event_observer.join()
        logger.debug('Image watcher stoped!')

    def __schedule(self):
        """ Schedules the event handler. """
        self.__event_observer.schedule(self.__event_handler, self.__src_path, recursive=True)

    def close(self):
        """ Sets flag to stop monitoring. """
        self.__stop_thread = True
def create_watchdog_notifier(attrs, publisher):
    """Create a notifier from the specified configuration attributes *attrs*.

    Returns the (not started) polling observer and the notify callback.
    """
    pattern = globify(attrs["origin"])
    timeout = float(attrs.get("watchdog_timeout", 1.))
    LOGGER.debug("Watchdog timeout: %.1f", timeout)
    watcher = PollingObserver(timeout=timeout)
    handler = WatchdogHandler(process_notify, publisher, pattern, attrs)
    # Watch the directory containing the globified origin pattern.
    watcher.schedule(handler, os.path.dirname(pattern))
    return watcher, process_notify
def tail_like(path):
    """Follow *path* like ``tail -f`` using a polling observer, until
    interrupted.  The handler is always closed on the way out."""
    handler = TailHandler(path)
    watcher = PollingObserver()
    watcher.schedule(handler, dirname(path))
    watcher.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
    finally:
        handler.close()
        watcher.join()
def get_static_assets_observer(project_dir):
    """Return a (not started) polling observer for the contracts.js build
    output plus, when present, the project's static assets directory."""
    watcher = PollingObserver()
    watcher.schedule(
        ContractsJSChangedEventHandler(project_dir=project_dir),
        utils.get_build_dir(project_dir),
        recursive=False,
    )
    if project_has_assets(project_dir):
        watcher.schedule(
            AssetsChangedEventHandler(project_dir=project_dir),
            get_static_assets_dir(project_dir),
            recursive=True,
        )
    return watcher
def watch_file(directory):
    """Poll *directory* (non-recursively) for changes until Ctrl-C.

    NOTE(review): uses the Python 2 print statement — this snippet
    predates Python 3.
    """
    event_handler = MyHandler()
    observer = PollingObserver()
    observer.schedule(event_handler, path=directory, recursive=False)
    observer.start()
    print 'watching for changes to', directory
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def get_contracts_observer(project_dir, contract_filters=None, compiler_kwargs=None):
    """Return a (not started) polling observer over the project's contract
    and library source trees, recompiling via the change handler."""
    handler = ContractSourceChangedEventHandler(
        project_dir=project_dir,
        contract_filters=contract_filters if contract_filters is not None else [],
        compiler_kwargs=compiler_kwargs if compiler_kwargs is not None else {},
    )
    watcher = PollingObserver()
    # One handler serves both source trees.
    for source_dir in (utils.get_contracts_dir(project_dir),
                       get_project_libraries_dir(project_dir)):
        watcher.schedule(handler, source_dir, recursive=True)
    return watcher
class OSFileSystem(FileSystem):
    """ This class represents a file system implemented by the python os module. """

    def __init__(self, instance=os, root="."):
        FileSystem.__init__(self, instance)
        self.root = os.path.normpath(root)
        # Filesystem events are queued by the watchdog handler and
        # drained by poll().
        self.eventQueue = EventQueue()
        self.eventHandler = EventHandler(self.eventQueue)
        self.observer = Observer()
        self.observer.schedule(self.eventHandler, path=self.root, recursive=True)
        self.observer.start()

    def join_path(self, path, *largs):
        return os.path.join(path, *largs)

    def get_relative_path(self, path):
        # NOTE(review): textual prefix test — a sibling dir sharing the
        # root's prefix (e.g. "/data2" vs "/data") passes startswith but
        # then lacks the separator; and ``path == self.root`` would raise
        # IndexError on the [1].  Confirm inputs are always below root.
        if path.startswith(self.root):
            return path.split(self.root + os.path.sep, 1)[1]
        else:
            return path

    def open(self, path, mode="rb", buffering=None):
        # ``buffering`` is accepted but ignored — TODO confirm intended.
        return open(path, mode)

    def mkdirs(self, path, mode=511):
        # 511 == 0o777
        return os.makedirs(path, mode)

    def blockchecksums(self, path):
        # Delegates to the module-level rsync-style helper.
        return blockchecksums(path)

    def delta(self, path, checksums):
        return delta(path, checksums)

    def patch(self, path, delta):
        # Apply an rsync-style delta, then swap the patched file into
        # place (remove + rename; not atomic).
        patched = patch(path, delta)
        self.instance.remove(path)
        return self.instance.rename(patched, path)

    def poll(self):
        """Drain and return all pending filesystem events (non-blocking)."""
        r = []
        while True:
            try:
                r.append(self.eventQueue.get_nowait())
            except Empty:
                break
        return r
def watch():
    """Push once immediately, then re-push whenever the working tree
    changes, until interrupted."""
    # Users expect an implicit push
    push(watch=True)
    watcher = PollingObserver()
    # Collapse bursts of events: at most one pending event is queued.
    watcher.event_queue.max_size = 1
    watcher.schedule(EventHandler(), os.getcwd(), recursive=True)
    watcher.start()
    puts(colored.yellow('Watching for changes... (ctrl-c to stop)'))
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
    # Block until the observer thread terminates.
    watcher.join()
def main():
    """Poll the LaTeX directory and rebuild on changes until Ctrl-C."""
    handler = ChangeHandler()
    watch_dir = "./"
    watcher = PollingObserver(0.35)  # poll interval: 0.35 seconds
    if not os.path.exists(watch_dir):
        os.makedirs(watch_dir)
    # Only the LaTeX directory is searched (recursively).
    watcher.schedule(handler, watch_dir, recursive=True)
    watcher.start()
    try:
        while True:
            # Sleep length is irrelevant; the observer runs on its own thread.
            time.sleep(60 * 5)
    except KeyboardInterrupt:
        watcher.stop()
    watcher.join()
def run_watch(self):
    """Watch ``self.source`` for changes (polling when ``self.poll`` is
    set) and rebuild via RoninEventHandler until interrupted."""
    if self.poll:
        from watchdog.observers.polling import PollingObserver as Observer
    else:
        from watchdog.observers import Observer
    watcher = Observer()
    watcher.schedule(RoninEventHandler(self), self.source, recursive=True)
    watcher.start()
    try:
        logger.info("Watching directory: '{0}' for changes (poll={1})".format(self.source, self.poll))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Stopping watcher...")
        watcher.stop()
    watcher.join()
def server(self, args):
    """Serve the site in a child process while rebuilding on any file
    change under BASE_DIR, until interrupted."""
    server = Process(target=self._server)
    server.start()
    event_handler = PatternMatchingEventHandler(ignore_patterns=self.WATCH_EXCLUDE)
    # Any modification triggers a full rebuild.
    event_handler.on_modified = lambda event: self._build()
    observer = Observer()
    observer.schedule(event_handler, self.BASE_DIR, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        server.terminate()
        observer.stop()
        observer.join()
        # Fix: log typo "Clossing" -> "Closing".
        self.logger.info("Closing")
def folderObserver(pathStructure, dbPath):
    """Watch a path structure's inBox folder with a polling observer.

    Logs and returns early when the structure or its inBox is missing;
    otherwise blocks until the observer dies or the user interrupts.
    """
    logging = DefaultLogger()
    # Fix: ``is None`` instead of ``== None``.
    if pathStructure is None or pathStructure['inBox'] is None:
        message = 'Watch: Unable to run as pathStructure is undefined'
        logging.debug(message)
        return
    event_handler = singleFileWatcher(pathStructure, dbPath)
    observer = PollingObserver()
    observer.schedule(event_handler, pathStructure['inBox'], recursive=False)
    observer.start()
    try:
        # Fix: dropped the redundant "True and" from the condition.
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def __init__(self, contentdir=''):
    """
    @param contentDirs are the dirs where we will load wiki files from & parse

    Creates the content dir if missing and, for a non-blank dir, starts a
    recursive watchdog observer routed to the space handler.
    NOTE(review): Python 2 codebase (print statement below).
    """
    self.file_observers = []
    self.spacehandler = SpaceHandler(self)
    # Normalize to a trailing-slash form.
    self.contentdir = contentdir if contentdir.endswith('/') else '%s/' % contentdir
    if not j.system.fs.exists(contentdir):
        print "Contentdir %s was not found .. creating it." % contentdir
        j.system.fs.createDir(contentdir)
    if contentdir.strip():
        # Watch the contentdir for changes
        observer = Observer()
        self.file_observers.append(observer)
        j.core.portal.active.watchedspaces.append(contentdir)
        print('Monitoring', contentdir)
        observer.schedule(self.spacehandler, contentdir, recursive=True)
        observer.start()
def main():
    """Watch the git checkout's site/bin/sbin trees and install changed
    code via FileHandler.  Must run as root; exits on Enter."""
    if int(os.getuid()) != 0:
        raise SystemExit("ERROR: this script should be run as root")
    parser = ArgumentParser(description='Watch a directory and install the code')
    args = parser.parse_args()
    current_path = Path(__file__).resolve().parent
    setup_path = Path(current_path, '..').resolve()
    git_path = Path(current_path, '..', '..').resolve()
    observer = Observer()
    # (handler kind, watched subdirectory) — sbin shares the bin handler.
    watch_map = (('site', 'site'), ('bin', 'bin'), ('bin', 'sbin'))
    for handler_kind, subdir in watch_map:
        observer.schedule(FileHandler(setup_path, handler_kind),
                          str(Path(git_path, subdir)), True)
    observer.start()
    try:
        print("Watching the following folders for change:")
        print(" - site")
        print(" - bin")
        print(" - sbin")
        print()
        input("~~Hit enter to exit~~\n")
    finally:
        observer.stop()
        observer.join()
class FileMonitor(FileSystemEventHandler):
    """Watches a single file and invokes ``action`` when it is modified.

    NOTE(review): Python 2 codebase (print statement in on_modified).
    """

    def __init__(self, file, action):
        # Split into the directory to watch and the target basename.
        self.path,self.filename = os.path.split(file)
        self.action = action
        self.observer = None

    def start(self):
        # (Re)create the observer each time start() is called.
        self.observer = Observer()
        self.observer.schedule(self, self.path, recursive=False)
        self.observer.start()

    def stop(self):
        if self.observer is not None:
            self.observer.stop()

    def join(self):
        if self.observer is not None:
            self.observer.join()

    def on_modified(self, event):
        try:
            # NOTE(review): samefile() gets the bare basename, which is
            # resolved against the CWD rather than self.path — likely only
            # correct when watching the current directory; confirm.
            if os.path.samefile(event.src_path,self.filename):
                self.action()
        except OSError as e:
            print 'Exception on file check', e
def serve(config, options=None): """ Start the devserver, and rebuild the docs whenever any changes take effect. """ # Create a temporary build directory, and set some options to serve it tempdir = tempfile.mkdtemp() options['site_dir'] = tempdir # Only use user-friendly URLs when running the live server options['use_directory_urls'] = True # Perform the initial build config = load_config(options=options) build(config, live_server=True) # Note: We pass any command-line options through so that we # can re-apply them if the config file is reloaded. event_handler = BuildEventHandler(options) config_event_handler = ConfigEventHandler(options) # We could have used `Observer()`, which can be faster, but # `PollingObserver()` works more universally. observer = PollingObserver() observer.schedule(event_handler, config['docs_dir'], recursive=True) for theme_dir in config['theme_dir']: if not os.path.exists(theme_dir): continue observer.schedule(event_handler, theme_dir, recursive=True) observer.schedule(config_event_handler, '.') observer.start() class TCPServer(socketserver.TCPServer): allow_reuse_address = True class DocsDirectoryHandler(FixedDirectoryHandler): base_dir = config['site_dir'] host, port = config['dev_addr'].split(':', 1) server = TCPServer((host, int(port)), DocsDirectoryHandler) print('Running at: http://%s:%s/' % (host, port)) print('Live reload enabled.') print('Hold ctrl+c to quit.') try: server.serve_forever() except KeyboardInterrupt: print('Stopping server...') # Clean up observer.stop() observer.join() shutil.rmtree(tempdir) print('Quit complete')
'event_type': event.event_type } if event.event_type != events.EVENT_TYPE_DELETED: data['type'] = magic.from_file(event.src_path, mime=True) requests.post( 'http://%s:%s/items' % (APP_HOST, APP_PORT), data=data ) def on_modified(self, event): self.process(event) def on_created(self, event): self.process(event) def on_deleted(self, event): self.process(event) if __name__ == '__main__': args = sys.argv[1:] observer = Observer() observer.schedule(MyHandler(), path=args[0] if args else '../watch_here') observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: observer.stop() observer.join()
def start(self):
    """
    Start the jumpscript machinery: spawn the daily scheduled-job cleanup
    greenlet and begin watching the ``jumpscripts`` directory for changes.
    """
    # Run the scheduled-job cleanup once per day (3600 * 24 seconds).
    gevent.spawn(self._cleanScheduledJobs, 3600 * 24)
    # Fix: retain the observer on the instance so a later shutdown can
    # stop()/join() it — previously it was a local variable and the watcher
    # thread could never be stopped cleanly.
    self._observer = Observer()
    handler = JumpscriptHandler(self)
    # NOTE(review): the path is relative, so this watches "jumpscripts"
    # under the current working directory — confirm that is intended.
    self._observer.schedule(handler, "jumpscripts", recursive=True)
    self._observer.start()
def main(argv):
    """
    Build the docs and serve them with an HTTP server.

    Performs an initial Sphinx build, watches the source tree and rebuilds on
    change, and serves the built HTML until interrupted with ctrl+c.

    :param argv: command-line arguments (excluding the program name).
    """
    parser = argparse.ArgumentParser(description='Build and serve HTML Sphinx docs')
    parser.add_argument(
        '--port',
        help='Serve on this port, default 8000',
        type=int,
        default=8000)
    parser.add_argument(
        '--source',
        help='Directory of source Sphinx (reStructuredText) docs',
        type=os.path.realpath,
        default='docs/source')
    parser.add_argument(
        '--destination',
        help='Where to build the HTML output',
        type=os.path.realpath,
        default='docs/build/html')
    parser.add_argument(
        '--doctrees',
        help='Where the doctrees are built',
        type=os.path.realpath,
        default='docs/build/doctrees')
    options = parser.parse_args(argv)

    # Freeze the build parameters into a zero-argument callable so the same
    # build can be triggered from both here and the watchdog handler.
    bound_build_docs = partial(build_docs, options.source,
                               options.destination, options.doctrees)

    # Do the initial build
    bound_build_docs()

    # Watch the source directory for changes, build docs again if detected
    observer = Observer()
    observer.schedule(
        BuildDocsHandler(bound_build_docs),
        path=options.source, recursive=True)
    observer.start()

    # Set the root for the request handler, overriding Python stdlib current
    # working directory.
    DocsHTTPRequestHandler._root = options.destination
    server = SocketServer.TCPServer(
        ('', options.port), DocsHTTPRequestHandler)

    try:
        logger.info('Serving on localhost:{}'.format(options.port))
        server.serve_forever()
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        logger.info('(stopping server)')
    finally:
        # Fix: stop the observer unconditionally — previously stop() lived
        # only in the KeyboardInterrupt branch, so any other exception
        # reached join() on a still-running observer and hung forever.
        observer.stop()
        observer.join()
        # Release the listen socket as well.
        server.server_close()

    # Fix: use the module logger for consistency (was the root `logging`).
    logger.info('Server stopped, exiting')
    sys.exit(0)
def run(self):
    """
    Boot the OctoPrint server and block until it shuts down.

    Initializes — in strict order — settings, logging, the plugin manager,
    the printer/file/slicing/event managers, access control and the Flask
    app, then assembles the Tornado route table (including plugin-provided
    routes and body-size limits), binds the HTTP server, starts the watched-
    folder watchdog, registers shutdown/SIGTERM hooks and finally blocks in
    the Tornado IOLoop until interrupted.
    """
    if not self._allowRoot:
        self._check_for_root()

    # This method (re)binds nearly all of the module-level singletons that
    # the rest of the server package imports.
    global app
    global babel
    global printer
    global printerProfileManager
    global fileManager
    global slicingManager
    global analysisQueue
    global userManager
    global eventManager
    global loginManager
    global pluginManager
    global appSessionManager
    global pluginLifecycleManager
    global preemptiveCache
    global debug

    from tornado.ioloop import IOLoop
    from tornado.web import Application, RequestHandler

    import sys

    debug = self._debug

    # first initialize the settings singleton and make sure it uses given
    # configfile and basedir if available
    s = settings(init=True, basedir=self._basedir, configfile=self._configfile)

    # then monkey patch a bunch of stuff
    util.tornado.fix_ioloop_scheduling()
    util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])

    # setup app
    self._setup_app(app)

    # setup i18n
    self._setup_i18n(app)

    # then initialize logging
    self._setup_logging(self._debug, self._logConf)
    self._logger = logging.getLogger(__name__)

    # Route any otherwise-uncaught exception into our logger instead of
    # stderr.
    def exception_logger(exc_type, exc_value, exc_tb):
        self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))

    sys.excepthook = exception_logger
    self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)

    # start the intermediary server
    self._start_intermediary_server(s)

    # then initialize the plugin manager
    pluginManager = octoprint.plugin.plugin_manager(init=True)

    printerProfileManager = PrinterProfileManager()
    eventManager = events.eventManager()
    analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
    slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
    storage_managers = dict()
    storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
    fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
    printer = Printer(fileManager, analysisQueue, printerProfileManager)
    appSessionManager = util.flask.AppSessionManager()
    pluginLifecycleManager = LifecycleManager(pluginManager)
    preemptiveCache = PreemptiveCache(os.path.join(s.getBaseFolder("data"), "preemptive_cache_config.yaml"))

    # ... and initialize all plugins
    def octoprint_plugin_inject_factory(name, implementation):
        """Factory for injections for all OctoPrintPlugins"""
        if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
            # we only care about OctoPrintPlugins
            return None
        return dict(
            plugin_manager=pluginManager,
            printer_profile_manager=printerProfileManager,
            event_bus=eventManager,
            analysis_queue=analysisQueue,
            slicing_manager=slicingManager,
            file_manager=fileManager,
            printer=printer,
            app_session_manager=appSessionManager,
            plugin_lifecycle_manager=pluginLifecycleManager,
            data_folder=os.path.join(settings().getBaseFolder("data"), name),
            preemptive_cache=preemptiveCache
        )

    def settings_plugin_inject_factory(name, implementation):
        """Factory for additional injections depending on plugin type"""
        if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
            # we only care about SettingsPlugins
            return None
        # SettingsPlugin instances get a PluginSettings instance injected
        default_settings = implementation.get_settings_defaults()
        get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
        plugin_settings = octoprint.plugin.plugin_settings(name, defaults=default_settings, get_preprocessors=get_preprocessors, set_preprocessors=set_preprocessors)
        return dict(settings=plugin_settings)

    def settings_plugin_config_migration_and_cleanup(name, implementation):
        """Take care of migrating and cleaning up any old settings"""
        if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
            return
        settings_version = implementation.get_settings_version()
        settings_migrator = implementation.on_settings_migrate
        if settings_version is not None and settings_migrator is not None:
            stored_version = implementation._settings.get_int([octoprint.plugin.SettingsPlugin.config_version_key])
            # Migrate only when there is no stored version yet or it is older
            # than what the plugin currently declares.
            if stored_version is None or stored_version < settings_version:
                settings_migrator(settings_version, stored_version)
                implementation._settings.set_int([octoprint.plugin.SettingsPlugin.config_version_key], settings_version)
        implementation.on_settings_cleanup()
        implementation._settings.save()
        implementation.on_settings_initialized()

    pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
    pluginManager.initialize_implementations()

    settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
    for implementation in settingsPlugins:
        try:
            settings_plugin_config_migration_and_cleanup(implementation._identifier, implementation)
        except:
            # NOTE(review): bare except is deliberate here — one broken
            # plugin's settings migration must not abort server startup.
            self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))

    pluginManager.implementation_post_inits=[settings_plugin_config_migration_and_cleanup]

    pluginManager.log_all_plugins()

    # initialize file manager and register it for changes in the registered plugins
    fileManager.initialize()
    pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())

    # initialize slicing manager and register it for changes in the registered plugins
    slicingManager.initialize()
    pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())

    # setup jinja2
    self._setup_jinja2()

    # make sure plugin lifecycle events relevant for jinja2 are taken care of
    def template_enabled(name, plugin):
        if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
            return
        self._register_additional_template_plugin(plugin.implementation)

    def template_disabled(name, plugin):
        if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
            return
        self._unregister_additional_template_plugin(plugin.implementation)

    pluginLifecycleManager.add_callback("enabled", template_enabled)
    pluginLifecycleManager.add_callback("disabled", template_disabled)

    # setup assets
    self._setup_assets()

    # configure timelapse
    octoprint.timelapse.configure_timelapse()

    # setup command triggers
    events.CommandTrigger(printer)
    if self._debug:
        events.DebugEventListener()

    # setup access control: instantiate the configured user manager class,
    # falling back to file-based storage if it cannot be resolved
    userManagerName = s.get(["accessControl", "userManager"])
    try:
        clazz = octoprint.util.get_class(userManagerName)
        userManager = clazz()
    except AttributeError as e:
        self._logger.exception("Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(userManagerName))
        userManager = octoprint.users.FilebasedUserManager()
    finally:
        userManager.enabled = s.getBoolean(["accessControl", "enabled"])

    loginManager = LoginManager()
    loginManager.session_protection = "strong"
    loginManager.user_callback = load_user
    if not userManager.enabled:
        # Access control disabled: everyone is an implicitly logged-in user.
        loginManager.anonymous_user = users.DummyUser
        principals.identity_loaders.appendleft(users.dummy_identity_loader)
    loginManager.init_app(app)

    # register API blueprint
    self._setup_blueprints()

    ## Tornado initialization starts here

    # Constructor arguments take precedence over configured host/port.
    if self._host is None:
        self._host = s.get(["server", "host"])
    if self._port is None:
        self._port = s.getInt(["server", "port"])

    ioloop = IOLoop()
    ioloop.install()

    self._router = SockJSRouter(self._create_socket_connection, "/sockjs")

    upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))

    def mime_type_guesser(path):
        from octoprint.filemanager import get_mime_type
        return get_mime_type(path)

    # Shared kwargs/validators mixed into the download route definitions below.
    download_handler_kwargs = dict(
        as_attachment=True,
        allow_client_caching=False
    )
    additional_mime_types=dict(mime_type_guesser=mime_type_guesser)
    admin_validator = dict(access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))
    no_hidden_files_validator = dict(path_validation=util.tornado.path_validation_factory(lambda path: not octoprint.util.is_hidden_path(path), status_code=404))

    def joined_dict(*dicts):
        # Merge any number of dicts left-to-right; later entries win.
        if not len(dicts):
            return dict()
        joined = dict()
        for d in dicts:
            joined.update(d)
        return joined

    server_routes = self._router.urls + [
        # various downloads
        (r"/downloads/timelapse/([^/]*\.mp[g4])", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("timelapse")), download_handler_kwargs, no_hidden_files_validator)),
        (r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("uploads")), download_handler_kwargs, no_hidden_files_validator, additional_mime_types)),
        (r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("logs")), download_handler_kwargs, admin_validator)),
        # camera snapshot
        (r"/downloads/camera/current", util.tornado.UrlProxyHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
        # generated webassets
        (r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets"))),
        # online indicators - text file with "online" as content and a transparent gif
        (r"/online.txt", util.tornado.StaticDataHandler, dict(data="online\n")),
        (r"/online.gif", util.tornado.StaticDataHandler, dict(data=bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), content_type="image/gif"))
    ]

    # fetch additional routes from plugins; each hook gets a *copy* of the
    # current routes and returns (route, handler, kwargs) triples which are
    # namespaced under /plugin/<name>/
    for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
        try:
            result = hook(list(server_routes))
        except:
            self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
        else:
            if isinstance(result, (list, tuple)):
                for entry in result:
                    # Silently skip malformed entries instead of failing.
                    if not isinstance(entry, tuple) or not len(entry) == 3:
                        continue
                    if not isinstance(entry[0], basestring):
                        continue
                    if not isinstance(entry[2], dict):
                        continue
                    route, handler, kwargs = entry
                    route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
                    self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
                    server_routes.append((route, handler, kwargs))

    # Catch-all fallback: everything not matched above goes through the
    # upload-aware wrapper around the Flask WSGI app.
    server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))

    self._tornado_app = Application(server_routes)
    max_body_sizes = [
        ("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
        ("POST", r"/api/languages", 5 * 1024 * 1024)
    ]

    # allow plugins to extend allowed maximum body sizes
    for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
        try:
            result = hook(list(max_body_sizes))
        except:
            self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
        else:
            if isinstance(result, (list, tuple)):
                for entry in result:
                    # Entries must be (method, route, size) with a supported
                    # body-carrying HTTP method and an int size.
                    if not isinstance(entry, tuple) or not len(entry) == 3:
                        continue
                    if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
                        continue
                    if not isinstance(entry[2], int):
                        continue
                    method, route, size = entry
                    route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])
                    self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
                    max_body_sizes.append((method, route, size))

    # The placeholder server shown during startup is no longer needed.
    self._stop_intermediary_server()

    # initialize and bind the server
    self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
    self._server.listen(self._port, address=self._host)

    eventManager.fire(events.Events.STARTUP)

    # auto connect to the printer if configured and the port is available
    if s.getBoolean(["serial", "autoconnect"]):
        (port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
        printer_profile = printerProfileManager.get_default()
        connectionOptions = get_connection_options()
        if port in connectionOptions["ports"]:
            printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")

    # start up watchdogs
    if s.getBoolean(["feature", "pollWatched"]):
        # use less performant polling observer if explicitly configured
        observer = PollingObserver()
    else:
        # use os default
        observer = Observer()
    observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
    observer.start()

    # run our startup plugins
    octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin, "on_startup", args=(self._host, self._port))

    # Plugins enabled *after* startup still get their on_startup call.
    def call_on_startup(name, plugin):
        implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
        if implementation is None:
            return
        implementation.on_startup(self._host, self._port)

    pluginLifecycleManager.add_callback("enabled", call_on_startup)

    # prepare our after startup function
    def on_after_startup():
        self._logger.info("Listening on http://%s:%d" % (self._host, self._port))

        # now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
        # which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
        # or service xmls or the like). While they are working though the ioloop would block. Therefore we'll
        # create a single use thread in which to perform our after-startup-tasks, start that and hand back
        # control to the ioloop
        def work():
            octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin, "on_after_startup")

            def call_on_after_startup(name, plugin):
                implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
                if implementation is None:
                    return
                implementation.on_after_startup()

            pluginLifecycleManager.add_callback("enabled", call_on_after_startup)

            # when we are through with that we also run our preemptive cache
            if settings().getBoolean(["devel", "cache", "preemptive"]):
                self._execute_preemptive_flask_caching(preemptiveCache)

        import threading
        threading.Thread(target=work).start()

    ioloop.add_callback(on_after_startup)

    # prepare our shutdown function
    def on_shutdown():
        # will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
        # on all registered ShutdownPlugins
        self._logger.info("Shutting down...")
        observer.stop()
        observer.join()
        octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin, "on_shutdown")

        if self._octoprint_daemon is not None:
            self._logger.info("Cleaning up daemon pidfile")
            self._octoprint_daemon.terminated()

        self._logger.info("Goodbye!")

    atexit.register(on_shutdown)

    def sigterm_handler(*args, **kwargs):
        # will stop tornado on SIGTERM, making the program exit cleanly
        def shutdown_tornado():
            ioloop.stop()
        # add_callback_from_signal is the signal-safe way to poke the loop.
        ioloop.add_callback_from_signal(shutdown_tornado)

    signal.signal(signal.SIGTERM, sigterm_handler)

    try:
        # this is the main loop - as long as tornado is running, OctoPrint is running
        ioloop.start()
    except (KeyboardInterrupt, SystemExit):
        pass
    except:
        self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
        self._logger.exception("Stacktrace follows:")
def watch(directories=None, ignore=None, extensions=None, beep_on_failure=True,
          auto_clear=False, wait=False, beforerun=None, afterrun=None,
          onpass=None, onfail=None, onexit=None, runner=None, spool=None,
          poll=False, verbose=False, quiet=False, pytest_args=None):
    """
    Run the test suite whenever a watched file changes, until interrupted.

    Watches the given directories (default: the current directory) with
    watchdog, runs the test runner in a subprocess on every change, and
    invokes the various hook commands around each run.

    :param directories: directories to watch (default ['.'])
    :param ignore: sub-directories to exclude from recursive watching
    :param extensions: file extensions the event listener reacts to
    :param beep_on_failure: emit a terminal beep when tests fail
    :param auto_clear: clear the screen before each run
    :param wait: if True, let a running test session finish instead of
        interrupting it on a new filesystem event
    :param beforerun/afterrun/onpass/onfail/onexit: shell hook commands
    :param runner: test runner command (default 'py.test')
    :param spool: spool window passed to dequeue_all for event batching
    :param poll: use the polling observer instead of the OS-native one
    :param verbose: show full tracebacks and verbose summaries
    :param quiet: suppress summary output
    :param pytest_args: extra arguments appended to the runner command

    :raises ValueError: if any given directory does not exist.
    """
    # Fix: the list parameters previously used mutable default arguments
    # ([]), which are shared between calls; normalize None sentinels here
    # instead. Behavior for all callers is unchanged.
    directories = list(directories) if directories else []
    ignore = list(ignore) if ignore else []
    extensions = list(extensions) if extensions else []
    pytest_args = list(pytest_args) if pytest_args else []

    argv = (runner or 'py.test').split(' ') + pytest_args

    if not directories:
        directories = ['.']
    directories = [os.path.abspath(directory) for directory in directories]
    for directory in directories:
        if not os.path.isdir(directory):
            raise ValueError('Directory not found: ' + directory)

    # Setup event handler
    event_listener = EventListener(extensions)

    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    recursedirs, norecursedirs = _split_recursive(directories, ignore)
    for directory in recursedirs:
        observer.schedule(event_listener, path=directory, recursive=True)
    for directory in norecursedirs:
        observer.schedule(event_listener, path=directory, recursive=False)
    observer.start()

    # Watch and run tests until interrupted by user
    events = []
    while True:
        try:
            # Prepare next run
            if auto_clear:
                clear()
            elif not quiet:
                print()

            # Show event summary
            if not quiet:
                _show_summary(argv, events, verbose)

            # Run custom command
            run_hook(beforerun)

            # Run tests
            p = subprocess.Popen(argv, shell=is_windows)
            try:
                while True:
                    # Check for completion
                    exit_code = p.poll()
                    if exit_code is not None:
                        break
                    # Interrupt the current test run on filesystem event
                    if not wait and not event_listener.event_queue.empty():
                        send_keyboard_interrupt(p)
                        exit_code = p.wait()
                        break
                    # Allow user to initiate a keyboard interrupt
                    sleep(0.1)
            except KeyboardInterrupt:
                # Wait for current test run cleanup
                run_hook(afterrun, p.wait())
                # Exit, since this keyboard interrupt was user-initiated
                break

            # Run custom command
            run_hook(afterrun, exit_code)

            # Run dependent commands
            if exit_code in [EXIT_OK, EXIT_NOTESTSCOLLECTED]:
                run_hook(onpass)
            else:
                if beep_on_failure:
                    beep()
                run_hook(onfail)

            # Wait for a filesystem event
            while event_listener.event_queue.empty():
                sleep(0.1)

            # Collect events for summary of next run
            events = dequeue_all(event_listener.event_queue, spool)
        except KeyboardInterrupt:
            break
        except Exception as ex:
            print(format_exc() if verbose else 'Error: {}'.format(ex))
            break

    # Stop and wait for observer
    observer.stop()
    observer.join()

    # Run exit script
    run_hook(onexit)