def __init__(self, target_dir: str, do_work: Callable, patterns: List[str] = None):
    """Watch *target_dir* recursively with a polling observer, routing
    events through a deduplicating handler that calls *do_work*.

    :param target_dir: directory to watch recursively
    :param do_work: callback invoked by the DedupHandler
    :param patterns: optional filename patterns forwarded to the handler
    """
    self._handler = DedupHandler(do_work, patterns)
    self._observer = PollingObserver()
    # NOTE(review): the observer is scheduled but not started here;
    # presumably a start() method elsewhere starts it — confirm.
    self._observer.schedule(self._handler, target_dir, recursive=True)
class AIOWatchdog(object):
    """Thin wrapper that schedules an AIO event handler on one or more
    paths and proxies start/stop to the underlying observer."""

    def __init__(self, path=".", recursive=True, event_handler=None, observer=None):
        # Fall back to a fresh native Observer unless one was injected.
        self._observer = Observer() if observer is None else observer
        handler = event_handler or AIOEventHandler()
        # Accept either a single path or a list of paths.
        targets = path if isinstance(path, list) else [path]
        for target in targets:
            self._observer.schedule(handler, target, recursive)

    def start(self):
        """Start the underlying observer thread."""
        self._observer.start()

    def stop(self):
        """Stop the observer and wait for its thread to finish."""
        self._observer.stop()
        self._observer.join()
def watch(self):
    """Begin polling self.path for changes and open the Power.log file.

    Side effects: sets self.file and self.watching, and immediately
    runs one on_modified() pass to pick up existing log content.
    """
    import os  # local import keeps this fix self-contained

    observer = PollingObserver(0.1)
    observer.schedule(self, self.path)
    observer.start()
    # BUG FIX: the original did self.path + "\Power.log"; "\P" is an
    # invalid escape that left a raw backslash in the path, which only
    # worked by accident on Windows. Build the path portably instead.
    self.file = open(os.path.join(self.path, "Power.log"), "r")
    self.on_modified(None)
    self.watching = True
class LivereloadWatchdogWatcher(object):
    """
    File system watch dog.

    Adapts a watchdog PollingObserver to the interface livereload's
    builtin watcher exposes, so it can be dropped in as a replacement.
    """
    def __init__(self):
        super(LivereloadWatchdogWatcher, self).__init__()
        # Set to True by the handler when a watched file changes.
        self._changed = False
        # Allows the LivereloadWatchdogWatcher
        # instance to set the file which was
        # modified. Used for output purposes only.
        self._action_file = None
        # The polling observer is started immediately; watches are
        # attached later via watch().
        self._observer = PollingObserver()
        self._observer.start()

        # Compatibility with livereload's builtin watcher

        # Accessed by LiveReloadHandler's on_message method to decide if a task
        # has to be added to watch the cwd.
        self._tasks = True

        # Accessed by LiveReloadHandler's watch_task method. When set to a
        # boolean false value, everything is reloaded in the browser ('*').
        self.filepath = None

        # Accessed by Server's serve method to set reload time to 0 in
        # LiveReloadHandler's poll_tasks method.
        self._changes = []

    #pylint: disable=unused-argument
    def watch(self, path, *args, **kwargs):
        # Extra args/kwargs are accepted only for interface
        # compatibility with livereload; they are ignored here.
        event_handler = _WatchdogHandler(self)
        self._observer.schedule(event_handler, path=path, recursive=True)
def _start_observer(self):
    """Start the directory observer thread.

    The ._observer thread is controlled by the ._monitor_thread.

    Falls back to a polling observer when the inotify watch limit is
    reached (inotify raises ENOSPC/EMFILE on start in that case).
    """
    assert os.path.isdir(self.app.path)
    # BUG FIX: Thread.isAlive() was removed in Python 3.9; use the
    # modern is_alive() spelling.
    assert self._observer is None or not self._observer.is_alive()
    event_handler = self.AppPathFileSystemEventHandler(self.app)
    self._observer = Observer()
    self._observer.schedule(event_handler, self.app.path, recursive=True)
    try:
        self._observer.start()
    except OSError as error:
        if error.errno in (errno.ENOSPC, errno.EMFILE) and 'inotify' in str(error):
            # We reached the inotify watch limit, using polling-based fallback observer.
            self._observer = PollingObserver()
            self._observer.schedule(event_handler, self.app.path, recursive=True)
            self._observer.start()
        else:
            # Reraise unrelated errors; bare raise preserves the traceback.
            raise
def start(self):
    """Read watchdog settings from config, start an Observer on the
    configured path, and block until the observer dies or the user
    interrupts with Ctrl-C."""
    path = self.config.get('watchdog', 'path')
    patterns = self.config.get('watchdog', 'patterns').split(';')
    ignore_directories = self.config.getboolean('watchdog', 'ignore_directories')
    ignore_patterns = self.config.get('watchdog', 'ignore_patterns').split(';')
    case_sensitive = self.config.getboolean('watchdog', 'case_sensitive')
    recursive = self.config.getboolean('watchdog', 'recursive')
    event_handler = PatternMatchingEventHandler(
        patterns=patterns,
        ignore_patterns=ignore_patterns,
        ignore_directories=ignore_directories,
        case_sensitive=case_sensitive)
    event_handler.on_created = self.on_created
    # event_handler.on_modified = self.on_modified
    observer = Observer()
    observer.schedule(path=path, event_handler=event_handler, recursive=recursive)
    observer.start()
    # BUG FIX: corrected log-message typos ("startting" -> "starting",
    # "stoped" -> "stopped"); use lazy %-style logger arguments.
    self.logger.info('WatchDog Observer for HCS/AFS/AAS is starting.....')
    self.logger.info('patterns=%s', patterns)
    self.logger.info('path=%s', path)
    try:
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        self.logger.debug('WatchDog Observer is stopped.')
        observer.join()
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, poll=False, extensions=None):
    """
    Starts a server to render the specified file or directory
    containing a README.

    :param directory: directory to watch (defaults to the cwd)
    :param poll: use the polling observer (e.g. for network mounts)
    :param extensions: file extensions to react to (defaults to none/all,
        as interpreted by ChangeHandler)
    :raises ValueError: if *directory* is given but does not exist
    """
    # BUG FIX: the original used a mutable default argument
    # (extensions=[]), which is shared across calls.
    if extensions is None:
        extensions = []
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')
    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()
    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()
    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def watch(self):
    """
    Start watching
    """
    logger.info('Watching directory %s' % self.directory)
    # Bind the callback locally so the nested handler class can close
    # over it without touching self.
    notify = self.callback

    class NewFileEventHandler(FileSystemEventHandler):
        # Forward every newly created regular file to the callback.
        def on_created(self, event):
            if event.is_directory:
                return
            logger.info('Detected new file: %s' % event.src_path)
            notify(event.src_path)

    # Use polling observer (rather than filesystem-specific observers),
    # because it is more reliable.
    observer = PollingObserver(timeout=self.sleep_time)
    observer.schedule(NewFileEventHandler(), self.directory, recursive=False)
    observer.start()
    # Idle while the observer thread does the work.
    try:
        while True:
            sleep(self.sleep_time)
    # Exit gracefully
    except KeyboardInterrupt:
        logger.info('Detected interrupt. Stopping observer.')
        observer.stop()
        observer.join()
def watch(self, run_id, step_key):
    """Begin watching stdout/stderr compute logs for one run step,
    lazily creating the shared polling observer on first use."""
    watch_key = self._watch_key(run_id, step_key)
    if watch_key in self._watchers:
        return  # already watching this step

    stdout_path = self._manager.get_local_path(run_id, step_key, ComputeIOType.STDOUT)
    stderr_path = self._manager.get_local_path(run_id, step_key, ComputeIOType.STDERR)
    update_paths = [stdout_path, stderr_path]
    complete_paths = [self._manager.complete_artifact_path(run_id, step_key)]
    directory = os.path.dirname(stderr_path)

    if not self._observer:
        # One shared observer services every watched step.
        self._observer = PollingObserver(self._manager.polling_timeout)
        self._observer.start()

    ensure_dir(directory)
    handler = LocalComputeLogFilesystemEventHandler(
        self, run_id, step_key, update_paths, complete_paths)
    self._watchers[watch_key] = self._observer.schedule(handler, str(directory))
def monitor(self):
    """Watch every monitored directory and dispatch events until
    interrupted (Ctrl-C stops monitoring and joins the observer)."""
    # Set up the event handler
    event_handler = MyEventHandler(patterns=['*'],
                                   ignore_patterns=['version.py'],
                                   ignore_directories=True)
    event_handler.setup(self)
    # Extract the set of directories to listen to
    listen_dirs = self.get_dirs_to_monitor()
    logger.debug("Starting observer: %s" % RippleConfig().monitor)
    # BUG FIX: the original always constructed a native Observer and
    # then discarded it when polling was configured; choose once.
    if RippleConfig().monitor == "poll":
        self.observer = PollingObserver()
    else:
        self.observer = Observer()
    for d in listen_dirs:
        # Put this in a guard so it doesn't crash if the dir doesn't exist
        if os.path.isdir(d):
            logger.info("Monitoring: %s" % d)
            self.observer.schedule(event_handler, d, recursive=True)
        else:
            logger.error("Directory does not exist: %s" % d)
    try:
        self.observer.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        self.stop_monitoring()
        self.observer.join()
def start_observer(self):
    """Create a polling observer, attach one RenderHandler shared by
    every watched path, and start the observer thread."""
    self.observer = PollingObserver()
    # BUG FIX: dropped a no-op call to should_keep_running() — it is a
    # query method and its return value was discarded.
    self.observer.handler = RenderHandler(self.args)
    for path in self.observer_paths:
        self.observer.schedule(self.observer.handler, path, recursive=True)
    self.observer.start()
class LocalComputeLogSubscriptionManager(object):
    """Fans out local compute-log file updates to registered
    subscriptions, using one shared polling observer for all steps."""

    def __init__(self, manager):
        self._manager = manager
        # key -> list of ComputeLogSubscription
        self._subscriptions = defaultdict(list)
        # key -> watchdog watch handle returned by observer.schedule()
        self._watchers = {}
        self._observer = PollingObserver(WATCHDOG_POLLING_TIMEOUT)
        self._observer.start()

    def _key(self, run_id, step_key):
        # Composite key identifying one step of one run.
        return '{}:{}'.format(run_id, step_key)

    def add_subscription(self, subscription):
        """Register a subscription and ensure its step is being watched."""
        check.inst_param(subscription, 'subscription', ComputeLogSubscription)
        key = self._key(subscription.run_id, subscription.step_key)
        self._subscriptions[key].append(subscription)
        self.watch(subscription.run_id, subscription.step_key)

    def remove_all_subscriptions(self, run_id, step_key):
        """Complete and drop every subscription for the given step."""
        key = self._key(run_id, step_key)
        for subscription in self._subscriptions.pop(key, []):
            subscription.complete()

    def watch(self, run_id, step_key):
        """Watch the step's log directory; idempotent per step."""
        key = self._key(run_id, step_key)
        if key in self._watchers:
            return
        update_paths = [
            self._manager.get_local_path(run_id, step_key, ComputeIOType.STDOUT),
            self._manager.get_local_path(run_id, step_key, ComputeIOType.STDERR),
        ]
        complete_paths = [
            self._manager.complete_artifact_path(run_id, step_key)
        ]
        # stdout and stderr live in the same directory, so watching the
        # stderr parent covers both update paths.
        directory = os.path.dirname(
            self._manager.get_local_path(run_id, step_key, ComputeIOType.STDERR))
        ensure_dir(directory)
        self._watchers[key] = self._observer.schedule(
            LocalComputeLogFilesystemEventHandler(
                self, run_id, step_key, update_paths, complete_paths),
            directory,
        )

    def notify_subscriptions(self, run_id, step_key):
        """Ask each subscription for the step to fetch new log data."""
        key = self._key(run_id, step_key)
        for subscription in self._subscriptions[key]:
            subscription.fetch()

    def unwatch(self, run_id, step_key, handler):
        """Detach *handler* from the observer and forget the watch."""
        key = self._key(run_id, step_key)
        if key in self._watchers:
            self._observer.remove_handler_for_watch(handler, self._watchers[key])
            del self._watchers[key]
def get_contracts_code_observer(project_dir):
    """Return an (unstarted) polling observer that reacts to contract
    code changes inside the project's build directory."""
    handler = ContractCodeChangedEventHandler(project_dir=project_dir)
    observer = PollingObserver()
    observer.schedule(handler, utils.get_build_dir(project_dir), recursive=False)
    return observer
def __init__(self, extract_subs: ExtractSubs, target_path: str):
    """Watch *target_path* recursively for finished ``*.mkv`` files and
    hand them to the subtitle extraction pipeline."""
    super().__init__()
    self.cease_continuous_run = threading.Event()
    self._extract_subs = extract_subs
    self._target_path = target_path
    self._file_observer = PollingObserver(NewFilesListener.DEFAULT_OBSERVER_TIMEOUT)
    self._event_handler = FileFinallyCreatedEventHandler(
        ["*.mkv"], self._file_predicate, self._extract_and_merge)
    self._file_observer.schedule(self._event_handler, target_path, recursive=True)
def get_observer(config, handler):
    """Build an (unstarted) polling observer watching ``config.path``
    recursively with the supplied handler."""
    watcher = PollingObserver()
    watcher.schedule(handler, config.path, recursive=True)
    return watcher
def __init__(self, it, paths):
    """Materialize the iterable and start watching every existing path
    for changes (this object doubles as the event handler)."""
    self.index = 0
    self.items = list(it)
    self.added = []
    self.observer = PollingObserver()
    # Silently skip paths that do not exist.
    for candidate in paths:
        if os.path.exists(candidate):
            self.observer.schedule(self, candidate, recursive=True)
    self.observer.start()
def get_active_dir_observer(project_dir, event_handler):
    """
    Setup a polling observer on the project's blockchains directory.

    This directory contains the .active-chain symlink which is watched
    for.
    """
    watcher = PollingObserver()
    watcher.schedule(event_handler, get_blockchains_dir(project_dir), recursive=False)
    return watcher
def __init__(self, pool, query, src_path, patterns=None, ignore_directories=False,
             recursive=True, timeout=1, key=1):
    """Prepare a polling observer plus an InsertToSQL handler for
    ``*.txt`` files (by default) under *src_path*."""
    self.src_path = src_path
    self.recursive = recursive
    self.event_observer = PollingObserver(timeout=timeout)
    # The default pattern is resolved here rather than in the signature
    # so the argument default stays immutable.
    self.event_handler = InsertToSQL(
        pool, query,
        patterns=["*.txt"] if patterns is None else patterns,
        ignore_directories=ignore_directories,
        key=key)
def watch_server(root_dir):
    """Return an (unstarted) polling observer watching *root_dir*
    recursively with a OneDirServerEventHandler."""
    watcher = PollingObserver()
    watcher.schedule(OneDirServerEventHandler(), path=root_dir, recursive=True)
    return watcher
def __init__(self, paths: PATHS = None):
    """Start a polling observer over each given path; this object acts
    as its own event handler."""
    self._changed = False
    self._observer = PollingObserver()
    for entry in (paths or []):  # type: ignore
        print("watching for changes %r" % entry)
        self._observer.schedule(self, str(entry), recursive=True)
    self._observer.start()
def main(argv): args = Args(sys.argv[1:]) # now that logging is setup log our settings args.log_settings() src_path = args.path # Did we ask for debug? if args.debug: set_debug() if args.globus_debug: set_gt_debug() # using globus, init to prompt for endpoiont activation etc GlobusTransfer(args.source, args.destination, args.destination_dir, src_path) event_handler = Handler(args) if args.prepopulate: event_handler.prepopulate() # observer = watchdog.observers.Observer() observer = PollingObserver() observer.schedule(event_handler, path=src_path, recursive=True) # setup signal handler def dump_status(event_handler, signalNumber, frame, details=False): """Dump the Handler() status when USR1 is recived.""" event_handler.status(details=details) signal.signal(signal.SIGUSR1, partial(dump_status, event_handler)) signal.signal(signal.SIGUSR2, partial(dump_status, event_handler, details=True)) observer.start() s = sched.scheduler(time.time, time.sleep) logger.info("Starting Main Event Loop") try: while True: logger.info( f"Starting iteration {event_handler.iteration} will sleep {args.sleep} seconds" ) s.enter( args.sleep, 1, event_handler.new_iteration, argument=(args.dwell_time, ), ) s.run() except KeyboardInterrupt: observer.stop() observer.join()
def watch(script, callback):
    """Observe the directory containing *script* and invoke *callback*
    when the script file is modified.

    :raises RuntimeError: if the script is already being observed
    """
    if script in _observers:
        raise RuntimeError("Script already observed")
    full_path = os.path.abspath(script.filename)
    handler = _ScriptModificationHandler(callback,
                                         filename=os.path.basename(full_path))
    watcher = Observer()
    watcher.schedule(handler, os.path.dirname(full_path))
    watcher.start()
    _observers[script] = watcher
class Watcher:
    """Polls SS_DIR (possibly a remote mount) for changes."""

    def __init__(self):
        # A polling observer is used because SS_DIR may be a remote
        # mount; swap in Observer() if SS_DIR is local.
        self.observer = PollingObserver()

    def run(self):
        """Schedule the change handler on SS_DIR, start observing, and
        enter the main loop."""
        self.observer.schedule(file_changed(), SS_DIR, recursive=True)
        self.observer.start()
        loop1()
def __init__(self, basedir='.', polling=False):
    """Initialize the Monitor.

    :param basedir: directory the monitor operates on
    :param polling: use the polling observer instead of the native one
    """
    self.observer = PollingObserver() if polling else Observer()
    self.handlers = []
    self.basedir = basedir
    self.running = False
def tricks_from(args):
    """
    Command to execute tricks from a tricks configuration file.

    Creates one observer per tricks file, schedules its tricks, and
    blocks until a WatchdogShutdown is raised.
    """
    # Select the observer backend: debug flags force a specific
    # implementation; otherwise the platform default is used.
    if args.debug_force_polling:
        from watchdog.observers.polling import PollingObserver as Observer
    elif args.debug_force_kqueue:
        from watchdog.observers.kqueue import KqueueObserver as Observer
    elif args.debug_force_winapi:
        from watchdog.observers.read_directory_changes import\
            WindowsApiObserver as Observer
    elif args.debug_force_inotify:
        from watchdog.observers.inotify import InotifyObserver as Observer
    elif args.debug_force_fsevents:
        from watchdog.observers.fsevents import FSEventsObserver as Observer
    else:
        # Automatically picks the most appropriate observer for the platform
        # on which it is running.
        from watchdog.observers import Observer

    add_to_sys_path(path_split(args.python_path))
    observers = []
    for tricks_file in args.files:
        observer = Observer(timeout=args.timeout)

        if not os.path.exists(tricks_file):
            raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), tricks_file)

        config = load_config(tricks_file)

        try:
            tricks = config[CONFIG_KEY_TRICKS]
        except KeyError:
            raise KeyError("No %r key specified in %s." % (
                CONFIG_KEY_TRICKS, tricks_file))

        # A tricks file may extend sys.path for its trick classes.
        if CONFIG_KEY_PYTHON_PATH in config:
            add_to_sys_path(config[CONFIG_KEY_PYTHON_PATH])

        # Watch the directory containing the tricks file; fall back to
        # the cwd when the file was given without a directory part.
        dir_path = os.path.dirname(tricks_file)
        if not dir_path:
            dir_path = os.path.relpath(os.getcwd())
        schedule_tricks(observer, tricks, dir_path, args.recursive)
        observer.start()
        observers.append(observer)

    try:
        while True:
            time.sleep(1)
    except WatchdogShutdown:
        # Unschedule and stop each observer, then join them all.
        for o in observers:
            o.unschedule_all()
            o.stop()
    for o in observers:
        o.join()
def watch_record(indexer, use_polling=False):
    """
    Start watching `cfstore.record_path`.

    Blocks until KeyboardInterrupt, then stops and joins the observer.

    :type indexer: rash.indexer.Indexer
    :param use_polling: use the polling backend instead of the native one
    """
    if use_polling:
        from watchdog.observers.polling import PollingObserver as Observer
        Observer  # fool pyflakes
    else:
        from watchdog.observers import Observer
    event_handler = RecordHandler(indexer)
    observer = Observer()
    observer.schedule(event_handler, path=indexer.record_path, recursive=True)
    indexer.logger.debug('Start observer.')
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.')
        observer.stop()
    indexer.logger.debug('Joining observer.')
    observer.join()
    indexer.logger.debug('Finish watching record.')
def watch_record(indexer, use_polling=False):
    """
    Start watching `cfstore.record_path`.

    :type indexer: rash.indexer.Indexer
    """
    # Polling backend is opt-in; otherwise the platform default is used.
    if use_polling:
        from watchdog.observers.polling import PollingObserver as Observer
        Observer  # fool pyflakes
    else:
        from watchdog.observers import Observer
    log = indexer.logger
    watcher = Observer()
    watcher.schedule(RecordHandler(indexer), path=indexer.record_path, recursive=True)
    log.debug("Start observer.")
    watcher.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        log.debug("Got KeyboardInterrupt. Stopping observer.")
        watcher.stop()
    log.debug("Joining observer.")
    watcher.join()
    log.debug("Finish watching record.")
def __init__(self, dbfile=":memory:", poll=False): self.db = sqlite3.connect(dbfile, check_same_thread=False) self.log = logging.getLogger(__name__) self._create_db() # self.log.warning("I'm warnin' ya!") # set up watchdog observer to monitor changes to # keyword files (or more correctly, to directories # of keyword files) self.observer = PollingObserver() if poll else Observer() self.observer.start()
def start(self):
    """
    Starts scanning scan folder for new files
    """
    try:
        folder = config.get_scan_folder_path()
        self._observer = PollingObserver()
        self._observer.schedule(self, folder, recursive=False)
        self._observer.start()
    except OSError as os_error:
        # Surface startup failures as a scanner-specific error.
        raise ScannerStartError(os_error)
def monitor_path(self, path, recursive=False):
    """(Re)start the 5-second polling observer on *path*, stopping any
    observer that is already running."""
    logger.info(f"monitor_path: {path}")
    previous = getattr(self, "observer", None)
    if previous:
        previous.stop()
    handler = FilesViewEventHandler(self, self.browser.cwd)
    self.observer = PollingObserver(5)
    self.observer.schedule(handler, path, recursive=recursive)
    self.observer.start()
def __init__(self, index, interface='wlan0'):
    """Build the wifi UI entry and start watching the ifstate file's
    directory for configuration changes.

    :param index: menu index passed through to the base UI class
    :param interface: wlan interface name (default 'wlan0')
    """
    self.interface = interface
    self.run_state = '/run/network/ifstate.{interface}'.format(interface=interface)
    self.configurations = ['', interface] + rospy.get_param("~wlan_interfaces", [])
    rospy.loginfo("Loaded wlan configs %s", self.configurations)
    super(WifiUI, self).__init__(index, len(self.configurations))
    self.config = get_configuration(self.run_state, self.configurations)
    # BUG FIX: log message was missing a space ("observingchanges").
    rospy.loginfo("Start observing changes in %s", os.path.dirname(self.run_state))
    observer = Observer()
    observer.schedule(self, os.path.dirname(self.run_state), recursive=True)
    observer.start()
def __init__(self, conn_string, poll=False):
    """Connect to the database and start a watchdog observer for
    keyword-file directories."""
    self._engine = create_engine(conn_string)
    self.db = self._engine.connect()
    self.log = logging.getLogger(__name__)
    self._create_db()
    # Watch keyword files (really, the directories that contain them);
    # polling is the opt-in for filesystems without native events.
    if poll:
        self.observer = PollingObserver()
    else:
        self.observer = Observer()
    self.observer.start()
def __init__(
    self,
    config_dir,
    input_dir,
    output_dir,
    output_mode,
    success_action=OcrTask.ON_SUCCESS_DO_NOTHING,
    archive_dir=None,
    notify_url='',
    process_existing_files=False,
    run_scheduler=True,
    polling_observer=False,
):
    """Validate configuration, set up the OCR thread pool, and
    (optionally) start watching the input directory.

    :raises AutoOcrSchedulerError: for any invalid combination of
        directories, output mode, success action, or archive dir.
    """
    self.logger = logger.getChild('scheduler')
    self.config_dir = local.path(config_dir)
    self.input_dir = local.path(input_dir)
    self.output_dir = local.path(output_dir)
    if self.input_dir == self.output_dir:
        raise AutoOcrSchedulerError('Invalid configuration. Input and output directories must not be the same to avoid recursive OCR invocation!')
    self.output_mode = output_mode.lower()
    if self.output_mode not in AutoOcrScheduler.OUTPUT_MODES:
        raise AutoOcrSchedulerError('Invalid output mode: {}. Must be one of: {}'.format(self.output_mode, ', '.join(AutoOcrScheduler.OUTPUT_MODES)))
    self.success_action = success_action.lower()
    if self.success_action not in OcrTask.SUCCESS_ACTIONS:
        raise AutoOcrSchedulerError('Invalid success action: {}. Must be one of {}'.format(self.success_action, ', '.join(OcrTask.SUCCESS_ACTIONS)))
    self.archive_dir = local.path(archive_dir) if archive_dir else None
    if self.success_action == OcrTask.ON_SUCCESS_ARCHIVE and not self.archive_dir:
        raise AutoOcrSchedulerError('Archive directory required for success action {}'.format(self.success_action))
    self.notify_url = notify_url
    # In-flight OCR tasks and their outputs, keyed/tracked for dedup.
    self.current_tasks = {}
    self.walk_existing_task = None
    self.current_outputs = set()
    # Create a Threadpool to run OCR tasks on
    self.threadpool = ThreadPoolExecutor(max_workers=3)
    # Wire up an AutoOcrWatchdogHandler
    watchdog_handler = AutoOcrWatchdogHandler(self.on_file_touched, self.on_file_deleted)
    # Schedule watchdog to observe the input directory
    if run_scheduler:
        self.observer = PollingObserver() if polling_observer else Observer()
        self.observer.schedule(watchdog_handler, self.input_dir, recursive=True)
        self.observer.start()
        self.logger.warning('Watching %s', self.input_dir)
    else:
        self.observer = None
        self.logger.warning('Not watching %s', self.input_dir)
    # Process existing files in input directory, if requested
    if process_existing_files:
        self.walk_existing_task = self.threadpool.submit(self.walk_existing_files)
def __init__(self, dirpath, scanq, options):
    """Set up the scanner thread's handler and observer.

    A PollingObserver is used when options.poll is set (reliable on
    network mounts); otherwise the native Observer.
    """
    super().__init__()
    self.__dirpath = dirpath
    self.__handler = WatchdogHandler(scanq, options)
    # BUG FIX: the original always built a native Observer and then
    # discarded it when polling was requested; choose once instead.
    self.__observer = PollingObserver() if options.poll else Observer()
    self.__options = options
    self.__scanq = scanq
    self.__stop = False
def watch(self):
    """Poll the pickup directory until keep_running goes false or the
    user interrupts with Ctrl-C."""
    watcher = PollingObserver()
    watcher.schedule(self.pickup_event_processor, path=self.pickup_dir)
    watcher.start()
    try:
        while self.keep_running:
            sleep(3)
    except KeyboardInterrupt:
        watcher.stop()
        watcher.join()
def watch(self, experiment, verbose=0):
    ''' Watch for a new checkpoint and run an evaluation step '''
    # Use a polling observer because slurm doesn't seem to correctly handle inotify events :/
    self.observer = PollingObserver() if self.config.polling else Observer()
    event_handler = CheckpointEventHandler(self.on_new_checkpoint, experiment, verbose)
    self.observer.schedule(event_handler, path=self.config.watch_directory)
    self.observer.start()

    # BUG FIX: register the shutdown hook *before* blocking; the
    # original only registered it after the wait loop exited, so an
    # interpreter exit during the wait never stopped the observer.
    atexit.register(self.shutdown)

    while not self.should_exit:
        time.sleep(1)
class SuiteTable(object):
    """SQLite-backed store of keyword collections and their keywords,
    plus a watchdog observer tracking keyword-file directories."""

    def __init__(self, dbfile=":memory:", poll=False):
        # check_same_thread=False: the observer thread also touches the db.
        self.db = sqlite3.connect(dbfile, check_same_thread=False)
        self.log = logging.getLogger(__name__)
        self._create_db()
        # set up watchdog observer to monitor changes to
        # keyword files (or more correctly, to directories
        # of keyword files)
        self.observer = PollingObserver() if poll else Observer()
        self.observer.start()

    def _create_db(self):
        # Create the schema on first use; re-running is a no-op.
        if not self._table_exists("collection_table"):
            self.db.execute("""
                CREATE TABLE collection_table
                (collection_id INTEGER PRIMARY KEY AUTOINCREMENT,
                 name TEXT COLLATE NOCASE,
                 type COLLATE NOCASE,
                 version TEXT,
                 scope TEXT,
                 namedargs TEXT,
                 path TEXT,
                 doc TEXT,
                 doc_format TEXT)
            """)
            self.db.execute("""
                CREATE INDEX collection_index
                ON collection_table (name)
            """)
        if not self._table_exists("keyword_table"):
            self.db.execute("""
                CREATE TABLE keyword_table
                (keyword_id INTEGER PRIMARY KEY AUTOINCREMENT,
                 name TEXT COLLATE NOCASE,
                 collection_id INTEGER,
                 doc TEXT,
                 args TEXT)
            """)
            self.db.execute("""
                CREATE INDEX keyword_index
                ON keyword_table (name)
            """)

    def _table_exists(self, name):
        # NOTE(review): *name* is interpolated with %; acceptable only
        # because callers pass fixed table names, never user input.
        cursor = self.db.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='%s'
        """ % name)
        return len(cursor.fetchall()) > 0
class Crafter(object):
    """
    Setup and manage watchdog daemon.
    """

    def __init__(self, base_dir):
        # Cache paths
        source = os.path.join(base_dir, 'src')
        preview = os.path.join(base_dir, 'preview')
        # Handler renders files from src into preview
        self.handler = ObserverHandler(source, preview)
        # Observer watches the source tree recursively
        self.observer = Observer()
        self.observer.schedule(self.handler, path=source, recursive=True)

    def craft(self):
        """
        Start watching src directory
        """
        self.observer.start()

    def shutdown(self):
        """
        Properly shutdown watchdog daemon
        """
        self.observer.stop()
        self.observer.join()
def get_static_assets_observer(project_dir):
    """Return an (unstarted) polling observer watching the build dir
    for contracts.js changes and, when the project ships static assets,
    the assets directory too."""
    observer = PollingObserver()
    observer.schedule(
        ContractsJSChangedEventHandler(project_dir=project_dir),
        utils.get_build_dir(project_dir),
        recursive=False,
    )
    if project_has_assets(project_dir):
        observer.schedule(
            AssetsChangedEventHandler(project_dir=project_dir),
            get_static_assets_dir(project_dir),
            recursive=True,
        )
    return observer
def get_contracts_observer(project_dir, contract_filters=None, compiler_kwargs=None):
    """Return an (unstarted) polling observer that fires when contract
    sources or project libraries change."""
    handler = ContractSourceChangedEventHandler(
        project_dir=project_dir,
        contract_filters=[] if contract_filters is None else contract_filters,
        compiler_kwargs={} if compiler_kwargs is None else compiler_kwargs,
    )
    observer = PollingObserver()
    # Both the contracts tree and the libraries tree trigger rebuilds.
    for watch_dir in (utils.get_contracts_dir(project_dir),
                      get_project_libraries_dir(project_dir)):
        observer.schedule(handler, watch_dir, recursive=True)
    return observer
class OSFileSystem(FileSystem):
    """
    This class represents a file system implemented by the python os
    module.

    A watchdog Observer feeds filesystem events into an internal queue
    which is drained by poll().
    """
    def __init__(self, instance=os, root="."):
        FileSystem.__init__(self, instance)
        self.root = os.path.normpath(root)
        self.eventQueue = EventQueue()
        self.eventHandler = EventHandler(self.eventQueue)
        self.observer = Observer()
        self.observer.schedule(self.eventHandler, path=self.root, recursive=True)
        self.observer.start()

    def join_path(self, path, *largs):
        # Delegate straight to os.path.join.
        return os.path.join(path, *largs)

    def get_relative_path(self, path):
        # Strip the root prefix (plus separator); paths outside the
        # root are returned unchanged.
        if path.startswith(self.root):
            return path.split(self.root + os.path.sep, 1)[1]
        else:
            return path

    def open(self, path, mode="rb", buffering=None):
        # NOTE(review): *buffering* is accepted but ignored here —
        # confirm whether callers depend on it.
        return open(path, mode)

    def mkdirs(self, path, mode=511):
        # 511 == 0o777 (default permission bits before umask).
        return os.makedirs(path, mode)

    def blockchecksums(self, path):
        # rsync-style block checksums for the file at *path*.
        return blockchecksums(path)

    def delta(self, path, checksums):
        # Compute the delta of *path* against remote *checksums*.
        return delta(path, checksums)

    def patch(self, path, delta):
        # Apply the delta to a patched copy, then replace the original.
        patched = patch(path, delta)
        self.instance.remove(path)
        return self.instance.rename(patched, path)

    def poll(self):
        """Drain and return all pending filesystem events (non-blocking)."""
        r = []
        while True:
            try:
                r.append(self.eventQueue.get_nowait())
            except Empty:
                break
        return r
def __init__(self, instance=os, root="."):
    """Normalize the root path, wire an EventHandler onto the event
    queue, and start a recursive observer over the root."""
    FileSystem.__init__(self, instance)
    self.root = os.path.normpath(root)
    self.eventQueue = EventQueue()
    self.eventHandler = EventHandler(self.eventQueue)
    watcher = Observer()
    watcher.schedule(self.eventHandler, path=self.root, recursive=True)
    self.observer = watcher
    self.observer.start()
def main():
    """Script entry point."""
    from watchdog.observers.polling import PollingObserver
    from .parser import AAConfigParser
    from .tricks import AutoRunTrick

    args = _create_main_argparser().parse_args()
    configm = _apply_main_args(args)
    # The reason to use PollingObserver() is it's os-independent. And it's
    # more reliable.
    observer = PollingObserver()
    handler_for_watch = AAConfigParser(configm).schedule_with(observer, AutoRunTrick)
    handlers = set.union(*tuple(handler_for_watch.values()))
    for handler in handlers:
        handler.start()
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    for handler in handlers:
        handler.stop()
def watch_assets(options): """ Watch for changes to asset files, and regenerate js/css """ # Don't watch assets when performing a dry run if tasks.environment.dry_run: return observer = PollingObserver() CoffeeScriptWatcher().register(observer) SassWatcher().register(observer) XModuleSassWatcher().register(observer) XModuleAssetsWatcher().register(observer) print("Starting asset watcher...") observer.start() if not getattr(options, 'background', False): # when running as a separate process, the main thread needs to loop # in order to allow for shutdown by contrl-c try: while True: observer.join(2) except KeyboardInterrupt: observer.stop() print("\nStopped asset watcher.")
def __init__(self, base_dir):
    """Wire up the src -> preview observer handler (observer is not
    started here)."""
    # Cache paths
    source = os.path.join(base_dir, 'src')
    preview = os.path.join(base_dir, 'preview')
    # Init handler
    self.handler = ObserverHandler(source, preview)
    # New observer class
    self.observer = Observer()
    self.observer.schedule(self.handler, path=source, recursive=True)
def __init__(self, dbfile=":memory:", poll=False): self.db = sqlite3.connect(dbfile, check_same_thread=False) self.log = logging.getLogger(__name__) self._create_db() # set up watchdog observer to monitor changes to # keyword files (or more correctly, to directories # of keyword files) self.observer = PollingObserver() if poll else Observer() self.observer.start()
def folderObserver(pathStructure, dbPath):
    """Poll the path structure's inBox for single-file events until the
    observer dies or the user interrupts with Ctrl-C."""
    logging = DefaultLogger()
    # Use identity comparison with None (PEP 8) instead of ==.
    if pathStructure is None or pathStructure['inBox'] is None:
        message = 'Watch: Unable to run as pathStructure is undefined'
        logging.debug(message)
        return
    event_handler = singleFileWatcher(pathStructure, dbPath)
    observer = PollingObserver()
    observer.schedule(event_handler, pathStructure['inBox'], recursive=False)
    observer.start()
    try:
        # BUG FIX: dropped the redundant "True and" from the condition.
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def watch():
    """Do an initial push, then keep pushing as the working tree
    changes, until Ctrl-C."""
    # Users expect an implicit push
    push(watch=True)
    # Start the observer; cap the queue so event bursts collapse.
    watcher = PollingObserver()
    watcher.event_queue.max_size = 1
    watcher.schedule(EventHandler(), os.getcwd(), recursive=True)
    watcher.start()
    puts(colored.yellow('Watching for changes... (ctrl-c to stop)'))
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        watcher.stop()
    # Block until the thread terminates
    watcher.join()
def main():
    """Watch the working directory and rebuild on changes until
    interrupted."""
    handler = ChangeHandler()
    directory = "./"
    watcher = PollingObserver(0.35)  # Poll every 0.35 seconds
    if not os.path.exists(directory):
        os.makedirs(directory)
    # Only search in the LaTeX directory
    watcher.schedule(handler, directory, recursive=True)
    watcher.start()
    try:
        while True:
            # Sleep for 5 minutes (time doesn't really matter)
            time.sleep(60 * 5)
    except KeyboardInterrupt:
        watcher.stop()
    watcher.join()
def __init__(self, contentdir=''):
    """
    @param contentDirs are the dirs where we will load wiki files from & parse
    """
    # NOTE(review): this is Python 2 code (print statements).
    self.file_observers = []
    self.spacehandler = SpaceHandler(self)
    # Ensure the content dir path ends with a trailing slash.
    self.contentdir = contentdir if contentdir.endswith('/') else '%s/' % contentdir
    if not j.system.fs.exists(contentdir):
        print "Contentdir %s was not found .. creating it." % contentdir
        j.system.fs.createDir(contentdir)
    if contentdir.strip():
        # Watch the contentdir for changes
        observer = Observer()
        self.file_observers.append(observer)
        j.core.portal.active.watchedspaces.append(contentdir)
        print('Monitoring', contentdir)
        observer.schedule(self.spacehandler, contentdir, recursive=True)
        observer.start()
def run_watch(self):
    """Watch self.source and rebuild via RoninEventHandler on every
    change; a polling observer is used when self.poll is set."""
    if self.poll:
        from watchdog.observers.polling import PollingObserver as Observer
    else:
        from watchdog.observers import Observer
    watcher = Observer()
    watcher.schedule(RoninEventHandler(self), self.source, recursive=True)
    watcher.start()
    try:
        logger.info("Watching directory: '{0}' for changes (poll={1})".format(self.source, self.poll))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logger.info("Stopping watcher...")
        watcher.stop()
        watcher.join()
def start(self):
    """Start the alert sender thread and the directory observer.

    :raises Exception: if the alert object was already started.
    On any failure while starting the observer, self.observer is reset
    to None and the exception is re-raised.
    """
    _g_logger.debug("Starting alert message sender %s" % self.dir_to_watch)
    if self._thread is not None:
        raise Exception("The alert object has already been started.")
    self._stopping = threading.Event()
    self._thread = threading.Thread(target=self._run)
    self._thread.start()
    try:
        self.observer = Observer()
        self.observer.schedule(self, path=self.dir_to_watch)
        self.observer.start()
    except BaseException:
        # IDIOM FIX: replaced a bare "except:" with the semantically
        # identical but explicit BaseException, clarifying that *every*
        # failure resets the observer before re-raising.
        self.observer = None
        raise
def server(self, args):
    """Run the dev server in a subprocess and rebuild on any watched
    file modification until interrupted."""
    server = Process(target=self._server)
    server.start()
    event_handler = PatternMatchingEventHandler(ignore_patterns=self.WATCH_EXCLUDE)
    event_handler.on_modified = lambda event: self._build()
    observer = Observer()
    observer.schedule(event_handler, self.BASE_DIR, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except (KeyboardInterrupt, SystemExit):
        server.terminate()
        observer.stop()
        observer.join()
        # BUG FIX: corrected log-message typo "Clossing" -> "Closing".
        self.logger.info("Closing")
def __init__(self, root, userkey, salt):
    """
    Create a new server that handles the public key authentication.

    @param root: root path of the server.
    @type root: str
    @param userkey: path to the user's public key.
    @type userkey: str
    @param salt: salt value used in the authentication scheme.
    """
    super(Server, self).__init__()
    self.root = root
    self.userkey = userkey
    self.salt = salt
    self.eventQueue = EventQueue()
    self.eventHandler = EventHandler(self.eventQueue)
    # Watch the whole server root recursively, feeding filesystem
    # events into the queue via the handler.
    self.observer = Observer()
    self.observer.schedule(self.eventHandler, path=self.root, recursive=True)
    self.observer.start()