def backend_monitor(API, config, logger):
    """Watch every configured project's root directory and periodically
    reconcile on-disk state against the last crawl snapshot.

    Uses a PollingObserver on Windows (native events are unreliable
    there) and the platform-native Observer elsewhere.  Runs until
    interrupted with Ctrl-C.
    """
    logger.info("Start backend monitor")
    if platform.system() == 'Windows':
        observer = PollingObserver()
    else:
        observer = Observer()

    # Track one handler and one snapshot per project.  The original code
    # kept only a single `event_handler`/`list_last_crawl` pair, so the
    # consistency loop compared every project against the *last* project
    # scheduled -- a bug for multi-project configurations.  The duplicate
    # load_list_last_crawl() call is also collapsed to one.
    handlers = {}
    last_crawl = {}
    for project_key in config['projects']['project_list']:
        snapshot = load_list_last_crawl(config, project_key)
        handlers[project_key] = FileSystemMonitor(API, config, project_key, logger, snapshot)
        last_crawl[project_key] = snapshot
        observer.schedule(handlers[project_key], config[project_key]['rootdir'], recursive=True)
    observer.start()
    try:
        while True:
            # check the consistency between last_crawl and the current
            # list in each handler every 30s
            time.sleep(30)
            for project_key in config['projects']['project_list']:
                handler = handlers[project_key]
                if set(last_crawl[project_key]) != handler.set_last_crawl:
                    # NOTE(review): the original passed an undefined name
                    # `dirs` here (guaranteed NameError); the watched root
                    # is the most plausible intent -- confirm against
                    # log_full_run_filelist's signature.
                    log_full_run_filelist(config[project_key]['rootdir'],
                                          list(handler.set_last_crawl),
                                          config[project_key]['name'])
                    last_crawl[project_key] = load_list_last_crawl(config, project_key)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    return
def watch(self, run_id, step_key):
    """Begin watching the compute-log files for a run step, unless a
    watcher is already registered for it."""
    watch_key = self._watch_key(run_id, step_key)
    if watch_key in self._watchers:
        return

    stdout_path = self._manager.get_local_path(run_id, step_key, ComputeIOType.STDOUT)
    stderr_path = self._manager.get_local_path(run_id, step_key, ComputeIOType.STDERR)
    update_paths = [stdout_path, stderr_path]
    complete_paths = [self._manager.complete_artifact_path(run_id, step_key)]
    directory = os.path.dirname(stderr_path)

    # Lazily create and start a single shared polling observer.
    if not self._observer:
        self._observer = PollingObserver(self._manager.polling_timeout)
        self._observer.start()

    ensure_dir(directory)
    handler = LocalComputeLogFilesystemEventHandler(
        self, run_id, step_key, update_paths, complete_paths)
    self._watchers[watch_key] = self._observer.schedule(handler, str(directory))
def initialize(cls):
    """Set up logging, the work queue, worker threads and the observer."""
    cls._log = logging.getLogger(f'{cls.__module__}.{cls.__name__}')
    cls._queue = Queue(maxsize=cls.max_queue_size)
    cls._worker_list = []
    cls._running = True

    # In debug mode workers run the dummy variant instead of real work.
    target = cls.dummy_process if cls._debug_mode else cls.process
    for index in range(cls.threads):
        worker = Thread(target=target, name=f'MTT-{index}', daemon=True)
        cls._log.debug(f'Worker thread {worker.name} has been initialized')
        cls._worker_list.append(worker)

    # Network shares do not deliver native filesystem events reliably,
    # so fall back to polling there.
    observer_cls = PollingObserver if cls.is_library_network_path else Observer
    cls._observer = observer_cls()
    cls._observer.schedule(SeriesHandler(cls._queue), cls.download_dir, True)
def notify():
    """Watch the Unifi image directory and invoke on_file_created for
    each newly created *full.jpg file.

    Uses a PollingObserver; the commented-out alternative configuration
    lines in the original have been removed as dead code.
    """
    # Watchdog pattern configuration: only full-size snapshots trigger.
    patterns = ["*full.jpg"]
    ignore_patterns = ""
    ignore_directories = False
    case_sensitive = False
    path = watchConfig.config['fileWatch']['pathToImagesUnifi']
    go_recursively = True

    # Wire the handler: only creation events are acted upon.
    event_handler = PatternMatchingEventHandler(patterns, ignore_patterns,
                                                ignore_directories, case_sensitive)
    event_handler.on_created = on_file_created

    observer = PollingObserver()
    observer.schedule(event_handler, path, recursive=go_recursively)
    observer.start()
def watch_dir(self, sld):
    """Prime and start a watchdog observer on directory *sld*."""
    self.logger.debug("watch_dir %s" % sld)

    # Pick the observer implementation: polling is slower but more
    # dependable; the native observer is preferred when it works.
    if self.force_polling:
        self.logger.info(
            "sr_watch polling observer overriding default (slower but more reliable.)"
        )
        self.observer = PollingObserver()
    else:
        self.logger.info(
            "sr_watch optimal observer for platform selected (best when it works)."
        )
        self.observer = Observer()

    self.obs_watched = []
    self.watch_handler = SimpleEventHandler(self)
    self.walk_priming(sld)
    self.logger.info(
        "sr_watch priming walk done, but not yet active. Starting...")
    self.observer.start()
    self.logger.info("sr_watch now active on %s posting to exchange: %s"
                     % (sld, self.post_exchange))

    # Optionally announce everything already present at startup.
    if self.post_on_start:
        self.walk(sld)
def setup_observers(node_list):
    """Setup the watchdogs to look for new files in the nodes."""
    global obs_list

    # For every auto_import node: crawl existing files into the DB first,
    # then attach a watchdog for future arrivals.  Other nodes get None.
    obs_list = []
    for node in node_list:
        if not node.auto_import:
            obs_list.append(None)
            continue

        log.info('Crawling base directory "%s" for new files.' % node.root)
        for acq_name, _dirs, f_list in os.walk(node.root):
            log.info("Crawling %s." % acq_name)
            for file_name in sorted(f_list):
                import_file(node, node.root, os.path.basename(acq_name), file_name)

        # NFS mounts break the default Observer().  Heuristic: a node
        # whose host equals its name is assumed local -- not failsafe,
        # but it will do for now.
        if node.host == node.name:
            obs_list.append(Observer())
        else:
            obs_list.append(PollingObserver(timeout=120))
        obs_list[-1].schedule(RegisterFile(node), node.root, recursive=True)

    # Start up the watchdog threads
    for obs in obs_list:
        if obs:
            obs.start()
def watch(self):
    """ Start watching """
    logger.info('Watching directory %s' % self.directory)

    callback = self.callback

    # Handler that forwards each newly created (non-directory) file to
    # the configured callback.
    class NewFileEventHandler(FileSystemEventHandler):
        def on_created(self, event):
            if event.is_directory:
                return
            logger.info('Detected new file: %s' % event.src_path)
            callback(event.src_path)

    # A polling observer is used instead of the filesystem-specific
    # observers because it is more reliable.
    observer = PollingObserver(timeout=self.sleep_time)
    observer.schedule(NewFileEventHandler(), self.directory, recursive=False)
    observer.start()

    # Block until interrupted, then shut the observer down cleanly.
    try:
        while True:
            sleep(self.sleep_time)
    except KeyboardInterrupt:
        logger.info('Detected interrupt. Stopping observer.')
        observer.stop()
        observer.join()
def start_observer(self):
    """Create a polling observer, attach a RenderHandler to every
    configured path, and start watching.

    The original called ``self.observer.should_keep_running()`` and
    discarded the boolean result -- a no-op query, removed here.
    """
    self.observer = PollingObserver()
    # The handler is stored on the observer so other code can reach it.
    self.observer.handler = RenderHandler(self.args)
    for path in self.observer_paths:
        self.observer.schedule(self.observer.handler, path, recursive=True)
    self.observer.start()
def monitor(self):
    """Watch every monitored directory and dispatch events until
    interrupted."""
    # Set up the event handler; version.py is ignored to avoid
    # self-triggering on generated files.
    event_handler = MyEventHandler(patterns=['*'],
                                   ignore_patterns=['version.py'],
                                   ignore_directories=True)
    event_handler.setup(self)

    # Extract the set of directories to listen to
    listen_dirs = self.get_dirs_to_monitor()

    # Pick the observer implementation exactly once.  The original
    # always constructed a native Observer and then threw it away
    # whenever the config requested polling.
    logger.debug("Starting observer: %s" % RippleConfig().monitor)
    if RippleConfig().monitor == "poll":
        self.observer = PollingObserver()
    else:
        self.observer = Observer()

    for d in listen_dirs:
        # Guard so a missing directory doesn't crash scheduling.
        if os.path.isdir(d):
            logger.info("Monitoring: %s" % d)
            self.observer.schedule(event_handler, d, recursive=True)
        else:
            logger.error("Directory does not exist: %s" % d)

    try:
        self.observer.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        self.stop_monitoring()
        self.observer.join()
def __init__(self, target_dir: str, do_work: Callable, patterns: 'Optional[List[str]]' = None):
    """Watch *target_dir* recursively, routing events through a
    DedupHandler to *do_work*.

    ``patterns`` defaults to ``None`` (the handler's default applies),
    so the original annotation ``List[str] = None`` was a typing error;
    it is written here as the string ``'Optional[List[str]]'`` so the
    module does not need ``Optional`` imported at runtime.
    """
    self._handler = DedupHandler(do_work, patterns)
    self._observer = PollingObserver()
    self._observer.schedule(self._handler, target_dir, recursive=True)
def _start_observer(self):
    """Start the directory observer thread.

    The ._observer thread is controlled by the ._monitor_thread.
    """
    assert os.path.isdir(self.app.path)
    # Thread.isAlive() was removed in Python 3.9; is_alive() is the
    # correct (and long-available) spelling.
    assert self._observer is None or not self._observer.is_alive()
    event_handler = self.AppPathFileSystemEventHandler(self.app)
    self._observer = Observer()
    self._observer.schedule(event_handler, self.app.path, recursive=True)
    try:
        self._observer.start()
    except OSError as error:
        if error.errno in (errno.ENOSPC, errno.EMFILE) and 'inotify' in str(error):
            # We reached the inotify watch limit, using polling-based fallback observer.
            self._observer = PollingObserver()
            self._observer.schedule(event_handler, self.app.path, recursive=True)
            self._observer.start()
        else:
            # reraise unrelated error
            raise error
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    observer = PollingObserver()
    # Register every asset watcher on the shared observer.
    for watcher in (CoffeeScriptWatcher(), SassWatcher(),
                    XModuleSassWatcher(), XModuleAssetsWatcher()):
        watcher.register(observer)

    print("Starting asset watcher...")
    observer.start()

    # In background mode the caller keeps the process alive; otherwise
    # loop here so Ctrl-C can shut the watcher down.
    if getattr(options, 'background', False):
        return
    try:
        while True:
            observer.join(2)
    except KeyboardInterrupt:
        observer.stop()
        print("\nStopped asset watcher.")
def main():
    """Script entry point."""
    from watchdog.observers.polling import PollingObserver
    from .parser import AAConfigParser
    from .tricks import AutoRunTrick

    arg_parser = _create_main_argparser()
    configm = _apply_main_args(arg_parser.parse_args())

    # PollingObserver is os-independent and more reliable than the
    # platform-native observers.
    observer = PollingObserver()
    config_parser = AAConfigParser(configm)
    handler_for_watch = config_parser.schedule_with(observer, AutoRunTrick)
    handlers = set.union(*tuple(handler_for_watch.values()))

    for handler in handlers:
        handler.start()
    observer.start()

    # Idle until Ctrl-C, then tear everything down in reverse order.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    for handler in handlers:
        handler.stop()
def __init__(self, use_polling=False):
    """Create the underlying watchdog observer and start it immediately."""
    super(LivereloadWatchdogWatcher, self).__init__()
    self._changed = False

    # TODO: Hack.
    # Allows the LivereloadWatchdogWatcher instance to set the file
    # which was modified. Used for output purposes only.
    self._action_file = None

    self._observer = PollingObserver() if use_polling else Observer()
    self._observer.start()

    # Compatibility with livereload's builtin watcher

    # Accessed by LiveReloadHandler's on_message method to decide if a task
    # has to be added to watch the cwd.
    self._tasks = True

    # Accessed by LiveReloadHandler's watch_task method. When set to a
    # boolean false value, everything is reloaded in the browser ('*').
    self.filepath = None

    # Accessed by Server's serve method to set reload time to 0 in
    # LiveReloadHandler's poll_tasks method.
    self._changes = []
def __init__(self, extract_subs: ExtractSubs, target_path: str):
    """Watch *target_path* recursively for finished .mkv files and hand
    them to the subtitle extractor."""
    super().__init__()
    self.cease_continuous_run = threading.Event()
    self._extract_subs = extract_subs
    self._target_path = target_path

    # Poll rather than rely on native events; the timeout comes from the
    # class-level default.
    self._file_observer = PollingObserver(NewFilesListener.DEFAULT_OBSERVER_TIMEOUT)
    self._event_handler = FileFinallyCreatedEventHandler(
        ["*.mkv"], self._file_predicate, self._extract_and_merge)
    self._file_observer.schedule(self._event_handler, target_path, recursive=True)
def get_contracts_code_observer(project_dir):
    """Return a polling observer (not started) that watches the
    project's build directory for contract-code changes."""
    handler = ContractCodeChangedEventHandler(project_dir=project_dir)
    observer = PollingObserver()
    observer.schedule(handler, utils.get_build_dir(project_dir), recursive=False)
    return observer
def get_observer(config, handler):
    """Build a polling observer watching ``config.path`` recursively
    with the given handler; the caller is responsible for starting it."""
    observer = PollingObserver()
    observer.schedule(handler, config.path, recursive=True)
    return observer
def run(self):
    """Watch the queue directory and dispatch file events until
    interrupted.

    The original slept in a loop and called ``observer.join()`` (with no
    timeout) inside the loop body, which blocks forever on the first
    iteration; the wait and the shutdown are untangled here.
    """
    observer = PollingObserver()
    file_event_handler = FileEventHandler(self.event_handler)
    observer.schedule(file_event_handler, "/opt/parent-radio-hk/queue", False)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def __init__(self, paths: PATHS = None):
    """Start watching each of *paths* recursively for changes."""
    self._changed = False
    self._observer = PollingObserver()
    # A falsy paths argument (None or empty) schedules nothing.
    for entry in paths or ():  # type: ignore
        print("watching for changes %r" % entry)
        self._observer.schedule(self, str(entry), recursive=True)
    self._observer.start()
def __init__(self, it, paths):
    """Materialize *it* as the item list and watch each existing path
    for additions."""
    self.index = 0
    self.items = list(it)
    self.added = []
    self.observer = PollingObserver()
    # Only paths that currently exist can be scheduled; skip the rest.
    for candidate in filter(os.path.exists, paths):
        self.observer.schedule(self, candidate, recursive=True)
    self.observer.start()
def get_active_dir_observer(project_dir, event_handler):
    """
    Setup a polling observer on the project's blockchains directory.

    This directory contains the .active-chain symlink which is watched
    for.
    """
    observer = PollingObserver()
    observer.schedule(event_handler, get_blockchains_dir(project_dir), recursive=False)
    return observer
def __get_observer(self, option):
    """Map the configured observer option to an observer instance.

    'polling' -> PollingObserver, 'native' -> Observer; anything else
    (including None) is logged and falls back to the native observer.
    """
    normalized = option.lower() if option is not None else None
    if normalized == 'polling':
        return PollingObserver(timeout=3)
    if normalized == 'native':
        return Observer()
    self.logger.log.debug('Unknown observer option {} specified'.format(option))
    return Observer()
def __init__(self, pool, query, src_path, patterns=None, ignore_directories=False,
             recursive=True, timeout=1, key=1):
    """Prepare a polling observer plus an SQL-insert event handler for
    *src_path* (nothing is started here)."""
    self.src_path = src_path
    self.recursive = recursive
    self.event_observer = PollingObserver(timeout=timeout)
    # The pattern default is resolved here to avoid a mutable default arg.
    self.event_handler = InsertToSQL(
        pool, query,
        patterns=["*.txt"] if patterns is None else patterns,
        ignore_directories=ignore_directories, key=key)
def main(argv):
    """Script entry point: watch the source path and transfer new files
    via Globus on a periodic schedule until interrupted.

    The original ignored its ``argv`` parameter and re-read
    ``sys.argv`` -- fixed so callers can inject arguments (callers
    passing ``sys.argv`` see identical behavior).
    """
    args = Args(argv[1:])

    # now that logging is setup log our settings
    args.log_settings()
    src_path = args.path

    # Optional debug switches.
    if args.debug:
        set_debug()
    if args.globus_debug:
        set_gt_debug()

    # using globus, init to prompt for endpoint activation etc
    GlobusTransfer(args.source, args.destination, args.destination_dir, src_path)

    event_handler = Handler(args)
    if args.prepopulate:
        event_handler.prepopulate()

    # Polling observer: works on network mounts where inotify does not.
    observer = PollingObserver()
    observer.schedule(event_handler, path=src_path, recursive=True)

    # setup signal handler
    def dump_status(event_handler, signalNumber, frame, details=False):
        """Dump the Handler() status when USR1 is received."""
        event_handler.status(details=details)

    # USR1 dumps a summary; USR2 a detailed status report.
    signal.signal(signal.SIGUSR1, partial(dump_status, event_handler))
    signal.signal(signal.SIGUSR2, partial(dump_status, event_handler, details=True))

    observer.start()
    s = sched.scheduler(time.time, time.sleep)
    logger.info("Starting Main Event Loop")
    try:
        while True:
            logger.info(
                f"Starting iteration {event_handler.iteration} will sleep {args.sleep} seconds"
            )
            s.enter(
                args.sleep,
                1,
                event_handler.new_iteration,
                argument=(args.dwell_time, ),
            )
            s.run()
    except KeyboardInterrupt:
        observer.stop()
        observer.join()
def watch_file(directory, filename):
    """Watch *directory* for changes until the observer stops.

    Fixes: ``Thread.isAlive()`` was removed in Python 3.9 (renamed
    ``is_alive()``), and the redundant ``== True`` comparison is gone.
    NOTE(review): the ``filename`` parameter is unused -- the whole
    directory is watched; it is kept for interface compatibility.
    """
    observer = PollingObserver()
    observer.schedule(MyHandler(observer), path=directory)
    observer.start()
    print('\nwatching for changes to {}'.format(directory))
    while observer.is_alive():
        time.sleep(1)
    observer.join()
def __init__(self, filename: str, signal: NamedSignal) -> None:
    """Watch the directory containing *filename* and fire *signal* when
    the file changes."""
    self._filename = filename
    self._changed_signal: NamedSignal = signal

    watcher = PollingObserver()
    watcher.schedule(WatcherEventHandler(filename, self._changed),
                     path.dirname(filename))
    watcher.start()
    self._observer = watcher
def __init__(self, dbfile=":memory:", poll=False):
    """Open the keyword database and start the filesystem observer."""
    self.db = sqlite3.connect(dbfile, check_same_thread=False)
    self.log = logging.getLogger(__name__)
    self._create_db()

    # Watchdog observer that monitors changes to keyword files (or more
    # correctly, to directories of keyword files).
    if poll:
        self.observer = PollingObserver()
    else:
        self.observer = Observer()
    self.observer.start()
def start(self):
    """
    Starts scanning scan folder for new files
    """
    try:
        watched_path = config.get_scan_folder_path()
        observer = PollingObserver()
        observer.schedule(self, watched_path, recursive=False)
        self._observer = observer
        self._observer.start()
    except OSError as os_error:
        # Surface startup failures as a domain-specific error.
        raise ScannerStartError(os_error)
def __init__(self, conn_string, poll=False):
    """Connect to the database and start the keyword-file observer."""
    self._engine = create_engine(conn_string)
    self.db = self._engine.connect()
    self.log = logging.getLogger(__name__)
    self._create_db()

    # Watchdog observer that monitors changes to keyword files (or more
    # correctly, to directories of keyword files).
    if poll:
        self.observer = PollingObserver()
    else:
        self.observer = Observer()
    self.observer.start()
def __init__(
    self,
    config_dir,
    input_dir,
    output_dir,
    output_mode,
    success_action=OcrTask.ON_SUCCESS_DO_NOTHING,
    archive_dir=None,
    notify_url='',
    process_existing_files=False,
    run_scheduler=True,
    polling_observer=False,
):
    """Validate the scheduler configuration, build the OCR threadpool,
    and (optionally) start watching the input directory.

    Raises:
        AutoOcrSchedulerError: if the input and output directories are
            the same, the output mode or success action is unknown, or
            an archive action is requested without an archive directory.
    """
    self.logger = logger.getChild('scheduler')

    self.config_dir = local.path(config_dir)
    self.input_dir = local.path(input_dir)
    self.output_dir = local.path(output_dir)
    # Watching our own output directory would re-OCR results forever.
    if self.input_dir == self.output_dir:
        raise AutoOcrSchedulerError('Invalid configuration. Input and output directories must not be the same to avoid recursive OCR invocation!')

    self.output_mode = output_mode.lower()
    if self.output_mode not in AutoOcrScheduler.OUTPUT_MODES:
        raise AutoOcrSchedulerError('Invalid output mode: {}. Must be one of: {}'.format(self.output_mode, ', '.join(AutoOcrScheduler.OUTPUT_MODES)))

    self.success_action = success_action.lower()
    if self.success_action not in OcrTask.SUCCESS_ACTIONS:
        raise AutoOcrSchedulerError('Invalid success action: {}. Must be one of {}'.format(self.success_action, ', '.join(OcrTask.SUCCESS_ACTIONS)))

    self.archive_dir = local.path(archive_dir) if archive_dir else None
    if self.success_action == OcrTask.ON_SUCCESS_ARCHIVE and not self.archive_dir:
        raise AutoOcrSchedulerError('Archive directory required for success action {}'.format(self.success_action))

    self.notify_url = notify_url

    # In-flight task bookkeeping: current_tasks / current_outputs track
    # work in progress; walk_existing_task is the optional startup crawl.
    self.current_tasks = {}
    self.walk_existing_task = None
    self.current_outputs = set()

    # Create a Threadpool to run OCR tasks on
    self.threadpool = ThreadPoolExecutor(max_workers=3)

    # Wire up an AutoOcrWatchdogHandler
    watchdog_handler = AutoOcrWatchdogHandler(self.on_file_touched, self.on_file_deleted)

    # Schedule watchdog to observe the input directory
    if run_scheduler:
        self.observer = PollingObserver() if polling_observer else Observer()
        self.observer.schedule(watchdog_handler, self.input_dir, recursive=True)
        self.observer.start()
        self.logger.warning('Watching %s', self.input_dir)
    else:
        self.observer = None
        self.logger.warning('Not watching %s', self.input_dir)

    # Process existing files in input directory, if requested
    if process_existing_files:
        self.walk_existing_task = self.threadpool.submit(self.walk_existing_files)