class dir_watch:
    """Watch a configured directory and forward filesystem events to a
    change-manager handler via a watchdog observer.
    """

    def __init__(self, config, change_manager, poll=1):
        """
        :param config: mapping with at least ``"watch_dir"`` — directory to watch
        :param change_manager: handler factory, called as ``change_manager(watch_dir, config)``
        :param poll: poll interval, kept on the instance (not passed to the observer)
        """
        self.watch_dir = config["watch_dir"]
        self.poll = poll
        self.event_handler = change_manager(self.watch_dir, config)
        # NOTE(review): the stock watchdog Observer takes no stat/listdir
        # arguments — this presumably aliases a polling VFS observer at the
        # import site; confirm against the file's imports.
        self.observer = Observer(os.stat, os.listdir)
        # FIX: was stat-then-mkdir wrapped in a bare `except:`, which both
        # raced with concurrent creators and silently swallowed every error
        # (including mkdir failures). makedirs with exist_ok is atomic enough
        # and only suppresses the "already exists" case.
        os.makedirs("./logs", exist_ok=True)
        logging.basicConfig(filename="./logs/dir_watch.log", level=logging.INFO,
                            format='%(asctime)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')

    def start_watch(self):
        """Schedule the handler on the watch directory (recursive) and start the observer."""
        self.observer.schedule(self.event_handler, self.watch_dir, recursive=True)
        self.observer.start()

    def stop_watch(self):
        """Stop the observer thread and wait for it to terminate."""
        self.observer.stop()
        self.observer.join()
def run(self):
    """Start watching ``self.watch_path`` with a polling observer and block.

    Wires the shared logger into the post helper, validates the on-disk
    cache, connects the poster, then schedules the event handler on a
    ``PollingObserverVFS`` and blocks in ``observer.join()`` until the
    observer thread ends.
    """
    self.post.logger = self.logger
    self.logger.info("sr_watch run")
    self.validate_cache()
    self.post.connect()
    try:
        # Poll with os.stat/os.listdir every self.time_interval seconds.
        self.observer = PollingObserverVFS(os.stat, os.listdir, self.time_interval)
        self.obs_watched = self.observer.schedule(
            self.myeventhandler, self.watch_path, recursive=self.post.recursive)
        self.observer.start()
    except OSError as err:
        self.logger.error("Unable to start Observer: %s" % str(err))
        # NOTE(review): exits with status 0 even though startup failed —
        # confirm supervisors do not rely on a non-zero code here.
        os._exit(0)
    self.observer.join()
def __init__(self, registry, debounce=DEFAULT_DEBOUNCE, poll=False):
    """Set up the filesystem observer and hook into the registry.

    ``poll`` doubles as flag and interval: any truthy value selects a
    polling VFS observer with that polling interval, a falsy value
    selects the native watchdog Observer.
    """
    if not poll:
        watcher = Observer()
    else:
        watcher = PollingObserverVFS(
            stat=os.stat, listdir=os.scandir, polling_interval=poll
        )
    self.observer = watcher
    self.debounce = debounce
    self.poll = poll
    self.registry = registry
    registry.precache_activity.register(self.on_prepare)
def run(self):
    """
    "Main loop" that polls the requested folder.

    Don't call this method directly, call WatchDogBasedSystem.start() to
    run it in a separate thread.

    :return: Does not return (loops until KeyboardInterrupt).
    """
    from watchdog.observers.polling import PollingObserver, PollingObserverVFS
    import os
    from tasks import action_file
    from blacklist import WatchmanBlacklist

    blacklist = WatchmanBlacklist()
    self.logger.info("Starting watchpuppy on {0}".format(self.path))
    observer = PollingObserverVFS(os.stat, os.listdir, polling_interval=0.8)
    event_handler = self.MyEventHandler(observer, list=self.wonderfullist,
                                        ignorelist=self.ignorelist)
    observer.schedule(event_handler, self.path, recursive=self.recursive)
    observer.start()
    try:
        while True:
            timestamp2 = time()
            timeint2 = int(timestamp2)
            # FIX: snapshot the items before iterating — the loop deletes
            # triggered paths from self.wonderfullist, and mutating a dict
            # while iterating .items() raises RuntimeError in Python 3.
            for path, ts in list(self.wonderfullist.items()):
                self.logger.debug("checking {0} with time {1}".format(path, ts))
                # Only act on paths that have been stable (unchanged) long enough.
                if ts < (timeint2 - self.stable_time):
                    self.logger.info("{0} is More than {1} seconds old, so triggering".format(path, self.stable_time))
                    # NOTE(review): dirname and basename are concatenated
                    # without a separator, so distinct paths can produce the
                    # same cache key — confirm this collision is acceptable.
                    cache_key = os.path.dirname(path) + os.path.basename(path)
                    original_ts = blacklist.get(cache_key, update=True, value=timeint2)
                    if original_ts is None:
                        self.logger.debug("No time stamp found for {0}".format(cache_key))
                        action_file.delay(filepath=os.path.dirname(path),
                                          filename=os.path.basename(path))
                    else:
                        self.logger.warning("System tried to trigger on {0} but was stopped by the blacklist".format(path))
                    self.logger.debug("Attempting to delete {0} from the list".format(path))
                    del self.wonderfullist[path]
            sleep(self.poll_delay)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def run(self):
    """Start watching ``self.watch_path`` with a polling observer and block.

    Duplicate of the ``sr_watch.run`` implementation seen elsewhere in this
    file: wires the logger into the post helper, validates the cache,
    connects the poster, then schedules a ``PollingObserverVFS`` and blocks
    in ``observer.join()``.
    """
    self.post.logger = self.logger
    self.logger.info("sr_watch run")
    self.validate_cache()
    self.post.connect()
    try:
        # Poll with os.stat/os.listdir every self.time_interval seconds.
        self.observer = PollingObserverVFS(os.stat, os.listdir, self.time_interval)
        self.obs_watched = self.observer.schedule(self.myeventhandler,
                                                  self.watch_path,
                                                  recursive=self.post.recursive)
        self.observer.start()
    except OSError as err:
        self.logger.error("Unable to start Observer: %s" % str(err))
        # NOTE(review): exits with status 0 even though startup failed —
        # confirm supervisors do not rely on a non-zero code here.
        os._exit(0)
    self.observer.join()
def start(self):
    """Build the pattern-matching handler, watch all configured paths,
    and idle until the observer dies or the user interrupts.

    Reads pattern/path settings from the [watchdog], [sy], [hcs], [afs]
    and [aas] config sections.
    """
    patterns = self.config.get('watchdog', 'patterns').split(';')
    ignore_directories = self.config.getboolean('watchdog', 'ignore_directories')
    ignore_patterns = self.config.get('watchdog', 'ignore_patterns').split(';')
    case_sensitive = self.config.getboolean('watchdog', 'case_sensitive')
    recursive = self.config.getboolean('watchdog', 'recursive')
    event_handler = PatternMatchingEventHandler(patterns=patterns,
                                                ignore_patterns=ignore_patterns,
                                                ignore_directories=ignore_directories,
                                                case_sensitive=case_sensitive)
    # Only creation and move events are dispatched; modify events are ignored.
    # event_handler.on_any_event = self.on_any_event
    event_handler.on_created = self.on_created
    # event_handler.on_modified = self.on_modified
    event_handler.on_moved = self.on_moved
    sy_path = self.config.get('sy', 'path')
    hcs_path = self.config.get('hcs', 'path')
    afs_path = self.config.get('afs', 'path')
    aas_path = self.config.get('aas', 'path')
    paths = [sy_path, hcs_path, afs_path, aas_path]
    # NOTE(review): assumes Observer here is a polling/VFS observer that
    # accepts stat/listdir — the stock watchdog Observer does not; confirm
    # against the file's imports.
    self.observer = Observer(stat=os.stat, listdir=os.listdir)
    for path in paths:
        self.observer.schedule(path=path, event_handler=event_handler,
                               recursive=recursive)
    self.observer.start()
    # Hoisted: the delay is a loop invariant; read it once.
    delay = self.config.getfloat('watchdog', 'delay')
    self.logger.info('Data Grabbing Robot for CCLAS is starting.....')
    self.logger.info('patterns=%s' % patterns)
    self.logger.info('paths=%s' % paths)
    self.logger.info('delay=%s' % str(delay))
    try:
        while self.observer.is_alive():
            try:
                time.sleep(delay)
            except (PermissionError, TypeError):
                # FIX: the original responded to these by calling
                # self.observer.start() again, which raises RuntimeError on
                # an already-started thread; just keep waiting instead.
                continue
    except KeyboardInterrupt:
        self.observer.stop()
        self.logger.info('Data Grabbing Robot is stopped.')
    self.observer.join()
# Mirror files from the source directory into the destination, then keep
# watching the source for changes with a 30-second polling observer.
arg_count = len(sys.argv)
if arg_count < 3:
    print("Ошибка. Слишком мало параметров.")
    sys.exit(1)
if arg_count > 3:
    print("Ошибка. Слишком много параметров.")
    sys.exit(1)

source_path, dist_path = sys.argv[1], sys.argv[2]

# Initial one-shot sync before watching begins.
copy_files(find_files(source_path), dist_path)

handler = MyHandler(dist_path, source_path, patterns=['*.*', '*', '.*'])
observer = PollingObserverVFS(stat=os.stat, listdir=os.listdir, polling_interval=30)
observer.schedule(handler, path=source_path)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
        '''
        # NOTE(review): the opening of this triple-quoted string (and the
        # enclosing method it documents, presumably ``process``) lies outside
        # this chunk; the lines below are the live body after the string.
        if event.event_type == 'modified':
            print('!!!!!')
            print(event.src_path)
            # Hand the modified file's path to the project's main entry point.
            module.main_function(event.src_path)

    def on_modified(self, event):
        # Delegate every modification event to the common process() handler.
        self.process(event)

    #def on_created(self, event):
        #self.process(event)


if __name__ == '__main__':
    args = sys.argv[1:]
    # First CLI argument: directory to watch.
    source_dir = args[0]
    # Poll every 5 s; the handler only matches *.RESULT files.
    observer = PollingObserverVFS(stat=os.stat, listdir=os.listdir, polling_interval=5)
    observer.schedule(MyHandler(patterns=['*.RESULT']), path=source_dir)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
def __init__(self, workspace_directory, tomcat_directory, hotterdeploy_dir,
             liferay_context, do_polling, statics_directory):
    """Set up directory layout, observer, and all deploy/workspace watches.

    :param workspace_directory: root of the developer workspace to scan/watch
    :param tomcat_directory: Tomcat installation root (temp/ and webapps/ under it)
    :param hotterdeploy_dir: override for the hotterdeploy drop directory;
        '' places it next to the Tomcat directory
    :param liferay_context: webapp context name of the Liferay installation
    :param do_polling: use a polling VFS observer instead of the native one
    :param statics_directory: directory for static resources (stored only)
    """
    self.do_polling = do_polling
    self.workspace_directory = workspace_directory
    self.tomcat_directory = tomcat_directory
    self.liferay_context = liferay_context
    self.statics_directory = statics_directory
    # Registries populated by the _scan_* helpers and the handlers below.
    self.portlets = {}
    self.themes = {}
    self.deploys = {}
    if hotterdeploy_dir == '':
        # Default: sibling of the Tomcat directory.
        self.hotterdeploy_dir = os.path.abspath(
            os.path.join(tomcat_directory, '..', 'hotterdeploy'))
    else:
        self.hotterdeploy_dir = os.path.abspath(
            os.path.join(hotterdeploy_dir, 'hotterdeploy'))
    self.tomcat_temp_dir = os.path.join(tomcat_directory, 'temp')
    self.tomcat_webapps_dir = os.path.join(tomcat_directory, 'webapps')
    self.liferay_dir = os.path.join(self.tomcat_webapps_dir, self.liferay_context)
    if do_polling:
        self.observer = PollingObserverVFS(default_stat, listdir)
    else:
        self.observer = Observer()
    # Create our hotterdeploy directory and watch it for wars
    if not os.path.exists(self.hotterdeploy_dir):
        os.mkdir(self.hotterdeploy_dir)
    self.observer.schedule(OnDeployHandler(hotterDeployer=self),
                           self.hotterdeploy_dir, recursive=False)
    # Scan tomcat temp directory for deployed portlets
    self._scan_temp()
    self.observer.schedule(OnTempDeployHandler(hotterDeployer=self),
                           self.tomcat_temp_dir, recursive=False)
    # Scan tomcat webapps directory for deployed portlets
    self._scan_webapps()
    self.observer.schedule(OnWebappsDeployHandler(hotterDeployer=self),
                           self.tomcat_webapps_dir, recursive=False)
    # Scan the working directory for portlets
    LOG.debug('Scanning workspace for portlets...')
    self._scan_wd(workspace_directory)
    LOG.debug('Done.')
    # path = os.path.join(self.workspace_directory, 'liferay-portal', 'credoc-newsletter-portlet')
    path = self.workspace_directory
    # Single recursive workspace watch; the extra handlers below share it.
    # IN_CREATE | IN_DELETE | IN_CLOSE_WRITE && src/main/webapp/WEB-INF
    w = self.observer.schedule(WorkSpaceHandler(hotterDeployer=self), path,
                               recursive=True)
    # IN_CLOSE_WRITE && src/main/webapp
    self.observer.add_handler_for_watch(
        OnFileChangedHandler(hotterDeployer=self), w)
    # self.observer.schedule(OnFileChangedHandler(hotterDeployer=self), self.workspace_directory, recursive=True)
    # IN_CLOSE_WRITE && target/classes
    self.observer.add_handler_for_watch(
        OnClassChangedHandler(hotterDeployer=self), w)
    # self.observer.schedule(OnClassChangedHandler(hotterDeployer=self), self.workspace_directory, recursive=True)
    self.livereload_server = Server()
class WatchDogObServer():
    """Watches CCLAS spreadsheet drop directories and dispatches each
    recognized sheet to its parser/worker method.

    Worker names are resolved dynamically from the [sheetname] config
    section, which maps Excel sheet names to method names on this class.
    """

    def __init__(self, config, logger):
        self.config = config
        self.logger = logger
        # One parser per instrument/report type; all share config + logger.
        self.jdyParser = JDYParser(config=config, logger=logger)
        self.scnParser = SCNParser(config=config, logger=logger)
        self.hbyParser = HBYParser(config=config, logger=logger)
        self.qtyParser = QTYParser(config=config, logger=logger)
        self.xjyParser = XJYParser(config=config, logger=logger)
        self.afsParser = AFSParser(config=config, logger=logger)
        self.hcsParser = HCSParser(config=config, logger=logger)
        self.aasParser = AASParser(config=config, logger=logger)
        # Dynamically resolved data-processing workers: sheet name -> method name.
        self.sheetName2Worker = dict(self.config.items('sheetname'))

    def on_any_event(self, event):
        self.logger.debug(event)

    def on_modified(self, event):
        self.logger.debug(event)
        filename = event.src_path
        self.on_handle(filename=filename)

    def on_moved(self, event):
        self.logger.debug(event)
        if '.xls' in event.dest_path:
            filename = event.dest_path
            # Give the writing application time to finish flushing the file.
            time.sleep(3)
            self.on_handle(filename=filename)
        else:
            # filename = event.src_path
            time.sleep(3)

    def on_created(self, event):
        # Triggered when a file is created.
        self.logger.debug(event)
        filename = event.src_path
        self.on_handle(filename=filename)

    def on_handle(self, filename):
        """Open the workbook and run every worker whose sheet is present."""
        try:
            _f = xlrd.open_workbook(filename)
            sheet_names = _f.sheet_names()
            targets = list(self.sheetName2Worker.keys())
            workers = ['%s' % (self.sheetName2Worker.get(sheet_name))
                       for sheet_name in sheet_names if sheet_name in targets]
            for worker in workers:
                self.logger.debug('Starting %s ......' % worker)
                # FIX: the original passed target=eval('self.' + worker)(filename),
                # which ran the worker inline and handed its None return value to
                # Process (the child then did nothing). Resolve the bound method
                # with getattr and let the child process call it. getattr also
                # avoids eval on config-derived strings.
                p = multiprocessing.Process(target=getattr(self, worker),
                                            args=(filename,))
                p.start()
                p.join()
        # FIX: specific exception types must precede the generic Exception
        # clause — in the original they came after it and were unreachable.
        except (PermissionError, BadZipFile, AttributeError,
                TypeError, NameError) as error:
            self.logger.error(error)
        except Exception as error:
            self.logger.error(error)

    def start(self):
        """Build the pattern-matching handler, watch all configured paths,
        and idle until the observer dies or the user interrupts."""
        patterns = self.config.get('watchdog', 'patterns').split(';')
        ignore_directories = self.config.getboolean('watchdog', 'ignore_directories')
        ignore_patterns = self.config.get('watchdog', 'ignore_patterns').split(';')
        case_sensitive = self.config.getboolean('watchdog', 'case_sensitive')
        recursive = self.config.getboolean('watchdog', 'recursive')
        event_handler = PatternMatchingEventHandler(patterns=patterns,
                                                    ignore_patterns=ignore_patterns,
                                                    ignore_directories=ignore_directories,
                                                    case_sensitive=case_sensitive)
        # Only creation and move events are dispatched; modify events ignored.
        # event_handler.on_any_event = self.on_any_event
        event_handler.on_created = self.on_created
        # event_handler.on_modified = self.on_modified
        event_handler.on_moved = self.on_moved
        sy_path = self.config.get('sy', 'path')
        hcs_path = self.config.get('hcs', 'path')
        afs_path = self.config.get('afs', 'path')
        aas_path = self.config.get('aas', 'path')
        paths = [sy_path, hcs_path, afs_path, aas_path]
        # NOTE(review): assumes Observer here accepts stat/listdir (polling
        # VFS observer) — the stock watchdog Observer does not; confirm.
        self.observer = Observer(stat=os.stat, listdir=os.listdir)
        for path in paths:
            self.observer.schedule(path=path, event_handler=event_handler,
                                   recursive=recursive)
        self.observer.start()
        # Hoisted: the delay is a loop invariant; read it once.
        delay = self.config.getfloat('watchdog', 'delay')
        self.logger.info('Data Grabbing Robot for CCLAS is starting.....')
        self.logger.info('patterns=%s' % patterns)
        self.logger.info('paths=%s' % paths)
        self.logger.info('delay=%s' % str(delay))
        try:
            while self.observer.is_alive():
                try:
                    time.sleep(delay)
                except (PermissionError, TypeError):
                    # FIX: the original called self.observer.start() again,
                    # which raises RuntimeError on a started thread; just
                    # keep waiting instead.
                    continue
        except KeyboardInterrupt:
            self.observer.stop()
            self.logger.info('Data Grabbing Robot is stopped.')
        self.observer.join()

    def stop(self):
        self.observer.stop()

    # --- Worker methods: one per sheet/report type. Each extracts the sheet,
    # --- computes the incremental rows, builds and writes reports, then
    # --- archives the source file.

    def scnWorker(self, filename):
        sheet_name = 'SCN'
        method = 'SY001'
        scnDF = self.scnParser.getSCNDF(filename=filename, sheet_name=sheet_name)
        increamentDF = self.scnParser.getIncreamentDF(srcDF=scnDF,
                                                      filename=filename,
                                                      sheet_name=sheet_name)
        reports = self.scnParser.buildReport(dataframe=increamentDF,
                                             sheet_name='SCN', method=method)
        self.scnParser.outputReport(reports=reports)
        self.scnParser.reportFileHandle(filename=filename, sheet_name=sheet_name)

    def jdyWorker(self, filename):
        sheet_name = 'JDY'
        method = 'SY001'
        jdyDF = self.jdyParser.getJDYDF(filename=filename, sheet_name=sheet_name)
        increamentDF = self.jdyParser.getIncreamentDF(srcDF=jdyDF,
                                                      filename=filename,
                                                      sheet_name=sheet_name)
        reports = self.jdyParser.buildReport(dataframe=increamentDF,
                                             sheet_name='JDY', method=method)
        self.jdyParser.outputReport(reports=reports)
        self.jdyParser.reportFileHandle(filename=filename, sheet_name=sheet_name)

    def hbyWorker(self, filename):
        sheet_name = 'HBY'
        method = 'SY001'
        hbyDF = self.hbyParser.getHBYDF(filename=filename, sheet_name=sheet_name)
        increamentDF = self.hbyParser.getIncreamentDF(srcDF=hbyDF,
                                                      filename=filename,
                                                      sheet_name=sheet_name)
        reports = self.hbyParser.buildReport(dataframe=increamentDF,
                                             sheet_name='HBY', method=method)
        self.hbyParser.outputReport(reports=reports)
        self.hbyParser.reportFileHandle(filename=filename, sheet_name=sheet_name)

    def qtyWorker(self, filename):
        sheet_name = 'QTY'
        method = 'SY001'
        qtyDF = self.qtyParser.getQTYDF(filename=filename, sheet_name=sheet_name)
        increamentDF = self.qtyParser.getIncreamentDF(srcDF=qtyDF,
                                                      filename=filename,
                                                      sheet_name=sheet_name)
        reports = self.qtyParser.buildReport(dataframe=increamentDF,
                                             sheet_name='QTY', method=method)
        self.qtyParser.outputReport(reports=reports)
        self.qtyParser.reportFileHandle(filename=filename, sheet_name=sheet_name)

    def xjyWorker(self, filename):
        # XJY sheets are named by zero-padded month ('01'..'12'); process the
        # current month and, when it exists, the previous one.
        sheet_list = []
        current_month = datetime.today().month
        sheet_list.append('%02d' % current_month)
        if current_month > 1:
            last_month = current_month - 1
            sheet_list.append('%02d' % last_month)
        method = 'SY001'
        for sheet_name in sheet_list:
            xjyDF = self.xjyParser.getXJYDF(filename=filename,
                                            sheet_name=sheet_name)
            increamentDF = self.xjyParser.getIncreamentDF(srcDF=xjyDF,
                                                          filename=filename,
                                                          sheet_name=sheet_name)
            reports = self.xjyParser.buildReport(dataframe=increamentDF,
                                                 sheet_name='XJY', method=method)
            self.xjyParser.outputReport(reports=reports)
            self.xjyParser.reportFileHandle(filename=filename,
                                            sheet_name=sheet_name)

    def afs2csvWorker(self, filename):
        afsDf = self.afsParser.getAFSDF(filename=filename, sheet_name='样品测量数据')
        self.logger.debug(afsDf)

    def hcs2csvWorker(self, filename):
        hcsDf = self.hcsParser.getHCSDF(sheet_name=0, filename=filename)
        self.logger.debug(hcsDf)

    def aas2csvWorker(self, filename):
        aasDf = self.aasParser.getAASDF(filename=filename, sheet_name=0)
class sr_watch(sr_instances):
    """Watch a directory tree with a polling observer and publish file
    events through an embedded sr_post instance.
    """

    def __init__(self, config=None, args=None):
        # The embedded poster must exist before sr_instances.__init__,
        # which calls back into check()/overwrite_defaults().
        self.post = sr_post(config, args)
        sr_instances.__init__(self, config, args)

    def close(self):
        """Close the poster and tear down the observer's single watch."""
        self.post.close()
        self.observer.unschedule(self.obs_watched)
        self.observer.stop()

    def overwrite_defaults(self):
        # 200 MiB partitioning blocks, caching on, 5 s polling interval.
        self.blocksize = 200 * 1024 * 1024
        self.caching = True
        self.sleep = 5

    def check(self):
        """Propagate this instance's settings into the embedded poster."""
        self.nbr_instances = 1
        self.accept_unmatch = True
        self.post.configure()
        self.watch_path = self.post.watchpath()
        self.post.logger = self.logger
        self.post.program_name = 'sr_watch'
        self.post.blocksize = self.blocksize
        self.post.caching = self.caching
        self.post.watch_path = self.watch_path
        self.time_interval = self.sleep
        if self.reset:
            self.post.connect()
            self.post.poster.cache_reset()

    def validate_cache(self):
        """Guard against two sr_watch instances sharing one configuration.

        A shelve cache keyed by watch path and block size stores the owning
        pid; if a different pid owns it and is still alive, abort.
        """
        self.cache_file = self.user_cache_dir
        self.cache_file += '/' + self.watch_path.replace('/', '_')
        self.cache_file += '_%d' % self.blocksize
        self.cache = shelve.open(self.cache_file)
        current_pid = os.getpid()
        k_pid = "pid"
        # FIX: use k_pid consistently (the original mixed k_pid and the
        # literal "pid" for the same key).
        if k_pid in self.cache:
            if self.cache[k_pid] != current_pid:
                if psutil.pid_exists(self.cache[k_pid]):
                    self.logger.error(
                        "Another sr_watch instance with same configuration is already running."
                    )
                    os._exit(1)
                else:
                    self.logger.debug("Reusing cache with pid=%s" % str(current_pid))
                    self.cache[k_pid] = current_pid
        else:
            self.logger.debug("Creating new cache with pid=%s" % str(current_pid))
            self.cache[k_pid] = current_pid
        self.cache.close()

    def event_handler(self, meh):
        # Register the watchdog event handler used by run().
        self.myeventhandler = meh

    def help(self):
        self.post.help()

    def run(self):
        """Connect, schedule the polling observer and block until it stops."""
        self.post.logger = self.logger
        self.logger.info("sr_watch run")
        self.validate_cache()
        self.post.connect()
        try:
            self.observer = PollingObserverVFS(os.stat, os.listdir, self.time_interval)
            self.obs_watched = self.observer.schedule(
                self.myeventhandler, self.watch_path,
                recursive=self.post.recursive)
            self.observer.start()
        except OSError as err:
            self.logger.error("Unable to start Observer: %s" % str(err))
            # FIX: exit non-zero on failure — the original exited with 0,
            # making supervisors believe startup succeeded.
            os._exit(1)
        self.observer.join()

    def reload(self):
        self.logger.info("%s reload" % self.program_name)
        self.close()
        self.configure()
        self.run()

    def start(self):
        self.logger.info("%s start" % self.program_name)
        self.run()

    def stop(self):
        self.logger.info("%s stop" % self.program_name)
        self.close()
        os._exit(0)
class sr_watch(sr_instances):
    """Watch a directory tree with a polling observer and publish file
    events through an embedded sr_post instance.
    """

    def __init__(self,config=None,args=None):
        # The embedded poster must exist before sr_instances.__init__,
        # which calls back into check()/overwrite_defaults().
        self.post = sr_post(config,args)
        sr_instances.__init__(self,config,args)

    def close(self):
        # Close the poster and tear down the observer's single watch.
        self.post.close()
        self.observer.unschedule(self.obs_watched)
        self.observer.stop()

    def overwrite_defaults(self):
        # 200 MiB partitioning blocks, caching on, 5 s polling interval.
        self.blocksize = 200 * 1024 * 1024
        self.caching = True
        self.sleep = 5

    def check(self):
        """Propagate this instance's settings into the embedded poster."""
        self.nbr_instances = 1
        self.accept_unmatch = True
        self.post.configure()
        self.watch_path = self.post.watchpath()
        self.post.logger = self.logger
        self.post.program_name = 'sr_watch'
        self.post.blocksize = self.blocksize
        self.post.caching = self.caching
        self.post.watch_path = self.watch_path
        self.time_interval = self.sleep
        if self.reset :
            self.post.connect()
            self.post.poster.cache_reset()

    def validate_cache(self):
        """Guard against two sr_watch instances sharing one configuration.

        A shelve cache keyed by watch path and block size stores the owning
        pid; if a different pid owns it and is still alive, abort.
        """
        self.cache_file = self.user_cache_dir
        self.cache_file += '/' + self.watch_path.replace('/','_')
        self.cache_file += '_%d' % self.blocksize
        self.cache = shelve.open(self.cache_file)
        current_pid = os.getpid()
        k_pid = "pid"
        # NOTE(review): k_pid and the literal "pid" are mixed below for the
        # same key — harmless today, but worth unifying.
        if "pid" in self.cache:
            if not self.cache[k_pid] == current_pid:
                if psutil.pid_exists(self.cache[k_pid]):
                    self.logger.error("Another sr_watch instance with same configuration is already running.")
                    os._exit(1)
                else:
                    self.logger.debug("Reusing cache with pid=%s" % str(current_pid))
                    self.cache["pid"] = current_pid
        else:
            self.logger.debug("Creating new cache with pid=%s" % str(current_pid))
            self.cache["pid"] = current_pid
        self.cache.close()

    def event_handler(self,meh):
        # Register the watchdog event handler used by run().
        self.myeventhandler = meh

    def help(self):
        self.post.help()

    def run(self):
        """Connect, schedule the polling observer and block until it stops."""
        self.post.logger = self.logger
        self.logger.info("sr_watch run")
        self.validate_cache()
        self.post.connect()
        try:
            # Poll with os.stat/os.listdir every self.time_interval seconds.
            self.observer = PollingObserverVFS(os.stat, os.listdir, self.time_interval)
            self.obs_watched = self.observer.schedule(self.myeventhandler, self.watch_path, recursive=self.post.recursive)
            self.observer.start()
        except OSError as err:
            self.logger.error("Unable to start Observer: %s" % str(err))
            # NOTE(review): exits with status 0 even though startup failed —
            # confirm supervisors do not rely on a non-zero code here.
            os._exit(0)
        self.observer.join()

    def reload(self):
        self.logger.info("%s reload" % self.program_name)
        self.close()
        self.configure()
        self.run()

    def start(self):
        self.logger.info("%s start" % self.program_name)
        self.run()

    def stop(self):
        self.logger.info("%s stop" % self.program_name)
        self.close()
        os._exit(0)