def test_gutils_binary_to_ascii_watch(self):
    """Watch a binary directory and convert incoming *.*bd files to ASCII.

    Registers a pyinotify watch on ``binary_path``, copies the original
    binary files into it, and waits for the expected number of ASCII
    files to appear in ``ascii_path``.
    """
    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE

    # Convert binary data to ASCII
    processor = Slocum2AsciiProcessor(
        deployments_path=resource('slocum'),
    )
    notifier = ThreadedNotifier(wm, processor)
    notifier.coalesce_events()
    notifier.start()

    wdd = wm.add_watch(
        binary_path,
        mask,
        rec=True,
        auto_add=True
    )

    try:
        # Wait 5 seconds for the watch to start
        time.sleep(5)

        gpath = os.path.join(original_binary, '*.*bd')
        # Sort the files so the .cac files are generated in the right order
        for g in sorted(glob(gpath)):
            shutil.copy2(g, binary_path)

        wait_for_files(ascii_path, 32)
    finally:
        # rm_watch() only unwraps a real list; a Python 3 dict view would be
        # treated as one (invalid) watch descriptor and silently ignored.
        wm.rm_watch(list(wdd.values()), rec=True)
        # Always stop the notifier thread, even if the wait times out,
        # so a failing test cannot hang the run.
        notifier.stop()
def test_gutils_netcdf_to_erddap_watch(self):
    """Watch a NetCDF directory and publish incoming files to ERDDAP.

    Registers a pyinotify watch on ``netcdf_path``, drops a sample
    profile.nc into it, and waits for the ERDDAP content and flag
    files to be produced.
    """
    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE

    # Convert ASCII data to NetCDF
    processor = Netcdf2ErddapProcessor(
        deployments_path=resource('slocum'),
        erddap_content_path=erddap_content_path,
        erddap_flag_path=erddap_flag_path
    )
    notifier = ThreadedNotifier(wm, processor, read_freq=5)
    notifier.coalesce_events()
    notifier.start()

    wdd = wm.add_watch(
        netcdf_path,
        mask,
        rec=True,
        auto_add=True
    )

    try:
        # Wait 5 seconds for the watch to start
        time.sleep(5)

        orig_netcdf = resource('profile.nc')
        dummy_netcdf = os.path.join(netcdf_path, 'profile.nc')
        shutil.copy(orig_netcdf, dummy_netcdf)

        wait_for_files(erddap_content_path, 1)
        wait_for_files(erddap_flag_path, 1)
    finally:
        # rm_watch() only unwraps a real list; a Python 3 dict view would be
        # treated as one (invalid) watch descriptor and silently ignored.
        wm.rm_watch(list(wdd.values()), rec=True)
        # Always stop the notifier thread, even if the wait times out,
        # so a failing test cannot hang the run.
        notifier.stop()
def test_gutils_ascii_to_netcdf_watch(self):
    """Watch an ASCII directory and convert incoming files to NetCDF.

    Registers a pyinotify watch on ``ascii_path``, generates ASCII
    files by merging the original binary data, and waits for the
    expected number of NetCDF files to appear in ``netcdf_path``.
    """
    wm = WatchManager()
    mask = IN_MOVED_TO | IN_CLOSE_WRITE

    # Convert ASCII data to NetCDF
    processor = Slocum2NetcdfProcessor(
        deployments_path=resource('slocum'),
        subset=False,
        template='trajectory',
        profile_id_type=2,
        tsint=10,
        filter_distance=1,
        filter_points=5,
        filter_time=10,
        filter_z=1
    )
    notifier = ThreadedNotifier(wm, processor)
    notifier.coalesce_events()
    notifier.start()

    wdd = wm.add_watch(
        ascii_path,
        mask,
        rec=True,
        auto_add=True
    )

    try:
        # Wait 5 seconds for the watch to start
        time.sleep(5)

        # Make the ASCII we are watching for
        merger = SlocumMerger(
            original_binary,
            ascii_path,
            globs=['*.tbd', '*.sbd']
        )
        merger.convert()

        wait_for_files(netcdf_path, 230)
    finally:
        # rm_watch() only unwraps a real list; a Python 3 dict view would be
        # treated as one (invalid) watch descriptor and silently ignored.
        wm.rm_watch(list(wdd.values()), rec=True)
        # Always stop the notifier thread, even if the wait times out,
        # so a failing test cannot hang the run.
        notifier.stop()
def __watch_thread(self, root_lst, sync_list, cond, eventq):
    """
    Initialize the client-side thread that monitors file changes for sync.

    From the list of sync root directories and the whitelist of files and
    directories to be synchronized, compute the directory list to watch
    and the directory list to exclude, and register both with the
    INotifier.

    @param root_lst: list of root directories to monitor
    @type root_lst: tuple
    @param sync_list: list of files and directories to synchronize
    @type sync_list: tuple
    @param cond: condition variable for thread synchronization
    @type cond: threading.Condition
    @param eventq: event queue holding the file-change events
    @type eventq: pyinotify.Event
    @return: the initialized monitor thread
    @rtype: pyinotify.ThreadedNotifier
    """
    wm = WatchManager()
    # Watch deletes, writes, creates and both halves of a move.
    mask = IN_DELETE | IN_CLOSE_WRITE | IN_CREATE | IN_MOVED_FROM | IN_MOVED_TO
    thread_notifier = ThreadedNotifier(wm,
                                       EventHandler(cond=cond,
                                                    eventq=eventq,
                                                    sync_list=sync_list),
                                       read_freq=10, timeout=9)
    thread_notifier.coalesce_events()  # Enable coalescing of events
    watch_lst = []  # INotifier watch direcory list
    exclude_lst = []  # INotifier exclude directory list
    LOGGER.debug('root:%s', str(root_lst))
    LOGGER.debug('sublist:%s', str(sync_list))
    for root_path in root_lst:
        # add root directory to watch list
        watch_lst.append(root_path['name'])
        if not root_path['is_all']:
            # get exclude sub direcory list
            for dirpath, _, _ in os.walk(root_path['name']):
                if dirpath != root_path['name']:
                    for file_path in sync_list:
                        is_exclude = True
                        if file_path.startswith(dirpath) \
                                or dirpath.startswith(file_path):
                            # The walked directory is a parent of a
                            # whitelisted entry, or a descendant of one —
                            # in either case do not add it to the
                            # exclusion list.
                            LOGGER.debug('dirpath:%s', dirpath)
                            LOGGER.debug('file_path:%s', file_path)
                            is_exclude = False
                            break
                    # NOTE(review): if sync_list is empty, is_exclude is
                    # unbound here — confirm callers always pass a
                    # non-empty whitelist.
                    if is_exclude:
                        exclude_lst.append(dirpath)
    LOGGER.debug('watchlist:%s', str(watch_lst))
    LOGGER.debug('excludelist:%s', str(exclude_lst))
    excl = ExcludeFilter(exclude_lst)
    # Register the watches; (rec=True, auto_add=True) watches recursively
    # and automatically adds newly created subdirectories.
    wm_dict = wm.add_watch(watch_lst, mask, rec=True, auto_add=True,
                           exclude_filter=excl)
    LOGGER.debug('client monitor lst:%s', str(wm_dict))
    return thread_notifier