def Daemonize(self):
    """Detach into daemon mode, logging to a per-account file.

    If the platform cannot daemonize (``daemonable`` is false), print an
    error and terminate the process with exit status 1.
    """
    # Guard clause: bail out early when daemon mode is unavailable.
    if not daemonable:
        PRINT('错误:无法以 daemon 模式运行')
        sys.exit(1)
    # Log file name embeds the account number (self.qq) to keep runs apart.
    log_path = self.absPath('daemon-%d.log' % self.qq)
    PRINT('将以 daemon 模式运行, log 文件: %s' % log_path)
    daemonize(log_path)
def main(argv):
    """Entry point for the file-sync service.

    Parses command-line options into module globals, daemonizes (unless
    ``foreground`` was requested), configures logging, restores the
    persisted sync queue (recreating it when unreadable), and then loops
    forever draining work via ``decisionlogic()``.

    :param argv: process argument vector; ``argv[0]`` names the logger.
    """
    global FilesSyncQueue
    global logger
    global foreground

    # Parse argv; may rebind module-level settings such as `foreground`.
    parse_argv(argv, globals())

    # Daemonize (no-op detach when running in the foreground).
    daemonize(SYNCER_PID, foreground)

    # Initialize logging.
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, SYNCER_LOG, DATE_FORMAT)

    # Sanity check: without the watched directory there is nothing to do.
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # If FilesSyncQueue is nonexistent or damaged, fall back to the
    # in-memory default and persist it below.
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        # Logger.warn is a deprecated alias; use warning().
        logger.warning('Unusable file sync queue file %s. Recreating.'
                       % FILES_SYNC_FILE)
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # Start main loop: drain all pending work, then sleep.
    logger.debug('File sync service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
def main(argv):
    """Entry point for the checksumming service.

    Parses command-line options into module globals, daemonizes (unless
    ``foreground`` was requested), configures logging, restores the three
    persisted state files (action map, hash map, sync queue — recreating
    any that are unreadable), prunes hash-map entries whose files vanished
    while the monitor was down (queueing a remote removal for each), and
    then loops forever draining work via ``decisionlogic()``.

    :param argv: process argument vector; ``argv[0]`` names the logger.
    """
    global FilesActionMap
    global FilesHashMap
    global FilesSyncQueue
    global logger
    global foreground

    # Parse argv; may rebind module-level settings such as `foreground`.
    parse_argv(argv, globals())

    # Daemonize (no-op detach when running in the foreground).
    daemonize(SUMMER_PID, foreground)

    # Initialize logging.
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, SUMMER_LOG, DATE_FORMAT)

    # Sanity check: without the watched directory there is nothing to do.
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # If FilesActionMap is nonexistent or damaged, keep the default and
    # persist it below.  Logger.warn is deprecated; use warning().
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warning('Unusable action map status file %s. Recreating.'
                       % FILES_STATUS_FILE)
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # If FilesHashMap is nonexistent or damaged, keep the default.
    try:
        FilesHashMap = read_atomic(FILES_HASH_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warning('Unusable hash map file %s. Recreating.'
                       % FILES_HASH_FILE)
    write_atomic(FILES_HASH_FILE, FilesHashMap)

    # If FilesSyncQueue is nonexistent or damaged, keep the default.
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warning('Unusable sync queue file %s. Recreating.'
                       % FILES_SYNC_FILE)
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # Clear non-existent files from the checksum map, most probably due to
    # changes while the monitor was inactive.  Iterate over a snapshot of
    # the keys because the loop body re-reads and rewrites the map.
    for path in list(FilesHashMap):
        if not os.path.exists(path):
            logger.warning('File %s is in hash map, but not on disk. '
                           'Deleting from map and trying to delete remotely.'
                           % path)
            # Re-read, drop the stale entry, and persist atomically so a
            # crash mid-loop leaves the on-disk map consistent.
            FilesHashMap = read_atomic(FILES_HASH_FILE)
            del FilesHashMap[path]
            write_atomic(FILES_HASH_FILE, FilesHashMap)
            # Enqueue the path for remote removal.
            FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
            FilesSyncQueue.append((path, 'remove', 0))
            write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # Start main loop: drain all pending work, then sleep.
    logger.debug('Checksumming service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
def main(argv):
    """Entry point for the inotify monitoring service.

    Parses command-line options into module globals, daemonizes (unless
    ``foreground`` was requested), configures logging, restores the
    persisted action map (recreating it when unreadable), records an
    initial 'created' event for everything already under WATCH_DIR, and
    then hands control to the pyinotify notifier loop.

    :param argv: process argument vector; ``argv[0]`` names the logger.
    """
    global FilesActionMap
    global logger
    global foreground

    # Parse argv; may rebind module-level settings such as `foreground`.
    parse_argv(argv, globals())

    # Daemonize (no-op detach when running in the foreground).
    daemonize(MONITOR_PID, foreground)

    # Initialize logging.
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
                           LOG_FORMAT, MONITOR_LOG, DATE_FORMAT)

    # Sanity check: without the watched directory there is nothing to do.
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                        'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # If FilesActionMap is nonexistent or damaged, keep the default and
    # persist it below.  Logger.warn is deprecated; use warning().
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warning('Unusable action map status file %s. Recreating.'
                       % FILES_STATUS_FILE)
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # Initial recursive walk: synthesize 'created' events for everything
    # already present so downstream services see a consistent baseline.
    for root, dirs, files in os.walk(WATCH_DIR):
        for name in files:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created', time.time())
        for name in dirs:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created_dir', time.time())
    write_atomic(FILES_STATUS_FILE, FilesActionMap)
    logger.debug('Initial events %s. Committing.' % FilesActionMap)

    # Start inotify monitor.
    watch_manager = pyinotify.WatchManager()
    handler = ProcessEventHandler()
    notifier = pyinotify.Notifier(watch_manager, default_proc_fun=handler,
                                  read_freq=SLEEP_TIME)

    # Try coalescing events if this pyinotify version supports it.
    try:
        notifier.coalesce_events()
        logger.debug('Successfully enabled events coalescing. Good.')
    except AttributeError:
        pass

    # Catch only create/delete/modify/attrib events; don't monitor
    # IN_MODIFY, instead use IN_CLOSE_WRITE when file has been written to
    # and finally closed; and monitor IN_MOVED_TO when using temporary
    # files for atomicity as well as IN_MOVED_FROM when file is moved from
    # watched path.
    # NOTE(review): IN_ISDIR is a flag pyinotify sets on delivered events,
    # not a subscribable event — including it in the mask looks like a
    # no-op; confirm against the pyinotify docs before removing.
    event_mask = pyinotify.IN_CREATE|pyinotify.IN_DELETE|\
                 pyinotify.IN_CLOSE_WRITE|pyinotify.IN_ATTRIB|\
                 pyinotify.IN_MOVED_TO|pyinotify.IN_MOVED_FROM|\
                 pyinotify.IN_ISDIR|pyinotify.IN_UNMOUNT|\
                 pyinotify.IN_Q_OVERFLOW
    watch_manager.add_watch(WATCH_DIR, event_mask, rec=True, auto_add=True)

    # Enter loop (blocks forever, dispatching to ProcessEventHandler).
    logger.debug('Inotify handler starting... Entering notify loop.')
    notifier.loop()