Example No. 1
    def process_default(self, event):
        global FilesActionMap
        global logger

        # sanity check
        if event.mask not in InotifyMask:
            logger.warn('Got unexpected/unhandled event %d. Ignoring.' %
                    event.mask)
            return

        # map and process individual actions
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
        action = InotifyMask[event.mask]
        FilesActionMap[event.pathname] = (action, time.time())
        write_atomic(FILES_STATUS_FILE, FilesActionMap)
        logger.debug('Pending monitor actions %s.' % FilesActionMap)
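The InotifyMask lookup table used by process_default() is not included in these examples. A minimal sketch of what it might look like, assuming it maps raw pyinotify event masks to the action strings that the decision loop in Example No. 5 dispatches on (everything here besides the pyinotify constants is a reconstruction from the other examples):

import pyinotify

# Hypothetical reconstruction: maps pyinotify event masks to action strings.
# Directory events carry the IN_ISDIR bit OR-ed into the mask, so they get
# their own entries.
InotifyMask = {
    pyinotify.IN_CLOSE_WRITE: 'changed',
    pyinotify.IN_CREATE: 'created',
    pyinotify.IN_MOVED_TO: 'created',
    pyinotify.IN_DELETE: 'deleted',
    pyinotify.IN_MOVED_FROM: 'deleted',
    pyinotify.IN_ATTRIB: 'attrib',
    pyinotify.IN_CREATE | pyinotify.IN_ISDIR: 'created_dir',
    pyinotify.IN_MOVED_TO | pyinotify.IN_ISDIR: 'created_dir',
    pyinotify.IN_DELETE | pyinotify.IN_ISDIR: 'deleted_dir',
    pyinotify.IN_MOVED_FROM | pyinotify.IN_ISDIR: 'deleted_dir',
    pyinotify.IN_ATTRIB | pyinotify.IN_ISDIR: 'attrib_dir',
}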
Example No. 2
def check_updated(poporig):
    """Check if sync queue has been updated in meantime. Returns True if
yes, False otherwise.
    """
    global FilesSyncQueue
    global logger

    FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    popnew = FilesSyncQueue.popleft()
    if poporig == popnew:
        write_atomic(FILES_SYNC_FILE, FilesSyncQueue)
    else:
        logger.warn('Oops, left side of file sync queue %s has changed. '
                'This should never happen.' % FilesSyncQueue)
        return True
    return False
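Every example persists shared state through read_atomic()/write_atomic(), which are not shown. A minimal sketch of how they could be implemented, assuming pickle-based serialization and a write-to-temp-then-rename step so a concurrent reader never sees a half-written file (the '.tmp' suffix is illustrative):

import os
import pickle

def read_atomic(path):
    """Load a pickled object from path. A missing or truncated file
    raises IOError/EOFError, which callers treat as 'recreate'."""
    with open(path, 'rb') as f:
        return pickle.load(f)

def write_atomic(path, obj):
    """Pickle obj to a temporary file, flush it to disk, then rename it
    into place; rename is atomic on POSIX, so readers observe either
    the old or the new contents, never a partial write."""
    tmp = path + '.tmp'  # illustrative naming
    with open(tmp, 'wb') as f:
        pickle.dump(obj, f)
        f.flush()
        os.fsync(f.fileno())
    os.rename(tmp, path)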
Example No. 3
def check_updated(monitor_action, monitor_timestamp, myfile):
    """Check if action map has been updated in the meantime. Returns True
if yes, False otherwise.
    """
    global FilesActionMap
    global logger

    # reread status and check if there are newer changes
    FilesActionMap = read_atomic(FILES_STATUS_FILE)

    _, monitor_timestampnew = FilesActionMap[myfile]
    if monitor_timestampnew == monitor_timestamp:
        # remove from action map if there are no changes
        del FilesActionMap[myfile]
        write_atomic(FILES_STATUS_FILE, FilesActionMap)
    else:
        return True
    return False
Example No. 4
def main(argv):
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(SYNCER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, SYNCER_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesSyncQueue is nonexistent or damaged, truncate it
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable file sync queue file %s. Recreating.' %
                FILES_SYNC_FILE)
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop
    logger.debug('File sync service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
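daemonize(), parse_argv() and setup_logging() are shared helpers that do not appear in these examples. For orientation, a sketch of a daemonize(pidfile, foreground) along the classic double-fork lines (the project's actual helper may differ):

import os
import sys

def daemonize(pidfile, foreground):
    """Detach from the controlling terminal via the classic double fork
    and record our PID; a no-op in foreground mode. Redirecting stdio to
    /dev/null is omitted for brevity. Sketch only."""
    if foreground:
        return
    if os.fork() > 0:   # first fork: the parent exits
        sys.exit(0)
    os.setsid()         # become session leader, drop the controlling tty
    if os.fork() > 0:   # second fork: we can never reacquire a tty
        sys.exit(0)
    os.chdir('/')
    os.umask(0)
    with open(pidfile, 'w') as f:
        f.write(str(os.getpid()))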
Example No. 5
def decisionlogic():
    """Main decision/summing loop. Returns False if no more actions
to perform.
    """
    global FilesActionMap
    global FilesHashMap
    global FilesSyncQueue
    global logger

    # reread fresh status on every run
    FilesActionMap = read_atomic(FILES_STATUS_FILE)

    # ignore if no actions pending
    if not FilesActionMap:
        return False

    # random choice to avoid checksumming the same file over and over if
    # it changes often
    myfile = random.choice(list(FilesActionMap))
    monitor_action, monitor_timestamp = FilesActionMap[myfile]

    # by default, don't resync or remove remote files
    sync_action = None
    myperm = None

    # file is freshly created or changed
    if monitor_action in ('changed', 'created', 'attrib'):
        # calculate checksum
        try:
            mysha1sum = sha1sum(myfile)
        except (IOError, OSError):
            logger.info('Could not checksum file %s. Ignoring.' % myfile)
            check_updated(None, monitor_timestamp, myfile)
            return True

        # get permissions
        try:
            myperm = oct(stat.S_IMODE(os.stat(myfile)[stat.ST_MODE]))
        except (IOError, OSError):
            logger.info('Could not get permissions for file %s. Ignoring.'
                    % myfile)
            check_updated(None, monitor_timestamp, myfile)
            return True

        # already known file
        if myfile in FilesHashMap:
            mysha1sumold, mypermold = FilesHashMap[myfile]
            # if checksum is different, resync is mandatory
            if mysha1sumold != mysha1sum:
                sync_action = 'sync'
            # else if just mode changed, change remote mode
            elif myperm != mypermold:
                sync_action = 'change_perm'
        # file seen for the first time (no checksum and no mode yet)
        else:
            sync_action = 'sync'
        FilesHashMap[myfile] = mysha1sum, myperm

    # deleted file
    elif monitor_action == 'deleted':
        if myfile in FilesHashMap:
            del FilesHashMap[myfile]
        sync_action = 'remove'

    # created directory
    elif monitor_action == 'created_dir':
        try:
            myperm = oct(stat.S_IMODE(os.stat(myfile)[stat.ST_MODE]))
        except (IOError, OSError):
            logger.info('Could not get permissions for directory %s. '
                    'Ignoring.' % myfile)
            check_updated(None, monitor_timestamp, myfile)
            return True
        sync_action = 'make_dir'

    # deleted directory
    elif monitor_action == 'deleted_dir':
        sync_action = 'remove_dir'

    # permissions change for directory
    elif monitor_action == 'attrib_dir':
        # get permissions
        try:
            myperm = oct(stat.S_IMODE(os.stat(myfile)[stat.ST_MODE]))
        except (IOError, OSError):
            logger.info('Could not get permissions for directory %s. '
                    'Ignoring.' % myfile)
            check_updated(None, monitor_timestamp, myfile)
            return True
        sync_action = 'change_perm'

    # write hash file
    write_atomic(FILES_HASH_FILE, FilesHashMap)
    logger.debug('Hash file status: %s.' % FilesHashMap)

    # check if file/directory has been updated in the meantime
    check_updated(sync_action, monitor_timestamp, myfile)

    # resync or remove remote files
    logger.debug('Pending action %s for file %s.' % (sync_action, myfile))
    if sync_action:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
        FilesSyncQueue.append((myfile, sync_action, myperm))
        write_atomic(FILES_SYNC_FILE, FilesSyncQueue)
        logger.debug('Pending sync queue: %s.' % FilesSyncQueue)

    return True
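The sha1sum() helper used above is not shown either. A minimal sketch, assuming it streams the file through hashlib in fixed-size chunks so large files are never read into memory at once; IOError/OSError simply propagate to decisionlogic(), which logs and skips the file:

import hashlib

def sha1sum(path, blocksize=65536):
    """Return the hex SHA-1 digest of the file at path, reading it in
    blocksize chunks."""
    digest = hashlib.sha1()
    with open(path, 'rb') as f:
        block = f.read(blocksize)
        while block:
            digest.update(block)
            block = f.read(blocksize)
    return digest.hexdigest()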
Example No. 6
def main(argv):
    global FilesActionMap
    global FilesHashMap
    global FilesSyncQueue
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(SUMMER_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, SUMMER_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistent or damaged, truncate it
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable action map status file %s. Recreating.' %
                FILES_STATUS_FILE)
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # if FilesHashMap is nonexistent or damaged, truncate it
    try:
        FilesHashMap = read_atomic(FILES_HASH_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable hash map file %s. Recreating.' %
                FILES_HASH_FILE)
    write_atomic(FILES_HASH_FILE, FilesHashMap)

    # if FilesSyncQueue is nonexistent or damaged, truncate it
    try:
        FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable sync queue file %s. Recreating.' %
                FILES_SYNC_FILE)
    write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # clear non-existent files from the checksum map, most probably due
    # to changes made while the monitor was inactive
    for path in FilesHashMap.keys():
        if not os.path.exists(path):
            logger.warn('File %s is in hash map, but not on disk. '
                    'Deleting from map and trying to delete remotely.' %
                    path)
            # remove from hash file
            FilesHashMap = read_atomic(FILES_HASH_FILE)
            del FilesHashMap[path]
            write_atomic(FILES_HASH_FILE, FilesHashMap)
            # enqueue to remove remotely
            FilesSyncQueue = read_atomic(FILES_SYNC_FILE)
            FilesSyncQueue.append((path, 'remove', 0))
            write_atomic(FILES_SYNC_FILE, FilesSyncQueue)

    # start main loop
    logger.debug('Checksumming service starting... Entering wait loop.')
    while True:
        while decisionlogic():
            pass
        time.sleep(SLEEP_TIME)
Example No. 7
def main(argv):
    global FilesActionMap
    global logger
    global foreground

    # parse argv
    parse_argv(argv, globals())

    # daemonize
    daemonize(MONITOR_PID, foreground)

    # initialize logging
    logger = setup_logging(argv[0], CONSOLE_LOG_LEVEL, FILE_LOG_LEVEL,
            LOG_FORMAT, MONITOR_LOG, DATE_FORMAT)

    # sanity check
    if not os.path.isdir(WATCH_DIR):
        logger.critical('Watched directory %s does not exist. '
                'Bailing out.' % WATCH_DIR)
        sys.exit(1)

    # if FilesActionMap is nonexistent or damaged, truncate it
    try:
        FilesActionMap = read_atomic(FILES_STATUS_FILE)
    except (IOError, AttributeError, EOFError):
        logger.warn('Unusable action map status file %s. Recreating.' %
                FILES_STATUS_FILE)
    write_atomic(FILES_STATUS_FILE, FilesActionMap)

    # initial recursive walk (initial events)
    for root, dirs, files in os.walk(WATCH_DIR):
        for name in files:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created', time.time())
        for name in dirs:
            path = os.path.join(root, name)
            FilesActionMap[path] = ('created_dir', time.time())
    write_atomic(FILES_STATUS_FILE, FilesActionMap)
    logger.debug('Initial events %s. Committing.' % FilesActionMap)

    # start inotify monitor
    watch_manager = pyinotify.WatchManager()
    handler = ProcessEventHandler()
    notifier = pyinotify.Notifier(watch_manager, default_proc_fun=handler,
            read_freq=SLEEP_TIME)

    # try coalescing events if possible
    try:
        notifier.coalesce_events()
        logger.debug('Successfully enabled events coalescing. Good.')
    except AttributeError:
        pass

    # catch only create/delete/write/attrib events; don't monitor
    # IN_MODIFY, use IN_CLOSE_WRITE instead so a file is only seen once
    # it has been written to and closed; also monitor IN_MOVED_TO to
    # catch temporary files renamed into place (atomic writes) and
    # IN_MOVED_FROM for files moved out of the watched path
    event_mask = pyinotify.IN_CREATE|pyinotify.IN_DELETE|\
            pyinotify.IN_CLOSE_WRITE|pyinotify.IN_ATTRIB|\
            pyinotify.IN_MOVED_TO|pyinotify.IN_MOVED_FROM|\
            pyinotify.IN_ISDIR|pyinotify.IN_UNMOUNT|\
            pyinotify.IN_Q_OVERFLOW
    watch_manager.add_watch(WATCH_DIR, event_mask, rec=True,
            auto_add=True)

    # enter loop
    logger.debug('Inotify handler starting... Entering notify loop.')
    notifier.loop()
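ProcessEventHandler, instantiated above, is presumably a pyinotify.ProcessEvent subclass whose process_default() is the method shown in Example No. 1. A skeleton for illustration:

import pyinotify

class ProcessEventHandler(pyinotify.ProcessEvent):
    """pyinotify falls back to process_default() for any event without
    a more specific process_* method, so the single handler from
    Example No. 1 sees every watched event."""

    def process_default(self, event):
        # body as in Example No. 1: map event.mask through InotifyMask
        # and record the pending action in FilesActionMap
        pass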