Example #1
class WatchFolder:
    """
    Creates and manages watch folder instances.
    """
    def __init__(self):
        self._watch_folder_instances = {}
        self._watch_folder_observer = Observer()
        self._watch_folder_observer.start()

    def add_folder_path(self, folder_path):
        """
        Sets up a watch folder instance using the folder path.

        :param folder_path: The absolute path of the folder.
        """
        watch_folder_instance = WatchFolderInstance(folder_path)
        self._watch_folder_instances[folder_path] = watch_folder_instance
        watch_folder_instance.watch = self._watch_folder_observer.schedule(
            watch_folder_instance.event_handler, folder_path, recursive=False)

    def get_instance(self, folder_path):
        """
        Returns the next queued item from the watch folder instance for the
        folder path, or None if the folder is not being watched.

        :param folder_path: The absolute path of the folder.
        """
        try:
            return self._watch_folder_instances[folder_path].queue.get()
        except KeyError:
            return None

    def is_instance_empty(self, folder_path):
        """
        Checks if the watch folder instance for the folder path has found any new files.

        :param folder_path: The absolute path of the folder.
        """
        try:
            watch_folder_instance = self._watch_folder_instances[folder_path]
            return watch_folder_instance.queue.empty()
        except KeyError:
            return True

    def stop_and_remove_instance(self, folder_path):
        """
        Stops the watch folder instance for the folder path and removes it.

        :param folder_path: The absolute path of the folder.
        """
        try:
            watch_folder_instance = self._watch_folder_instances.pop(folder_path)
            watch_folder_instance.queue.put(False)
            self._watch_folder_observer.unschedule(watch_folder_instance.watch)

            return True
        except KeyError:
            logging.exception(
                '--- FAILED TO STOP AND REMOVE WATCH FOLDER INSTANCE ---')

            return False
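`WatchFolderInstance` is project-specific and not shown here. A minimal sketch of the shape Example #1 appears to assume (an event handler that feeds newly created file paths into a queue); the names and behavior below are guesses, not the original implementation:

import queue

from watchdog.events import FileSystemEventHandler


class NewFileHandler(FileSystemEventHandler):
    """Hypothetical handler: push newly created file paths onto a queue."""

    def __init__(self, file_queue):
        self.queue = file_queue

    def on_created(self, event):
        if not event.is_directory:
            self.queue.put(event.src_path)


class WatchFolderInstance:
    """Hypothetical stand-in for the class Example #1 relies on."""

    def __init__(self, folder_path):
        self.folder_path = folder_path
        self.queue = queue.Queue()
        self.event_handler = NewFileHandler(self.queue)
        self.watch = None  # assigned by WatchFolder.add_folder_path()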
Example #2
class FileMonitor(object):
    def __init__(self):
        self.watches = []
        self.observer = Observer()
        self.observer.start()

    def addWatch(self, path, filename=None, recursive=False):
        if filename is None:
            self.watches.append(self.observer.schedule(MyEventHandler(), path, recursive))
            return

        if not path.endswith('/'):
            full_path = path + '/' + filename
        else:
            full_path = path + filename
        self.watches.append(self.observer.schedule(MyEventHandler([full_path,]), path, recursive))

    def removeWatch(self, path):
        if not path.endswith('/'):
            path += '/'

        for watch in self.watches:
            if watch.path == path:
                self.observer.unschedule(watch)
                self.watches.remove(watch)
                return True
        return False

    def removeAllWatches(self):
        self.observer.unschedule_all()
        self.watches = []
Example #3
class FileMonitor(object):
    def __init__(self):
        self.watches = []
        self.observer = Observer()
        self.observer.start()

    def add_watch(self, path, filename=None, recursive=False):
        # if no filename is provided, just watch the directory
        if filename is None:
            self.watches.append(
                self.observer.schedule(FSEventHandler(), path, recursive))
            return

        # if we're monitoring a specific file, we have to pass the full path to the event handler
        if not path.endswith('/'):
            full_path = path + '/' + filename
        else:
            full_path = path + filename
        self.watches.append(
            self.observer.schedule(FSEventHandler([
                full_path,
            ]), path, recursive))

    def remove_watch(self, path):
        if not path.endswith('/'):
            path += '/'
        for watch in self.watches:
            if watch.path == path:
                self.observer.unschedule(watch)
                self.watches.remove(watch)
                return True
        return False

    def remove_all_watches(self):
        self.observer.unschedule_all()
        self.watches = []
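Examples #2 and #3 assume an event handler (`MyEventHandler` / `FSEventHandler`) that optionally takes a list of absolute file paths and reports only events for those files. A minimal sketch under that assumption (not the original handler):

from watchdog.events import FileSystemEventHandler


class FSEventHandler(FileSystemEventHandler):
    """Hypothetical: if `paths` is given, only react to events for those files."""

    def __init__(self, paths=None):
        self.paths = paths

    def on_any_event(self, event):
        if self.paths is None or event.src_path in self.paths:
            print(event.event_type, event.src_path)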
Example #4
    def run_watcher(self):
        # Create and start the observer
        observer = Observer()
        observer.start()

        self.watch_obj = None
        try:
            # 2KGRW
            # Schedule the watch once:
            # self: `FileSystemEventHandler` instance
            # self._watched_path: file path to watch
            # recursive: whether to watch subdirectories
            self.watch_obj = observer.schedule(self,
                                               self._watched_path,
                                               recursive=True)
        except Exception as e:
            print(f'Error scheduling watch:\n{e}')

        # Wait until asked to stop
        while not self._watcher_stop:
            time.sleep(self._interval)

        if self.watch_obj is not None:
            observer.unschedule(self.watch_obj)
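Example #18 below is a fuller variant of this watcher (it carries the same `2KGRW` marker): instead of watching one fixed path, it reconciles a set of watch paths on every pass, scheduling new ones and unscheduling those that have disappeared.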
Example #5
class EntryWatcher:
    def __init__(self, navigator: 'Navigator'):
        self.navigator = navigator
        self.observer = Observer()
        self.observer.start()

        self.entry = None
        self.watch = None

    def on_update(self, _):
        if self.watch and self.entry == self.navigator.current_entry:
            return
        if self.watch:
            self.unregister(self.watch)
        self.watch = None
        self.entry = self.navigator.current_entry
        self.watch = self.register()

    def unregister(self, watch):
        if watch:
            self.observer.unschedule(watch)

    def register(self):
        entry = self.navigator.current_entry
        if entry.is_dir():
            path = entry.path
            handler = DirHandler(self.navigator)
            logger.debug('Watching {} for changes'.format(entry.path))
        else:
            path = entry.dir
            handler = FileHandler(self.navigator, entry.path)
        watch = self.observer.schedule(handler, path, recursive=False)
        return watch
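`DirHandler` and `FileHandler` are project-specific. A plausible minimal shape, assuming they simply notify the navigator when something changes (the `reload()` call is illustrative only):

from watchdog.events import FileSystemEventHandler


class DirHandler(FileSystemEventHandler):
    """Hypothetical: react to any change inside the watched directory."""

    def __init__(self, navigator):
        self.navigator = navigator

    def on_any_event(self, event):
        self.navigator.reload()  # illustrative only


class FileHandler(FileSystemEventHandler):
    """Hypothetical: react only to changes of one specific file."""

    def __init__(self, navigator, path):
        self.navigator = navigator
        self.path = path

    def on_modified(self, event):
        if event.src_path == self.path:
            self.navigator.reload()  # illustrative only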
Example #6
    def unschedule(self, path):
        '''
        @summary: Removes the watch for the given file path from the watch
                  list. No further cleanup is performed.
        @warning: On Linux the watch thread does not die. Needs testing and a fix.
        '''
        path = abspath(path)
        Observer.unschedule(self, self.watch_path[path])
        del self.watch_path[path]
        logger.debug("unschedule %s" % path)
Example #7
	def main(self):
		eventos = Copiar_JDF_to()
		observador = Observer()
		# keep the watch handle: unschedule() takes the watch returned
		# by schedule(), not the event handler
		watch = observador.schedule(eventos, self.pasta_monitorada, recursive=False)
		observador.start()

		try:
			while True:
				time.sleep(1)
		except KeyboardInterrupt:
			observador.unschedule(watch)
			observador.stop()
		observador.join()
		print("END")
Example #8
def test_watchdog_only_recursive(tmpdir):
    from robocorp_ls_core import watchdog_wrapper

    watchdog_wrapper._import_watchdog()

    import time

    import watchdog
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler
    import os.path

    class Handler(FileSystemEventHandler):
        def __init__(self):
            FileSystemEventHandler.__init__(self)
            self.changes = []

        def on_any_event(self, event):
            print(event.src_path)
            self.changes.append(os.path.basename(event.src_path))

    handler = Handler()
    observer = Observer()

    watches = []
    watches.append(observer.schedule(handler, str(tmpdir), recursive=True))

    try:
        observer.start()
        time.sleep(0.1)

        tmpdir.join("my0.txt").write("foo")
        tmpdir.join("dir_rec").mkdir()
        tmpdir.join("dir_rec").join("my1.txt").write("foo")

        expected = {"dir_rec", "my0.txt", "my1.txt"}
        timeout_at = time.time() + 5
        while not expected.issubset(
                handler.changes) and time.time() < timeout_at:
            time.sleep(0.2)

        if not expected.issubset(handler.changes):
            raise AssertionError(
                f"Did not find expected changes. Found: {handler.changes}")

    finally:
        for watch in watches:
            observer.unschedule(watch)
        observer.stop()
        observer.join()
Example #9
class FileWatch(FileSystemEventHandler):
    def __init__(self, origin, watch_path, event_func):
        FileSystemEventHandler.__init__(self)
        self.origin = origin
        self.watch_path = watch_path
        self.event_func = event_func
        self.observer = Observer()
        self.watch = None
        self.mutex = threading.Lock()

    def get_lock(self):
        return self.mutex

    def start(self):
        watch_dir = os.path.abspath(os.path.dirname(self.watch_path))
        self.origin.dbg('FileWatch: scheduling watch for directory %s' % watch_dir)
        self.watch = self.observer.schedule(self, watch_dir, recursive=False)
        self.observer.start()

    def stop(self):
        if self.watch:
            self.origin.dbg('FileWatch: unscheduling watch %r' % self.watch)
            self.observer.unschedule(self.watch)
            self.watch = None
        if self.observer.is_alive():
            self.observer.stop()
            self.observer.join()

    def __del__(self):
        self.stop()
        self.observer = None

    # Override from FileSystemEventHandler
    def on_any_event(self, event):
        if event.is_directory:
            return None
        if os.path.abspath(event.src_path) != os.path.abspath(self.watch_path):
            return None
        self.origin.dbg('FileWatch: received event %r' % event)
        with self.mutex:
            self.event_func(event)
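A minimal usage sketch; the `origin` object only needs the `dbg()` logging hook that `FileWatch` calls, and every name here is illustrative:

import time


class Origin:
    """Hypothetical: provides the dbg() hook used by FileWatch."""

    def dbg(self, msg):
        print(msg)


fw = FileWatch(Origin(), '/tmp/watched.txt',
               lambda event: print('changed:', event.src_path))
fw.start()
time.sleep(30)  # edits to /tmp/watched.txt during this window fire the callback
fw.stop()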
Example #10
def main():

    print('Starting vagrant rsync auto service.')

    #INITIALIZE
    vagrant('global-status', _bg=True, _out=vagrant_status_out).wait()
    last = time.time()

    handler = VagrantEventHandler()
    observer = Observer()
    watchers = {}

    #START WATCHING
    try:
        observer.start()
        while True:

            vagrant_dirs = set(vagrant_rsync_dirs.keys())
            watched_dirs = set(watchers.keys())

            for d in (vagrant_dirs - watched_dirs):
                watchers[d] = observer.schedule(handler, d, recursive=True)
                print('Observer is watching {}'.format(d))

            for d in (watched_dirs - vagrant_dirs):
                w = watchers.pop(d)
                observer.unschedule(w)
                print('Observer has stopped watching {}'.format(d))


            time.sleep(SLEEP_INTERVAL)
            passed = time.time() - last
            if passed > CHECK_INTERVAL:
                vagrant('global-status', _bg=True, _out=vagrant_status_out)
                last = time.time()

    #SHUT DOWN
    except KeyboardInterrupt:
        print(' << (*) >> ')
        observer.stop()

    observer.join()
    print('Goodbye :-)')
Example #11
def test_watchdog_recursive():
    """ See https://github.com/gorakhargosh/watchdog/issues/706
    """
    from watchdog.observers import Observer
    from watchdog.events import FileSystemEventHandler
    import os.path

    class Handler(FileSystemEventHandler):
        def __init__(self):
            FileSystemEventHandler.__init__(self)
            self.changes = []

        def on_any_event(self, event):
            self.changes.append(os.path.basename(event.src_path))

    handler = Handler()
    observer = Observer()

    watches = []
    watches.append(observer.schedule(handler, str(p('')), recursive=True))

    try:
        observer.start()
        time.sleep(0.1)

        touch(p('my0.txt'))
        mkdir(p('dir_rec'))
        touch(p('dir_rec', 'my1.txt'))

        expected = {"dir_rec", "my0.txt", "my1.txt"}
        timeout_at = time.time() + 5
        while not expected.issubset(
                handler.changes) and time.time() < timeout_at:
            time.sleep(0.2)

        assert expected.issubset(
            handler.changes
        ), "Did not find expected changes. Found: {}".format(handler.changes)
    finally:
        for watch in watches:
            observer.unschedule(watch)
        observer.stop()
        observer.join(1)
Example #12
class IPCom:
    """IPC async thread"""

    INSTANCE: Optional["IPCom"] = None

    def __init__(self):
        self.observer = Observer()
        self.observer.start()
        self.pid = os.getpid()

    def fswatch(self, watcher: FileSystemEventHandler, path: Path, recursive=False):
        if not self.observer.is_alive():
            # Just in case...
            self.observer.start()
        return self.observer.schedule(
            watcher, str(path.absolute()), recursive=recursive
        )

    def fsunwatch(self, watch):
        # `watch` is the ObservedWatch returned by fswatch()
        self.observer.unschedule(watch)
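A minimal usage sketch for `IPCom`, assuming a trivial handler (the handler below is illustrative, not part of the original code):

from pathlib import Path

from watchdog.events import FileSystemEventHandler


class PrintHandler(FileSystemEventHandler):
    """Hypothetical handler for demonstration."""

    def on_modified(self, event):
        print('modified:', event.src_path)


ipcom = IPCom()
watch = ipcom.fswatch(PrintHandler(), Path('/tmp'), recursive=True)
# ... later, when no longer interested:
ipcom.fsunwatch(watch)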
Example #13
class Rec2DB(object):
    def __init__(self, sensor):
        super(Rec2DB, self).__init__()
        self._sensor = sensor
        self._handler = Handler(sensor)
        self._observer = Observer()
        self._watcher = None

    def loop(self):
        self._observer.start()

        folder = "/tmp/" + self._sensor
        os.makedirs(folder, exist_ok=True)
        self._watcher = self._observer.schedule(self._handler,
                                                folder,
                                                recursive=True)
        self._observer.join()

    def stop(self):
        if self._watcher:
            self._observer.unschedule(self._watcher)
        self._observer.stop()
Example #14
class Rainmaker():
   
    def __init__(self):
        self.event_handlers = {}
        self.watches = {}
        self.observer = Observer()
        self.observer.start()

    def add_watch(self, watch_path, rec_flag=True):
        event_handler = RainmakerEventHandler(watch_path)
        self.event_handlers[watch_path] = event_handler
        self.watches[watch_path] = self.observer.schedule(
            event_handler, watch_path, recursive=rec_flag)

    def remove_watch(self, k):
        self.event_handlers.pop(k)
        # unschedule() takes the watch returned by schedule(), not the handler
        self.observer.unschedule(self.watches.pop(k))

    def shutdown(self):
        self.log.info("Shutting down FSwatcher")
        self.observer.stop()
        self.observer.unschedule_all()
        self.observer.join()
Example #15
class Rec2DB(object):
    def __init__(self, sensor):
        super(Rec2DB, self).__init__()
        self._sensor = sensor
        self._handler = Handler(sensor)
        self._observer = Observer()
        self._watcher = None

    def loop(self):
        self._observer.start()

        folder = os.path.join(os.path.realpath(storage), self._sensor)
        print("Observing " + folder, flush=True)
        os.makedirs(folder, exist_ok=True)
        self._watcher = self._observer.schedule(self._handler,
                                                folder,
                                                recursive=True)
        self._observer.join()

    def stop(self):
        if self._watcher:
            self._observer.unschedule(self._watcher)
        self._observer.stop()
Example #16
class FileDaemon:
    def __init__(self, msg_identifier, send_config):
        #Attributes
        self.target_dir = send_config["PATH_BASE"]
        self._monitor_flag_ = threading.Event()
        self.watch = None

        #Components
        self._logger_ = EventLogger()
        self._event_handler_ = SyncEventHandler(msg_identifier, send_config)
        self._observer_ = Observer()

    """
        Public methods
    """
    def initialize(self):
        """
            Initializes components, starts logging
        """
        #Prep for monitor
        self._monitor_flag_.clear()

        #Initialize components
        logfile = os.path.dirname(self.target_dir[:-1]) + SLASH + "daemon.log"
        self._logger_.init_session(logfile)
        self._event_handler_.initialize()
        self._observer_.start()

    def start(self):
        """
            Starts/resumes observing the target directory. Wraps the _monitor_
            method, which is used as the target for a separate thread.
        """
        if self._monitor_flag_.is_set():
            return #Already running
        else:
            self._monitor_flag_.set()
            threading.Thread(target=self._monitor_).start()

    def full_sync(self):
        """
            Handle for the directory sync method of event_handler which
            allows controller to request a sync of entire directory
        """
        self._event_handler_.dir_sync(self.target_dir)

    def pause(self):
        """
            Pause observation with intent to resume
        """
        self._monitor_flag_.clear()

    def stop(self):
        """
            End observation with no intent to resume
        """
        self._monitor_flag_.clear()
        time.sleep(2) #Wait for local activity to settle
        self._observer_.stop() #stop observing
        self._observer_.join() #wait for all threads to be done

    def is_alive(self):
        return self._monitor_flag_.is_set()

    """
        Protected methods
    """
    def _monitor_(self):
        """
            Function run on a separate thread which acts as parent to the observer thread(s).
            Used to control the operation flow of the observer using monitor_flag
        """
        self._logger_.log("INFO", "Scheduling observation of " + self.target_dir + " tree...")
        self.watch = self._observer_.schedule(self._event_handler_, self.target_dir, recursive=True)
        while self._monitor_flag_.is_set():
            time.sleep(1)
        self._observer_.unschedule(self.watch)
Example #17
class FileWatcher:

    class Handler(PatternMatchingEventHandler):

        def __init__(self, watcher):
            super(FileWatcher.Handler, self).__init__(patterns = ["*.pdf"], ignore_directories = True)
            self.watcher = watcher

        def on_any_event(self, event):
            log.debug("Trace received %s event - %s.", event.event_type, event.src_path)

        def on_moved(self, event):
            # file is downloaded with .crdownload suffix and once downloaded renamed to target name
            # captured as a move to the expected target suffix
            log.debug("Received moved event - %s -> %s.", event.src_path, event.dest_path)
            self.watcher.notify(Path(event.dest_path))

    def __init__(self, watchdir = None):
        if watchdir is None:
            self.watched_dir = tempfile.TemporaryDirectory()
            self.watched_dir_path = Path(self.watched_dir.name)
        else:
            self.watched_dir = str(watchdir)
            self.watched_dir_path = watchdir
        log.debug("temp dir for download is [{!s}]".format(self.watched_dir))
        self.new_file = None
        self.observer = Observer()
        self.observer.start()
        self.watch = None

    def get_dir(self):
        return self.watched_dir_path

    def notify(self, f):
        self.new_file = f

    def _new_handler(self):
        return FileWatcher.Handler(self)

    def _init_observer(self):
        if self.watch is None:
            event_handler = self._new_handler()
            self.new_file = None
            self.watch = self.observer.schedule(event_handler, str(self.watched_dir_path), recursive=False)
            log.debug('start waiting for download')

    def start(self):
        self._init_observer()

    def wait_move_file(self, target, timeout=40):
        self._init_observer()
        try:
            while self.new_file is None and timeout >= 0:
                time.sleep(1)
                timeout -= 1
            self.observer.unschedule(self.watch)
            if self.new_file is not None:
                if not target.is_file():
                    copyfile(str(self.new_file), str(target))
                    self.new_file.unlink()
                    log.info("File downloaded : %s", str(target))
                else:
                    log.warning("File already existed dowloaded file is trashed")
            else:
                log.error("download failed with timeout")
        except Exception:
            self.observer.unschedule(self.watch)
            log.exception("error while waiting for file download")

        self.watch = None
        return self.new_file is not None
Example #18
    def run_watcher(self):
        """
        Watcher thread's function.

        :return:
            None.
        """
        # Create observer
        observer = Observer()

        # Start observer
        observer.start()

        # Dict that maps file path to watch object
        watch_obj_map = {}

        # Run change check in a loop
        while not self._watcher_to_stop:
            # Get current watch paths
            old_watch_path_s = set(watch_obj_map)

            # Get new watch paths
            new_watch_path_s = self._find_watch_paths()

            # For each new watch path
            for new_watch_path in new_watch_path_s:
                # Remove from the old watch paths if exists
                old_watch_path_s.discard(new_watch_path)

                # If the new watch path was not watched
                if new_watch_path not in watch_obj_map:
                    try:
                        # Schedule a watch
                        watch_obj = observer.schedule(
                            # 2KGRW
                            # `FileSystemEventHandler` instance
                            self,
                            # File path to watch
                            new_watch_path,
                            # Whether recursive
                            recursive=True,
                        )

                        # Store the watch obj
                        watch_obj_map[new_watch_path] = watch_obj

                    # If have error
                    except OSError:
                        # Set the watch object be None
                        watch_obj_map[new_watch_path] = None

            # For each old watch path that is not in the new watch paths
            for old_watch_path in old_watch_path_s:
                # Get watch object
                watch_obj = watch_obj_map.pop(old_watch_path, None)

                # If have watch object
                if watch_obj is not None:
                    # Unschedule the watch
                    observer.unschedule(watch_obj)

            # Store new watch paths
            self._watch_paths = new_watch_path_s

            # Sleep before next check
            time.sleep(self._interval)
Example #19
class ModuleFileWatcher(EditorModule):
    def __init__(self):
        super(ModuleFileWatcher, self).__init__()
        self.watches = {}

    def getName(self):
        return 'filewatcher'

    def getDependency(self):
        return []

    def onLoad(self):
        self.observer = Observer()
        self.observer.start()

        signals.connect('file.moved', self.onFileMoved)
        signals.connect('file.added', self.onFileCreated)
        signals.connect('file.removed', self.onFileDeleted)
        signals.connect('file.modified', self.onFileModified)

    def onStart(self):
        self.assetWatcher = self.startWatch(
            self.getProject().getAssetPath(),
            ignorePatterns=['*/.git', '*/.*', '*/_gii'])

    def startWatch(self, path, **options):
        path = os.path.realpath(path)
        if self.watches.get(path):
            logging.warning('already watching: %s' % path)
            return self.watches[path]
        logging.info('start watching: %s' % path)

        ignorePatterns = ['*/.git', '*/.*', '*/_gii'] + options.get(
            'ignorePatterns', [])

        handler = FileWatcherEventHandler(
            options.get('patterns', None), ignorePatterns,
            options.get('ignoreDirectories', False),
            options.get('caseSensitive', True))

        watch = self.observer.schedule(handler, path,
                                       options.get('recursive', True))
        self.watches[path] = watch
        return watch

    def onStop(self):
        # print 'stop file watcher'
        self.observer.stop()
        self.observer.join(0.5)
        # print 'stopped file watcher'

    def stopWatch(self, path):
        path = os.path.realpath(path)
        watch = self.watches.pop(path, None)
        if not watch:
            return
        self.observer.unschedule(watch)

    def stopAllWatches(self):
        # logging.info('stop all file watchers')
        self.observer.unschedule_all()
        self.watches = {}

    def onFileMoved(self, path, newpath):
        # print('asset moved:',path, newpath)
        app.getAssetLibrary().scheduleScanProject()
        pass

    def onFileCreated(self, path):
        # print('asset created:',path)
        app.getAssetLibrary().scheduleScanProject()
        pass

    def onFileModified(self, path):
        # print('asset modified:',path)
        app.getAssetLibrary().scheduleScanProject()
        pass

    def onFileDeleted(self, path):
        # print('asset deleted:',path)
        app.getAssetLibrary().scheduleScanProject()
        pass
Example #20
class WatchdogReloaderLoop(ReloaderLoop):
    def __init__(self, *args, **kwargs):
        from watchdog.observers import Observer
        from watchdog.events import PatternMatchingEventHandler

        super().__init__(*args, **kwargs)
        trigger_reload = self.trigger_reload

        class EventHandler(PatternMatchingEventHandler):
            def on_any_event(self, event):
                trigger_reload(event.src_path)

        reloader_name = Observer.__name__.lower()

        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]

        self.name = f"watchdog ({reloader_name})"
        self.observer = Observer()
        # Extra patterns can be non-Python files, match them in addition
        # to all Python files in default and extra directories. Ignore
        # __pycache__ since a change there will always have a change to
        # the source file (or initial pyc file) as well. Ignore Git and
        # Mercurial internal changes.
        extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
        self.event_handler = EventHandler(
            patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
            ignore_patterns=[
                "*/__pycache__/*",
                "*/.git/*",
                "*/.hg/*",
                *self.exclude_patterns,
            ],
        )
        self.should_reload = False

    def trigger_reload(self, filename):
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def __enter__(self):
        self.watches = {}
        self.observer.start()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.observer.stop()
        self.observer.join()

    def run(self):
        while not self.should_reload:
            self.run_step()
            time.sleep(self.interval)

        sys.exit(3)

    def run_step(self):
        to_delete = set(self.watches)

        for path in _find_watchdog_paths(self.extra_files,
                                         self.exclude_patterns):
            if path not in self.watches:
                try:
                    self.watches[path] = self.observer.schedule(
                        self.event_handler, path, recursive=True)
                except OSError:
                    # Clear this path from the list of watches. We don't
                    # want the same error message showing again in the
                    # next iteration.
                    self.watches[path] = None

            to_delete.discard(path)

        for path in to_delete:
            watch = self.watches.pop(path, None)

            if watch is not None:
                self.observer.unschedule(watch)
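Note the reconciliation pattern in `run_step`: paths returned by `_find_watchdog_paths` that are not yet watched get scheduled; watched paths that no longer turn up are popped and unscheduled; and a path whose `schedule()` raised `OSError` is kept in the dict as `None`, so the failure is not retried (and re-logged) on every iteration.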
Example #21
class CollectionScanner(object):
    def __init__(self):

        self.fswatcher = CollectionEventHandler(self)
        self.observer = Observer()
        self.watches = {}
        self.scanner_pool = Pool(processes=4)

        signal.signal(signal.SIGINT, self.quit)
        signal.signal(signal.SIGTERM, self.quit)

    def scan_directory(self, directory, full_scan):
        self.scanner_pool.apply_async(start_scanrunner, [directory, full_scan, self.last_shutdown_time])

    def add_directory(self, directory, full_scan=False):
        # start a full scan if required
        # otherwise do an incremental
        # and schedule a watch.

        self.scan_directory(directory, full_scan)
        #start_scanrunner(directory, full_scan, self.last_shutdown_time)
        #TODO(nikhil) fix me
        self.watches[directory] = self.observer.schedule(self.fswatcher, path=directory, recursive=True)

    def remove_directory(self, directory):
        try:
            self.observer.unschedule(self.watches[directory])
            del self.watches[directory]
        except KeyError:
            pass
        # also remove all tracks within that directory from DB
        tracks = Track.select(Track.q.url.startswith('file://'+directory))
        for track in tracks:
            track.destroySelf()

    def configuration_changed(self):
        # check if collection dirs have
        # been changed since we last started
        # if yes, we will do a full rescan
        # otherwise, an incremental scan.

        config = GlobalConfig()
        paths_saved_at = 0
        last_scan = 0
        self.last_shutdown_time = 0
        try:
            paths_saved_at = int(config['collection']['paths_saved_at'])
        except KeyError:
            #TODO(nikhil) test this behaviour
            pass

        try:
            last_scan = int(config['collection']['last_scan'])
        except KeyError:
            last_scan = paths_saved_at - 1

        try:
            self.last_shutdown_time = int(config['last_shutdown_time'])
        except KeyError:
            pass

        collection_directories = set()
        try:
            collection_directories = set(config['collection']['paths'])
        except KeyError:
            pass

        full_scan = False
        if last_scan < paths_saved_at:
            full_scan = True

        if full_scan:
            try:
                # for a full scan, first wipe all tables
                Artist.deleteMany(None)
                Album.deleteMany(None)
                Genre.deleteMany(None)
                Composer.deleteMany(None)
                Track.deleteMany(None)
                TrackStatistics.deleteMany(None)
                AlbumStatistics.deleteMany(None)
                ArtistStatistics.deleteMany(None)
                GenreStatistics.deleteMany(None)
            except OperationalError:
                pass

        # first remove watches on
        # any directories that have been
        # removed from the collection directories
        existing_directories = set(self.watches.keys())
        for directory in existing_directories.difference(collection_directories):
            self.remove_directory(directory)

        for directory in collection_directories:
            if directory in self.watches:
                # directory is already being watched
                # do nothing
                pass
            else:
                self.add_directory(directory, full_scan)


    def start(self):
        self.configuration_changed()
        # Note: a 'first' scan is an
        # incremental scan behaving like a full
        # scan, so we don't have to explicitly
        # handle that case.
        # finally put a watch on the config file itself
        self.observer.schedule(ConfigWatcher(self), path=USERDIR)
        self.observer.start()

    def quit(self, signum, frame):
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        # stop watching file before we make any changes
        self.observer.unschedule_all()
        self.observer.stop()
        # close all update threads
        # save current time
        config = GlobalConfig()
        try:
            config['collection']
        except KeyError:
            config['collection'] = {}
        now = int(time.time())
        config['collection']['last_scan'] = now
        config.save()

        self.scanner_pool.close()
        self.scanner_pool.terminate()

        self.observer.join()
        self.scanner_pool.join()
        sys.exit(0)
Example #22
class ConfigWatcher(object):
    @staticmethod
    def get_config():
        config_name = 'config.ini'
        default_home = get_default_home()
        path = os.path.join(os.path.dirname(sys.executable), config_name)
        if os.path.exists(path):
            return path
        user_ini = os.path.expanduser(os.path.join(default_home, config_name))
        if os.path.exists(user_ini):
            return user_ini
        return None

    config_ini = get_config.__func__()

    class ConfigModifiedEventHandler(FileSystemEventHandler):
        def on_modified(self, event):
            if isinstance(event, FileModifiedEvent) and not event.is_directory and \
                            event.src_path == ConfigWatcher.config_ini:

                import ConfigParser
                config = ConfigParser.RawConfigParser()
                config.read(ConfigWatcher.config_ini)
                try:
                    new_upload_rate = config.getint(ConfigParser.DEFAULTSECT, 'upload-rate')
                except ConfigParser.NoOptionError as e:
                    new_upload_rate = -1

                try:
                    new_download_rate = config.getint(ConfigParser.DEFAULTSECT, 'download-rate')
                except ConfigParser.NoOptionError as e:
                    new_download_rate = -1
                self.set_rate_limit_changed(new_upload_rate, new_download_rate)

        @staticmethod
        def set_rate_limit_changed(new_upload_rate, new_download_rate): 
            try:
                current_upload_rate = int(Manager.get()._dao.get_config('upload_rate', -1))
            except ValueError:
                current_upload_rate = -1

            try:
                current_download_rate = int(Manager.get()._dao.get_config('download_rate', -1))
            except ValueError:
                current_download_rate = -1

            change = not (new_upload_rate == current_upload_rate and new_download_rate == current_download_rate)
            if not change:
                return

            slower = new_upload_rate < current_upload_rate or new_download_rate < current_download_rate
            if slower:
                # change processors first, then update the rate(s)
                ConfigWatcher.ConfigModifiedEventHandler.change_processors(new_upload_rate, new_download_rate)

            if new_upload_rate != current_upload_rate:
                BaseAutomationClient.set_upload_rate_limit(new_upload_rate)
                Manager.get()._dao.update_config('upload_rate', new_upload_rate)
                log.trace('update upload rate: %s', str(BaseAutomationClient.upload_token_bucket))

            if new_download_rate != current_download_rate:
                BaseAutomationClient.set_download_rate_limit(new_download_rate)
                Manager.get()._dao.update_config('download_rate', new_download_rate)
                log.trace('update download rate: %s', str(BaseAutomationClient.download_token_bucket))

            if change and new_upload_rate > current_upload_rate and new_download_rate > current_download_rate:
                # changed rate(s) first, then change processors
                ConfigWatcher.ConfigModifiedEventHandler.change_processors(new_upload_rate, new_download_rate)

        @staticmethod
        def change_processors(upload_rate, download_rate):
            max_processors = get_number_of_processors(upload_rate, download_rate)
            for engine in Manager.get().get_engines().values():
                try:
                    engine.get_queue_manager().restart(max_processors)
                    log.trace('update engine %s processors: %d local, %d remote, %d generic',
                              engine.get_uid(), max_processors[0], max_processors[1], max_processors[2])
                except Exception as e:
                    log.error('failed to update engine %s processors: %s', engine.get_uid(), str(e))

    def __init__(self):
        self.watch = None
        self.observer = Observer()

    def setup_watchdog(self):
        if not ConfigWatcher.config_ini:
            return
        event_handler = ConfigWatcher.ConfigModifiedEventHandler()
        self.watch = self.observer.schedule(event_handler, os.path.dirname(ConfigWatcher.config_ini), recursive=False)
        self.observer.start()

    def unset_watcher(self):
        if self.watch is not None:
            self.observer.unschedule(self.watch)
            self.watch = None
Example #23
class FileSync(TimerWorker):
    ###########################
    # START Class Inheritance #
    def init_args(self,
                  dimensigon: 'Dimensigon',
                  file_sync_period=defaults.FILE_SYNC_PERIOD,
                  file_watches_refresh_period=FILE_WATCHES_REFRESH_PERIOD,
                  max_allowed_errors=MAX_ALLOWED_ERRORS,
                  retry_blacklist=RETRY_BLACKLIST):
        self.dm = dimensigon

        # Multiprocessing
        self.queue = MPQueue()

        # Parameters
        self.INTERVAL_SECS = file_sync_period
        self.file_watches_refresh_period = file_watches_refresh_period
        self.max_allowed_errors = max_allowed_errors
        self.retry_blacklist = retry_blacklist

        # internals
        self._changed_files: t.Set[Id] = set()  # changed files to be sent
        self._changed_servers: t.Dict[Id, t.List[Id]] = dict()
        self._file2watch: t.Dict[Id, ObservedWatch] = {}
        self._last_file_updated = None
        self._blacklist: t.Dict[t.Tuple[Id, Id], BlacklistEntry] = {}
        self._blacklist_log: t.Dict[t.Tuple[Id, Id], BlacklistEntry] = {}
        self.session = None
        self._server = None
        self._loop = None

        # log variables
        self._mapper: t.Dict[Id, t.List[_PygtailBuffer]] = {}

    def startup(self):
        self._executor = ThreadPoolExecutor(
            max_workers=max(os.cpu_count(), 4),
            thread_name_prefix="FileSyncThreadPool")
        self._observer = Observer()
        self.session = self._create_session()
        self._observer.start()

        self._set_initial_modifications()
        self._loop = asyncio.new_event_loop()
        asyncio.set_event_loop(self._loop)
        self.dispatcher.listen([NewEvent, AliveEvent],
                               lambda x: self.add(None, x.args[0]))

    def shutdown(self):
        self.session.close()
        self._observer.stop()
        self._executor.shutdown()

    def main_func(self):
        # collect new File events
        while True:
            item = self.queue.safe_get()
            if item:
                self._add(*item)
            else:
                break
        self._set_watchers()
        self._sync_files()

        # send log data
        self._send_new_data()

    # END Class Inheritance #
    #########################

    ############################
    # INIT Interface functions #
    def add(self,
            file: t.Union[File, Id] = None,
            server: t.Union[Server, Id] = None):
        if isinstance(file, File):
            file_id = file.id
        else:
            file_id = file
        if isinstance(server, Server):
            server_id = server.id
        else:
            server_id = server
        try:
            self.queue.safe_put((file_id, server_id), timeout=2)
        except queue.Full:
            self.logger.warning("Queue is full. Try increasing its size")

    # END Interface functions  #
    ############################

    ##############################
    # INNER methods & attributes #
    def _create_session(self):
        self.Session = sessionmaker(bind=self.dm.engine)
        return self.Session()

    @property
    def server(self) -> Server:
        if self._server is None:
            self._server = self.session.query(Server).filter_by(
                _me=1, deleted=0).one_or_none()
        return self._server

    @property
    def my_files_query(self):
        return self.session.query(File).filter_by(deleted=0).filter_by(
            src_server_id=self.server.id) if self.session else None
        # return File.query.filter_by(src_server_id=self._server_id)

    @property
    def file_query(self):
        return self.session.query(File).filter_by(
            deleted=0) if self.session else None
        # return File.query

    def get_file(self, file_id):
        return self.file_query.filter_by(
            id=file_id).one_or_none() if self.session else None
        # return self.file_query.filter_by(id=file_id).one_or_none()

    def _add(self, file_id: Id = None, server_id: Id = None):
        if server_id:
            if file_id is None:
                # new server alive, send all files
                for fsa in self.session.query(FileServerAssociation).filter_by(
                        dst_server_id=server_id,
                        deleted=0).join(File).filter_by(
                            src_server_id=self.server.id).all():
                    if not (getattr(fsa.file, 'deleted', True)
                            or fsa.destination_server.deleted):
                        mtime = os.stat(fsa.target).st_mtime_ns
                        if fsa.l_mtime != mtime:
                            self._add(fsa.file.id, fsa.destination_server.id)
            elif file_id not in self._changed_files:
                # send data to a specific server
                if file_id not in self._changed_servers:
                    self._changed_servers[file_id] = [server_id]
                else:
                    if server_id not in self._changed_servers[file_id]:
                        self._changed_servers[file_id].append(server_id)
        else:
            self._changed_files.add(file_id)
            if file_id in self._changed_servers:
                self._changed_servers.pop(file_id)

    @staticmethod
    def _read_file(file, compress=True):
        with open(file, 'rb') as fd:
            if compress:
                return base64.b64encode(zlib.compress(
                    fd.read())).decode('utf-8')
            else:
                return base64.b64encode(fd.read()).decode('utf-8')

    async def _send_file(self, file: File, servers: t.List[Id] = None):
        try:
            content = await self._loop.run_in_executor(self._executor,
                                                       self._read_file,
                                                       file.target)
        except Exception as e:
            self.logger.exception(
                f"Unable to get content from file {file.target}.")
            return

        if servers:
            server_ids = servers
            fsas = [
                fsa for fsa in file.destinations
                if fsa.destination_server.id in server_ids
            ]
        else:
            server_ids = [
                fsa.destination_server.id for fsa in file.destinations
            ]
            fsas = file.destinations

        with self.dm.flask_app.app_context():
            auth = get_root_auth()
            alive = self.dm.cluster_manager.get_alive()
            tasks = [
                ntwrk.async_post(fsa.destination_server,
                                 view_or_url='api_1_0.file_sync',
                                 view_data={'file_id': file.id},
                                 json=dict(file=fsa.target,
                                           data=content,
                                           force=True),
                                 auth=auth) for fsa in fsas
                if fsa.destination_server.id in alive
            ]
            skipped = [
                fsa.destination_server.name for fsa in fsas
                if fsa.destination_server.id not in alive
            ]
            if skipped:
                self.logger.debug(
                    f"Following servers are skipped because we do not see them alive: {', '.join(skipped)}"
                )
            if tasks:
                self.logger.debug(
                    f"Syncing file {file} with the following servers: {', '.join([fsa.destination_server.name for fsa in fsas if fsa.destination_server.id in alive])}."
                )

                resps = await asyncio.gather(*tasks)
                for resp, fsa in zip(resps, fsas):
                    if not resp.ok:
                        self.logger.warning(
                            f"Unable to send file {file.target} to {fsa.destination_server}. Reason: {resp}"
                        )
                        if (file.id, fsa.destination_server.id
                            ) not in self._blacklist:
                            bl = BlacklistEntry()
                            self._blacklist[(file.id,
                                             fsa.destination_server.id)] = bl
                        else:
                            bl = self._blacklist.get(
                                (file.id, fsa.destination_server.id))
                        bl.retries += 1
                        if bl.retries >= self.max_allowed_errors:
                            self.logger.debug(
                                f"Adding server {fsa.destination_server} to the blacklist."
                            )
                            bl.blacklisted = time.time()
                    else:
                        if (file.id,
                                fsa.destination_server.id) in self._blacklist:
                            self._blacklist.pop(
                                (file.id, fsa.destination_server.id), None)
                        fsa.l_mtime = file.l_mtime
                try:
                    self.session.commit()
                except Exception:
                    self.session.rollback()

    def _sync_files(self):
        coros = []
        for file_id in self._changed_files:
            f = self.get_file(file_id)
            if f:
                try:
                    f.l_mtime = os.stat(f.target).st_mtime_ns
                except FileNotFoundError:
                    pass
                else:
                    coros.append(self._send_file(f))
        if coros:
            try:
                self.session.commit()
            except Exception:
                self.session.rollback()

        for file_id, server_id in list(self._blacklist.keys()):
            file = self.get_file(file_id)
            dest = self.session.query(Server).filter_by(deleted=0).filter_by(
                id=server_id).count()
            bl = self._blacklist[(file_id, server_id)]
            if file and dest:  # file and server in the black list may be deleted
                # if server_id in getattr(getattr(self.app, 'cluster_manager', None), 'cluster', [server_id]):
                if (bl.retries < self.max_allowed_errors
                        or time.time() - bl.blacklisted > self.retry_blacklist):
                    if file_id not in self._changed_files:
                        coros.append(self._send_file(file, [server_id]))
            else:
                self._blacklist.pop((file_id, server_id), None)

        for file_id, server_ids in self._changed_servers.items():
            file = self.get_file(file_id)
            if file:
                coros.append(self._send_file(file, server_ids))

        if coros:
            try:
                self._loop.run_until_complete(
                    asyncio.gather(*coros, return_exceptions=False))
            except Exception:
                self.logger.exception("Error while trying to send data.")
        self._changed_files.clear()
        self._changed_servers.clear()

    def _schedule_file(self, file_id, target=None):
        if isinstance(file_id, File):
            target = file_id.target
            file_id = file_id.id
        assert target is not None
        weh = WatchdogEventHandler(file_id, self, patterns=[target])
        try:
            watch = self._observer.schedule(weh,
                                            os.path.dirname(target),
                                            recursive=False)
        except FileNotFoundError:
            pass
        else:
            self._file2watch.update({file_id: watch})

    def _set_initial_modifications(self):
        for file in self.my_files_query.all():
            self._schedule_file(file)
            try:
                if os.path.exists(file.target):
                    mtime = os.stat(file.target).st_mtime_ns
                    if mtime != file.l_mtime:
                        self._add(file.id, None)
                    else:
                        for fsa in file.destinations:
                            if fsa.l_mtime != mtime:
                                self._add(file.id, fsa.destination_server.id)
            except Exception:
                pass

    def _set_watchers(self, force=False):
        if (self._last_file_updated is None or force or
                time.time() - self._last_file_updated > self.file_watches_refresh_period):
            self._last_file_updated = time.time()
            id2target = {
                f.id: f.target
                for f in self.my_files_query.options(
                    orm.load_only("id", "target")).all()
            }
            files_from_db = set(id2target.keys())
            files_already_watching = set(self._file2watch.keys())

            file_watches_to_remove = files_already_watching - files_from_db
            file_watches_to_add = files_from_db - files_already_watching

            if file_watches_to_remove:
                self.logger.debug(
                    f"Unscheduling the following files: {file_watches_to_remove}"
                )
                for file_id in file_watches_to_remove:
                    self._observer.unschedule(self._file2watch[file_id])
                    self._file2watch.pop(file_id)

            if file_watches_to_add:
                self.logger.debug(
                    f"Scheduling the following files: {file_watches_to_add}")
                for file_id in file_watches_to_add:
                    self._schedule_file(file_id, id2target[file_id])
                    # add for sending file for first time
                    self._add(file_id, None)

    @property
    def my_logs(self):
        return self.session.query(Log).filter_by(
            source_server=Server.get_current(session=self.session)).all()

    def update_mapper(self):
        logs = self.my_logs
        id2log = {log.id: log for log in logs}
        # remove logs
        for log_id in list(self._mapper.keys()):
            if log_id not in id2log:
                del self._mapper[log_id]

        # add new logs
        for log in logs:
            if log.id not in self._mapper:
                self._mapper[log.id] = []
            self.update_pytail_objects(log, self._mapper[log.id])

    def update_pytail_objects(self, log: Log, pytail_list: t.List):
        if os.path.isfile(log.target):
            if len(pytail_list) == 0:
                filename = '.' + os.path.basename(log.target) + '.offset'
                path = os.path.dirname(log.target)
                offset_file = self.dm.config.path(defaults.OFFSET_DIR,
                                                  remove_root(path), filename)
                if not os.path.exists(offset_file):
                    _log_logger.debug(f"creating offset file {offset_file}")
                    os.makedirs(os.path.dirname(offset_file), exist_ok=True)
                pytail_list.append(
                    _PygtailBuffer(file=log.target,
                                   offset_mode='manual',
                                   offset_file=offset_file))
        else:
            for folder, dirnames, filenames in os.walk(log.target):
                for filename in filenames:
                    if log._re_include.search(
                            filename) and not log._re_exclude.search(filename):
                        file = os.path.join(folder, filename)
                        offset_file = os.path.join(folder,
                                                   '.' + filename + '.offset')
                        if not any(map(lambda p: p.file == file, pytail_list)):
                            pytail_list.append(
                                _PygtailBuffer(file=file,
                                               offset_mode='manual',
                                               offset_file=offset_file))
                if not log.recursive:
                    break
                new_dirnames = []
                for dirname in dirnames:
                    if log._re_include.search(
                            dirname) and not log._re_exclude.search(dirname):
                        new_dirnames.append(dirname)
                dirnames[:] = new_dirnames

    def _send_new_data(self):
        self.update_mapper()
        tasks = OrderedDict()

        for log_id, pb in self._mapper.items():
            log = self.session.query(Log).get(log_id)
            for pytail in pb:
                data = pytail.fetch()
                data = data.encode() if isinstance(data, str) else data
                if data and log.destination_server.id in self.dm.cluster_manager.get_alive():
                    if log.mode == Mode.MIRROR:
                        file = pytail.file
                    elif log.mode == Mode.REPO_ROOT:
                        path_to_remove = os.path.dirname(log.target)
                        relative = os.path.relpath(pytail.file, path_to_remove)
                        file = os.path.join('{LOG_REPO}', relative)
                    elif log.mode == Mode.FOLDER:
                        path_to_remove = os.path.dirname(log.target)
                        relative = os.path.relpath(pytail.file, path_to_remove)
                        file = os.path.join(log.dest_folder, relative)
                    else:

                        def get_root(dirname):
                            new_dirname = os.path.dirname(dirname)
                            if new_dirname == dirname:
                                return dirname
                            else:
                                return get_root(new_dirname)

                        relative = os.path.relpath(pytail.file,
                                                   get_root(pytail.file))
                        file = os.path.join('{LOG_REPO}', relative)
                    with self.dm.flask_app.app_context():
                        auth = get_root_auth()

                    task = ntwrk.async_post(
                        log.destination_server,
                        'api_1_0.logresource',
                        view_data={'log_id': str(log_id)},
                        json={
                            "file": file,
                            "data": base64.b64encode(
                                zlib.compress(data)).decode('ascii'),
                            "compress": True,
                        },
                        auth=auth)

                    tasks[task] = (pytail, log)
                    _log_logger.debug(
                        f"Task sending data from '{pytail.file}' to '{log.destination_server}' prepared"
                    )

        if tasks:
            async def _gather_all():
                # asyncio.run() expects a coroutine, so wrap the gather call
                return await asyncio.gather(*tasks.keys())

            with self.dm.flask_app.app_context():
                responses = asyncio.run(_gather_all())

            for task, resp in zip(tasks.keys(), responses):
                pytail, log = tasks[task]
                if resp.ok:
                    pytail.update_offset_file()
                    _log_logger.debug(f"Updated offset from '{pytail.file}'")
                    self._blacklist_log.pop(log.id, None)
                else:
                    _log_logger.error(
                        f"Unable to send log information from '{pytail.file}' to '{log.destination_server}'. Error: {resp}"
                    )
                    if log.id not in self._blacklist_log:
                        bl = BlacklistEntry()
                        self._blacklist_log[log.id] = bl
                    else:
                        bl = self._blacklist_log.get(log.id)
                    bl.retries += 1
                    if bl.retries >= self.max_allowed_errors:
                        _log_logger.debug(
                            f"Adding server {log.destination_server.id} to the blacklist."
                        )
                        bl.blacklisted = time.time()
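
The retry bookkeeping above leans on a BlacklistEntry class defined elsewhere in the project. A minimal sketch of what such an entry and the failure-path update could look like, assuming only a retry counter and a blacklist timestamp (names hypothetical):

import time


class BlacklistEntry:
    """Hypothetical minimal counterpart of the entry used above."""

    def __init__(self):
        self.retries = 0         # consecutive failed send attempts
        self.blacklisted = None  # time.time() when the error budget ran out


def record_failure(blacklist, log_id, max_allowed_errors=3):
    # Fetch or create the entry, bump the counter and, once the limit
    # is reached, stamp the time the destination was blacklisted.
    entry = blacklist.setdefault(log_id, BlacklistEntry())
    entry.retries += 1
    if entry.retries >= max_allowed_errors:
        entry.blacklisted = time.time()
    return entry
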
Example No. 24
class Feeder():
    def __init__(self):
        logger.debug("Initializing Feeder")

        self.office = list(map(float, os.environ["OFFICE"].split(",")))
        self.alg_id = None
        self.recording_volume = os.environ["STORAGE_VOLUME"]
        self.every_nth_frame = int(os.environ["EVERY_NTH_FRAME"])

        #Hosts
        self.dbhost = os.environ["DBHOST"]
        self.vahost = "http://localhost:8080/pipelines"
        self.mqtthost = os.environ["MQTTHOST"]

        #Clients
        self.db_alg = DBIngest(host=self.dbhost,
                               index="algorithms",
                               office=self.office)
        self.db_inf = DBIngest(host=self.dbhost,
                               index="analytics",
                               office=self.office)
        self.db_sensors = DBQuery(host=self.dbhost,
                                  index="sensors",
                                  office=self.office)
        self.mqttclient = None
        self.mqtttopic = None
        self.observer = Observer()

        self.batchsize = 300
        self.inference_cache = []

        self._threadflag = False

    def start(self):

        logger.info(" ### Starting Feeder ### ")

        logger.debug("Waiting for VA startup")
        r = requests.Response()
        r.status_code = 400
        while r.status_code not in (200, 201):
            try:
                r = requests.get(self.vahost)
            except Exception:
                r = requests.Response()
                r.status_code = 400

            time.sleep(10)

        # Register Algorithm
        logger.debug("Registering as algorithm in the DB")

        while True:
            try:
                self.alg_id = self.db_alg.ingest({
                    "name": "object_detection",
                    "office": {
                        "lat": self.office[0],
                        "lon": self.office[1]
                    },
                    "status": "idle",
                    "skip": self.every_nth_frame,
                })["_id"]
                break
            except Exception as e:
                logger.debug("Register algo exception: " + str(e))
                time.sleep(10)

        self.mqtttopic = "smtc_va_inferences_" + self.alg_id

        camera_monitor_thread = Thread(target=self.monitor_cameras,
                                       daemon=True)

        logger.debug("Starting working threads")
        self._threadflag = True
        self.startmqtt()
        self.observer.start()
        camera_monitor_thread.start()

        logger.debug("Waiting for interrupt...")
        camera_monitor_thread.join()
        self.observer.join()

    def stop(self):
        logger.info(" ### Stopping Feeder ### ")

        self._threadflag = False

        logger.debug("Unregistering algorithm from DB")
        self.db_alg.delete(self.alg_id)

        self.mqttclient.loop_stop()
        self.observer.stop()

    def startmqtt(self):
        self.mqttclient = mqtt.Client("feeder_" + self.alg_id)
        self.mqttclient.connect(self.mqtthost)
        self.mqttclient.on_message = self.mqtt_handler
        self.mqttclient.loop_start()
        self.mqttclient.subscribe(self.mqtttopic)

    def mqtt_handler(self, client, userdata, message):
        m_in = json.loads(str(message.payload.decode("utf-8", "ignore")))

        for tag in m_in["tags"]:
            m_in[tag] = m_in["tags"][tag]

        del m_in["tags"]

        m_in["time"] = m_in["real_base"] + m_in["timestamp"]
        # convert to milliseconds
        m_in["time"] = int(m_in["time"] / 1000000)
        self.inference_cache.append(m_in)
        if len(self.inference_cache) >= self.batchsize:
            try:
                self.db_inf.ingest_bulk(self.inference_cache[:self.batchsize])
                self.inference_cache = self.inference_cache[self.batchsize:]
            except Exception as e:
                logger.debug("Ingest Error: " + str(e))

    def monitor_cameras(self):
        logger.debug("Starting Sensor Monitor Thread")
        while self._threadflag:
            logger.debug("Searching for sensors...")

            try:
                for sensor in self.db_sensors.search(
                        "sensor:'camera' and status:'idle' and office:[" +
                        str(self.office[0]) + "," + str(self.office[1]) + "]"):
                    logger.debug(sensor)
                    try:
                        fswatch = None
                        logger.debug("Sensor found! " + sensor["_id"])
                        logger.debug("Setting sensor " + sensor["_id"] +
                                     " to streaming")
                        r = self.db_sensors.update(sensor["_id"],
                                                   {"status": "streaming"},
                                                   version=sensor["_version"])

                        logger.debug(
                            "Setting algorithm to streaming from sensor " +
                            sensor["_id"])
                        r = self.db_alg.update(self.alg_id, {
                            "source": sensor["_id"],
                            "status": "processing"
                        })

                        # Attempt to POST to VA service
                        jsonData = {
                            "source": {
                                "uri": sensor["_source"]["url"],
                                "type": "uri"
                            },
                            "tags": {
                                "algorithm": self.alg_id,
                                "sensor": sensor["_id"],
                                "office": {
                                    "lat": self.office[0],
                                    "lon": self.office[1],
                                },
                            },
                            "parameters": {
                                "every-nth-frame": self.every_nth_frame,
                                "recording_prefix":
                                "recordings/" + sensor["_id"],
                                "method": "mqtt",
                                "address": self.mqtthost,
                                "clientid": self.alg_id,
                                "topic": self.mqtttopic
                            },
                        }

                        folderpath = os.path.join(
                            os.path.realpath(self.recording_volume),
                            sensor["_id"])
                        if not os.path.exists(folderpath):
                            os.makedirs(folderpath)

                        logger.debug("Adding folder watch for " + folderpath)
                        filehandler = FSHandler(
                            sensor=sensor["_id"],
                            office=self.office,
                            dbhost=self.dbhost,
                            rec_volume=self.recording_volume)
                        fswatch = self.observer.schedule(filehandler,
                                                         folderpath,
                                                         recursive=True)

                        try:
                            logger.info("Posting Request to VA Service")
                            r = requests.post(self.vahost +
                                              "/object_detection/2",
                                              json=jsonData,
                                              timeout=10)
                            r.raise_for_status()
                            pipeline_id = None

                            if r.status_code == 200:
                                logger.debug("Started pipeline " + r.text)
                                pipeline_id = int(r.text)

                            while r.status_code == 200:
                                logger.debug("Querying status of pipeline")
                                r = requests.get(self.vahost +
                                                 "/object_detection/2/" +
                                                 str(pipeline_id) + "/status",
                                                 timeout=10)
                                r.raise_for_status()
                                jsonValue = r.json()
                                if "avg_pipeline_latency" not in jsonValue:
                                    jsonValue["avg_pipeline_latency"] = 0
                                state = jsonValue["state"]
                                try:
                                    logger.debug("fps: ")
                                    logger.debug(str(jsonValue))
                                except Exception:
                                    logger.debug("error")
                                logger.debug("Pipeline state is " + str(state))
                                if state in ("COMPLETED", "ABORTED", "ERROR"):
                                    logger.debug("Pipeline ended")
                                    break

                                self.db_alg.update(
                                    self.alg_id, {
                                        "performance":
                                        jsonValue["avg_fps"],
                                        "latency":
                                        jsonValue["avg_pipeline_latency"] *
                                        1000
                                    })

                                time.sleep(10)

                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to disconnected")
                            r = self.db_sensors.update(
                                sensor["_id"], {"status": "disconnected"})

                        except requests.exceptions.RequestException as e:
                            logger.error(
                                "Feeder: Request to VA Service Failed: " +
                                str(e))
                            logger.debug("Setting sensor " + sensor["_id"] +
                                         " to idle")
                            r = self.db_sensors.update(sensor["_id"],
                                                       {"status": "idle"})

                    except Exception as e:
                        logger.error("Feeder Exception: " + str(e))

                    if fswatch:
                        self.observer.unschedule(fswatch)
                        del filehandler

                    logger.debug("Setting algorithm to idle")
                    r = self.db_alg.update(self.alg_id, {"status": "idle"})
                    break
            except Exception as e:
                print(e, flush=True)

            time.sleep(5)

        logger.debug("Sensor monitor thread done")
Example No. 25
class INotify(PollMixin):
    """
    I am a prototype INotify, made to work on Mac OS X (Darwin)
    using the Watchdog python library. This is actually a simplified subset
    of the twisted Linux INotify class because we do not utilize the watch mask
    and only implement the following methods:
     - watch
     - startReading
     - stopReading
     - wait_until_stopped
     - set_pending_delay
    """
    def __init__(self):
        self._pending_delay = 1.0
        self.recursive_includes_new_subdirectories = False
        self._callbacks = {}
        self._watches = {}
        self._state = NOT_STARTED
        self._observer = Observer(timeout=self._pending_delay)

    def set_pending_delay(self, delay):
        Message.log(message_type=u"watchdog:inotify:set-pending-delay",
                    delay=delay)
        assert self._state != STARTED
        self._pending_delay = delay

    def startReading(self):
        with start_action(action_type=u"watchdog:inotify:start-reading"):
            assert self._state != STARTED
            try:
                # XXX twisted.internet.inotify doesn't require watches to
                # be set before startReading is called.
                # _assert(len(self._callbacks) != 0, "no watch set")
                self._observer.start()
                self._state = STARTED
            except:
                self._state = STOPPED
                raise

    def stopReading(self):
        with start_action(action_type=u"watchdog:inotify:stop-reading"):
            if self._state != STOPPED:
                self._state = STOPPING
            self._observer.unschedule_all()
            self._observer.stop()
            self._observer.join()
            self._state = STOPPED

    def loseConnection(self):
        return self.stopReading()

    def wait_until_stopped(self):
        return self.poll(lambda: self._state == STOPPED)

    def _isWatched(self, path_u):
        return path_u in self._callbacks.keys()

    def ignore(self, path):
        path_u = path.path
        self._observer.unschedule(self._watches[path_u])
        del self._callbacks[path_u]
        del self._watches[path_u]

    def watch(self,
              path,
              mask=IN_WATCH_MASK,
              autoAdd=False,
              callbacks=None,
              recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u],
                                    self._pending_delay),
                path=path_u,
                recursive=False,
            )
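
INotifyEventHandler is not shown here; the shape the class above relies on is a watchdog handler that fans each event out to the registered callback list. A rough sketch of that adapter:

from watchdog.events import FileSystemEventHandler


class CallbackFanoutHandler(FileSystemEventHandler):
    """Hypothetical stand-in for INotifyEventHandler: forwards every
    filesystem event to each registered callback."""

    def __init__(self, callbacks):
        super().__init__()
        self._callbacks = callbacks  # shared list; the owner may append later

    def on_any_event(self, event):
        for callback in self._callbacks:
            callback(event)
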
Example No. 26
class FolderWatcher:
    def __init__(self):

        self.__observer = Observer()
        self.__handlers: typing.Dict[str, typing.Union[
            SimpleAsyncEventHandler, FileEventToHABAppEvent]] = {}
        self.__watches = {}
        self.__started = False

    def start(self, shutdown_helper):
        from ..shutdown_helper import ShutdownHelper
        assert isinstance(shutdown_helper, ShutdownHelper)

        # we shall only start once!
        assert self.__started is False
        self.__started = True

        # start watching the folders
        self.__observer.start()

        # register for proper shutdown
        shutdown_helper.register_func(self.__observer.stop)
        shutdown_helper.register_func(self.__observer.join, last=True)
        return None

    def watch_folder(self,
                     folder: pathlib.Path,
                     file_ending: str,
                     target_func,
                     watch_subfolders=False,
                     worker_factory=None) -> SimpleAsyncEventHandler:
        assert isinstance(folder, pathlib.Path), type(folder)
        assert folder.is_dir(), folder

        folder_str = str(folder)
        assert folder_str not in self.__watches, folder_str

        self.__handlers[folder_str] = handler = SimpleAsyncEventHandler(
            target_func=target_func,
            file_ending=file_ending,
            worker_factory=worker_factory)
        self.__watches[folder_str] = self.__observer.schedule(
            handler, path=folder_str, recursive=watch_subfolders)
        return handler

    def watch_folder_habapp_events(self,
                                   folder: pathlib.Path,
                                   file_ending: str,
                                   habapp_topic: str,
                                   watch_subfolders: bool = False):
        assert isinstance(folder, pathlib.Path), type(folder)
        assert folder.is_dir(), folder

        folder_str = str(folder)
        assert folder_str not in self.__watches, folder_str

        self.__handlers[folder_str] = handler = FileEventToHABAppEvent(
            folder=folder,
            habapp_topic=habapp_topic,
            file_ending=file_ending,
            recursive=watch_subfolders)
        self.__watches[folder_str] = self.__observer.schedule(
            handler, path=folder_str, recursive=watch_subfolders)
        return handler

    def unwatch_folder(self, folder):
        if isinstance(folder, pathlib.Path):
            folder = str(folder)
        assert isinstance(folder, str), type(folder)

        self.__handlers.pop(folder)
        self.__observer.unschedule(self.__watches.pop(folder))

    def get_handler(
        self, folder
    ) -> typing.Union[SimpleAsyncEventHandler, FileEventToHABAppEvent]:
        if isinstance(folder, pathlib.Path):
            folder = str(folder)
        assert isinstance(folder, str), type(folder)

        return self.__handlers[folder]
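
start() registers observer.stop first and observer.join last, so the observer thread is asked to exit before anything blocks waiting on it. The same ordering without the shutdown helper, using atexit as a stand-in (atexit runs callbacks in reverse registration order):

import atexit

from watchdog.observers import Observer

observer = Observer()
observer.start()

# LIFO: join is registered first so it runs *after* stop,
# i.e. stop() asks the thread to exit, join() then waits for it.
atexit.register(observer.join)
atexit.register(observer.stop)
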
Example No. 27
class ChangesWatcher(FileSystemEventHandler):
    def __init__(self, application):
        self.task_map = {}  # path: ChangesKeeper
        self.application = application
        self.observer = Observer()
        self.changes = []
        self.changes_timer = None
        self.observer.start()

    def add_watch(self, path, mute_list=None):
        if path in self.task_map:
            return False
        mute_list = (mute_list or []) + DEFAULT_MUTE_LIST
        keeper = ChangesKeeper(path, mute_list)
        self.task_map[path] = keeper
        if os.path.exists(path):
            # keep the watch handle so remove_watch() can unschedule it
            keeper.watch = self.observer.schedule(self, path, recursive=True)
            return True
        return False

    def remove_watch(self, path):
        if path in self.task_map:
            keeper = self.task_map[path]
            watch = keeper.watch
            self.observer.unschedule(watch)
            return True
        else:
            return False

    def get_changes_since(self, timestamp, parent_path=None):
        ret = []
        for change in self.changes:
            if change.timestamp > timestamp and (
                    not parent_path
                    or path_is_parent(parent_path, change.path)):
                ret.append(change)
        return ret

    def add_pure_change(self, change):
        """ 监测change的类型,并添加非垃圾change和不在黑名单中的change
        """

        # 如果是黑名单及黑名单子目录的change,则跳过
        for mute_list in [
                keeper.mute_list for keeper in self.task_map.values()
        ]:
            for mute_path in mute_list:
                if path_is_parent(mute_path, change.path):
                    print '...', change.type, change.path
                    return

        # 寻找当前change对应的垃圾change,找到后删除;未找到则添加当前change
        trash_changes = self.find_related_trash_changes(change)
        if trash_changes:
            for change in trash_changes:
                self.changes.remove(change)
                print '-  ', change.type, change.path
        else:
            self.changes.append(change)
            print '+  ', change.type, change.path
            self.compile_if_needed(change)

        ioloop.IOLoop.instance().add_callback(
            lambda: self.remove_outdated_changes(30))

    def compile_if_needed(self, change):
        if change.type == EVENT_TYPE_DELETED:
            return

        input_path = normalize_path(change.path)
        base_path, ext = os.path.splitext(input_path)
        ext = ext.lower()
        if ext not in ['.less', '.coffee']:
            return

        project = self.application.find_project(input_path)
        if not project:
            return

        os.chdir(APP_FOLDER)
        begin_time = time.time()
        if ext == '.less':
            if project.compileLess:
                output_path = base_path + '.css'
                run_cmd('%s bundled/less/bin/lessc %s %s' %
                        (NODE_BIN_PATH, input_path, output_path))
                print('.less ->- .css', change.path,
                      time.time() - begin_time, 'seconds')
            else:
                print('.less -X- .css', change.path, '(OFF by settings)')

        elif ext == '.coffee':
            if project.compileCoffee:
                run_cmd('%s bundled/coffee/bin/coffee --compile %s' %
                        (NODE_BIN_PATH, input_path))
                print('.coffee ->- .js', change.path,
                      time.time() - begin_time, 'seconds')
            else:
                print('.coffee -X- .js', change.path, '(OFF by settings)')

    def check_folder_change(self, folder_path):
        if sys.platform.startswith('win') or \
                not os.path.isdir(folder_path):
            return

        now = time.time() - 0.5  # anything within the last 0.5 s counts as modified
        for filename in os.listdir(folder_path):
            file_path = os.path.join(folder_path, filename)
            if not os.path.isfile(file_path):
                continue

            modified_time = os.path.getmtime(file_path)
            if modified_time > now:
                self.on_any_event(FileModifiedEvent(file_path))

    def on_any_event(self, event):
        if event.is_directory:
            self.check_folder_change(event.src_path)
            return

        # Pause reporting of file changes, so that a long compilation does not
        # cause events to be returned prematurely
        loop = ioloop.IOLoop.instance()
        if self.changes_timer:
            loop.remove_timeout(self.changes_timer)

        now = time.time()
        if event.event_type == EVENT_TYPE_MOVED:
            self.add_pure_change(
                Change(
                    dict(timestamp=now,
                         path=normalize_path(event.src_path),
                         type=EVENT_TYPE_DELETED)))
            self.add_pure_change(
                Change(
                    dict(timestamp=now,
                         path=normalize_path(event.dest_path),
                         type=EVENT_TYPE_CREATED)))
        else:
            self.add_pure_change(
                Change(
                    dict(timestamp=now,
                         path=normalize_path(event.src_path),
                         type=event.event_type)))

        # Report changes after a 0.1 s delay, so that bursts of consecutive
        # events are not missed
        self.changes_timer = loop.add_timeout(
            time.time() + 0.1, self.application.project_file_changed)

    def find_related_trash_changes(self, change):
        """ 寻找当前change之前短时间内的一些垃圾change
        有些编辑器喜欢用 改名->写入->改回名 的方式来保存文件,所以不能直接将change上报,需要进行一定的过滤
        """
        trash_changes = []
        for old_change in self.changes[::-1]:
            if old_change.path != change.path:
                continue
            if change.timestamp > old_change.timestamp + 1:
                break

            if change.type == EVENT_TYPE_DELETED:
                # If this change is DELETED, return every event for this file
                # up to and including the CREATED event
                trash_changes.append(old_change)
                if old_change.type == EVENT_TYPE_CREATED:
                    return trash_changes
            elif change.type == EVENT_TYPE_CREATED:
                # If this change is CREATED, return every event for this file
                # up to and including the DELETED event
                trash_changes.append(old_change)
                if old_change.type == EVENT_TYPE_DELETED:
                    return trash_changes
        return []

    def remove_outdated_changes(self, seconds):
        for change in self.changes[:]:
            if time.time() - change.timestamp > seconds:
                self.changes.remove(change)
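
on_any_event() above debounces: each new event cancels the pending 0.1 s timer and starts a new one, so a burst of events produces a single report. The same idea with threading.Timer instead of the tornado IOLoop, as a sketch:

import threading


class Debouncer:
    """Invoke `func` once events stop arriving for `delay` seconds."""

    def __init__(self, func, delay=0.1):
        self.func = func
        self.delay = delay
        self._timer = None

    def trigger(self):
        # restart the countdown on every new event
        if self._timer is not None:
            self._timer.cancel()
        self._timer = threading.Timer(self.delay, self.func)
        self._timer.daemon = True
        self._timer.start()
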
Example No. 28
class KicadProject(FileSystemEventHandler):
    def __init__(self, project_file):
        self.on_change_hook = None

        self.files = {}
        self.folders = []
        self.extensions = ['bom', 'net', 'sch', 'kicad_pcb']

        self.project_file = project_file
        self.root_path = os.path.dirname(self.project_file)

        # observer to trigger event on resource change
        self.enabled = True
        self.observer = Observer()
        self.watch = self.observer.schedule(self,
                                            self.root_path,
                                            recursive=True)
        self.observer.start()

    def on_any_event(self, event):
        if not self.enabled:
            return

        self.enabled = False
        for extension in self.extensions:
            # Prefer dest_path (moves) over src_path; skip hidden files
            for attr in ('dest_path', 'src_path'):
                event_path = getattr(event, attr, None)
                if event_path and os.path.isfile(event_path) \
                        and not os.path.basename(event_path).startswith('.') \
                        and event_path.endswith('.' + extension):
                    print("Something happened with %s" % event_path)
                    path = os.path.relpath(event_path, self.root_path)
                    if self.on_change_hook:
                        wx.CallAfter(self.on_change_hook, path)
                    break

        self.enabled = True

    # - on_moved(self, event)
    # - on_created(self, event)
    # - on_deleted(self, event)
    # - on_modified(self, event)

    def Enabled(self, enabled=True):
        if enabled:
            if self.watch is None:
                self.watch = self.observer.schedule(self,
                                                    self.root_path,
                                                    recursive=True)
        else:
            if self.watch:
                self.observer.unschedule(self.watch)
            self.watch = None

    def Load(self):
        """
        fill cache files from disk
        """
        self.files, self.folders = self.GetFiles()

    def GetFiles(self):
        """
        Recurse all folders and return kicad files and folders path
        """
        basepath = os.path.normpath(os.path.abspath(self.root_path))
        to_explore = [basepath]
        files = []
        folders = []

        while len(to_explore) > 0:
            path = to_explore.pop()
            if os.path.exists(path):
                for file in glob(os.path.join(path, "*")):
                    if os.path.isfile(file):
                        for ext in self.extensions:
                            if os.path.normpath(
                                    os.path.abspath(file)).endswith(ext):
                                files.append(
                                    os.path.relpath(
                                        os.path.normpath(
                                            os.path.abspath(file)), basepath))
                    else:
                        folder = file
                        if folder != '/':
                            folders.append(
                                os.path.relpath(
                                    os.path.normpath(os.path.abspath(folder)),
                                    basepath))
                            if os.path.normpath(os.path.abspath(
                                    folder)) != os.path.normpath(
                                        os.path.abspath(path)):
                                to_explore.append(folder)

        return files, folders
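
Enabled() toggles watching purely by unscheduling the watch and scheduling a fresh one with the same handler. Reduced to the bare watchdog calls (no-op handler and current directory assumed):

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

observer = Observer()
handler = FileSystemEventHandler()  # no-op handler
observer.start()

watch = observer.schedule(handler, ".", recursive=True)  # watching
observer.unschedule(watch)                               # paused
watch = observer.schedule(handler, ".", recursive=True)  # resumed

observer.stop()
observer.join()
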
Example No. 29
class ModuleFileWatcher( EditorModule ):
	def __init__(self):
		super(ModuleFileWatcher, self).__init__()
		self.watches={}

	def getName(self):
		return 'filewatcher'

	def getDependency(self):
		return []

	def onLoad(self):		
		self.observer=Observer()
		self.observer.start()
		
		signals.connect( 'file.moved',    self.onFileMoved )
		signals.connect( 'file.added',    self.onFileCreated )
		signals.connect( 'file.removed',  self.onFileDeleted )
		signals.connect( 'file.modified', self.onFileModified )
		
	def onStart( self ):
		self.assetWatcher=self.startWatch(
			self.getProject().getAssetPath(),
			ignorePatterns = ['*/.git','*/.*','*/_gii']
		)
		
	def startWatch(self, path, **options):
		path = os.path.realpath(path)
		if self.watches.get(path):
			logging.warning( 'already watching: %s' % path )
			return self.watches[path]
		logging.info ( 'start watching: %s' % path )
		
		ignorePatterns = ['*/.git','*/.*','*/_gii'] + options.get('ignorePatterns',[])

		handler = FileWatcherEventHandler(
				options.get( 'patterns', None ),
				ignorePatterns,
				options.get( 'ignoreDirectories', False ),
				options.get( 'caseSensitive', True )
			)

		watch = self.observer.schedule( handler, path, options.get( 'recursive', True ) )
		self.watches[ path ] = watch
		return watch

	def onStop( self ):
		# print 'stop file watcher'
		self.observer.stop()
		self.observer.join( 0.5 )
		# print 'stopped file watcher'

	def stopWatch(self, path):
		path  = os.path.realpath(path)
		watch = self.watches.get(path, None)
		if not watch: return
		self.observer.unschedule(watch)
		self.watches[path] = None

	def stopAllWatches(self):
		# logging.info('stop all file watchers')
		self.observer.unschedule_all()
		self.watches = {}

	def onFileMoved(self, path, newpath):
		# print('asset moved:',path, newpath)
		app.getAssetLibrary().scheduleScanProject()
		pass

	def onFileCreated(self, path):
		# print('asset created:',path)
		app.getAssetLibrary().scheduleScanProject()
		pass

	def onFileModified(self, path):
		# print('asset modified:',path)
		app.getAssetLibrary().scheduleScanProject()
		pass

	def onFileDeleted(self, path):
		# print('asset deleted:',path)
		app.getAssetLibrary().scheduleScanProject()
		pass
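
FileWatcherEventHandler is project code, but its constructor takes the same four knobs as watchdog's built-in PatternMatchingEventHandler, so an equivalent watch could be set up directly (folder path assumed):

from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer


class AssetHandler(PatternMatchingEventHandler):
    def on_any_event(self, event):
        print(event.event_type, event.src_path)


handler = AssetHandler(patterns=None,
                       ignore_patterns=['*/.git', '*/.*', '*/_gii'],
                       ignore_directories=False,
                       case_sensitive=True)

observer = Observer()
observer.schedule(handler, "assets", recursive=True)
observer.start()
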
Example No. 30
class WatchdogTests(unittest.TestCase):

    def setUp(self):
        self.events = queue.Queue()
        self.observer = Observer()
        self.handler = EventHandler(self.events, channel_id=5)
        self.watch = self.observer.schedule(event_handler=self.handler, path=TMP_DIR)
        self.observer.start()

    def testEventChannelID(self):
        with open(TMP_FILE, "w") as local_file:
            local_file.write(string_common)
        event = self.events.get(block=True, timeout=1)
        self.assertEqual(event.channel_id, 5)

    # Modifying the file should add a new task to the queue
    def testTaskInsertion(self):
        new_string = "This is a whole new line in file "+filename
        with open (TMP_FILE, "w") as local_file:
            local_file.write(new_string)
        while True:
            try:
                task = self.events.get(block=True, timeout=1)
            except queue.Empty:
                break
            self.assertEqual(task.src_path, local_dir + filename)
            self.assertEqual(task.dest_path, remote_dir + filename)

    # It should be possible to add a new Handler to an already scheduled watch
    def testHandlerAdding(self):
        fake_handle="I am a fake handler, don't mind me!"
        class FakeHandler(EventHandler):
            def handle_modification(self, event):
                self.tasks.put(fake_handle)
        fake_queue = queue.Queue()
        fake_handler = FakeHandler(fake_queue)
        self.observer.add_handler_for_watch(fake_handler, self.watch)
        new_string = "This is a whole new line in file " + filename
        with open(local_dir + filename, "w") as local_file:
            local_file.write(new_string)
        while True:
            try:
                task = fake_queue.get(block=True, timeout=1)
            except queue.Empty:
                break
            self.assertEqual(task, fake_handle)

    # A Handler blocking on one event (ex: inserting into a busy queue)
    # should not prevent handling of further events
    def testHandlerHanging(self):
        class HangingHandler(EventHandler):
            def on_any_event(self, event):
                print("Handler hanging...")
                time.sleep(1)
                print("Handler dispatching")
                super().on_any_event(event)

        self.observer.remove_handler_for_watch(self.handler, self.watch)
        slow_handler = HangingHandler(self.events)
        self.observer.add_handler_for_watch(slow_handler, self.watch)
        for i in range(5):
            with open(os.path.join(TMP_DIR, "f"+str(i)), "w") as local_file:
                local_file.write("Write #"+str(i))
            time.sleep(0.1)
        time.sleep(6)
        num = len(empty_tasks(self.events))
        print("[Hanging] "+str(num)+" events")
        self.assertTrue(num >= 5)

    # Scheduling a new watch while another one is running
    # In this test, each write should set off 2 events (open+close) as seen on the next test
    # However watchdog is nice enough to avoid adding similar events to its internal queue
    def testNewScheduling(self):
        self.assertTrue(self.events.empty()) # Queue starts off empty
        with open(remote_dir + filename, "w") as remote_file:
            remote_file.write("Going once")
        self.assertRaises(queue.Empty, self.events.get, timeout=1) # No Handler is watching the file yet

        handler2 = EventHandler(self.events)
        self.observer.schedule(event_handler=handler2, path=remote_dir, recursive=True)

        with open(remote_dir + filename, "w") as remote_file:
            remote_file.write("Going twice")
        l1 = empty_tasks(self.events) # Now it should work
        self.assertEqual(len(l1), 1) # Single event
        for task in l1:
            self.assertEqual(task.src_path, remote_dir + filename)
            self.assertEqual(task.dest_path, local_dir + filename)

        with open(local_dir + filename, "w") as local_file:
            local_file.write("Going thrice") # Writing to the local file still works
        l2 = empty_tasks(self.events)
        self.assertEqual(len(l2), 1)  # Single event
        for task in l2:
            self.assertEqual(task.src_path, local_dir + filename)
            self.assertEqual(task.dest_path, remote_dir + filename)

    # It should be possible to remove a scheduled watch
    def testWatchRemoval(self):
        handler2 = EventHandler(self.events)
        watch2 = self.observer.schedule(event_handler=handler2, path=remote_dir, recursive=True)
        for client in [ {"path":local_dir+filename, "watch":self.watch},
                      {"path":remote_dir+filename, "watch":watch2} ]:
            with open(client["path"], "w") as file:
                file.write("This will make an event")
            time.sleep(0.5)
            task = self.events.get(timeout=1)
            self.assertEquals(task.src_path, client["path"])

            self.observer.unschedule(client["watch"])
            with open(local_dir+filename, "w") as local_file:
                local_file.write("This won't")
            self.assertRaises(queue.Empty, self.events.get, timeout=1)

    # Each open() and each close() should produce an event
    # They are sometimes squashed into a single event if done
    # Quickly enough (i.e. "with open(file) as f: f.write()")
    def testEventsPerWrite(self):
        local_file = open(local_dir+filename, "w")
        self.assertTrue(len(empty_tasks(self.events)) == 1) # Opening sets off an event
        local_file.write("First")
        self.assertTrue(len(empty_tasks(self.events)) == 0) # Writing doesn't set off an event
        local_file.write("Second")
        self.assertTrue(len(empty_tasks(self.events)) == 0) # Writing doesn't set off an event
        local_file.close()
        self.assertTrue(len(empty_tasks(self.events)) == 1) # Closing sets off an event

    def testEventsForBigFileCopy(self):
        self.observer.schedule(self.handler, TMP_DIR, recursive=True)
        try:
            os.mkdir(TMP_DIR)
        except FileExistsError:
            pass
        try:
            shutil.copy(BIG_FILE, BIG_IN)
        except OSError:
            self.fail()
        tasks = empty_tasks(self.events)
        print("Used " + str(len(tasks)) + " events")
Example No. 31
class FileObserver:
    """
    A class that will observe some file system paths for any change.
    """
    def __init__(self, on_change):
        """
        Initialize the file observer
        Parameters
        ----------
        on_change:
            Reference to the function that will be called if there is a change in any of the observed paths
        """
        self._observed_paths = {}
        self._observed_watches = {}
        self._watch_dog_observed_paths = {}
        self._observer = Observer()
        self._code_change_handler = PatternMatchingEventHandler(
            patterns=["*"], ignore_patterns=[], ignore_directories=False)
        self._code_change_handler.on_modified = self.on_change
        self._input_on_change = on_change
        self._lock = threading.Lock()

    def on_change(self, event):
        """
        It got executed once there is a change in one of the paths that watchdog is observing.
        This method will check if any of the input paths is really changed, and based on that it will
        invoke the input on_change function with the changed paths

        Parameters
        ----------
        event: watchdog.events.FileSystemEventHandler
            Determines that there is a change happened to some file/dir in the observed paths
        """
        event_path = event.src_path
        observed_paths = []

        for watchdog_path, child_observed_paths in \
                self._watch_dog_observed_paths.items():
            if event_path.startswith(watchdog_path):
                observed_paths += child_observed_paths

        if not observed_paths:
            return

        changed_paths = []
        for path in observed_paths:
            path_obj = Path(path)
            # The path got deleted
            if not path_obj.exists():
                self._observed_paths.pop(path, None)
                changed_paths += [path]
            else:
                new_checksum = calculate_checksum(path)
                if new_checksum != self._observed_paths.get(path, None):
                    changed_paths += [path]
                    self._observed_paths[path] = new_checksum
        if changed_paths:
            self._input_on_change(changed_paths)

    def watch(self, path):
        """
        Start watching the input path. File Observer will keep track of the input path with its hash, to check it later
        if it got really changed or not.
        File Observer will send the parent path to watchdog for to be observed to avoid the missing events if the input
        paths got deleted.

        Parameters
        ----------
        path: str
            The file/dir path to be observed

        Raises
        ------
        FileObserverException:
            if the input path is not exist
        """
        path_obj = Path(path)
        if not path_obj.exists():
            raise FileObserverException("Can not observe non exist path")

        self._observed_paths[path] = calculate_checksum(path)

        # Watchdog observes the path's parent so that events are not missed
        # if the path itself gets deleted
        parent_path = str(path_obj.parent)
        child_paths = self._watch_dog_observed_paths.get(parent_path, [])
        first_time = not bool(child_paths)
        if path not in child_paths:
            child_paths += [path]
        self._watch_dog_observed_paths[parent_path] = child_paths
        if first_time:
            self._observed_watches[parent_path] = self._observer.schedule(
                self._code_change_handler, parent_path, recursive=True)

    def unwatch(self, path):
        """
        Remove the input path from the observed paths, and stop watching this path.

        Parameters
        ----------
        path: str
            The file/dir path to be unobserved
        """
        path_obj = Path(path)
        if not path_obj.exists():
            raise FileObserverException("Can not unwatch non exist path")
        parent_path = str(path_obj.parent)
        child_paths = self._watch_dog_observed_paths.get(parent_path, [])
        if path in child_paths:
            child_paths.remove(path)
            self._observed_paths.pop(path, None)
        if not child_paths:
            self._watch_dog_observed_paths.pop(parent_path, None)
            if self._observed_watches[parent_path]:
                self._observer.unschedule(self._observed_watches[parent_path])
                self._observed_watches.pop(parent_path, None)

    def start(self):
        """
        Start Observing.
        """
        with self._lock:
            if not self._observer.is_alive():
                self._observer.start()

    def stop(self):
        """
        Stop Observing.
        """
        with self._lock:
            if self._observer.is_alive():
                self._observer.stop()
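
calculate_checksum() above is project code whose implementation is not shown. For a single file, a stand-in content hash that makes the compare-before-notify step work could look like this (directories would need a recursive walk):

import hashlib


def calculate_checksum(path):
    """Hypothetical file-level checksum: hash the contents so rewrites
    that leave the file unchanged are ignored."""
    md5 = hashlib.md5()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(8192), b""):
            md5.update(chunk)
    return md5.hexdigest()
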
Example No. 32
class Render:
    def __init__(self, modules, dest="dist"):
        self.modules = [i for i in modules if i]
        assert all(isinstance(i, Module) for i in self.modules)
        self.watchers = {}

        self.options = {}
        self.observer = None
        self.dest = dest

    def start(self, watch=False):
        self.run_once()

        if not watch:
            return

        if not self.watchers:
            print("Nothing to do!")
            return

        self.observer = Observer()

        for path in self.watchers:
            for index in range(len(self.watchers[path])):
                handler = WatchdogHandler(self, path, index)
                handler.watcher = self.observer.schedule(handler,
                                                         path,
                                                         recursive=True)

        self.observer.start()

        print(':: Watchdog started')
        while True:
            time.sleep(1)

    def handle(self, path, index, event, watcher):
        module, options = self.watchers[path][index]

        self.watchers[path].pop(index)
        self.observer.unschedule(watcher)
        new_watchers = []

        def watch(path, **options):
            self.watchers[path].insert(index, (module, options))
            new_watchers.append(path)

        self.watch = watch

        module.handle(self, path, event, **options)

        if not new_watchers:
            print("Watcher unregistered!")
        else:
            for n, path in enumerate(new_watchers):
                handler = WatchdogHandler(self, path, index + n)
                handler.watcher = self.observer.schedule(handler,
                                                         path,
                                                         recursive=True)

    def run_once(self):
        self.watchers = {}
        for i in self.modules:

            def watch(path, **options):
                if path not in self.watchers:
                    self.watchers[path] = []
                self.watchers[path].append((i, options))

            self.watch = watch
            i.run(self)

    def abort(self):
        print()
        print("Aborted!")
        quit()
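
The "while True: time.sleep(1)" keep-alive in start() never returns on its own; the usual watchdog idiom wraps it in a KeyboardInterrupt handler so the observer shuts down cleanly on Ctrl-C:

import time

from watchdog.observers import Observer

observer = Observer()
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()  # ask the observer thread to exit
observer.join()      # wait for it before the process ends
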
Example No. 33
class GoSyncModel(object):
    def __init__(self):
        self.calculatingDriveUsage = False
        self.driveAudioUsage = 0
        self.driveMoviesUsage = 0
        self.drivePhotoUsage = 0
        self.driveDocumentUsage = 0
        self.driveOthersUsage = 0
        self.totalFilesToCheck = 0
        self.savedTotalSize = 0
        self.fcount = 0
        self.updates_done = 0

        self.config_path = os.path.join(os.environ['HOME'], ".gosync")
        self.credential_file = os.path.join(self.config_path, "credentials.json")
        self.settings_file = os.path.join(self.config_path, "settings.yaml")
        self.base_mirror_directory = os.path.join(os.environ['HOME'], "Google Drive")
        self.client_secret_file = os.path.join(os.environ['HOME'], '.gosync', 'client_secrets.json')
        self.sync_selection = []
        self.config_file = os.path.join(os.environ['HOME'], '.gosync', 'gosyncrc')
        self.config_dict = {}
        self.account_dict = {}
        self.drive_usage_dict = {}
        self.config=None

        if not os.path.exists(self.config_path):
            os.mkdir(self.config_path, 0o755)
            raise ClientSecretsNotFound()

        if not os.path.exists(self.base_mirror_directory):
            os.mkdir(self.base_mirror_directory, 0o755)

        if not os.path.exists(self.client_secret_file):
            raise ClientSecretsNotFound()

        if not os.path.exists(self.settings_file) or \
                not os.path.isfile(self.settings_file):
            sfile = open(self.settings_file, 'w')
            sfile.write("save_credentials: False")
            sfile.write("\n")
            sfile.write("save_credentials_file: ")
            sfile.write(self.credential_file)
            sfile.write("\n")
            sfile.write('client_config_file: ' + self.client_secret_file + "\n")
            sfile.write("save_credentials_backend: file\n")
            sfile.close()

        self.observer = Observer()
        self.DoAuthenticate()
        self.about_drive = self.authToken.service.about().get().execute()
        self.user_email = self.about_drive['user']['emailAddress']

        self.mirror_directory = os.path.join(self.base_mirror_directory, self.user_email)
        if not os.path.exists(self.mirror_directory):
            os.mkdir(self.mirror_directory, 0o755)

        self.tree_pickle_file = os.path.join(self.config_path, 'gtree-' + self.user_email + '.pick')

        if not os.path.exists(self.config_file):
            self.CreateDefaultConfigFile()

        self.LoadConfig()


        self.iobserv_handle = self.observer.schedule(FileModificationNotifyHandler(self),
                                                     self.mirror_directory, recursive=True)

        self.sync_lock = threading.Lock()
        self.sync_thread = threading.Thread(target=self.run)
        self.usage_calc_thread = threading.Thread(target=self.calculateUsage)
        self.sync_thread.daemon = True
        self.usage_calc_thread.daemon = True
        self.syncRunning = threading.Event()
        self.syncRunning.clear()
        self.usageCalculateEvent = threading.Event()
        self.usageCalculateEvent.set()

        self.logger = logging.getLogger(APP_NAME)
        self.logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(os.path.join(os.environ['HOME'], 'GoSync.log'))
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        if not os.path.exists(self.tree_pickle_file):
            self.driveTree = GoogleDriveTree()
        else:
            self.driveTree = pickle.load(open(self.tree_pickle_file, "rb"))

    def SetTheBallRolling(self):
        self.sync_thread.start()
        self.usage_calc_thread.start()
        self.observer.start()

    def IsUserLoggedIn(self):
        return self.is_logged_in

    def HashOfFile(self, abs_filepath):
        # read in binary mode: hashlib needs bytes
        with open(abs_filepath, "rb") as f:
            return hashlib.md5(f.read()).hexdigest()

    def CreateDefaultConfigFile(self):
        f = open(self.config_file, 'w')
        self.config_dict['Sync Selection'] = [['root', '']]
        self.account_dict[self.user_email] = self.config_dict
        json.dump(self.account_dict, f)
        f.close()

    def LoadConfig(self):
        try:
            f = open(self.config_file, 'r')
            try:
                self.config = json.load(f)
                try:
                    self.config_dict = self.config[self.user_email]
                    self.sync_selection = self.config_dict['Sync Selection']
                    print(self.config_dict['Drive Usage'])
                    try:
                        self.drive_usage_dict = self.config_dict['Drive Usage']
                        self.totalFilesToCheck = self.drive_usage_dict['Total Files']
                        self.savedTotalSize = self.drive_usage_dict['Total Size']
                        self.driveAudioUsage = self.drive_usage_dict['Audio Size']
                        self.driveMoviesUsage = self.drive_usage_dict['Movies Size']
                        self.driveDocumentUsage = self.drive_usage_dict['Document Size']
                        self.drivePhotoUsage = self.drive_usage_dict['Photo Size']
                        self.driveOthersUsage = self.drive_usage_dict['Others Size']
                    except:
                        pass
                except:
                    pass

                f.close()
            except:
                raise ConfigLoadFailed()
        except:
            raise ConfigLoadFailed()

    def SaveConfig(self):
        f = open(self.config_file, 'w')
        f.truncate()
        if not self.sync_selection:
            self.config_dict['Sync Selection'] = [['root', '']]

        self.account_dict[self.user_email] = self.config_dict

        json.dump(self.account_dict, f)
        f.close()

    def DoAuthenticate(self):
        try:
            self.authToken = GoogleAuth(self.settings_file)
            self.authToken.LocalWebserverAuth()
            self.drive = GoogleDrive(self.authToken)
            self.is_logged_in = True
        except:
            dial = wx.MessageDialog(None, "Authentication Rejected!\n",
                                    'Information', wx.ID_OK | wx.ICON_EXCLAMATION)
            dial.ShowModal()
            self.is_logged_in = False
            pass

    def DoUnAuthenticate(self):
        self.do_sync = False
        self.observer.unschedule(self.iobserv_handle)
        self.iobserv_handle = None
        os.remove(self.credential_file)
        self.is_logged_in = False

    def DriveInfo(self):
        return self.about_drive

    def PathLeaf(self, path):
        head, tail = ntpath.split(path)
        return tail or ntpath.basename(head)

    def GetFolderOnDrive(self, folder_name, parent='root'):
        """
        Return the folder with name in "folder_name" in the parent folder
        mentioned in parent.
        """
        self.logger.debug("GetFolderOnDrive: searching %s on %s... " % (folder_name, parent))
        file_list = self.drive.ListFile({'q': "'%s' in parents and trashed=false" % parent}).GetList()
        for f in file_list:
            if f['title'] == folder_name and f['mimeType']=='application/vnd.google-apps.folder':
                self.logger.debug("Found!\n")
                return f

        return None

    def LocateFolderOnDrive(self, folder_path):
        """
        Locate and return the directory in the path. The complete path
        is walked and the last directory is returned. An exception is raised
        if the path walking fails at any stage.
        """
        dir_list = folder_path.split(os.sep)
        croot = 'root'
        for dir1 in dir_list:
            try:
                folder = self.GetFolderOnDrive(dir1, croot)
                if not folder:
                    raise FolderNotFound()
            except:
                raise

            croot = folder['id']

        return folder

    def LocateFileInFolder(self, filename, parent='root'):
        try:
            file_list = self.MakeFileListQuery({'q': "'%s' in parents and trashed=false" % parent})
            for f in file_list:
                if f['title'] == filename:
                    return f

            raise FileNotFound()
        except:
            raise FileNotFound()


    def LocateFileOnDrive(self, abs_filepath):
        dirpath = os.path.dirname(abs_filepath)
        filename = self.PathLeaf(abs_filepath)

        if dirpath != '':
            try:
                f = self.LocateFolderOnDrive(dirpath)
                try:
                    fil = self.LocateFileInFolder(filename, f['id'])
                    return fil
                except FileNotFound:
                    self.logger.debug("LocateFileOnDrive: File not found.\n")
                    raise
                except FileListQueryFailed:
                    self.logger.debug("LocateFileOnDrive: File list query failed\n")
                    raise
            except FolderNotFound:
                self.logger.debug("LocateFileOnDrive: Folder not found\n")
                raise
            except FileListQueryFailed:
                self.logger.debug("LocateFileOnDrive:  %s folder not found\n" % dirpath)
                raise
        else:
            try:
                fil = self.LocateFileInFolder(filename)
                return fil
            except FileNotFound:
                self.logger.debug("LocateFileOnDrive: File not found.\n")
                raise
            except FileListQueryFailed:
                self.logger.debug("LocateFileOnDrive: File list query failed.\n")
                raise
            except:
                self.logger.error("LocateFileOnDrive: Unknown error in locating file in drive\n")
                raise

    def CreateDirectoryInParent(self, dirname, parent_id='root'):
        upfile = self.drive.CreateFile({'title': dirname,
                                        'mimeType': "application/vnd.google-apps.folder",
                                        "parents": [{"kind": "drive#fileLink", "id": parent_id}]})
        upfile.Upload()

    def CreateDirectoryByPath(self, dirpath):
        self.logger.debug("create directory: %s\n" % dirpath)
        drivepath = dirpath.split(self.mirror_directory+'/')[1]
        basepath = os.path.dirname(drivepath)
        dirname = self.PathLeaf(dirpath)

        try:
            f = self.LocateFolderOnDrive(drivepath)
            return
        except FolderNotFound:
            if basepath == '':
                self.CreateDirectoryInParent(dirname)
            else:
                try:
                    parent_folder = self.LocateFolderOnDrive(basepath)
                    self.CreateDirectoryInParent(dirname, parent_folder['id'])
                except:
                    errorMsg = "Failed to locate directory path %s on drive.\n" % basepath
                    self.logger.error(errorMsg)
                    dial = wx.MessageDialog(None, errorMsg, 'Directory Not Found',
                                            wx.ID_OK | wx.ICON_EXCLAMATION)
                    dial.ShowModal()
                    return
        except FileListQueryFailed:
            errorMsg = "Server Query Failed!\n"
            self.logger.error(errorMsg)
            dial = wx.MessageDialog(None, errorMsg, 'Directory Not Found',
                                    wx.ID_OK | wx.ICON_EXCLAMATION)
            dial.ShowModal()
            return

    def CreateRegularFile(self, file_path, parent='root', uploaded=False):
        self.logger.debug("Create file %s\n" % file_path)
        filename = self.PathLeaf(file_path)
        upfile = self.drive.CreateFile({'title': filename,
                                       "parents": [{"kind": "drive#fileLink", "id": parent}]})
        upfile.SetContentFile(file_path)
        upfile.Upload()

    def UploadFile(self, file_path):
        if os.path.isfile(file_path):
            drivepath = file_path.split(self.mirror_directory+'/')[1]
            self.logger.debug("file: %s drivepath is %s\n" % (file_path, drivepath))
            try:
                f = self.LocateFileOnDrive(drivepath)
                self.logger.debug('Found file %s on remote (dpath: %s)\n' % (f['title'], drivepath))
                newfile = False
                self.logger.debug('Checking if they are same... ')
                if f['md5Checksum'] == self.HashOfFile(file_path):
                    self.logger.debug('yes\n')
                    return
                else:
                    self.logger.debug('no\n')
            except (FileNotFound, FolderNotFound):
                self.logger.debug("A new file!\n")
                newfile = True

            dirpath = os.path.dirname(drivepath)
            if dirpath == '':
                self.logger.debug('Creating %s file in root\n' % file_path)
                self.CreateRegularFile(file_path, 'root', newfile)
            else:
                try:
                    f = self.LocateFolderOnDrive(dirpath)
                    self.CreateRegularFile(file_path, f['id'], newfile)
                except FolderNotFound:
                    # We are coming from premise that upload comes as part
                    # of observer. So before notification of this file's
                    # creation happens, a notification of its parent directory
                    # must have come first.
                    # So,
                    # Folder not found? That cannot happen. Can it?
                    raise RegularFileUploadFailed()
        else:
            self.CreateDirectoryByPath(file_path)

    def UploadObservedFile(self, file_path):
        self.sync_lock.acquire()
        self.UploadFile(file_path)
        self.sync_lock.release()

    def RenameFile(self, file_object, new_title):
        try:
            file = {'title': new_title}

            updated_file = self.authToken.service.files().patch(fileId=file_object['id'],
                                                                body=file, fields='title').execute()
            return updated_file
        except errors.HttpError, error:
            self.logger.error('An error occurred while renaming file: %s' % error)
            return None
        except:
            # (source truncates here; minimal fallback added so the clause parses)
            self.logger.error('An unknown error occurred while renaming file')
            return None
Ejemplo n.º 34
0
class _MultiFileWatcher(object):
    """Watches multiple files."""

    _singleton = None

    @classmethod
    def get_singleton(cls):
        """Return the singleton _MultiFileWatcher object.

        Instantiates one if necessary.
        """
        if cls._singleton is None:
            LOGGER.debug("No singleton. Registering one.")
            _MultiFileWatcher()

        return _MultiFileWatcher._singleton

    # Don't allow constructor to be called more than once.
    def __new__(cls):
        """Constructor."""
        if _MultiFileWatcher._singleton is not None:
            raise RuntimeError("Use .get_singleton() instead")
        return super(_MultiFileWatcher, cls).__new__(cls)

    def __init__(self):
        """Constructor."""
        _MultiFileWatcher._singleton = self

        # Map of folder_to_watch -> _FolderEventHandler.
        self._folder_handlers = {}

        # Used for mutation of _folder_handlers dict
        self._lock = threading.Lock()

        # The Observer object from the Watchdog module. Since this class is
        # only instantiated once, we only have a single Observer in Streamlit,
        # and it's in charge of watching all paths we're interested in.
        self._observer = Observer()
        self._observer.start()  # Start observer thread.

    def watch_file(self, file_path, callback):
        """Start watching a file.

        Parameters
        ----------
        file_path : str
            The full path of the file to watch.

        callback : callable
            The function to execute when the file is changed.

        """
        folder_path = os.path.abspath(os.path.dirname(file_path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                folder_handler = _FolderEventHandler()
                self._folder_handlers[folder_path] = folder_handler

                folder_handler.watch = self._observer.schedule(
                    folder_handler, folder_path, recursive=False
                )

            folder_handler.add_file_change_listener(file_path, callback)

    def stop_watching_file(self, file_path, callback):
        """Stop watching a file.

        Parameters
        ----------
        file_path : str
            The full path of the file to stop watching.

        callback : callable
            The function to execute when the file is changed.

        """
        folder_path = os.path.abspath(os.path.dirname(file_path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                LOGGER.debug(
                    "Cannot stop watching path, because it is already not being "
                    "watched. %s",
                    folder_path,
                )
                return

            folder_handler.remove_file_change_listener(file_path, callback)

            if not folder_handler.is_watching_files():
                # Sometimes watchdog's FileSystemEventHandler does not have
                # a .watch property. It's unclear why -- may be due to a
                # race condition.
                if hasattr(folder_handler, "watch"):
                    self._observer.unschedule(folder_handler.watch)
                del self._folder_handlers[folder_path]

    def close(self):
        """Close this _MultiFileWatcher object forever."""
        with self._lock:
            if len(self._folder_handlers) != 0:
                self._folder_handlers = {}
                LOGGER.debug(
                    "Stopping observer thread even though there is a non-zero "
                    "number of event observers!"
                )
            else:
                LOGGER.debug("Stopping observer thread")

            self._observer.stop()
            self._observer.join(timeout=5)
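A minimal usage sketch for the singleton above; the watched path and the `on_changed` callback are hypothetical, and the callback signature assumes `_FolderEventHandler` invokes listeners with the changed file's path:

import time

def on_changed(changed_path):
    print("file changed:", changed_path)

watcher = _MultiFileWatcher.get_singleton()  # lazily creates the single Observer
watcher.watch_file("/tmp/demo.txt", on_changed)

time.sleep(10)  # the app runs; callbacks fire on modification

watcher.stop_watching_file("/tmp/demo.txt", on_changed)  # unschedules once no listeners remain
watcher.close()  # stops and joins the Observer thread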
Ejemplo n.º 35
0
class Rainmaker():
   
    def __init__(self,config=None, auto_start=True ):
        self.log=logging.getLogger('main')
        self.event_handlers = []
        self.config = config if config else  RainmakerConfig()
        self.profiles = self.config.profiles

        self.msg_q = Queue()
        self.observer = Observer()
        self.observer.start()

        if not auto_start:
            return

        for k in self.profiles:
            if self.profiles[k]['auto_start']==True:
                self.add_watch(k)

        if not self.event_handlers:            
            self.log.warn('No running profiles')

    def add_watch(self, key):
        if key not in self.profiles:
            self.log.error('unknown profile %s' % key)
            return

        self.log.info('Starting profile: %s' % key)
        profile = self.profiles[key]

        profile['local_root'] = os.path.abspath(os.path.expanduser(profile['local_root']))
        
        profile.subst_all()

        if not os.path.isdir(profile['local_root']):
            self.log.info('creating dir: %s' % profile['local_root'])
            os.mkdir(profile['local_root'])
        patterns=['*.unison.tmp']     
        event_handler = RainmakerEventHandler()
        event_handler.init2( self.config, profile, self.msg_q, key )
        self.event_handlers.append( event_handler )

        rec_flag = True
        if profile.has_key('recursive'):
            rec_flag = bool(profile['recursive'])
        # keep the ObservedWatch that schedule() returns so remove_watch can unschedule it
        event_handler.watch = self.observer.schedule(event_handler, profile['local_root'], recursive=rec_flag)
        
        logging.info('Started profile: %s' % key)
    
        if profile['cmds']['startup'] != '':
            event_handler.startup_cmd()

    def remove_watch(self, k): 
        for eh in self.event_handlers:
            if eh.name == k:
                self.log.info('Stopping profile: %s' % k)
                self.observer.unschedule(eh.watch)  # unschedule() expects the ObservedWatch, not the handler
                break 

    def messages(self):
        messages = []
        try:
            while True:
                messages.append( self.msg_q.get_nowait() )
        except Empty:
            pass

        return messages

    def shutdown(self):
        self.log.info( "Shutting down FSwatcher")
        self.observer.stop()
        self.observer.unschedule_all()
        self.observer.join()
        self.log.info("Shutting down thread and Fork pool")
        for eh in self.event_handlers:
            self.log.info('Stopping profile: %s' % eh.name)
            eh.stop()
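Side note on the `unschedule` fix in `remove_watch`: watchdog's `Observer.schedule` returns an `ObservedWatch`, and `Observer.unschedule` expects that handle rather than the event handler itself. A self-contained sketch of the pattern:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintingHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        print(event.event_type, event.src_path)

observer = Observer()
observer.start()

# schedule() returns an ObservedWatch; keep it so this watch can be removed later
watch = observer.schedule(PrintingHandler(), "/tmp", recursive=False)

time.sleep(5)  # events under /tmp are printed in the meantime

observer.unschedule(watch)  # takes the ObservedWatch, not the handler
observer.stop()
observer.join()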
Ejemplo n.º 36
0
class INotify(PollMixin):
    """
    I am a prototype INotify, made to work on Mac OS X (Darwin)
    using the Watchdog python library. This is actually a simplified subset
    of the twisted Linux INotify class because we do not utilize the watch mask
    and only implement the following methods:
     - watch
     - startReading
     - stopReading
     - wait_until_stopped
     - set_pending_delay
    """
    def __init__(self):
        self._pending_delay = 1.0
        self.recursive_includes_new_subdirectories = False
        self._callbacks = {}
        self._watches = {}
        self._state = NOT_STARTED
        self._observer = Observer(timeout=self._pending_delay)

    def set_pending_delay(self, delay):
        Message.log(message_type=u"watchdog:inotify:set-pending-delay", delay=delay)
        assert self._state != STARTED
        self._pending_delay = delay

    def startReading(self):
        with start_action(action_type=u"watchdog:inotify:start-reading"):
            assert self._state != STARTED
            try:
                # XXX twisted.internet.inotify doesn't require watches to
                # be set before startReading is called.
                # _assert(len(self._callbacks) != 0, "no watch set")
                self._observer.start()
                self._state = STARTED
            except:
                self._state = STOPPED
                raise

    def stopReading(self):
        with start_action(action_type=u"watchdog:inotify:stop-reading"):
            if self._state != STOPPED:
                self._state = STOPPING
            self._observer.unschedule_all()
            self._observer.stop()
            self._observer.join()
            self._state = STOPPED

    def wait_until_stopped(self):
        return self.poll(lambda: self._state == STOPPED)

    def _isWatched(self, path_u):
        return path_u in self._callbacks.keys()

    def ignore(self, path):
        path_u = path.path
        self._observer.unschedule(self._watches[path_u])
        del self._callbacks[path_u]
        del self._watches[path_u]

    def watch(self, path, mask=IN_WATCH_MASK, autoAdd=False, callbacks=None, recursive=False):
        precondition(isinstance(autoAdd, bool), autoAdd=autoAdd)
        precondition(isinstance(recursive, bool), recursive=recursive)
        assert autoAdd == False

        path_u = path.path
        if not isinstance(path_u, unicode):
            path_u = path_u.decode('utf-8')
            _assert(isinstance(path_u, unicode), path_u=path_u)

        if path_u not in self._callbacks.keys():
            self._callbacks[path_u] = callbacks or []
            self._watches[path_u] = self._observer.schedule(
                INotifyEventHandler(path_u, mask, self._callbacks[path_u], self._pending_delay),
                path=path_u,
                recursive=False,
            )
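`INotifyEventHandler` is defined elsewhere in the project and not shown here. As a rough illustration of the role it plays, a watchdog handler that fans each event out to a list of callbacks could look like this sketch (not the actual Tahoe-LAFS implementation):

from watchdog.events import FileSystemEventHandler

class CallbackDispatchingHandler(FileSystemEventHandler):
    """Invoke every registered callback with each event's path and type."""

    def __init__(self, callbacks):
        super(CallbackDispatchingHandler, self).__init__()
        self._callbacks = callbacks  # callables taking (path, event_type)

    def on_any_event(self, event):
        for cb in self._callbacks:
            cb(event.src_path, event.event_type)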
Ejemplo n.º 37
0
Archivo: unox.py Proyecto: hnsl/unox
            if replica in triggered_reps:
                reportRecursiveChanges("", triggered_reps[replica])
                del triggered_reps[replica]
            sendCmd("DONE", [])
            _debug_triggers()
        elif cmd == "RESET":
            # Stop observing replica.
            [replica] = args
            if not replica in replicas:
                warn("unknown replica: " + replica)
                continue
            watch = replicas[replica]["watch"]
            if watch is not None:
                observer.unschedule(watch)
            del replicas[replica]
            if replica in triggered_reps:
                del triggered_reps[replica]
            _debug_triggers()
        else:
            sendError("unexpected root cmd: " + cmd)


if __name__ == '__main__':
    try:
        main()
    finally:
        for replica in replicas:
            observer.unschedule(replicas[replica]["watch"])
        observer.stop()
        observer.join()
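Since the `finally` block here tears down every replica anyway, watchdog's `unschedule_all()` expresses the same cleanup more directly; a sketch of the equivalent shutdown, assuming the module's `observer` and `main` as above:

if __name__ == '__main__':
    try:
        main()
    finally:
        observer.unschedule_all()  # drops every scheduled watch in one call
        observer.stop()
        observer.join()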
Ejemplo n.º 38
0
class GoSyncModel(object):
    def __init__(self):

        self.calculatingDriveUsage = False
        self.driveAudioUsage = 0
        self.driveMoviesUsage = 0
        self.drivePhotoUsage = 0
        self.driveDocumentUsage = 0
        self.driveOthersUsage = 0
        self.totalFilesToCheck = 0
        self.savedTotalSize = 0
        self.fcount = 0
        self.updates_done = 0

        self.config_path = os.path.join(os.environ['HOME'], ".gosync")
        if not os.path.exists(self.config_path):
            os.mkdir(self.config_path, 0755)
            raise ClientSecretsNotFound()
        self.usersettings = parseUserSettings(self.config_path)

        self.credential_file = os.path.join(self.config_path, "credentials.json")
        self.settings_file = os.path.join(self.config_path, "settings.yaml")
        # self.base_mirror_directory = os.path.join(os.environ['HOME'], "Google Drive")
        self.base_mirror_directory = self.usersettings.syncFolder;
        self.client_secret_file = os.path.join(os.environ['HOME'], '.gosync', 'client_secrets.json')
        self.sync_selection = []
        self.config_file = os.path.join(os.environ['HOME'], '.gosync', 'gosyncrc')
        self.config_dict = {}
        self.account_dict = {}
        self.drive_usage_dict = {}
        self.config=None

        if not os.path.exists(self.base_mirror_directory):
            os.mkdir(self.base_mirror_directory, 0755)

        if not os.path.exists(self.client_secret_file):
            raise ClientSecretsNotFound()

        if not os.path.exists(self.settings_file) or \
                not os.path.isfile(self.settings_file):
            sfile = open(self.settings_file, 'w')
            sfile.write("save_credentials: {}".format(self.usersettings.saveCredentials))
            sfile.write("\n")
            sfile.write("save_credentials_file: ")
            sfile.write(self.credential_file)
            sfile.write("\n")
            sfile.write('client_config_file: ' + self.client_secret_file + "\n")
            sfile.write("save_credentials_backend: file\n")
            sfile.close()

        self.observer = Observer()
        self.DoAuthenticate()
        self.about_drive = self.authToken.service.about().get().execute()
        self.user_email = self.about_drive['user']['emailAddress']

        self.mirror_directory = os.path.join(self.base_mirror_directory, self.user_email)
        if not os.path.exists(self.mirror_directory):
            os.mkdir(self.mirror_directory, 0755)

        self.tree_pickle_file = os.path.join(self.config_path, 'gtree-' + self.user_email + '.pick')

        if not os.path.exists(self.config_file):
            self.CreateDefaultConfigFile()


        try:
            self.LoadConfig()
        except:
            raise

        self.iobserv_handle = self.observer.schedule(FileModificationNotifyHandler(self),
                                                     self.mirror_directory, recursive=True)

        self.driveTree = None
        self.sync_lock = threading.Lock()
        self.sync_thread = threading.Thread(target=self.run)
        self.usage_calc_thread = threading.Thread(target=self.calculateUsage)
        self.sync_thread.daemon = True
        self.usage_calc_thread.daemon = True
        self.syncRunning = threading.Event()
        self.syncRunning.clear()
        self.usageCalculateEvent = threading.Event()
        self.usageCalculateEvent.set()

        self.logger = logging.getLogger(APP_NAME)
        self.logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler(os.path.join(os.environ['HOME'], 'GoSync.log'))
        fh.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
        if not os.path.exists(self.tree_pickle_file):
            self.driveTree = GoogleDriveTree()
        else:
            self.driveTree = pickle.load(open(self.tree_pickle_file, "rb"))

    def SetTheBallRolling(self):
        self.sync_thread.start()
        self.usage_calc_thread.start()
        self.observer.start()


    def IsUserLoggedIn(self):
        return self.is_logged_in

    def HashOfFile(self, abs_filepath):
        data = open(abs_filepath, "rb").read()  # binary mode, so the MD5 matches Drive's md5Checksum
        return hashlib.md5(data).hexdigest()

    def CreateDefaultConfigFile(self):
        f = open(self.config_file, 'w')
        self.config_dict['Sync Selection'] = [['root', '']]
        self.account_dict[self.user_email] = self.config_dict
        json.dump(self.account_dict, f)
        f.close()

    def LoadConfig(self):
        try:
            f = open(self.config_file, 'r')
            try:
                self.config = json.load(f)
                try:
                    self.config_dict = self.config[self.user_email]
                    self.sync_selection = self.config_dict['Sync Selection']
                    print self.config_dict['Drive Usage']
                    try:
                        self.drive_usage_dict = self.config_dict['Drive Usage']
                        self.totalFilesToCheck = self.drive_usage_dict['Total Files']
                        self.savedTotalSize = self.drive_usage_dict['Total Size']
                        self.driveAudioUsage = self.drive_usage_dict['Audio Size']
                        self.driveMoviesUsage = self.drive_usage_dict['Movies Size']
                        self.driveDocumentUsage = self.drive_usage_dict['Document Size']
                        self.drivePhotoUsage = self.drive_usage_dict['Photo Size']
                        self.driveOthersUsage = self.drive_usage_dict['Others Size']
                    except:
                        pass
                except:
                    pass

                f.close()
            except:
                raise ConfigLoadFailed()
        except:
            raise ConfigLoadFailed()

    def SaveConfig(self):
        f = open(self.config_file, 'w')
        f.truncate()
        if not self.sync_selection:
            self.config_dict['Sync Selection'] = [['root', '']]

        self.account_dict[self.user_email] = self.config_dict

        json.dump(self.account_dict, f)
        f.close()

    def DoAuthenticate(self):
        try:
            if not os.path.exists(self.credential_file):
                self.authToken = GoogleAuth(self.settings_file)
            else:
                self.authToken = GoogleAuth();
                self.authToken.LoadCredentialsFile(self.credential_file)
            if (self.authToken.credentials is None):
                self.authToken.LocalWebserverAuth()
            elif self.authToken.access_token_expired :
                self.authToken.Refresh()
            else:
                self.authToken.Authorize()
            self.drive = GoogleDrive(self.authToken)
            self.is_logged_in = True
        except:

            dial = wx.MessageDialog(None, "Authentication Rejected!\n",
                                    'Information', wx.ID_OK | wx.ICON_EXCLAMATION)
            dial.ShowModal()
            self.is_logged_in = False
            pass

    def DoUnAuthenticate(self):
            self.do_sync = False
            self.observer.unschedule(self.iobserv_handle)
            self.iobserv_handle = None
            os.remove(self.credential_file)
            self.is_logged_in = False

    def DriveInfo(self):
        return self.about_drive

    def PathLeaf(self, path):
        head, tail = ntpath.split(path)
        return tail or ntpath.basename(head)

    def GetFolderOnDrive(self, folder_name, parent='root'):
        """
        Return the folder with name in "folder_name" in the parent folder
        mentioned in parent.
        """
        self.logger.debug("GetFolderOnDrive: searching %s on %s... " % (folder_name, parent))
        file_list = self.drive.ListFile({'q': "'%s' in parents and trashed=false" % parent}).GetList()
        for f in file_list:
            if f['title'] == folder_name and f['mimeType']=='application/vnd.google-apps.folder':
                self.logger.debug("Found!\n")
                return f

        return None

    def LocateFolderOnDrive(self, folder_path):
        """
        Locate and return the directory in the path. The complete path
        is walked and the last directory is returned. An exception is raised
        if the path walking fails at any stage.
        """
        dir_list = folder_path.split(os.sep)
        croot = 'root'
        for dir1 in dir_list:
            try:
                folder = self.GetFolderOnDrive(dir1, croot)
                if not folder:
                    raise FolderNotFound()
            except:
                raise

            croot = folder['id']

        return folder

    def LocateFileInFolder(self, filename, parent='root'):
        try:
            file_list = self.MakeFileListQuery({'q': "'%s' in parents and trashed=false" % parent})
            for f in file_list:
                if f['title'] == filename:
                    return f

            raise FileNotFound()
        except:
            raise FileNotFound()


    def LocateFileOnDrive(self, abs_filepath):
        dirpath = os.path.dirname(abs_filepath)
        filename = self.PathLeaf(abs_filepath)

        if dirpath != '':
            try:
                f = self.LocateFolderOnDrive(dirpath)
                try:
                    fil = self.LocateFileInFolder(filename, f['id'])
                    return fil
                except FileNotFound:
                    self.logger.debug("LocateFileOnDrive: File not found.\n")
                    raise
                except FileListQueryFailed:
                    self.logger.debug("LocateFileOnDrive: File list query failed\n")
                    raise
            except FolderNotFound:
                self.logger.debug("LocateFileOnDrive: Folder not found\n")
                raise
            except FileListQueryFailed:
                self.logger.debug("LocateFileOnDrive:  %s folder not found\n" % dirpath)
                raise
        else:
            try:
                fil = self.LocateFileInFolder(filename)
                return fil
            except FileNotFound:
                self.logger.debug("LocateFileOnDrive: File not found.\n")
                raise
            except FileListQueryFailed:
                self.logger.debug("LocateFileOnDrive: File list query failed.\n")
                raise
            except:
                self.logger.error("LocateFileOnDrive: Unknown error in locating file in drive\n")
                raise

    def CreateDirectoryInParent(self, dirname, parent_id='root'):
        upfile = self.drive.CreateFile({'title': dirname,
                                        'mimeType': "application/vnd.google-apps.folder",
                                        "parents": [{"kind": "drive#fileLink", "id": parent_id}]})
        upfile.Upload()

    def CreateDirectoryByPath(self, dirpath):
        self.logger.debug("create directory: %s\n" % dirpath)
        drivepath = dirpath.split(self.mirror_directory+'/')[1]
        basepath = os.path.dirname(drivepath)
        dirname = self.PathLeaf(dirpath)

        try:
            f = self.LocateFolderOnDrive(drivepath)
            return
        except FolderNotFound:
            if basepath == '':
                self.CreateDirectoryInParent(dirname)
            else:
                try:
                    parent_folder = self.LocateFolderOnDrive(basepath)
                    self.CreateDirectoryInParent(dirname, parent_folder['id'])
                except:
                    errorMsg = "Failed to locate directory path %s on drive.\n" % basepath
                    self.logger.error(errorMsg)
                    dial = wx.MessageDialog(None, errorMsg, 'Directory Not Found',
                                            wx.ID_OK | wx.ICON_EXCLAMATION)
                    dial.ShowModal()
                    return
        except FileListQueryFailed:
            errorMsg = "Server Query Failed!\n"
            self.logger.error(errorMsg)
            dial = wx.MessageDialog(None, errorMsg, 'Directory Not Found',
                                    wx.ID_OK | wx.ICON_EXCLAMATION)
            dial.ShowModal()
            return

    def CreateRegularFile(self, file_path, parent='root', uploaded=False):
        self.logger.debug("Create file %s\n" % file_path)
        filename = self.PathLeaf(file_path)
        upfile = self.drive.CreateFile({'title': filename,
                                       "parents": [{"kind": "drive#fileLink", "id": parent}]})
        upfile.SetContentFile(file_path)
        upfile.Upload()

    def UploadFile(self, file_path):
        if os.path.isfile(file_path):
            drivepath = file_path.split(self.mirror_directory+'/')[1]
            self.logger.debug("file: %s drivepath is %s\n" % (file_path, drivepath))
            try:
                f = self.LocateFileOnDrive(drivepath)
                self.logger.debug('Found file %s on remote (dpath: %s)\n' % (f['title'], drivepath))
                newfile = False
                self.logger.debug('Checking if they are same... ')
                if f['md5Checksum'] == self.HashOfFile(file_path):
                    self.logger.debug('yes\n')
                    return
                else:
                    self.logger.debug('no\n')
            except (FileNotFound, FolderNotFound):
                self.logger.debug("A new file!\n")
                newfile = True

            dirpath = os.path.dirname(drivepath)
            if dirpath == '':
                self.logger.debug('Creating %s file in root\n' % file_path)
                self.CreateRegularFile(file_path, 'root', newfile)
            else:
                try:
                    f = self.LocateFolderOnDrive(dirpath)
                    self.CreateRegularFile(file_path, f['id'], newfile)
                except FolderNotFound:
                    # We are coming from premise that upload comes as part
                    # of observer. So before notification of this file's
                    # creation happens, a notification of its parent directory
                    # must have come first.
                    # So,
                    # Folder not found? That cannot happen. Can it?
                    raise RegularFileUploadFailed()
        else:
            self.CreateDirectoryByPath(file_path)

    def UploadObservedFile(self, file_path):
        self.sync_lock.acquire()
        self.UploadFile(file_path)
        self.sync_lock.release()

    def RenameFile(self, file_object, new_title):
        try:
            file = {'title': new_title}

            updated_file = self.authToken.service.files().patch(fileId=file_object['id'],
                                                                body=file, fields='title').execute()
            return updated_file
        except errors.HttpError, error:
            self.logger.error('An error occurred while renaming file: %s' % error)
            return None
        except:
            # (source truncates here; minimal fallback added so the clause parses)
            self.logger.error('An unknown error occurred while renaming file')
            return None
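`UploadFile` above skips the upload whenever the local MD5 matches the `md5Checksum` that Drive reports. A standalone sketch of that check, hashing in binary chunks (safer for large files than the one-shot read in `HashOfFile`):

import hashlib

def md5_of_file(path, chunk_size=1 << 20):
    # Hash in binary chunks so large files are never loaded into memory at once.
    digest = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

def is_same_as_remote(local_path, remote_file):
    # remote_file is a dict-like Drive entry exposing 'md5Checksum', as above
    return remote_file.get("md5Checksum") == md5_of_file(local_path)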
Ejemplo n.º 39
0
class watcher(FileSystemEventHandler):
    def __init__(self, message=None):
        super(watcher, self).__init__()  # Initialize the base class(es)

        self.directories = {}
        self.watchers = []
        self.q = SimpleQueue()

        self.msg = message if message else consoleMessage

        from watchdog.observers import Observer
        self.observer = Observer()
        self.msg(f"Starting Observer()")
        self.observer.start()

    def _addWatchItem(self, item):
        filespec = Path(item).resolve()

        if filespec.parent not in self.directories:
            self.msg(f"Creating watchlist() for [{filespec.parent}]")
            self.directories[filespec.parent] = watchlist(filespec)
        else:
            self.directories[filespec.parent].addItem(filespec.name)

    def _start(self, directory):
        self.msg(f"Scheduling watcher for [{directory}]")
        self.watchers.append(
            self.observer.schedule(self, str(directory), recursive=False))

    def _stop(self, watcher):
        if watcher in self.watchers:
            #//TODO.py: This won't work as-is. If needed, should make a small class to hold the
            # watcher stuff: directory, the watcher instance, etc.
            self.msg(f"UnScheduling watcher for [{watcher}]")
            self.observer.unschedule(watcher)

    def on_modified(self, event):
        if event.is_directory: return None

        spec = Path(event.src_path).resolve()
        #path = spec.parent
        #name = spec.name
        if spec.parent in self.directories:
            if self.directories[spec.parent].exists(spec.name):
                self.msg(
                    f"File {event.src_path} was modified. Adding to queue...")
                self.q.put(event.src_path)
            else:
                self.msg(
                    f"File {event.src_path} was modified, but I don't care about it..."
                )
        else:
            self.msg(
                f"Got notification of change to {spec.name} in directory {spec.parent} --> {event.src_path}"
            )

    def begin(self, filelist):
        # hang on to the filelist so .reset() can efficiently determine a course of action
        self.filelist = filelist

        # first, create all the watchdir() for each unique directory
        for item in filelist:
            self._addWatchItem(item)

        # now, schedule an event handler(watchdog.watch) for each folder
        for dir in self.directories:
            self._start(dir)

    def reset(self, filelist):
        # if the filelist didn't change (true most of the time) ignore the reset
        if set(self.filelist) == set(filelist):
            self.msg("reset: no change to filelist, ignoring reset...")
            return

        # get list of directories being monitored
        self.msg("reset: shutting down directory observers...")
        self.observer.unschedule_all()
        self.directories = {}
        self.watchers = []
        self.begin(filelist)

    def end(self):
        self.observer.unschedule_all()
        self.observer.stop()
        self.msg("\nObserver Stopped")
        self.observer.join()

    def look(self):
        return self.q.qsize() > 0

    def get(self):
        return self.q.get() if self.q.qsize() else ""

    def dump(self):
        for i in self.directories:
            self.msg(f"Folder: {i}")
            self.directories[i].dump()
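A rough driving loop for this class; the file paths are placeholders, and `watchlist` and `consoleMessage` are assumed to come from the surrounding module as above:

import time

w = watcher()  # starts the Observer immediately
w.begin(["./config.yaml", "./data/input.csv"])  # placeholder paths

try:
    for _ in range(20):  # poll for roughly ten seconds
        while w.look():  # any modified files queued?
            print("handle change:", w.get())
        time.sleep(0.5)
finally:
    w.end()  # unschedule everything and stop the Observer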
Ejemplo n.º 40
0
class Schedule:
    def __init__(self, config):
        self.observer = Observer()
        self.handlers = {}
        self.watchers = {}
        self.counter = Counter()
        self.notifier = Notifier(config)
        self.offset_db = OffsetPersistence(config)

    def __make_key(self, filename):
        return path.abspath(urlsafe_b64decode(filename).decode())

    def add_watcher(self, filename):
        # If the monitored files all live in the same directory, this approach starts
        # only one inotify watch for that directory; handler.start actually calls the
        # monitor's start.
        filename = self.__make_key(filename)
        if filename not in self.handlers:
            handler = Watcher(filename, counter=self.counter, notifier=self.notifier, offset_db=self.offset_db)
            if path.dirname(handler.filename) not in self.watchers:
                self.watchers[path.dirname(handler.filename)] = self.observer.schedule(handler, path.dirname(handler.filename), recursive=False)
            else:
                watch = self.watchers[path.dirname(handler.filename)]
                self.observer.add_handler_for_watch(handler, watch)
            self.handlers[handler.filename] = handler
            handler.start()


    def remove_watcher(self, filename):
        # If the key is being watched, detach its handler from the observer and stop
        # it, then drop the watch entirely once no handlers remain for that directory.
        key = self.__make_key(filename)
        handler = self.handlers.pop(key, None)
        if handler is not None:
            watch = self.watchers[path.dirname(key)]
            self.observer.remove_handler_for_watch(handler, watch)
            handler.stop()
            if not self.observer._handlers[watch]:
                self.observer.unschedule(watch)
                self.watchers.pop(path.dirname(handler.filename))


    # Adding and removing monitors simply delegates to the Monitor class's
    # add and remove methods.
    def add_monitor(self, filename, name, src):
        key = self.__make_key(filename)
        handler = self.handlers.get(key)
        if handler is None:
            logging.warning('watcher {0} not found, auto add it'.format(filename))
            self.add_watcher(filename)
            handler = self.handlers.get(key)
        handler.monitor.add(filename, name, src)

    def remove_monitor(self, filename, name):
        key = self.__make_key(filename)
        handler = self.handlers.get(key)
        if handler is None:
            logging.warning('watcher {0} not found'.format(filename))
            return
        handler.monitor.remove(name)

    def start(self):
        self.observer.start()
        self.notifier.start()

    def join(self):
        self.observer.join()

    def stop(self):
        self.observer.stop()
        for handler in self.handlers.values():
            handler.stop()
        self.notifier.stop()
        self.offset_db.close()
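Note that `__make_key` urlsafe-base64-decodes the incoming `filename` before resolving it, so callers must encode paths first. A hedged sketch of driving this class, assuming a `config` object accepted by `Notifier` and `OffsetPersistence`; the monitor name and `src` value are placeholders:

from base64 import urlsafe_b64encode

encoded = urlsafe_b64encode(b'/var/log/app.log').decode()  # keys arrive base64-encoded

schedule = Schedule(config)  # `config` comes from the surrounding application
schedule.start()
schedule.add_watcher(encoded)  # decoded internally by __make_key
schedule.add_monitor(encoded, 'errors', 'ERROR')  # placeholder monitor name and src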
Ejemplo n.º 41
0
class _MultiPathWatcher(object):
    """Watches multiple paths."""

    _singleton: Optional["_MultiPathWatcher"] = None

    @classmethod
    def get_singleton(cls) -> "_MultiPathWatcher":
        """Return the singleton _MultiPathWatcher object.

        Instantiates one if necessary.
        """
        if cls._singleton is None:
            LOGGER.debug("No singleton. Registering one.")
            _MultiPathWatcher()

        return cast("_MultiPathWatcher", _MultiPathWatcher._singleton)

    # Don't allow constructor to be called more than once.
    def __new__(cls) -> "_MultiPathWatcher":
        """Constructor."""
        if _MultiPathWatcher._singleton is not None:
            raise RuntimeError("Use .get_singleton() instead")
        return super(_MultiPathWatcher, cls).__new__(cls)

    def __init__(self) -> None:
        """Constructor."""
        _MultiPathWatcher._singleton = self

        # Map of folder_to_watch -> _FolderEventHandler.
        self._folder_handlers: Dict[str, _FolderEventHandler] = {}

        # Used for mutation of _folder_handlers dict
        self._lock = threading.Lock()

        # The Observer object from the Watchdog module. Since this class is
        # only instantiated once, we only have a single Observer in Streamlit,
        # and it's in charge of watching all paths we're interested in.
        self._observer = Observer()
        self._observer.start()  # Start observer thread.

    def __repr__(self) -> str:
        return repr_(self)

    def watch_path(
        self,
        path: str,
        callback: Callable[[str], None],
        *,  # keyword-only arguments:
        glob_pattern: Optional[str] = None,
        allow_nonexistent: bool = False,
    ) -> None:
        """Start watching a path."""
        folder_path = os.path.abspath(os.path.dirname(path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                folder_handler = _FolderEventHandler()
                self._folder_handlers[folder_path] = folder_handler

                folder_handler.watch = self._observer.schedule(folder_handler,
                                                               folder_path,
                                                               recursive=True)

            folder_handler.add_path_change_listener(
                path,
                callback,
                glob_pattern=glob_pattern,
                allow_nonexistent=allow_nonexistent,
            )

    def stop_watching_path(self, path: str, callback: Callable[[str],
                                                               None]) -> None:
        """Stop watching a path."""
        folder_path = os.path.abspath(os.path.dirname(path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                LOGGER.debug(
                    "Cannot stop watching path, because it is already not being "
                    "watched. %s",
                    folder_path,
                )
                return

            folder_handler.remove_path_change_listener(path, callback)

            if not folder_handler.is_watching_paths():
                # Sometimes watchdog's FileSystemEventHandler does not have
                # a .watch property. It's unclear why -- may be due to a
                # race condition.
                if hasattr(folder_handler, "watch"):
                    self._observer.unschedule(folder_handler.watch)
                del self._folder_handlers[folder_path]

    def close(self) -> None:
        """Close this _MultiPathWatcher object forever."""
        with self._lock:
            if len(self._folder_handlers) != 0:
                self._folder_handlers = {}
                LOGGER.debug(
                    "Stopping observer thread even though there is a non-zero "
                    "number of event observers!")
            else:
                LOGGER.debug("Stopping observer thread")

            self._observer.stop()
            self._observer.join(timeout=5)
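As with the file-based variant earlier, a minimal sketch of driving this path watcher; the directory and callback are hypothetical:

def on_path_changed(changed_path: str) -> None:
    print("changed:", changed_path)

watcher = _MultiPathWatcher.get_singleton()

# Watch every CSV that appears under a possibly not-yet-created data directory.
watcher.watch_path(
    "/tmp/data",
    on_path_changed,
    glob_pattern="*.csv",
    allow_nonexistent=True,
)

# ... later ...
watcher.stop_watching_path("/tmp/data", on_path_changed)
watcher.close()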
Ejemplo n.º 42
0
class ChangesWatcher(FileSystemEventHandler):
    def __init__(self, application):
        self.task_map = {}  # path: ChangesKeeper
        self.application = application
        self.observer = Observer()
        self.changes = []
        self.changes_timer = None
        self.observer.start()

    def add_watch(self, path, mute_list=None):
        if path in self.task_map:
            return False
        else:
            mute_list = (mute_list or []) + DEFAULT_MUTE_LIST
            keeper = ChangesKeeper(path, mute_list)
            self.task_map[path] = keeper
            if os.path.exists(path):
                # keep the ObservedWatch so remove_watch can unschedule it later
                keeper.watch = self.observer.schedule(self, path, recursive=True)
                return True
            return False

    def remove_watch(self, path):
        if path in self.task_map:
            keeper = self.task_map.pop(path)  # drop it so the path can be watched again later
            self.observer.unschedule(keeper.watch)
            return True
        else:
            return False

    def get_changes_since(self, timestamp, parent_path=None):
        ret = []
        for change in self.changes:
            if change.timestamp > timestamp and (not parent_path or path_is_parent(parent_path, change.path)):
                ret.append(change)
        return ret

    def add_pure_change(self, change):
        """Inspect the change's type and record it only if it is neither a trash
        change nor under a muted (blacklisted) path.
        """

        # Skip changes under a muted path or any of its subdirectories
        for mute_list in [keeper.mute_list for keeper in self.task_map.values()]:
            for mute_path in mute_list:
                if path_is_parent(mute_path, change.path):
                    print '...', change.type, change.path
                    return

        # Look for earlier trash changes matching this one; drop them if found,
        # otherwise record the current change
        trash_changes = self.find_related_trash_changes(change)
        if trash_changes:
            for change in trash_changes:
                self.changes.remove(change)
                print '-  ', change.type, change.path
        else:
            self.changes.append(change)
            print '+  ', change.type, change.path
            self.compile_if_needed(change)

        ioloop.IOLoop.instance().add_callback(lambda: self.remove_outdated_changes(30))

    def compile_if_needed(self, change):
        if change.type == EVENT_TYPE_DELETED:
            return

        input_path = normalize_path(change.path)
        base_path, ext = os.path.splitext(input_path)
        ext = ext.lower()
        if ext not in ['.less', '.coffee']:
            return

        project = self.application.find_project(input_path)
        if not project:
            return

        os.chdir(APP_FOLDER)
        begin_time = time.time()
        if ext == '.less':
            if project.compileLess:
                output_path = base_path + '.css'
                run_cmd('%s bundled/less/bin/lessc %s %s' % (NODE_BIN_PATH, input_path, output_path))
                print '.less ->- .css', change.path, time.time() - begin_time, 'seconds'
            else:
                print '.less -X- .css', change.path, '(OFF by settings)'

        elif ext == '.coffee':
            if project.compileCoffee:
                run_cmd('%s bundled/coffee/bin/coffee --compile %s' % (NODE_BIN_PATH, input_path))
                print '.coffee ->- .js', change.path, time.time() - begin_time, 'seconds'
            else:
                print '.coffee -X- .js', change.path, '(OFF by settings)'

    def check_folder_change(self, folder_path):
        if sys.platform.startswith('win') or \
                not os.path.isdir(folder_path):
            return

        now = time.time() - 0.5  # anything modified within the last 0.5s counts as changed
        for filename in os.listdir(folder_path):
            file_path = os.path.join(folder_path, filename)
            if not os.path.isfile(file_path):
                continue

            modified_time = os.path.getmtime(file_path)
            if modified_time > now:
                self.on_any_event(FileModifiedEvent(file_path))

    def on_any_event(self, event):
        if event.is_directory:
            self.check_folder_change(event.src_path)
            return

        # Hold back change reporting so a long compile in progress does not
        # cause events to be returned prematurely
        loop = ioloop.IOLoop.instance()
        if self.changes_timer:
            ioloop.IOLoop.instance().remove_timeout(self.changes_timer)

        now = time.time()
        if event.event_type == EVENT_TYPE_MOVED:
            self.add_pure_change(Change(dict(
                timestamp=now,
                path=normalize_path(event.src_path),
                type=EVENT_TYPE_DELETED
            )))
            self.add_pure_change(Change(dict(
                timestamp=now,
                path=normalize_path(event.dest_path),
                type=EVENT_TYPE_CREATED
            )))
        else:
            self.add_pure_change(Change(dict(
                timestamp=now,
                path=normalize_path(event.src_path),
                type=event.event_type
            )))

        # Report changes after a 0.1s delay so bursts of consecutive events are not missed
        self.changes_timer = loop.add_timeout(time.time() + 0.1, self.application.project_file_changed)

    def find_related_trash_changes(self, change):
        """Find the trash changes that shortly precede the current change.

        Some editors save a file by renaming it, writing the content, then renaming
        it back, so changes cannot be reported as-is and need some filtering first.
        """
        trash_changes = []
        for old_change in self.changes[::-1]:
            if old_change.path != change.path:
                continue
            if change.timestamp > old_change.timestamp + 1:
                break

            if change.type == EVENT_TYPE_DELETED:
                # If the current change is DELETED, return all events for this file
                # back to (and including) the CREATED event
                trash_changes.append(old_change)
                if old_change.type == EVENT_TYPE_CREATED:
                    return trash_changes
            elif change.type == EVENT_TYPE_CREATED:
                # If the current change is CREATED, return all events for this file
                # back to (and including) the DELETED event
                trash_changes.append(old_change)
                if old_change.type == EVENT_TYPE_DELETED:
                    return trash_changes
        return []

    def remove_outdated_changes(self, seconds):
        for change in self.changes[:]:
            if time.time() - change.timestamp > seconds:  # older than `seconds` ago
                self.changes.remove(change)
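To make the rename-save filtering concrete, here is a self-contained sketch of the same cancellation idea on plain tuples (simplified from the class above; the event-type strings are assumed to match watchdog's 'created'/'deleted'):

from collections import namedtuple

Change = namedtuple('Change', 'timestamp path type')

def cancel_trash_changes(changes, new_change, window=1.0):
    # Editors that save via rename -> write -> rename-back emit a matching
    # created/deleted pair for the same path; when the closing event arrives
    # within `window` seconds, the whole pair can be dropped instead of reported.
    opposite = {'deleted': 'created', 'created': 'deleted'}.get(new_change.type)
    if opposite is None:
        return []

    trash = []
    for old in reversed(changes):
        if old.path != new_change.path:
            continue
        if new_change.timestamp > old.timestamp + window:
            break
        trash.append(old)
        if old.type == opposite:
            return trash  # found the matching opening event; cancel the pair
    return []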