Example #1
class Crafter(object):
    """
    Set up and manage the watchdog daemon.
    """
    def __init__(self, base_dir):
        # Cache paths
        src_dir = os.path.join(base_dir, 'src')
        dest_dir = os.path.join(base_dir, 'preview')
        # Init handler
        self.handler = ObserverHandler(src_dir, dest_dir)
        # Create the observer and watch src_dir recursively
        self.observer = Observer()
        self.observer.schedule(self.handler, path=src_dir, recursive=True)

    def craft(self):
        """
        Start watching src directory
        """
        self.observer.start()

    def shutdown(self):
        """
        Properly shutdown watchdog daemon
        """
        self.observer.stop()
        self.observer.join()
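A minimal driver for the class above might look like this sketch; Crafter comes from the example, while the project path and the idle loop are assumptions:

import time

crafter = Crafter('/path/to/project')  # hypothetical project root
crafter.craft()                        # start the observer thread
try:
    while True:
        time.sleep(1)                  # keep the main thread alive
except KeyboardInterrupt:
    crafter.shutdown()                 # stop() and join() the observer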
Example #2
    def watch(self):
        """
        Start watching
        """
        logger.info('Watching directory %s' % self.directory)

        # Set up handler for when we see new files
        callback = self.callback
        class NewFileEventHandler(FileSystemEventHandler):
            def on_created(self, event):
                if not event.is_directory:
                    logger.info('Detected new file: %s' % event.src_path)
                    callback(event.src_path)
        event_handler = NewFileEventHandler()

        # Use polling observer (rather than filesystem-specific observers),
        # because it is more reliable.
        observer = PollingObserver(timeout=self.sleep_time)

        # Start the observer
        observer.schedule(event_handler, self.directory, recursive=False)
        observer.start()

        # Wait while the observer is running
        try:
            while True:
                sleep(self.sleep_time)
        # Exit gracefully
        except KeyboardInterrupt:
            logger.info('Detected interrupt. Stopping observer.')
            observer.stop()
        observer.join()
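Note the local binding callback = self.callback above: the nested handler class has its own self, so the outer method's callback must be reached through the enclosing scope. A standalone sketch of the same pattern (the factory name is hypothetical):

from watchdog.events import FileSystemEventHandler

def make_new_file_handler(callback):
    # The nested class's methods receive their own `self`, so the
    # caller's callback is captured as a closure variable instead.
    class NewFileEventHandler(FileSystemEventHandler):
        def on_created(self, event):
            if not event.is_directory:
                callback(event.src_path)
    return NewFileEventHandler()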
Example #3
def main():
    if os.getuid() != 0:
        raise SystemExit("ERROR: this script should be run as root")

    parser = ArgumentParser(description='Watch a directory and install the code')
    args = parser.parse_args()

    current_path = Path(__file__).resolve().parent
    setup_path = Path(current_path, '..').resolve()
    git_path = Path(current_path, '..', '..').resolve()

    observer = Observer()
    observer.schedule(FileHandler(setup_path, 'site'), str(Path(git_path, 'site')), True)
    observer.schedule(FileHandler(setup_path, 'bin'), str(Path(git_path, 'bin')), True)
    observer.schedule(FileHandler(setup_path, 'bin'), str(Path(git_path, 'sbin')), True)

    observer.start()
    try:
        print("Watching the following folders for change:")
        print("    - site")
        print("    - bin")
        print("    - sbin")
        print()
        input("~~Hit enter to exit~~\n")
    finally:
        observer.stop()
        observer.join()
Example #4
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    observer = PollingObserver()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer)
    XModuleSassWatcher().register(observer)
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by ctrl-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #5
    def watch(self):
        observer = PollingObserver(0.1)
        observer.schedule(self, self.path)
        observer.start()
        # Join the path portably instead of embedding a backslash literal
        self.file = open(os.path.join(self.path, "Power.log"), "r")
        self.on_modified(None)
        self.watching = True
Example #6
def watch_record(indexer, use_polling=False):
    """
    Start watching `cfstore.record_path`.

    :type indexer: rash.indexer.Indexer

    """
    if use_polling:
        from watchdog.observers.polling import PollingObserver as Observer

        Observer  # fool pyflakes
    else:
        from watchdog.observers import Observer

    event_handler = RecordHandler(indexer)
    observer = Observer()
    observer.schedule(event_handler, path=indexer.record_path, recursive=True)
    indexer.logger.debug("Start observer.")
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        indexer.logger.debug("Got KeyboardInterrupt. Stopping observer.")
        observer.stop()
    indexer.logger.debug("Joining observer.")
    observer.join()
    indexer.logger.debug("Finish watching record.")
Example #7
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, poll=False, extensions=[]):
    """
    Starts a server to render the specified file or directory
    containing a README.
    """
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog
    if poll:
        observer = PollingObserver()
    else:
        observer = Observer()

    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #8
def main():
    """Script entry point."""
    from watchdog.observers.polling import PollingObserver
    from .parser import AAConfigParser
    from .tricks import AutoRunTrick

    parser = _create_main_argparser()
    args = parser.parse_args()
    configm = _apply_main_args(args)

    # Use PollingObserver() because it is OS-independent and therefore
    # more reliable across platforms.
    observer = PollingObserver()

    parser = AAConfigParser(configm)
    handler_for_watch = parser.schedule_with(observer, AutoRunTrick)
    handlers = set.union(*tuple(handler_for_watch.values()))

    for handler in handlers:
        handler.start()
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    for handler in handlers:
        handler.stop()
Example #9
class Watcher:
    def __init__(self):
#       self.observer = Observer()            # Use this if SS_DIR is local
        self.observer = PollingObserver()     # Use this if SS_DIR is remote mount

    def run(self):
        event_handler = file_changed()
        self.observer.schedule(event_handler, SS_DIR, recursive=True)
        self.observer.start()
        loop1()
Example #10
def watch(script, callback):
    if script in _observers:
        raise RuntimeError("Script already observed")
    script_dir = os.path.dirname(os.path.abspath(script.filename))
    script_name = os.path.basename(script.filename)
    event_handler = _ScriptModificationHandler(callback, filename=script_name)
    observer = Observer()
    observer.schedule(event_handler, script_dir)
    observer.start()
    _observers[script] = observer
Example #11
    def __init__(self, index, interface='wlan0'):
        self.interface = interface
        self.run_state = '/run/network/ifstate.{interface}'.format(interface=interface)
        self.configurations = ['', interface] + rospy.get_param("~wlan_interfaces", [])
        rospy.loginfo("Loaded wlan configs %s", self.configurations)
        super(WifiUI, self).__init__(index, len(self.configurations))
        self.config = get_configuration(self.run_state, self.configurations)
        rospy.loginfo("Start observing changes in %s", os.path.dirname(self.run_state))
        observer = Observer()
        observer.schedule(self, os.path.dirname(self.run_state), recursive=True)
        observer.start()
Example #12
    def watch(self):
        observer = PollingObserver()
        observer.schedule(self.pickup_event_processor, path=self.pickup_dir)
        observer.start()
        
        try:
            while self.keep_running:
                sleep(3)
        except KeyboardInterrupt:
            observer.stop()

        observer.join()
Example #13
def serve(config, options=None):
    """
    Start the devserver, and rebuild the docs whenever any changes take effect.
    """
    # Create a temporary build directory, and set some options to serve it
    tempdir = tempfile.mkdtemp()
    options['site_dir'] = tempdir

    # Only use user-friendly URLs when running the live server
    options['use_directory_urls'] = True

    # Perform the initial build
    config = load_config(options=options)
    build(config, live_server=True)

    # Note: We pass any command-line options through so that we
    #       can re-apply them if the config file is reloaded.
    event_handler = BuildEventHandler(options)
    config_event_handler = ConfigEventHandler(options)

    # We could have used `Observer()`, which can be faster, but
    # `PollingObserver()` works more universally.
    observer = PollingObserver()
    observer.schedule(event_handler, config['docs_dir'], recursive=True)
    for theme_dir in config['theme_dir']:
        if not os.path.exists(theme_dir):
            continue
        observer.schedule(event_handler, theme_dir, recursive=True)
    observer.schedule(config_event_handler, '.')
    observer.start()

    class TCPServer(socketserver.TCPServer):
        allow_reuse_address = True

    class DocsDirectoryHandler(FixedDirectoryHandler):
        base_dir = config['site_dir']

    host, port = config['dev_addr'].split(':', 1)
    server = TCPServer((host, int(port)), DocsDirectoryHandler)

    print('Running at: http://%s:%s/' % (host, port))
    print('Live reload enabled.')
    print('Hold ctrl+c to quit.')
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('Stopping server...')

    # Clean up
    observer.stop()
    observer.join()
    shutil.rmtree(tempdir)
    print('Quit complete')
Example #14
def watch(
    directories=[],
    ignore=[],
    auto_clear=False,
    beep_on_failure=True,
    onpass=None,
    onfail=None,
    runner=None,
    beforerun=None,
    onexit=None,
    poll=False,
    extensions=[],
    args=[],
    spool=True,
    verbose=False,
    quiet=False,
):
    if not directories:
        directories = ["."]
    directories = [os.path.abspath(directory) for directory in directories]
    for directory in directories:
        if not os.path.isdir(directory):
            raise ValueError("Directory not found: " + directory)

    if ignore:
        recursive_dirs, non_recursive_dirs = split_recursive(directories, ignore)
    else:
        recursive_dirs = directories
        non_recursive_dirs = []

    # Initial run
    event_handler = ChangeHandler(
        auto_clear, beep_on_failure, onpass, onfail, runner, beforerun, extensions, args, spool, verbose, quiet
    )
    event_handler.run()

    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    for directory in recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=True)
    for directory in non_recursive_dirs:
        observer.schedule(event_handler, path=directory, recursive=False)

    # Watch and run tests until interrupted by user
    try:
        observer.start()
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
    if onexit:
        os.system(onexit)
Example #15
class SuiteTable(object):
    def __init__(self, dbfile=":memory:", poll=False):
        self.db = sqlite3.connect(dbfile, check_same_thread=False)
        self.log = logging.getLogger(__name__)
        self._create_db()

        # set up watchdog observer to monitor changes to
        # keyword files (or more correctly, to directories
        # of keyword files)
        self.observer = PollingObserver() if poll else Observer()
        self.observer.start()

    def _create_db(self):

        if not self._table_exists("collection_table"):
            self.db.execute("""
                CREATE TABLE collection_table
                (collection_id INTEGER PRIMARY KEY AUTOINCREMENT,
                 name          TEXT COLLATE NOCASE,
                 type          COLLATE NOCASE,
                 version       TEXT,
                 scope         TEXT,
                 namedargs     TEXT,
                 path          TEXT,
                 doc           TEXT,
                 doc_format    TEXT)
            """)
            self.db.execute("""
                CREATE INDEX collection_index
                ON collection_table (name)
            """)

        if not self._table_exists("keyword_table"):
            self.db.execute("""
                CREATE TABLE keyword_table
                (keyword_id    INTEGER PRIMARY KEY AUTOINCREMENT,
                 name          TEXT COLLATE NOCASE,
                 collection_id INTEGER,
                 doc           TEXT,
                 args          TEXT)
            """)
            self.db.execute("""
                CREATE INDEX keyword_index
                ON keyword_table (name)
            """)

    def _table_exists(self, name):
        cursor = self.db.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='%s'
        """ % name)
        return len(cursor.fetchall()) > 0
Example #16
class OSFileSystem(FileSystem):
    """
    This class represents a file system implemented by the python os module.
    """

    def __init__(self, instance=os, root="."):
        FileSystem.__init__(self, instance)
        self.root = os.path.normpath(root)
        self.eventQueue = EventQueue()
        self.eventHandler = EventHandler(self.eventQueue)
        self.observer = Observer()
        self.observer.schedule(self.eventHandler, path=self.root, recursive=True)
        self.observer.start()

    def join_path(self, path, *largs):
        return os.path.join(path, *largs)

    def get_relative_path(self, path):
        if path.startswith(self.root):
            return path.split(self.root + os.path.sep, 1)[1]
        else:
            return path

    def open(self, path, mode="rb", buffering=None):
        return open(path, mode)

    def mkdirs(self, path, mode=511):
        return os.makedirs(path, mode)

    def blockchecksums(self, path):
        return blockchecksums(path)

    def delta(self, path, checksums):
        return delta(path, checksums)

    def patch(self, path, delta):
        patched = patch(path, delta)
        self.instance.remove(path)
        return self.instance.rename(patched, path)

    def poll(self):
        r = []
        while True:
            try:
                r.append(self.eventQueue.get_nowait())
            except Empty:
                break
        return r
Example #17
def watch():
    # Users expect an implicit push
    push(watch=True)

    # Start the observer
    observer = PollingObserver()
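    # Assumption about intent: bounding the queue to one pending event
    # keeps a burst of filesystem changes from queueing redundant pushes.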
    observer.event_queue.max_size = 1
    observer.schedule(EventHandler(), os.getcwd(), recursive=True)
    observer.start()
    puts(colored.yellow('Watching for changes... (ctrl-c to stop)'))
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    # Block until the thread terminates
    observer.join()
Example #18
def main():
    handler = ChangeHandler()
    directory = "./"
    observer = PollingObserver(0.35)
    # Poll every 0.35 seconds
    if not os.path.exists(directory):
        os.makedirs(directory)
    observer.schedule(handler, directory, recursive=True)
    # Only search in the LaTeX directory
    observer.start()
    try:
        while True:
            time.sleep(60 * 5)
            # Sleep for 5 minutes (time doesn't really matter)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #19
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    themes = get_parsed_option(options, 'themes')
    theme_dirs = get_parsed_option(options, 'theme_dirs', [])

    # wait comes in as a list of strings, define the default value similarly for convenience.
    default_wait = [unicode(DEFAULT_OBSERVER_TIMEOUT)]
    wait = float(get_parsed_option(options, 'wait', default_wait)[0])

    if not theme_dirs and themes:
        # We can not add theme sass watchers without knowing the directory that contains the themes.
        raise ValueError('theme-dirs must be provided for watching theme sass.')
    else:
        theme_dirs = [path(_dir) for _dir in theme_dirs]

    sass_directories = get_watcher_dirs(theme_dirs, themes)
    observer = PollingObserver(timeout=wait)

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer, sass_directories)
    XModuleSassWatcher().register(observer, ['common/lib/xmodule/'])
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()

    # Run the Webpack file system watcher too
    execute_webpack_watch(settings=Env.DEVSTACK_SETTINGS)

    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by control-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #20
    def run_watch(self):
        if self.poll:
            from watchdog.observers.polling import PollingObserver as Observer
        else:
            from watchdog.observers import Observer

        event_handler = RoninEventHandler(self)
        observer = Observer()
        observer.schedule(event_handler, self.source, recursive=True)
        observer.start()

        try:
            logger.info("Watching directory: '{0}' for changes (poll={1})".format(self.source, self.poll))
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            logger.info("Stopping watcher...")
            observer.stop()
        observer.join()
Example #21
def folderObserver(pathStructure, dbPath):

    logging = DefaultLogger()

    if pathStructure is None or pathStructure['inBox'] is None:
        message = 'Watch: Unable to run as pathStructure is undefined'
        logging.debug(message)
        return
    
    event_handler = singleFileWatcher(pathStructure, dbPath)
    observer = PollingObserver()
    observer.schedule(event_handler, pathStructure['inBox'], recursive=False)
    observer.start()

    try:
        while observer.is_alive():
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #22
    def server(self, args):
        server = Process(target=self._server)
        server.start()

        event_handler = PatternMatchingEventHandler(ignore_patterns=self.WATCH_EXCLUDE)
        event_handler.on_modified = lambda event : self._build()
        observer = Observer()
        observer.schedule(event_handler, self.BASE_DIR, recursive=True)
        observer.start()

        try:
            while True:
                time.sleep(1)
        except (KeyboardInterrupt, SystemExit):
            server.terminate()
            observer.stop()

        observer.join()

        self.logger.info("Clossing")
Example #23
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    themes = getattr(options, 'themes', None)
    theme_dirs = getattr(options, 'theme-dirs', [])

    if not theme_dirs and themes:
        # We can not add theme sass watchers without knowing the directory that contains the themes.
        raise ValueError('theme-dirs must be provided for watching theme sass.')
    else:
        theme_dirs = [path(_dir) for _dir in theme_dirs]

    if isinstance(themes, basestring):
        themes = themes.split(',')
    else:
        themes = themes if isinstance(themes, list) else [themes]

    sass_directories = get_watcher_dirs(theme_dirs, themes)
    observer = PollingObserver()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer, sass_directories)
    XModuleSassWatcher().register(observer, ['common/lib/xmodule/'])
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by ctrl-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #24
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    themes = get_parsed_option(options, 'themes')
    theme_dirs = get_parsed_option(options, 'theme_dirs', [])

    if not theme_dirs and themes:
        # We can not add theme sass watchers without knowing the directory that contains the themes.
        raise ValueError('theme-dirs must be provided for watching theme sass.')
    else:
        theme_dirs = [path(_dir) for _dir in theme_dirs]

    sass_directories = get_watcher_dirs(theme_dirs, themes)
    observer = PollingObserver()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer, sass_directories)
    XModuleSassWatcher().register(observer, ['common/lib/xmodule/'])
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()

    # We only want Webpack to re-run on changes to its own entry points, not all JS files, so we use its own watcher
    # instead of subclassing from Watchdog like the other watchers do
    execute_webpack_watch(settings=Env.DEVSTACK_SETTINGS)

    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by ctrl-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #25
    def __init__(self, contentdir=''):
        """
        @param contentdir: the dir where we will load wiki files from & parse

        """
        self.file_observers = []
        self.spacehandler = SpaceHandler(self)
        self.contentdir = contentdir if contentdir.endswith('/') else '%s/' % contentdir
        
        if not j.system.fs.exists(contentdir):
            print "Contentdir %s was not found .. creating it." % contentdir
            j.system.fs.createDir(contentdir)

        if contentdir.strip():
            # Watch the contentdir for changes
            observer = Observer()
            self.file_observers.append(observer)
            j.core.portal.active.watchedspaces.append(contentdir)
            print('Monitoring', contentdir)
            observer.schedule(self.spacehandler, contentdir, recursive=True)
            observer.start()
Example #26
class FileMonitor(FileSystemEventHandler):
    def __init__(self, file, action):
        self.path,self.filename = os.path.split(file)
        self.action = action
        self.observer = None
    def start(self):
        self.observer = Observer()
        self.observer.schedule(self, self.path, recursive=False)
        self.observer.start()
    def stop(self):
        if self.observer is not None:
            self.observer.stop()
    def join(self):
        if self.observer is not None:
            self.observer.join()

    def on_modified(self, event):
        try:
            # Compare against the full watched path, not the bare filename
            if os.path.samefile(event.src_path, os.path.join(self.path, self.filename)):
                self.action()
        except OSError as e:
            print('Exception on file check', e)
Example #27
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    themes = get_parsed_option(options, 'themes')
    theme_dirs = get_parsed_option(options, 'theme_dirs', [])

    if not theme_dirs and themes:
        # We can not add theme sass watchers without knowing the directory that contains the themes.
        raise ValueError('theme-dirs must be provided for watching theme sass.')
    else:
        theme_dirs = [path(_dir) for _dir in theme_dirs]

    sass_directories = get_watcher_dirs(theme_dirs, themes)
    observer = PollingObserver()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer, sass_directories)
    XModuleSassWatcher().register(observer, ['common/lib/xmodule/'])
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by ctrl-c
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #28
def read_logs(api_url, logs):
    observers = []

    for log in logs:
        path = log['path']
        dir_name, log_name = os.path.split(path)
        print('Listening for changes on {} in {}'.format(log_name, dir_name))

        if log['os_log']:
            event_handler = LinuxLogEventHandler(api_url, log['regex'],
                                                 log_name)
        else:
            event_handler = TextLogEventHandler(api_url, log['regex'],
                                                log_name)

        interval = log['interval']
        if interval > 0:
            observer = PollingObserver(interval)
            # Initialize the offset to the end of the file:
            event_handler.offset = sum(1 for _ in open(path))
        else:
            observer = Observer()

        observer.schedule(event_handler, dir_name, recursive=False)
        observer.start()
        observers.append(observer)

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        for observer in observers:
            observer.stop()

    for observer in observers:
        observer.join()
Example #29
    def test_watch_new_file(self):
        tmp_dir = tempfile.mkdtemp()

        created_file_count = defaultdict(lambda: 0)

        def _increment_counter(name: str, dir: str):
            created_file_count[os.path.join(dir, name)] += 1

        observer = PollingObserver()
        file_event_handler = FileFinallyCreatedEventHandler(
            ['*.txt'], lambda name, dir: 'exclude' not in name,
            _increment_counter, 1)
        observer.schedule(file_event_handler, tmp_dir, recursive=True)
        observer.start()

        test_files = [
            os.path.join(tmp_dir, x) for x in
            ['test_0.txt', 'test_1.txt', 'java.script', 'exclude.txt']
        ]
        for test_file_path in test_files:
            with open(test_file_path, 'w') as f:
                f.write('test ' + test_file_path)

        time.sleep(2)
        file_event_handler.pending_watch_created_files()

        for test_file_path in test_files:
            if test_file_path.endswith(
                    '.txt') and 'exclude' not in test_file_path:
                self.assertEqual(1, created_file_count[test_file_path])
            else:
                self.assertEqual(0, created_file_count[test_file_path])

        observer.stop()
        observer.join()
        shutil.rmtree(tmp_dir, ignore_errors=True)
Example #30
def main():
    ocrmypdf.configure_logging(verbosity=ocrmypdf.Verbosity.default,
                               manage_root_logger=True)
    log.setLevel(LOGLEVEL)
    log.info(f"Starting OCRmyPDF watcher with config:\n"
             f"Input Directory: {INPUT_DIRECTORY}\n"
             f"Output Directory: {OUTPUT_DIRECTORY}\n"
             f"Output Directory Year & Month: {OUTPUT_DIRECTORY_YEAR_MONTH}")
    log.debug(f"INPUT_DIRECTORY: {INPUT_DIRECTORY}\n"
              f"OUTPUT_DIRECTORY: {OUTPUT_DIRECTORY}\n"
              f"OUTPUT_DIRECTORY_YEAR_MONTH: {OUTPUT_DIRECTORY_YEAR_MONTH}\n"
              f"ON_SUCCESS_DELETE: {ON_SUCCESS_DELETE}\n"
              f"DESKEW: {DESKEW}\n"
              f"ARGS: {OCR_JSON_SETTINGS}\n"
              f"POLL_NEW_FILE_SECONDS: {POLL_NEW_FILE_SECONDS}\n"
              f"USE_POLLING: {USE_POLLING}\n"
              f"LOGLEVEL: {LOGLEVEL}\n")

    if 'input_file' in OCR_JSON_SETTINGS or 'output_file' in OCR_JSON_SETTINGS:
        log.error(
            'OCR_JSON_SETTINGS should not specify input file or output file')
        sys.exit(1)

    handler = HandleObserverEvent(patterns=PATTERNS)
    if USE_POLLING:
        observer = PollingObserver()
    else:
        observer = Observer()
    observer.schedule(handler, INPUT_DIRECTORY, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #31
class FileWatcher:

    def __init__(self, pool, query, src_path, patterns=None, ignore_directories=False, recursive=True, timeout=1, key=1):

        if patterns is None:
            patterns = ["*.txt"]

        self.src_path = src_path
        self.recursive = recursive
        self.event_observer = PollingObserver(timeout=timeout)
        self.event_handler = InsertToSQL(pool, query, patterns=patterns, ignore_directories=ignore_directories, key=key)

    def bark(self):

        self.start()

        try:
            # Main loop: the watchdog thread polls every `timeout`
            # seconds on its own, so idle here instead of busy-waiting
            # with `pass`, which would spin a CPU core.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.stop()

    def start(self):
        # Schedule observer
        self.event_observer.schedule(self.event_handler, self.src_path, recursive=self.recursive)
        # Start watchdog thread; can give it name with observer.set_name()
        self.event_observer.start()

    def stop(self):
        self.event_observer.stop()
        self.event_observer.join()
Example #32
class Command(BaseCommand):
    """
    On every iteration of an infinite loop, consume what we can from the
    consumption directory.
    """

    # This is here primarily for the tests and is irrelevant in production.
    stop_flag = False

    def __init__(self, *args, **kwargs):

        self.logger = logging.getLogger(__name__)

        BaseCommand.__init__(self, *args, **kwargs)
        self.observer = None

    def add_arguments(self, parser):
        parser.add_argument("directory",
                            default=settings.CONSUMPTION_DIR,
                            nargs="?",
                            help="The consumption directory.")
        parser.add_argument("--oneshot",
                            action="store_true",
                            help="Run only once.")

    def handle(self, *args, **options):
        directory = options["directory"]
        recursive = settings.CONSUMER_RECURSIVE

        if not directory:
            raise CommandError("CONSUMPTION_DIR does not appear to be set.")

        if not os.path.isdir(directory):
            raise CommandError(
                f"Consumption directory {directory} does not exist")

        if recursive:
            for dirpath, _, filenames in os.walk(directory):
                for filename in filenames:
                    filepath = os.path.join(dirpath, filename)
                    _consume(filepath)
        else:
            for entry in os.scandir(directory):
                _consume(entry.path)

        if options["oneshot"]:
            return

        if settings.CONSUMER_POLLING == 0 and INotify:
            self.handle_inotify(directory, recursive)
        else:
            self.handle_polling(directory, recursive)

        logger.debug("Consumer exiting.")

    def handle_polling(self, directory, recursive):
        logging.getLogger(__name__).info(
            f"Polling directory for changes: {directory}")
        self.observer = PollingObserver(timeout=settings.CONSUMER_POLLING)
        self.observer.schedule(Handler(), directory, recursive=recursive)
        self.observer.start()
        try:
            while self.observer.is_alive():
                self.observer.join(1)
                if self.stop_flag:
                    self.observer.stop()
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def handle_inotify(self, directory, recursive):
        logging.getLogger(__name__).info(
            f"Using inotify to watch directory for changes: {directory}")

        inotify = INotify()
        inotify_flags = flags.CLOSE_WRITE | flags.MOVED_TO
        if recursive:
            descriptor = inotify.add_watch_recursive(directory, inotify_flags)
        else:
            descriptor = inotify.add_watch(directory, inotify_flags)

        try:
            while not self.stop_flag:
                for event in inotify.read(timeout=1000):
                    if recursive:
                        path = inotify.get_path(event.wd)
                    else:
                        path = directory
                    filepath = os.path.join(path, event.name)
                    _consume(filepath)
        except KeyboardInterrupt:
            pass

        inotify.rm_watch(descriptor)
        inotify.close()
Example #33
class FolderScanner(FileSystemEventHandler, InputScanner, QObject):
    """
    Watches file changes (creation, move) in a specific filesystem folder

    the watched directory is retrieved from user config on scanner startup
    """
    @log
    def __init__(self):
        FileSystemEventHandler.__init__(self)
        InputScanner.__init__(self)
        QObject.__init__(self)
        self._observer = None

    @log
    def start(self):
        """
        Starts scanning scan folder for new files
        """
        try:
            scan_folder_path = config.get_scan_folder_path()
            self._observer = PollingObserver()
            self._observer.schedule(self, scan_folder_path, recursive=False)
            self._observer.start()
        except OSError as os_error:
            raise ScannerStartError(os_error)

    @log
    def stop(self):
        """
        Stops scanning scan folder for new files
        """
        if self._observer is not None:
            self._observer.stop()
            self._observer = None

    @log
    def on_moved(self, event):
        if event.event_type == 'moved':
            image_path = event.dest_path
            _LOGGER.debug(f"File move detected : {image_path}")

            self.broadcast_image(read_disk_image(Path(image_path)))

    @log
    def on_created(self, event):
        if event.event_type == 'created':
            file_is_incomplete = True
            last_file_size = -1
            image_path = event.src_path
            _LOGGER.debug(
                f"File creation detected : {image_path}. Waiting until file is complete and readable ..."
            )

            while file_is_incomplete:
                info = QFileInfo(image_path)
                size = info.size()
                _LOGGER.debug(f"File {image_path}'s size = {size}")
                if size == last_file_size:
                    file_is_incomplete = False
                    _LOGGER.debug(f"File {image_path} is ready to be read")
                last_file_size = size
                time.sleep(_DEFAULT_SCAN_FILE_SIZE_RETRY_PERIOD_IN_SEC)

            self.broadcast_image(read_disk_image(Path(image_path)))
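The size-polling loop in on_created above is a general trick for waiting until a newly created file has been fully written. The same idea as a standalone helper (a sketch; the names and interval are hypothetical):

import os
import time

def wait_until_stable(path, interval=0.5):
    # Poll the file size until two consecutive reads agree, which
    # suggests the writer has finished (a heuristic, not a guarantee).
    last_size = -1
    while True:
        size = os.path.getsize(path)
        if size == last_size:
            return
        last_size = size
        time.sleep(interval)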
Example #34
def main():
  print 'Starting main...'
  log.info('--------------------------------------------------------------------')
  log.info('HealthPro CSV Ingester service started.')
  log.info('Details about database from config file: Server: {}, DB Table: {}, '\
           ''.format(db_info['host'], healthpro_table_name))
  observer = PollingObserver(timeout=5) # check every 5 seconds
  try:
    if not do_startup_checks():
      raise Exception('One or more startup checks failed')
    class FSEHandler(FileSystemEventHandler):
      # Here we define what we'd like to do when certain filesystem
      # events take place -- e.g., when a new CSV appears in the watched
      # directory.
      # Uncomment on_any_event for extra logging.
      #def on_any_event(self, event):
      #  log.info('FSEHandler->on_any_event: event_type=[' \
      #           '{}], src_path=[{}]'.format(event.event_type, event.src_path))
      def on_deleted(self, event):
        # Our forked Watchdog (v0.8.3.1) emits this event when inbox folder 
        # unmounts (or otherwise is not available).
        # We log and send an email only once. It very well might remount without 
        # us needing to do anything.
        global inbox_gone_flag
        if event.src_path == inbox_dir:
          if not inbox_gone_flag:
            inbox_gone_flag = True
            msg = event.src_path + ' is gone!'
            log.error(msg)
            send_notice_email(msg) 
      def on_created(self, event):
        # In on_deleted above, we set the inbox_gone_flag. But if a file appears
        # we know the inbox is back and all is well; so unset it. 
        global inbox_gone_flag
        if inbox_gone_flag:
          inbox_gone_flag = False
        log.info('FSEHandler->on_created: a new file has appeared: '
                 + event.src_path)
        process_file(event.src_path)
    observe_subdirs_flag = False
    observer.schedule(FSEHandler(), inbox_dir, observe_subdirs_flag)
    observer.start()
    log.info('Waiting for activity...')
    print 'Service started.' 
    try:
      while True:
        observer.join(10)
        if observer.is_alive():
          continue
        else:
          raise Exception('Observer thread has stopped.')
    except KeyboardInterrupt:
      print '\nKeyboard interrupt caught. Quitting.'
      observer.stop()
      sys.exit(0)
    observer.join() 
  except Exception, ex:
    print '\nError caught. Quitting. Check log.'
    log.error(str(ex))
    send_error_email('An error occurred in main(). Please check.')
    observer.stop()
    sys.exit(1)
Example #35
#!/usr/bin/env python
import sys, os, re
import time
import datetime
import logging
from watchdog.observers.polling import PollingObserver as Observer
from watchdog.events import FileSystemEventHandler
import threading, thread
import h5py
from Bio import SeqIO
from StringIO import StringIO
import MySQLdb
import subprocess
import string
import configargparse
from warnings import filterwarnings
import socket
import hashlib
import xmltodict
parser = configargparse.ArgParser(
    description=
    'eboladb_squiggle_align: A program providing the ability to determine which region of the ebola genome and individual read is derived from.'
)
parser.add(
    '-fasta',
    '--reference_fasta_file',
    type=str,
    dest='fasta',
    required=True,
    default=None,
Example #36
class Evaluator(object):
    ''' An object that encapsulates model evaluation '''
    def __init__(self, config, model, dataloader, device):
        self.model = model
        self.config = config
        self.device = device
        self.dataloader = dataloader

        self.should_exit = False
        signal.signal(signal.SIGHUP, self.on_training_complete)

        self.observer = None

        if 'cuda' in device.type:
            self.model = nn.DataParallel(model.cuda())

        self.modules = {'model': model}

    @property
    def dataset(self):
        ''' Get the dataset '''
        return self.dataloader.dataset

    def evaluate(self, batch):
        ''' Runs one evaluation step '''
        with torch.no_grad():
            self.model.eval()
            _, nll = self.model(batch)
            #pdb.set_trace()
            # nn.DataParallel wants to gather rather than doing a reduce_add, so the output here
            # will be a tensor of values that must be summed
            nll = nll.sum()

            # need to use .item() which converts to Python scalar
            # because as a Tensor it accumulates gradients
            return nll.item(), torch.sum(batch['target_lens']).item()

    def evaluate_epoch(self, epoch, experiment, verbose=0):
        ''' Evaluate a single epoch '''
        neg_log_likelihood = metrics.Metric('nll', metrics.format_float)

        #pdb.set_trace()
        def get_description():
            mode_name = 'Test' if self.dataset.split == 'test' else 'Validate'
            description = f'{mode_name} #{epoch}'
            if verbose > 0:
                description += f' {neg_log_likelihood}'
            if verbose > 1:
                description += f' [{profile.mem_stat_string(["allocated"])}]'
            return description

        batches = tqdm(
            self.dataloader,
            unit='batch',
            dynamic_ncols=True,
            desc=get_description(),
            file=sys.stdout  # needed to make tqdm_wrap_stdout work
        )
        with tqdm_wrap_stdout():
            for batch in batches:
                # run the data through the model
                batches.set_description_str(get_description())
                nll, length = self.evaluate(batch)
                if length:
                    neg_log_likelihood.update(nll / length)

        experiment.log_metric('nll', neg_log_likelihood.average)
        return neg_log_likelihood.average

    def on_new_checkpoint(self, path, experiment, verbose=0):
        ''' Upon receiving a new checkpoint path '''
        epoch, step = restore(path,
                              self.modules,
                              num_checkpoints=self.config.average_checkpoints,
                              map_location=self.device.type)
        experiment.set_step(step)
        self.evaluate_epoch(epoch, experiment, verbose)

    def on_training_complete(self, signum, frame):  # pylint:disable=unused-argument
        ''' Received a SIGHUP indicating the training session has ended '''
        self.should_exit = True

    def shutdown(self):
        ''' Shutdown the current observer '''
        if self.observer:
            self.observer.stop()
            self.observer.join()

    def watch(self, experiment, verbose=0):
        ''' Watch for a new checkpoint and run an evaluation step '''
        # Use a polling observer because slurm doesn't seem to correctly handle inotify events :/
        self.observer = PollingObserver() if self.config.polling else Observer()
        event_handler = CheckpointEventHandler(self.on_new_checkpoint,
                                               experiment, verbose)
        self.observer.schedule(event_handler, path=self.config.watch_directory)
        self.observer.start()

        while not self.should_exit:
            time.sleep(1)

        atexit.register(self.shutdown)

    def __call__(self, epoch, experiment, verbose=0):
        ''' Validate the model and store off the stats '''
        enter_mode = experiment.validate
        if self.dataset.split == 'test':
            enter_mode = experiment.test

        with ExitStack() as stack:
            stack.enter_context(enter_mode())
            stack.enter_context(torch.no_grad())

            if self.config.watch_directory:
                self.watch(experiment, verbose)
            else:
                return self.evaluate_epoch(epoch, experiment, verbose)
Example #37
    # file modifications on VM share (nfs) always triggering create event
    # so don't need modified event
    #def on_modified(self, event):
    #    if os.path.isfile(event.src_path):
    #        print "modified, touching {}".format(event.src_path)
    #        _retouch_file(event.src_path)


if __name__ == "__main__":

    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    # default polling_timeout to 3 seconds
    polling_timeout = int(sys.argv[2]) if len(sys.argv) > 2 else 3

    logger.info(
        'Starting watching and touching - path - {}, polling timeout - {} ...'.
        format(path, polling_timeout))

    event_handler = PollingTouchFileHandler()
    #observer = Observer()
    observer = PollingObserver(timeout=polling_timeout)
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #38
def main(argv):
    """
    Build the docs and serve them with an HTTP server.
    """
    parser = argparse.ArgumentParser(
        description='Build and serve HTML Sphinx docs')

    parser.add_argument('--port',
                        help='Serve on this port, default 8000',
                        type=int,
                        default=8000)

    parser.add_argument(
        '--source',
        help='Directory of source Sphinx (reStructuredText) docs',
        type=os.path.realpath,
        default='docs/source')

    parser.add_argument('--destination',
                        help='Where to build the HTML output',
                        type=os.path.realpath,
                        default='docs/build/html')

    parser.add_argument('--doctrees',
                        help='Where the doctrees are built',
                        type=os.path.realpath,
                        default='docs/build/doctrees')

    options = parser.parse_args(argv)

    bound_build_docs = partial(build_docs, options.source, options.destination,
                               options.doctrees)

    # Do the initial build
    bound_build_docs()

    # Watch the source directory for changes, build docs again if detected
    observer = Observer()
    observer.schedule(BuildDocsHandler(bound_build_docs),
                      path=options.source,
                      recursive=True)
    observer.start()

    # Set the root for the request handler, overriding Python stdlib current
    # working directory.
    DocsHTTPRequestHandler._root = options.destination

    server = SocketServer.TCPServer(('', options.port), DocsHTTPRequestHandler)

    try:
        logger.info('Serving on localhost:{}'.format(options.port))
        server.serve_forever()
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        logger.info('(stopping server)')
        observer.stop()
    finally:
        observer.join()

    logging.info('Server stopped, exiting')
    sys.exit(0)
Example #39
class ProbeEvaluator(object):
    ''' An object that encapsulates model evaluation '''
    def __init__(self, config, model, dataloader, device):
        self.model = model
        self.config = config
        self.device = device
        self.dataloader = dataloader

        self.should_exit = False
        signal.signal(signal.SIGHUP, self.on_training_complete)

        self.observer = None

        if 'cuda' in device.type:
            self.model = nn.DataParallel(model.cuda())

        self.modules = {
            'model': model
        }

        # stats
        self.stats = {model_stat: {stats_type: {'mean': torch.zeros((model.num_layers, model.num_heads),
                                                                    dtype=torch.float32).to(device),
                                                'var': torch.zeros((model.num_layers, model.num_heads),
                                                                   dtype=torch.float32).to(device)}
                                   for stats_type in STATS_TYPES}
                      for model_stat in MODEL_STATS}
        self.count = {model_stat: 0 for model_stat in MODEL_STATS}

    @property
    def dataset(self):
        ''' Get the dataset '''
        return self.dataloader.dataset

    def evaluate(self, batch):
        ''' Runs one evaluation step '''
        with torch.no_grad():
            self.model.eval()
            # _, nll = self.model(batch)
            result = self.model(batch)
            nll = result['nll']

            # stats
            encoder_stats = probe(result['encoder_attn_weights_tensor'])
            decoder_stats = probe(result['decoder_attn_weights_tensor'])
            enc_dec_stats = probe(result['enc_dec_attn_weights_tensor'])
            stats = {'encoder_stats': {stats_type: encoder_stats[stats_type].view(self.model.num_layers,
                                                                                  self.model.num_heads,
                                                                                  -1)
                                       for stats_type in STATS_TYPES},
                     'decoder_stats': {stats_type: decoder_stats[stats_type].view(self.model.num_layers,
                                                                                  self.model.num_heads,
                                                                                  -1)
                                       for stats_type in STATS_TYPES},
                     'enc_dec_stats': {stats_type: enc_dec_stats[stats_type].view(self.model.num_layers,
                                                                                  self.model.num_heads,
                                                                                  -1)
                                       for stats_type in STATS_TYPES}}

            # nn.DataParallel wants to gather rather than doing a reduce_add, so the output here
            # will be a tensor of values that must be summed
            nll = nll.sum()

            # need to use .item() which converts to Python scalar
            # because as a Tensor it accumulates gradients
            return nll.item(), torch.sum(batch['target_lens']).item(), stats


    def evaluate_epoch(self, epoch, experiment, stats_file, verbose=0):
        ''' Evaluate a single epoch '''
        neg_log_likelihood = metrics.Metric('nll', metrics.format_float)

        def get_description():
            mode_name = 'Test' if self.dataset.split == 'test' else 'Validate'
            description = f'{mode_name} #{epoch}'
            if verbose > 0:
                description += f' {neg_log_likelihood}'
            if verbose > 1:
                description += f' [{profile.mem_stat_string(["allocated"])}]'
            return description

        batches = tqdm(
            self.dataloader,
            unit='batch',
            dynamic_ncols=True,
            desc=get_description(),
            file=sys.stdout # needed to make tqdm_wrap_stdout work
        )
        with tqdm_wrap_stdout():
            for batch in batches:
                # run the data through the model
                batches.set_description_str(get_description())
                nll, length, stats = self.evaluate(batch)
                self.update_stats(stats, self.stats, self.count)
                if length:
                    neg_log_likelihood.update(nll / length)

        experiment.log_metric('nll', neg_log_likelihood.average)
        self.save_stats(stats_file)
        return neg_log_likelihood.average

    def update_stats(self, stats, self_stats, self_count):
        ''' Update stats after each batch '''
        for model_stat in stats:
            current_count = stats[model_stat][STATS_TYPES[0]].size()[-1]
            old_count = self_count[model_stat]
            new_count = old_count + current_count
            for stat_type in stats[model_stat]:
                old_mean = self_stats[model_stat][stat_type]['mean']
                current_mean = stats[model_stat][stat_type].mean(dim=-1)
                new_mean = (old_mean * self_count[model_stat] + stats[model_stat][stat_type].sum(dim=-1)) / new_count
                old_var = self_stats[model_stat][stat_type]['var']
                current_var = stats[model_stat][stat_type].var(
                    dim=-1)  # torch.sum((stats[model_stat][stat_type] - new_mean.unsqueeze(-1)) ** 2, dim=-1) / (current_count - 1)
                new_var = (old_count * (old_var + (old_mean - new_mean) ** 2)
                           + current_count * (current_var + (current_mean - new_mean) ** 2)) / new_count
                self_stats[model_stat][stat_type]['mean'] = new_mean
                self_stats[model_stat][stat_type]['var'] = new_var
            self_count[model_stat] = new_count

    def save_stats(self, stats_file):
        ''' Save stats to file '''
        stats = {'stats': self.stats, 'count': self.count}
        pickle.dump(stats, stats_file, protocol=pickle.HIGHEST_PROTOCOL)

    def on_new_checkpoint(self, path, experiment, verbose=0):
        ''' Upon receiving a new checkpoint path '''
        epoch, step = restore(
            path,
            self.modules,
            num_checkpoints=self.config.average_checkpoints,
            map_location=self.device.type
        )
        experiment.set_step(step)
        self.evaluate_epoch(epoch, experiment, verbose)

    def on_training_complete(self, signum, frame): # pylint:disable=unused-argument
        ''' Received a SIGHUP indicating the training session has ended '''
        self.should_exit = True

    def shutdown(self):
        ''' Shutdown the current observer '''
        if self.observer:
            self.observer.stop()
            self.observer.join()

    def watch(self, experiment, verbose=0):
        ''' Watch for a new checkpoint and run an evaluation step '''
        # Use a polling observer because slurm doesn't seem to correctly handle inotify events :/
        self.observer = PollingObserver() if self.config.polling else Observer()
        event_handler = CheckpointEventHandler(self.on_new_checkpoint, experiment, verbose)
        self.observer.schedule(event_handler, path=self.config.watch_directory)
        self.observer.start()

        while not self.should_exit:
            time.sleep(1)

        atexit.register(self.shutdown)

    def __call__(self, epoch, experiment, verbose=0):
        ''' Validate the model and store off the stats '''
        enter_mode = experiment.validate
        if self.dataset.split == 'test':
            enter_mode = experiment.test

        with ExitStack() as stack:
            stack.enter_context(enter_mode())
            stack.enter_context(torch.no_grad())

            stats_filename = self.config.stats_filename or 'stats_probe_evaluate.pickle'
            stats_path = os.path.join(self.config.stats_directory, stats_filename)
            stats_file = stack.enter_context(open(stats_path, 'wb'))

            if self.config.watch_directory:
                self.watch(experiment, verbose)
            else:
                return self.evaluate_epoch(epoch, experiment, stats_file, verbose)
Example #40
class LivePremailer():
    def __init__(self):
        self.observer_paths = {HERE}
        self.bsync_params = {
            'server': None,
            'directory': None,
            'reloadDelay': 1000,
            'online': 'true',
            'logLevel': 'silent',
            'files': HERE,
        }

    def append_arguments(self, parser):
        parser.add_argument('--devpostfix',
                            nargs='?',
                            default='_dev',
                            help='Postfix which should be used to search '
                                 'dev templates')
        parser.add_argument('--livepostfix',
                            nargs='?',
                            default='_live',
                            help='Postfix which should be used to name '
                                 'files with live preview')
        parser.add_argument('--loadhistory',
                            action='store_true',
                            help='lpremailer will load all paths located '
                                 'in {} file to memory and rerender them '
                                 'every time a change in any file occurs'.format(
                                HISTORY_FILENAME))
        parser.add_argument('--savehistory',
                            action='store_true',
                            help='lpremailer will save all dev file paths '
                                 'recorded during development in {} file'.
                            format(HISTORY_FILENAME))
        parser.add_argument('--astext',
                            action='store_true',
                            help='lpremailer will save all dev files '
                                 'as simple txt messages')

    def parse_args(self):
        parser = argparse.ArgumentParser()
        subparsers = parser.add_subparsers()

        runserver_help = 'Runs server for live premailer'
        sub_parser = subparsers.add_parser(PARSER_RUN, help=runserver_help)
        sub_parser.set_defaults(which=PARSER_RUN)
        sub_parser.add_argument('--staticdir',
                                nargs='?',
                                help='Path to directory where static folder\
                                      is located')
        self.append_arguments(sub_parser)
        init_help = ('Create json files for htmls with provided '
                     'postfix in current directory')

        sub_parser = subparsers.add_parser(PARSER_INIT, help=init_help)
        sub_parser.set_defaults(which=PARSER_INIT)
        sub_parser.add_argument('--force',
                                action='store_true',
                                help='Overwrites json files')
        self.append_arguments(sub_parser)

        self.args = parser.parse_args()

    def json_files(self):
        handler = RenderHandler(self.args)
        if self.args.which == PARSER_INIT:
            JsonGenerator(handler, HERE).generate()
            sys.exit(0)  # init is a one-shot command; exit cleanly

    def start_observer(self):
        self.observer = PollingObserver()
        self.observer.handler = RenderHandler(self.args)
        for path in self.observer_paths:
            self.observer.schedule(self.observer.handler, path, recursive=True)
        self.observer.start()

    def update_params(self):
        if not self.args.staticdir or not os.path.exists(self.args.staticdir):
            logging.warning('Static files won\'t be maintained/served.')
            return
        files = '!**/*.less,!**/*.sass,!**/*.scss'
        files = (self.bsync_params['files'], self.args.staticdir, files)
        self.bsync_params['files'] = ','.join(files)
        self.bsync_params['ss'] = self.args.staticdir
        self.observer_paths.add(self.args.staticdir)

    def bsync_command(self):
        return 'browser-sync start {}'\
               .format(parse_params(self.bsync_params))

    def run_bsync(self):
        self.bsync = subprocess.Popen(self.bsync_command(), shell=True)

    def run(self):
        self.parse_args()
        self.json_files()
        self.update_params()
        self.start_observer()
        self.run_bsync()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            if self.args.savehistory:
                self.observer.handler.save_history()
            self.observer.stop()
            self.bsync.kill()
        self.observer.join()
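
`parse_params` is referenced but not defined here; a plausible sketch, assuming None-valued keys become bare flags and everything else becomes `--key value`:

def parse_params(params):
    """Render a dict of browser-sync options as CLI flags (a sketch).

    Assumes a None value means a bare flag (e.g. --server) and any
    other value is passed as '--key value'.
    """
    parts = []
    for key, value in params.items():
        parts.append('--{}'.format(key))
        if value is not None:
            parts.append(str(value))
    return ' '.join(parts)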
Пример #41
0
def watch(entries=[], ignore=[], extensions=[], beep_on_failure=True,
          auto_clear=False, wait=False, beforerun=None, afterrun=None,
          onpass=None, onfail=None, onexit=None, runner=None, spool=None,
          poll=False, verbose=False, quiet=False, pytest_args=[]):
    argv = _get_pytest_runner(runner) + (pytest_args or [])

    if not entries:
        entries = ['.']

    files = []
    directories = []
    for entry in entries:
        entry = os.path.abspath(entry)
        if os.path.isfile(entry):
            files.append(entry)
        elif os.path.isdir(entry):
            directories.append(entry)
        else:
            raise ValueError('File or directory not found: ' + entry)

    # Setup event handler
    event_listener = EventListener(extensions)

    # Setup watchdog
    observer = PollingObserver() if poll else Observer()
    for file in files:
        single_file_listener = EventSingleFileListener(
            file, event_queue=event_listener.event_queue)
        observer.schedule(
            single_file_listener, path=os.path.dirname(file), recursive=False)
    recursedirs, norecursedirs = _split_recursive(directories, ignore)
    for directory in recursedirs:
        observer.schedule(event_listener, path=directory, recursive=True)
    for directory in norecursedirs:
        observer.schedule(event_listener, path=directory, recursive=False)
    observer.start()

    # Watch and run tests until interrupted by user
    events = []
    while True:
        try:
            # Prepare next run
            if auto_clear:
                clear()
            elif not quiet:
                print()

            # Show event summary
            if not quiet:
                _show_summary(argv, events, verbose)

            # Run custom command
            run_hook(beforerun)

            # Run tests
            p = subprocess.Popen(argv, shell=is_windows)
            try:
                while True:
                    # Check for completion
                    exit_code = p.poll()
                    if exit_code is not None:
                        break
                    # Interrupt the current test run on filesystem event
                    if not wait and not event_listener.event_queue.empty():
                        send_keyboard_interrupt(p)
                        exit_code = p.wait()
                        break
                    # Allow user to initiate a keyboard interrupt
                    time.sleep(0.1)
            except KeyboardInterrupt:
                # Wait for current test run cleanup
                run_hook(afterrun, p.wait())
                # Exit, since this keyboard interrupt was user-initiated
                break

            # Run custom command
            run_hook(afterrun, exit_code)

            # Run dependent commands
            if exit_code in [EXIT_OK, EXIT_NOTESTSCOLLECTED]:
                run_hook(onpass)
            else:
                if beep_on_failure:
                    beep()
                run_hook(onfail)

            # Wait for a filesystem event
            while event_listener.event_queue.empty():
                time.sleep(0.1)

            # Collect events for summary of next run
            events = dequeue_all(event_listener.event_queue, spool)
        except KeyboardInterrupt:
            break
        except Exception as ex:
            print(format_exc() if verbose else 'Error: {}'.format(ex))
            break

    # Stop and wait for observer
    observer.stop()
    observer.join()

    # Run exit script
    run_hook(onexit)
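
`_split_recursive` decides which watch roots are safe to observe recursively; a sketch of the idea (not the exact pytest-watch implementation): a directory is demoted to a non-recursive watch when an ignored path sits beneath it.

import os

def _split_recursive(directories, ignore):
    """Split watch roots into (recursive, non-recursive) lists (a sketch)."""
    if not ignore:
        return directories, []
    recursive, norecursive = [], []
    for directory in directories:
        # Demote to a top-level watch if any ignored path lies below it
        if any(path.startswith(directory + os.sep) for path in ignore):
            norecursive.append(directory)
        else:
            recursive.append(directory)
    return recursive, norecursive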
Пример #42
0
class DirWatcher(object):
    def __init__(self, settings, api, file_pusher):
        self._api = api
        self._file_count = 0
        self._dir = settings.files_dir
        self._settings = settings
        self._user_file_policies = {"end": set(), "live": set(), "now": set()}
        self._file_pusher = file_pusher
        self._file_event_handlers = {}
        self._file_observer = PollingObserver()
        self._file_observer.schedule(self._per_file_event_handler(),
                                     self._dir,
                                     recursive=True)
        self._file_observer.start()
        logger.info("watching files in: %s", settings.files_dir)

    @property
    def emitter(self):
        try:
            return next(iter(self._file_observer.emitters))
        except StopIteration:
            return None

    def update_policy(self, path, policy):
        self._user_file_policies[policy].add(path)
        for src_path in glob.glob(os.path.join(self._dir, path)):
            save_name = os.path.relpath(src_path, self._dir)
            self._get_file_event_handler(src_path,
                                         save_name).on_modified(force=True)

    def _per_file_event_handler(self):
        """Create a Watchdog file event handler that does different things for every file
        """
        file_event_handler = PatternMatchingEventHandler()
        file_event_handler.on_created = self._on_file_created
        file_event_handler.on_modified = self._on_file_modified
        file_event_handler.on_moved = self._on_file_moved
        file_event_handler._patterns = [
            os.path.join(self._dir, os.path.normpath("*"))
        ]
        # Ignore hidden files/folders
        #  TODO: what other files should we skip?
        file_event_handler._ignore_patterns = [
            "*.tmp",
            "*.wandb",
            "wandb-summary.json",
            os.path.join(self._dir, ".*"),
            os.path.join(self._dir, "*/.*"),
        ]
        # TODO: pipe in actual settings
        for glb in self._settings.ignore_globs:
            file_event_handler._ignore_patterns.append(
                os.path.join(self._dir, glb))

        return file_event_handler

    def _on_file_created(self, event):
        logger.info("file/dir created: %s", event.src_path)
        if os.path.isdir(event.src_path):
            return None
        self._file_count += 1
        # We do the directory scan less often as it grows
        if self._file_count % 100 == 0:
            emitter = self.emitter
            if emitter:
                emitter._timeout = int(self._file_count / 100) + 1
        save_name = os.path.relpath(event.src_path, self._dir)
        self._get_file_event_handler(event.src_path, save_name).on_modified()

    def _on_file_modified(self, event):
        logger.info("file/dir modified: %s", event.src_path)
        if os.path.isdir(event.src_path):
            return None
        save_name = os.path.relpath(event.src_path, self._dir)
        self._get_file_event_handler(event.src_path, save_name).on_modified()

    def _on_file_moved(self, event):
        # TODO: test me...
        logger.info("file/dir moved: %s -> %s", event.src_path,
                    event.dest_path)
        if os.path.isdir(event.dest_path):
            return None
        old_save_name = os.path.relpath(event.src_path, self._dir)
        new_save_name = os.path.relpath(event.dest_path, self._dir)

        # We have to move the existing file handler to the new name
        handler = self._get_file_event_handler(event.src_path, old_save_name)
        self._file_event_handlers[new_save_name] = handler
        del self._file_event_handlers[old_save_name]

        handler.on_renamed(event.dest_path, new_save_name)

    def _get_file_event_handler(self, file_path, save_name):
        """Get or create an event handler for a particular file.

        file_path: the file's actual path
        save_name: its path relative to the run directory (aka the watch directory)
        """
        if save_name not in self._file_event_handlers:
            # TODO: we can use PolicyIgnore if there are files we never want to sync
            if 'tfevents' in save_name or 'graph.pbtxt' in save_name:
                self._file_event_handlers[save_name] = PolicyLive(
                    file_path, save_name, self._api, self._file_pusher)
            else:
                Handler = PolicyEnd
                for policy, globs in six.iteritems(self._user_file_policies):
                    if policy == "end":
                        continue
                    # Convert set to list to avoid RuntimeError's
                    # TODO: we may need to add locks
                    for g in list(globs):
                        paths = glob.glob(os.path.join(self._dir, g))
                        if any(save_name in p for p in paths):
                            if policy == "live":
                                Handler = PolicyLive
                            elif policy == "now":
                                Handler = PolicyNow
                self._file_event_handlers[save_name] = Handler(
                    file_path, save_name, self._api, self._file_pusher)
        return self._file_event_handlers[save_name]

    def finish(self):
        logger.info("shutting down directory watcher")
        try:
            # avoid hanging if we crashed before the observer was started
            if self._file_observer.is_alive():
                # rather unfortunately we need to manually do a final scan of the dir
                # with `queue_events`, then iterate through all events before stopping
                # the observer to catch all files written. First we need to prevent the
                # existing thread from consuming our final events, then we process them
                self._file_observer._timeout = 0
                self._file_observer._stopped_event.set()
                self._file_observer.join()
                self.emitter.queue_events(0)
                while True:
                    try:
                        self._file_observer.dispatch_events(
                            self._file_observer.event_queue, 0)
                    except queue.Empty:
                        break
                # Calling stop unschedules any inflight events so we handled them above
                self._file_observer.stop()
        # TODO: py2 TypeError: PyCObject_AsVoidPtr called with null pointer
        except TypeError:
            pass
        # TODO: py3 SystemError: <built-in function stop> returned an error
        except SystemError:
            pass

        # Ensure we've at least noticed every file in the run directory. Sometimes
        # we miss things because asynchronously watching filesystems isn't reliable.
        logger.info("scan: %s", self._dir)

        for dirpath, _, filenames in os.walk(self._dir):
            for fname in filenames:
                file_path = os.path.join(dirpath, fname)
                save_name = os.path.relpath(file_path, self._dir)
                logger.info("scan save: %s %s", file_path, save_name)
                self._get_file_event_handler(file_path, save_name).finish()
Пример #43
0
class MonitorFile(PatternMatchingEventHandler):
    """Monitor files and create Beergarden events

    This is a wrapper around a watchdog PollingObserver. PollingObserver is used
    instead of Observer because Observer emits an event for every file transaction.

    Note that the events generated are NOT watchdog events, they are whatever
    Beergarden events are specified during initialization.

    """
    def __init__(
        self,
        path: str,
        create_event: Event = None,
        modify_event: Event = None,
        moved_event: Event = None,
        deleted_event: Event = None,
    ):
        super().__init__(patterns=[path], ignore_directories=True)

        self._path = path
        self._observer = PollingObserver()

        self.create_event = create_event
        self.modify_event = modify_event
        self.moved_event = moved_event
        self.deleted_event = deleted_event

    def start(self):
        self._observer.schedule(self, Path(self._path).parent, recursive=False)
        self._observer.start()

    def stop(self):
        if self._observer.is_alive():
            self._observer.stop()
            self._observer.join()

    def on_created(self, _):
        """Callback invoked when the file is created

        When a user edits a file with vim, the editor DELETES and then
        re-CREATES the file; this captures that case
        """
        if self.create_event:
            publish(self.create_event)

    def on_modified(self, _):
        """Callback invoked when the file is modified

        This captures all other modification events that occur against the file
        """
        if self.modify_event:
            publish(self.modify_event)

    def on_moved(self, _):
        """Callback invoked when the file is moved

        This captures if the file is moved into or from the directory
        """
        if self.moved_event:
            publish(self.moved_event)

    def on_deleted(self, _):
        """Callback invoked when the file is deleted

        This captures if the file was deleted (be warned that VIM does this by
        default during write actions)
        """
        if self.deleted_event:
            publish(self.deleted_event)
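
A usage sketch (the path and the Event constructor fields are illustrative; Beergarden's real Event model may differ):

import time

monitor = MonitorFile(
    path="/etc/beergarden/children.yaml",           # hypothetical file
    create_event=Event(name="CHILD_FILE_CREATED"),  # assumed Event field
    modify_event=Event(name="CHILD_FILE_MODIFIED"),
)
monitor.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    monitor.stop()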
Пример #44
0
class ScanFolder:
    'Class defining a scan folder'

    def __init__(self):
        self.FourDFlowScanNameC1 = ""
        self.FourDFlowScanNameC2 = ""
        self.DicomNamesC1 = []
        self.DicomNamesC2 = []
        self.Candidate_Num1 = 0
        self.Candidate_Num2 = 0
        self.finalPath1 = ""
        self.finalPath2 = ""
        self.Patients = []

        self.path = r'/mnt/data_imaging/jetsonTest'
        #self.path = r'/mnt/data_imaging/dicom'
        self.documents = {}  # key = document label, value = Document reference

        self.patterns = "*"
        self.ignore_patterns = ""
        self.ignore_directories = False
        self.case_sensitive = True
        self.my_event_handler = watchdog.events.PatternMatchingEventHandler(
            self.patterns, self.ignore_patterns, self.ignore_directories,
            self.case_sensitive)
        #self.my_event_handler.on_any_event = self.on_any_event
        #self.my_event_handler.on_deleted = self.on_deleted
        self.my_event_handler.on_created = self.on_created

        self.observer = Observer()
        self.observer.schedule(self.my_event_handler,
                               self.path,
                               recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
            self.observer.join()

# def on_any_event(self, event):
#     print(event.src_path, event.event_type)
#     print("self even")

#def stop(self):
#    self.observer.stop()
#    self.observer.join()

    def on_deleted(self, event):
        print(f"what the f**k! Someone deleted {event.src_path}!")
        #print(self.FourDFlowScanNameC1)
        #self.FourDFlowScanNameC1 = "dogs"

    def on_created(self, event):
        #print(f"hey, {event.src_path} has been created!")
        fullFilePath = event.src_path  # keep the raw path string (the original wrapped it in a one-element set)
        parts = fullFilePath.split(
            '/')  #change to be / for jetson and \ for my desktop
        #print(parts)
        #print(len(parts))
        ##need to try catch this in case of weird inputs?
        fileName = parts[-1]
        scanName = parts[-2]
        patientName = parts[-3]

        #if "fileName" doesn't have a '.extension' then it is a folder OR use os.path.isfile() / os.path.isdir()
        fileNameParts = fileName.split('.')
        #print(fileNameParts)
        numParts = len(fileNameParts)
        #print(numParts)
        #print("The scan name")
        #print(scanName)

        if (numParts > 1):  # 'numParts >= 1' was always true; files have an extension, bare folders do not
            #pattern matching block, swap from 4D to 4D and Aorta
            pattern = "4D"
            #print("our matching result :" + str( re.search(pattern, scanName)))
            result = re.search(pattern, scanName)

            pattern2 = "[aA][oO][rR][tT][aA]"
            result2 = re.search(pattern2, scanName)

            if (result and result2):
                patientFound = False
                for patient in self.Patients:
                    pName = patient.getPatientName()
                    if (pName == patientName):
                        patientFound = True
                        patient.takeInScan(scanName, fullFilePath)
                        if (patient.pushToJetson()):
                            print("Let's push to Jetson!")
                            flow = patient.getFlow()
                            flow = Path(flow).parent
                            print(flow)
                            mag = patient.getMag()
                            mag = Path(mag).parent

                            cmd = [
                                "python", "./mrStructWrite.py",
                                "--flow=" + str(flow), "--mag=" + str(mag)
                            ]
                            #subprocess.call(["python /home/condauser/Documents/ao_seg/4dflow_dicom_to_array.py --flow=",str(flow)," --mag=",str(mag)])
                            subprocess.call(cmd)
                            #cmdstr = re.sub(r"(",r"\(","python /home/condauser/Documents/ao_seg/4dflow_dicom_to_array.py --flow="+str(flow)+" --mag="+str(mag))
                            #print(cmdstr)
                            #os.system(cmdstr)
                            #delete the patient

                if (not patientFound):
                    #print("Adding patient " + str(patientName))
                    self.Patients.append(Patient(patientName))
                    self.Patients[-1].takeInScan(scanName, fullFilePath)
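
The '/'-splitting above is tied to one separator (see the inline comment); pathlib's PurePath.parts does the same extraction with the platform's native separator. A sketch:

from pathlib import PurePath

def split_dicom_path(src_path):
    """Return (patient, scan, file) from a .../patient/scan/file path.

    PurePath.parts handles the platform's native separator, so the
    same code covers the Jetson ('/') and a Windows desktop ('\\').
    """
    parts = PurePath(src_path).parts
    return parts[-3], parts[-2], parts[-1]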
Пример #45
0
def main():
  print('Starting main...')
  log.info('--------------------------------------------------------------------')
  log.info('HealthPro CSV Ingester service started.')
  log.info('Details about database from config file: Server: {}, DB Table: {}. '\
           ''.format(db_info['host'], healthpro_table_name))
  observer = PollingObserver(timeout=5) # check every 5 seconds
  try:
    if not do_startup_checks():
      raise Exception('One or more startup checks failed')
    class FSEHandler(FileSystemEventHandler):
      # Here we define what we'd like to do when certain filesystem
      # events take place -- e.g., when a new CSV appears in the watched
      # directory.
      # Uncomment on_any_event for extra logging.
      #def on_any_event(self, event):
      #  log.info('FSEHandler->on_any_event: event_type=[' \
      #           '{}], src_path=[{}]'.format(event.event_type, event.src_path))
      def on_deleted(self, event):
        # We log and send an email only once.
        global inbox_gone_flag
        if event.src_path == inbox_dir:
          if not inbox_gone_flag:
            inbox_gone_flag = True
            msg = event.src_path + ' is gone! Please see this SOP: https://nexus.weill.cornell.edu/pages/viewpage.action?pageId=110266114'
            log.error(msg)
            send_notice_email(msg) 
      def on_created(self, event):
        # In on_deleted above, we set the inbox_gone_flag. But if a file appears
        # we know the inbox is back and all is well; so unset it. 
        global inbox_gone_flag
        if inbox_gone_flag:
          inbox_gone_flag = False
        log.info('FSEHandler->on_created: a new file has appeared: '
                 + event.src_path)
        process_file(event.src_path)
    observe_subdirs_flag = False
    observer.schedule(FSEHandler(), inbox_dir, observe_subdirs_flag)
    observer.start()
    log.info('Waiting for activity...')
    print('Service started.')
    if cfg.get('start-telemetry-ping-listener') == 'yes':
      start_telemetry_ping_listener()
    try:
      while True:
        observer.join(10)
        if observer.is_alive():
          continue
        else:
          raise Exception('Observer thread has stopped.')
    except KeyboardInterrupt:
      print('\nKeyboard interrupt caught. Quitting.')
      observer.stop()
      sys.exit(0)
    observer.join()
  except Exception as ex:
    print('\nError caught. Quitting. Check log.')
    log.error(str(ex))
    send_error_email('An error occurred in main(). Please check.')
    observer.stop()
    sys.exit(1)
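
The wait loop above doubles as a liveness check: observer.join(10) returns after at most ten seconds whether or not the thread exited, and is_alive() then distinguishes a healthy observer from a crashed one. The same pattern in isolation:

def wait_on_observer(observer, poll_seconds=10):
    """Block until the observer thread dies, then raise.

    join() with a timeout always returns, so is_alive() is what
    tells 'still watching' apart from 'observer thread crashed'.
    """
    while True:
        observer.join(poll_seconds)
        if not observer.is_alive():
            raise RuntimeError('Observer thread has stopped.')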
Пример #46
0
class MyGui:
    def __init__(self):
        def get_pref_DIRECTORY_TO_WATCH():
            folder_selected = filedialog.askdirectory()
            DIR_W_Path.set(folder_selected)

        def get_pref_DIRECTORY_TO_MOVE():
            folder_selected = filedialog.askdirectory()
            DIR_M_Path.set(folder_selected)

        def get_pref_DIRECTORY_UNMATCHED():
            folder_selected = filedialog.askdirectory()
            DIR_U_Path.set(folder_selected)

        def pref_set():
            global DIRECTORY_UNMATCHED
            DIRECTORY_UNMATCHED = DIR_U_Path.get().replace("/", "\\")
            global DIRECTORY_TO_MOVE
            DIRECTORY_TO_MOVE = DIR_M_Path.get().replace("/", "\\")
            global DIRECTORY_TO_WATCH
            DIRECTORY_TO_WATCH = DIR_W_Path.get().replace("/", "\\")
            global pref_ID
            pref_ID = UI_pref_ID.get()
            global pref_DryRun
            pref_DryRun = UI_pref_DryRun.get()
            global pref_StripSymbol
            pref_StripSymbol = UI_pref_StripSymbol.get()
            if ((DIRECTORY_TO_WATCH != "") and (DIRECTORY_TO_MOVE != "")
                    and (DIRECTORY_UNMATCHED != "")
                    and (os.path.exists(DIRECTORY_TO_WATCH))
                    and (os.path.exists(DIRECTORY_TO_MOVE))
                    and (os.path.exists(DIRECTORY_UNMATCHED))):
                self.but.config(state="normal", text="Start Watchdog")
            else:
                self.but.config(state="disabled", text="Set Preferences First")

        root.title('Porndog - Adult Scene Renamer')
        root.iconbitmap('.\\icon.ico')
        root.geometry("500x500")

        Watchdog_Preferences_Label = Label(
            root, text="Watchdog Preferences - Directories")
        Watchdog_Preferences_Label.place(x=192, y=5)

        DIR_W_Path = tk.StringVar()
        DIR_W_Label = Label(root, text="Active Directory - ")
        DIR_W_Label.place(x=80, y=28)
        self.DIR_W_TextField = Entry(root, textvariable=DIR_W_Path, width=35)
        self.DIR_W_TextField.place(x=180, y=30)
        self.DIR_W_Button = ttk.Button(root,
                                       text="Browse Folder",
                                       command=get_pref_DIRECTORY_TO_WATCH)
        self.DIR_W_Button.place(x=400, y=28)

        DIR_M_Path = tk.StringVar()
        DIR_M_Label = Label(root, text="Move Directory - ")
        DIR_M_Label.place(x=82, y=58)
        self.DIR_M_TextField = Entry(root, textvariable=DIR_M_Path, width=35)
        self.DIR_M_TextField.place(x=180, y=60)
        self.DIR_M_Button = ttk.Button(root,
                                       text="Browse Folder",
                                       command=get_pref_DIRECTORY_TO_MOVE)
        self.DIR_M_Button.place(x=400, y=58)

        DIR_U_Path = tk.StringVar()
        DIR_U_Label = Label(root, text="Unmatched Directory - ")
        DIR_U_Label.place(x=50, y=88)
        self.DIR_U_TextField = Entry(root, textvariable=DIR_U_Path, width=35)
        self.DIR_U_TextField.place(x=180, y=90)
        self.DIR_U_Button = ttk.Button(root,
                                       text="Browse Folder",
                                       command=get_pref_DIRECTORY_UNMATCHED)
        self.DIR_U_Button.place(x=400, y=88)

        UI_FilenamePref_Label = Label(root, text="Filename Preferences")
        UI_FilenamePref_Label.place(x=225, y=120)

        UI_pref_ID = BooleanVar()
        self.UI_Checkbutton_ID = Checkbutton(
            root, text="Prefer Scene ID over Scene Title", variable=UI_pref_ID)
        self.UI_Checkbutton_ID.place(x=185, y=150)

        UI_pref_StripSymbol = tk.StringVar()
        UI_pref_StripSymbol_Label = Label(root, text="Strip Symbol - ")
        UI_pref_StripSymbol_Label.place(x=95, y=180)
        self.UI_pref_StripSymbol_TextField = Entry(
            root, textvariable=UI_pref_StripSymbol, width=35)
        self.UI_pref_StripSymbol_TextField.place(x=180, y=180)

        UI_OtherPref_Label = Label(root, text="Other Preferences")
        UI_OtherPref_Label.place(x=228, y=210)

        UI_pref_DryRun = BooleanVar()
        self.UI_Checkbutton_DryRun = Checkbutton(root,
                                                 text="Dry Run",
                                                 variable=UI_pref_DryRun)
        self.UI_Checkbutton_DryRun.place(x=240, y=240)

        self.SET_BUTTON = tk.Button(root,
                                    text="Set Preferences",
                                    command=pref_set)
        self.SET_BUTTON.place(x=230, y=270)
        self.but = tk.Button(root,
                             text="Start Watchdog",
                             command=self.start_observer)
        self.but.place(x=230, y=300)
        self.but.config(state="disabled", text="Set Preferences First")

        self.but2 = tk.Button(root,
                              text="Stop Watchdog",
                              command=self.stop_observer)
        self.but2.place(x=230, y=330)
        self.but2.config(state="disabled", text="Stop Watchdog")

    def start_observer(self):
        loggerwatchdog.info(
            "******************** Pre-initialization ********************")
        loggerwatchdog.info("Watchdog will be active to this directory: " +
                            DIRECTORY_TO_WATCH)
        loggerwatchdog.info(
            "Watchdog will move the files to this directory: " +
            DIRECTORY_TO_MOVE)
        loggerwatchdog.info(
            "Watchdog will move unmatched files to this directory: " +
            DIRECTORY_UNMATCHED)
        loggerwatchdog.info("Preferred ID is set to: " + str(pref_ID))
        loggerwatchdog.info("Dry Run is set to: " + str(pref_DryRun))
        if (pref_StripSymbol != ""):
            loggerwatchdog.info("Your strip symbol is: " + (pref_StripSymbol))
        else:
            loggerwatchdog.info("You haven't set a Strip Symbol.")
        loggerwatchdog.info(
            "******************** Pre-initialization ********************")
        self.DIR_W_TextField.config(state="disabled")
        self.DIR_W_Button.config(state="disabled")

        self.DIR_M_TextField.config(state="disabled")
        self.DIR_M_Button.config(state="disabled")

        self.DIR_U_TextField.config(state="disabled")
        self.DIR_U_Button.config(state="disabled")

        self.UI_Checkbutton_ID.config(state="disabled")
        self.UI_pref_StripSymbol_TextField.config(state="disabled")
        self.UI_Checkbutton_DryRun.config(state="disabled")

        self.SET_BUTTON.config(state="disabled")
        self.but.config(state="disabled", text="Watchdog Initiated")
        self.but2.config(state="normal", text="Stop Watchdog")

        self.observer = Observer()
        self.observer.schedule(event_handler,
                               DIRECTORY_TO_WATCH,
                               recursive=True)
        self.observer.start()
        loggerwatchdog.info(
            "******************** Watchdog initiated ********************")

    def stop_observer(self):
        self.observer.stop()
        self.observer.join()
        self.observer = None
        self.DIR_W_TextField.config(state="normal")
        self.DIR_W_Button.config(state="normal")

        self.DIR_M_TextField.config(state="normal")
        self.DIR_M_Button.config(state="normal")

        self.DIR_U_TextField.config(state="normal")
        self.DIR_U_Button.config(state="normal")

        self.UI_Checkbutton_ID.config(state="normal")
        self.UI_pref_StripSymbol_TextField.config(state="normal")
        self.UI_Checkbutton_DryRun.config(state="normal")

        self.SET_BUTTON.config(state="normal")
        self.but.config(state="disabled", text="Set Preferences First")
        self.but2.config(state="disabled", text="Stop Watchdog")
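
One caveat with wiring watchdog into Tk: handler callbacks run on the observer thread, while Tkinter widgets should only be touched from the main thread. A minimal sketch of handing events back through a queue and root.after (root and status_label stand in for this GUI's actual widgets):

import queue

from watchdog.events import FileSystemEventHandler

gui_events = queue.Queue()

class QueuedHandler(FileSystemEventHandler):
    def on_created(self, event):
        # Called on the observer thread: only enqueue, never touch widgets
        gui_events.put(event.src_path)

def drain_gui_events():
    # Runs on the Tk main thread, rescheduled via root.after
    try:
        while True:
            src_path = gui_events.get_nowait()
            status_label.config(text="New file: " + src_path)  # hypothetical widget
    except queue.Empty:
        pass
    root.after(200, drain_gui_events)  # root is the Tk() instance

root.after(200, drain_gui_events)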
Пример #47
0
                        [i for i in name.split() if not i.isdigit()])
                    item["mac"] = newName.replace("/mnt/jarvis", "/Volumes")
                    item["category"] = category
                    item["path"] = newName.replace("/mnt/jarvis/Library",
                                                   "Y:").replace("/", "\\")

        with open('/home/dyang/PWP-Lib-Search/public/plants.json',
                  'w') as publicJson:
            json.dump(plantData, publicJson)
        with open('/home/dyang/PWP-Lib-Search/build/plants.json',
                  'w') as buildJson:
            json.dump(plantData, buildJson)
        logging.info("rename file success: " + newName)

    event_handler.on_created = on_created
    event_handler.on_deleted = on_deleted
    event_handler.on_moved = on_moved

    nWatch = PollingObserver(timeout=60)
    targetPath = str(path)
    nWatch.schedule(event_handler, targetPath, recursive=False)

    nWatch.start()

    try:
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        nWatch.stop()
    nWatch.join()
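
The timeout passed to PollingObserver above is the polling interval: each cycle re-walks the watched tree and diffs directory snapshots, so on a network mount a long interval trades detection latency for less I/O. For contrast:

from watchdog.observers.polling import PollingObserver

# Each polling cycle re-lists the tree and diffs snapshots, so the
# interval is an I/O-vs-latency knob rather than a mere delay.
slow_watch = PollingObserver(timeout=60)  # network share: poll sparingly
fast_watch = PollingObserver(timeout=1)   # local directory: near real time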
Пример #48
0
def main():
    logging.basicConfig(level=LOGGING_LEVEL,
                        format=LOGGING_FORMAT,
                        datefmt=LOGGING_FORMAT_DATE)

    # readability
    global path, tag, channel, stream_id, filenames_previous, connection, args
    args = parse_args()
    path = args.path[0]
    tag = args.tag
    rabbitmq_host = args.host
    stream_id = create_stream_id(tag)
    filenames_previous = set()

    # if multiple file endings
    include = args.include.split(',')

    # add star before the file ending if missing
    include = [
        ext.lower() if ext.startswith('*') else "*" + ext.lower()
        for ext in include
    ]

    logging.info(
        f'starting with args {[path, include, tag, rabbitmq_host]} (excl. creds)'
    )

    # create the event handler
    my_event_handler = PatternMatchingEventHandler(include,
                                                   ignore_patterns=None,
                                                   ignore_directories=True,
                                                   case_sensitive=False)
    my_event_handler.on_created = on_created
    # my_observer = Observer() # only on local file systems (inotify)
    my_observer = PollingObserver(timeout=20)
    my_observer.schedule(my_event_handler, path, recursive=False)

    # TODO: make queue persistent between rabbitMQ restarts?
    # connect to the RabbitMQ server
    logging.info(f'connecting to RabbitMQ at {rabbitmq_host}')
    connection = pika.BlockingConnection(
        pika.ConnectionParameters(args.host,
                                  credentials=PlainCredentials(
                                      args.username, args.password),
                                  heartbeat=0))
    channel = connection.channel()
    channel.queue_declare(queue='files')

    logging.info('connected to AMPQ.')

    # trigger event for all existing files?
    existing_file_event = DummyEvent()
    for existing_file in os.listdir(path):  # list dir
        if os.path.isfile(os.path.join(path, existing_file)):  # only files
            if f'*{os.path.splitext(existing_file)[-1].lower()}' in include:  # if extension is matching
                existing_file_event.src_path = os.path.join(
                    path, existing_file
                )  # add property to dummy object to have the same structure as a watchdog event object
                on_created(
                    existing_file_event)  # manually trigger creation function

    # start monitoring
    my_observer.start()
    logging.info('started monitoring directory.')
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        my_observer.stop()
        my_observer.join()
        connection.close()
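
`DummyEvent` is not defined in this snippet; any object exposing src_path works, and watchdog's own FileCreatedEvent can stand in for it:

from watchdog.events import FileCreatedEvent

class DummyEvent:
    """Bare stand-in carrying the one attribute on_created reads."""
    src_path = None

# or reuse watchdog's event type for pre-existing files:
existing_file_event = FileCreatedEvent('/data/inbox/example.csv')  # hypothetical path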
Пример #49
0
class LocalComputeLogSubscriptionManager:
    def __init__(self, manager):
        self._manager = manager
        self._subscriptions = defaultdict(list)
        self._watchers = {}
        self._observer = None

    def _watch_key(self, run_id, key):
        return "{}:{}".format(run_id, key)

    def add_subscription(self, subscription):
        check.inst_param(subscription, "subscription", ComputeLogSubscription)
        if self._manager.is_watch_completed(subscription.run_id,
                                            subscription.key):
            subscription.fetch()
            subscription.complete()
        else:
            watch_key = self._watch_key(subscription.run_id, subscription.key)
            self._subscriptions[watch_key].append(subscription)
            self.watch(subscription.run_id, subscription.key)

    def remove_all_subscriptions(self, run_id, step_key):
        watch_key = self._watch_key(run_id, step_key)
        for subscription in self._subscriptions.pop(watch_key, []):
            subscription.complete()

    def watch(self, run_id, step_key):
        watch_key = self._watch_key(run_id, step_key)
        if watch_key in self._watchers:
            return

        update_paths = [
            self._manager.get_local_path(run_id, step_key,
                                         ComputeIOType.STDOUT),
            self._manager.get_local_path(run_id, step_key,
                                         ComputeIOType.STDERR),
        ]
        complete_paths = [
            self._manager.complete_artifact_path(run_id, step_key)
        ]
        directory = os.path.dirname(
            self._manager.get_local_path(run_id, step_key,
                                         ComputeIOType.STDERR))

        if not self._observer:
            self._observer = PollingObserver(self._manager.polling_timeout)
            self._observer.start()

        ensure_dir(directory)

        self._watchers[watch_key] = self._observer.schedule(
            LocalComputeLogFilesystemEventHandler(self, run_id, step_key,
                                                  update_paths,
                                                  complete_paths),
            str(directory),
        )

    def notify_subscriptions(self, run_id, step_key):
        watch_key = self._watch_key(run_id, step_key)
        for subscription in self._subscriptions[watch_key]:
            subscription.fetch()

    def unwatch(self, run_id, step_key, handler):
        watch_key = self._watch_key(run_id, step_key)
        if watch_key in self._watchers:
            self._observer.remove_handler_for_watch(
                handler, self._watchers[watch_key])
            del self._watchers[watch_key]

    def dispose(self):
        if self._observer:
            self._observer.stop()
            self._observer.join(15)
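
The targeted unwatch above works because observer.schedule() returns an ObservedWatch handle; remove_handler_for_watch() detaches one handler while the observer keeps servicing everything else. In isolation:

from watchdog.events import FileSystemEventHandler
from watchdog.observers.polling import PollingObserver

observer = PollingObserver()
handler = FileSystemEventHandler()
watch = observer.schedule(handler, "/tmp")  # returns an ObservedWatch
observer.start()

# Detach just this handler; other watches keep running
observer.remove_handler_for_watch(handler, watch)
# Or drop the watch (and all of its handlers) entirely
observer.unschedule(watch)

observer.stop()
observer.join()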
Пример #50
0
	def run(self):
		if not self._allowRoot:
			self._check_for_root()

		global app
		global babel

		global printer
		global printerProfileManager
		global fileManager
		global slicingManager
		global analysisQueue
		global userManager
		global eventManager
		global loginManager
		global pluginManager
		global appSessionManager
		global pluginLifecycleManager
		global preemptiveCache
		global debug

		from tornado.ioloop import IOLoop
		from tornado.web import Application, RequestHandler

		import sys

		debug = self._debug

		# first initialize the settings singleton and make sure it uses given configfile and basedir if available
		s = settings(init=True, basedir=self._basedir, configfile=self._configfile)

		# then monkey patch a bunch of stuff
		util.tornado.fix_ioloop_scheduling()
		util.flask.enable_additional_translations(additional_folders=[s.getBaseFolder("translations")])

		# setup app
		self._setup_app(app)

		# setup i18n
		self._setup_i18n(app)

		# then initialize logging
		self._setup_logging(self._debug, self._logConf)
		self._logger = logging.getLogger(__name__)
		def exception_logger(exc_type, exc_value, exc_tb):
			self._logger.error("Uncaught exception", exc_info=(exc_type, exc_value, exc_tb))
		sys.excepthook = exception_logger
		self._logger.info("Starting OctoPrint %s" % DISPLAY_VERSION)

		# start the intermediary server
		self._start_intermediary_server(s)

		# then initialize the plugin manager
		pluginManager = octoprint.plugin.plugin_manager(init=True)

		printerProfileManager = PrinterProfileManager()
		eventManager = events.eventManager()
		analysisQueue = octoprint.filemanager.analysis.AnalysisQueue()
		slicingManager = octoprint.slicing.SlicingManager(s.getBaseFolder("slicingProfiles"), printerProfileManager)
		storage_managers = dict()
		storage_managers[octoprint.filemanager.FileDestinations.LOCAL] = octoprint.filemanager.storage.LocalFileStorage(s.getBaseFolder("uploads"))
		fileManager = octoprint.filemanager.FileManager(analysisQueue, slicingManager, printerProfileManager, initial_storage_managers=storage_managers)
		printer = Printer(fileManager, analysisQueue, printerProfileManager)
		appSessionManager = util.flask.AppSessionManager()
		pluginLifecycleManager = LifecycleManager(pluginManager)
		preemptiveCache = PreemptiveCache(os.path.join(s.getBaseFolder("data"), "preemptive_cache_config.yaml"))

		# ... and initialize all plugins

		def octoprint_plugin_inject_factory(name, implementation):
			"""Factory for injections for all OctoPrintPlugins"""

			if not isinstance(implementation, octoprint.plugin.OctoPrintPlugin):
				# we only care about OctoPrintPlugins
				return None

			return dict(
				plugin_manager=pluginManager,
				printer_profile_manager=printerProfileManager,
				event_bus=eventManager,
				analysis_queue=analysisQueue,
				slicing_manager=slicingManager,
				file_manager=fileManager,
				printer=printer,
				app_session_manager=appSessionManager,
				plugin_lifecycle_manager=pluginLifecycleManager,
				data_folder=os.path.join(settings().getBaseFolder("data"), name),
				preemptive_cache=preemptiveCache
			)

		def settings_plugin_inject_factory(name, implementation):
			"""Factory for additional injections depending on plugin type"""

			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				# we only care about SettingsPlugins
				return None

			# SettingsPlugin instances get a PluginSettings instance injected
			default_settings = implementation.get_settings_defaults()
			get_preprocessors, set_preprocessors = implementation.get_settings_preprocessors()
			plugin_settings = octoprint.plugin.plugin_settings(name,
			                                                   defaults=default_settings,
			                                                   get_preprocessors=get_preprocessors,
			                                                   set_preprocessors=set_preprocessors)
			return dict(settings=plugin_settings)

		def settings_plugin_config_migration_and_cleanup(name, implementation):
			"""Take care of migrating and cleaning up any old settings"""

			if not isinstance(implementation, octoprint.plugin.SettingsPlugin):
				return

			settings_version = implementation.get_settings_version()
			settings_migrator = implementation.on_settings_migrate

			if settings_version is not None and settings_migrator is not None:
				stored_version = implementation._settings.get_int([octoprint.plugin.SettingsPlugin.config_version_key])
				if stored_version is None or stored_version < settings_version:
					settings_migrator(settings_version, stored_version)
					implementation._settings.set_int([octoprint.plugin.SettingsPlugin.config_version_key], settings_version)

			implementation.on_settings_cleanup()
			implementation._settings.save()

			implementation.on_settings_initialized()

		pluginManager.implementation_inject_factories=[octoprint_plugin_inject_factory, settings_plugin_inject_factory]
		pluginManager.initialize_implementations()

		settingsPlugins = pluginManager.get_implementations(octoprint.plugin.SettingsPlugin)
		for implementation in settingsPlugins:
			try:
				settings_plugin_config_migration_and_cleanup(implementation._identifier, implementation)
			except:
				self._logger.exception("Error while trying to migrate settings for plugin {}, ignoring it".format(implementation._identifier))

		pluginManager.implementation_post_inits=[settings_plugin_config_migration_and_cleanup]

		pluginManager.log_all_plugins()

		# initialize file manager and register it for changes in the registered plugins
		fileManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: fileManager.reload_plugins())

		# initialize slicing manager and register it for changes in the registered plugins
		slicingManager.initialize()
		pluginLifecycleManager.add_callback(["enabled", "disabled"], lambda name, plugin: slicingManager.reload_slicers())

		# setup jinja2
		self._setup_jinja2()

		# make sure plugin lifecycle events relevant for jinja2 are taken care of
		def template_enabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._register_additional_template_plugin(plugin.implementation)
		def template_disabled(name, plugin):
			if plugin.implementation is None or not isinstance(plugin.implementation, octoprint.plugin.TemplatePlugin):
				return
			self._unregister_additional_template_plugin(plugin.implementation)
		pluginLifecycleManager.add_callback("enabled", template_enabled)
		pluginLifecycleManager.add_callback("disabled", template_disabled)

		# setup assets
		self._setup_assets()

		# configure timelapse
		octoprint.timelapse.configure_timelapse()

		# setup command triggers
		events.CommandTrigger(printer)
		if self._debug:
			events.DebugEventListener()

		# setup access control
		userManagerName = s.get(["accessControl", "userManager"])
		try:
			clazz = octoprint.util.get_class(userManagerName)
			userManager = clazz()
		except AttributeError as e:
			self._logger.exception("Could not instantiate user manager {}, falling back to FilebasedUserManager!".format(userManagerName))
			userManager = octoprint.users.FilebasedUserManager()
		finally:
			userManager.enabled = s.getBoolean(["accessControl", "enabled"])

		loginManager = LoginManager()
		loginManager.session_protection = "strong"
		loginManager.user_callback = load_user
		if not userManager.enabled:
			loginManager.anonymous_user = users.DummyUser
			principals.identity_loaders.appendleft(users.dummy_identity_loader)
		loginManager.init_app(app)

		# register API blueprint
		self._setup_blueprints()

		## Tornado initialization starts here

		if self._host is None:
			self._host = s.get(["server", "host"])
		if self._port is None:
			self._port = s.getInt(["server", "port"])

		ioloop = IOLoop()
		ioloop.install()

		self._router = SockJSRouter(self._create_socket_connection, "/sockjs")

		upload_suffixes = dict(name=s.get(["server", "uploads", "nameSuffix"]), path=s.get(["server", "uploads", "pathSuffix"]))

		def mime_type_guesser(path):
			from octoprint.filemanager import get_mime_type
			return get_mime_type(path)

		download_handler_kwargs = dict(
			as_attachment=True,
			allow_client_caching=False
		)
		additional_mime_types=dict(mime_type_guesser=mime_type_guesser)
		admin_validator = dict(access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.admin_validator))
		no_hidden_files_validator = dict(path_validation=util.tornado.path_validation_factory(lambda path: not octoprint.util.is_hidden_path(path), status_code=404))

		def joined_dict(*dicts):
			if not len(dicts):
				return dict()

			joined = dict()
			for d in dicts:
				joined.update(d)
			return joined

		server_routes = self._router.urls + [
			# various downloads
			(r"/downloads/timelapse/([^/]*\.mp[g4])", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("timelapse")), download_handler_kwargs, no_hidden_files_validator)),
			(r"/downloads/files/local/(.*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("uploads")), download_handler_kwargs, no_hidden_files_validator, additional_mime_types)),
			(r"/downloads/logs/([^/]*)", util.tornado.LargeResponseHandler, joined_dict(dict(path=s.getBaseFolder("logs")), download_handler_kwargs, admin_validator)),
			# camera snapshot
			(r"/downloads/camera/current", util.tornado.UrlProxyHandler, dict(url=s.get(["webcam", "snapshot"]), as_attachment=True, access_validation=util.tornado.access_validation_factory(app, loginManager, util.flask.user_validator))),
			# generated webassets
			(r"/static/webassets/(.*)", util.tornado.LargeResponseHandler, dict(path=os.path.join(s.getBaseFolder("generated"), "webassets"))),
			# online indicators - text file with "online" as content and a transparent gif
			(r"/online.txt", util.tornado.StaticDataHandler, dict(data="online\n")),
			(r"/online.gif", util.tornado.StaticDataHandler, dict(data=bytes(base64.b64decode("R0lGODlhAQABAIAAAAAAAP///yH5BAEAAAAALAAAAAABAAEAAAIBRAA7")), content_type="image/gif"))
		]

		# fetch additional routes from plugins
		for name, hook in pluginManager.get_hooks("octoprint.server.http.routes").items():
			try:
				result = hook(list(server_routes))
			except:
				self._logger.exception("There was an error while retrieving additional server routes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not isinstance(entry[0], basestring):
							continue
						if not isinstance(entry[2], dict):
							continue

						route, handler, kwargs = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding additional route {route} handled by handler {handler} and with additional arguments {kwargs!r}".format(**locals()))
						server_routes.append((route, handler, kwargs))

		server_routes.append((r".*", util.tornado.UploadStorageFallbackHandler, dict(fallback=util.tornado.WsgiInputContainer(app.wsgi_app), file_prefix="octoprint-file-upload-", file_suffix=".tmp", suffixes=upload_suffixes)))

		self._tornado_app = Application(server_routes)
		max_body_sizes = [
			("POST", r"/api/files/([^/]*)", s.getInt(["server", "uploads", "maxSize"])),
			("POST", r"/api/languages", 5 * 1024 * 1024)
		]

		# allow plugins to extend allowed maximum body sizes
		for name, hook in pluginManager.get_hooks("octoprint.server.http.bodysize").items():
			try:
				result = hook(list(max_body_sizes))
			except:
				self._logger.exception("There was an error while retrieving additional upload sizes from plugin hook {name}".format(**locals()))
			else:
				if isinstance(result, (list, tuple)):
					for entry in result:
						if not isinstance(entry, tuple) or not len(entry) == 3:
							continue
						if not entry[0] in util.tornado.UploadStorageFallbackHandler.BODY_METHODS:
							continue
						if not isinstance(entry[2], int):
							continue

						method, route, size = entry
						route = r"/plugin/{name}/{route}".format(name=name, route=route if not route.startswith("/") else route[1:])

						self._logger.debug("Adding maximum body size of {size}B for {method} requests to {route})".format(**locals()))
						max_body_sizes.append((method, route, size))

		self._stop_intermediary_server()

		# initialize and bind the server
		self._server = util.tornado.CustomHTTPServer(self._tornado_app, max_body_sizes=max_body_sizes, default_max_body_size=s.getInt(["server", "maxSize"]))
		self._server.listen(self._port, address=self._host)

		eventManager.fire(events.Events.STARTUP)

		# auto connect
		if s.getBoolean(["serial", "autoconnect"]):
			(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
			printer_profile = printerProfileManager.get_default()
			connectionOptions = get_connection_options()
			if port in connectionOptions["ports"]:
				printer.connect(port=port, baudrate=baudrate, profile=printer_profile["id"] if "id" in printer_profile else "_default")

		# start up watchdogs
		if s.getBoolean(["feature", "pollWatched"]):
			# use the less performant polling observer if explicitly configured
			observer = PollingObserver()
		else:
			# use os default
			observer = Observer()
		observer.schedule(util.watchdog.GcodeWatchdogHandler(fileManager, printer), s.getBaseFolder("watched"))
		observer.start()

		# run our startup plugins
		octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
		                             "on_startup",
		                             args=(self._host, self._port))

		def call_on_startup(name, plugin):
			implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
			if implementation is None:
				return
			implementation.on_startup(self._host, self._port)
		pluginLifecycleManager.add_callback("enabled", call_on_startup)

		# prepare our after startup function
		def on_after_startup():
			self._logger.info("Listening on http://%s:%d" % (self._host, self._port))

			# now this is somewhat ugly, but the issue is the following: startup plugins might want to do things for
			# which they need the server to be already alive (e.g. for being able to resolve urls, such as favicons
			# or service xmls or the like). While they are working, though, the ioloop would block. Therefore we'll
			# create a single use thread in which to perform our after-startup-tasks, start that and hand back
			# control to the ioloop
			def work():
				octoprint.plugin.call_plugin(octoprint.plugin.StartupPlugin,
				                             "on_after_startup")

				def call_on_after_startup(name, plugin):
					implementation = plugin.get_implementation(octoprint.plugin.StartupPlugin)
					if implementation is None:
						return
					implementation.on_after_startup()
				pluginLifecycleManager.add_callback("enabled", call_on_after_startup)

				# when we are through with that we also run our preemptive cache
				if settings().getBoolean(["devel", "cache", "preemptive"]):
					self._execute_preemptive_flask_caching(preemptiveCache)

			import threading
			threading.Thread(target=work).start()
		ioloop.add_callback(on_after_startup)

		# prepare our shutdown function
		def on_shutdown():
			# will be called on clean system exit and shutdown the watchdog observer and call the on_shutdown methods
			# on all registered ShutdownPlugins
			self._logger.info("Shutting down...")
			observer.stop()
			observer.join()
			octoprint.plugin.call_plugin(octoprint.plugin.ShutdownPlugin,
			                             "on_shutdown")

			if self._octoprint_daemon is not None:
				self._logger.info("Cleaning up daemon pidfile")
				self._octoprint_daemon.terminated()

			self._logger.info("Goodbye!")
		atexit.register(on_shutdown)

		def sigterm_handler(*args, **kwargs):
			# will stop tornado on SIGTERM, making the program exit cleanly
			def shutdown_tornado():
				ioloop.stop()
			ioloop.add_callback_from_signal(shutdown_tornado)
		signal.signal(signal.SIGTERM, sigterm_handler)

		try:
			# this is the main loop - as long as tornado is running, OctoPrint is running
			ioloop.start()
		except (KeyboardInterrupt, SystemExit):
			pass
		except:
			self._logger.fatal("Now that is embarrassing... Something really really went wrong here. Please report this including the stacktrace below in OctoPrint's bugtracker. Thanks!")
			self._logger.exception("Stacktrace follows:")
Пример #51
0
def run(parser, args):
    args.tomlfile = args.toml
    args.toml = toml.load(args.toml)
    print(args)

    # TODO: Move logging config to separate configuration file
    # set up logging to file
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(levelname)s::%(asctime)s::%(name)s::%(message)s',
        filename=args.log_file,
        filemode='w')

    # define a Handler that writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)

    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-15s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)

    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Start by logging sys.argv and the parameters used
    logger = logging.getLogger("Manager")
    logger.info(" ".join(sys.argv))
    print_args(args, logger=logger)

    logger.info("Initialising iterAlign.")

    logger.info("Setting up FastQ monitoring.")

    #### Check if a run is active - if not, wait.

    args.simulation = True
    connection = None
    if args.watch is None:
        args.simulation = False
        logger.info("Creating rpc connection for device {}.".format(
            args.device))
        try:
            connection, messageport = get_rpc_connection(args.device)
        except ValueError as e:
            print(e)
            sys.exit(1)

        send_message(connection, "Iteralign Connected to MinKNOW",
                     Severity.WARN)

        logger.info("Loaded RPC")
        while parse_message(connection.acquisition.current_status()
                            )['status'] != "PROCESSING":
            time.sleep(1)
        #### Check if we know where data is being written to , if not... wait
        args.watch = parse_message(connection.acquisition.get_acquisition_info(
        ))['config_summary']['reads_directory']

    else:
        messageport = ""

    event_handler = FastqHandler(args, logging, messageport, connection)
    # This block handles the fastq
    observer = Observer()
    observer.schedule(event_handler, path=args.watch, recursive=True)
    observer.daemon = True

    try:

        observer.start()
        logger.info("FastQ Monitoring Running.")
        while 1:
            time.sleep(1)

    except KeyboardInterrupt:

        logger.info("Exiting - Will take a few seconds to clean up!")

        observer.stop()
        observer.join()

        os._exit(0)
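
`FastqHandler` is defined elsewhere in this project; a minimal sketch of a handler in the same spirit, with the constructor signature taken from the call above and the processing step left as a placeholder:

from watchdog.events import FileSystemEventHandler

class FastqHandler(FileSystemEventHandler):
    """React to fastq files as the sequencer writes them (a sketch)."""
    def __init__(self, args, logging_module, messageport, connection):
        self.args = args
        self.logger = logging_module.getLogger("FastqHandler")
        self.messageport = messageport
        self.connection = connection

    def on_created(self, event):
        if event.is_directory:
            return
        if event.src_path.endswith(('.fastq', '.fastq.gz', '.fq')):
            self.logger.info("New fastq: %s", event.src_path)
            # placeholder: hand the file to the alignment step here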
Пример #52
0
class AiidaLabAppWatch:
    """Watch to monitor the app installation status.

    Create a watch instance to monitor the installation status of an
    AiiDAlab app. This is achieved by monitoring the app repository
    for existence and changes.

    Arguments:
        app (AiidaLabApp):
            The AiidaLab app to monitor.
    """
    class AppPathFileSystemEventHandler(FileSystemEventHandler):
        """Internal event handeler for app path file system events."""
        def __init__(self, app):
            self.app = app

        def on_any_event(self, event):
            """Refresh app for any event."""
            self.app.refresh_async()

    def __init__(self, app):
        self.app = app

        self._started = False
        self._monitor_thread = None
        self._observer = None

    def __repr__(self):
        return f"<{type(self).__name__}(app={self.app!r})>"

    def _start_observer(self):
        """Start the directory observer thread.

        The ._observer thread is controlled by the ._monitor_thread.
        """
        assert os.path.isdir(self.app.path)
        assert self._observer is None or not self._observer.is_alive()

        event_handler = self.AppPathFileSystemEventHandler(self.app)

        self._observer = Observer()
        self._observer.schedule(event_handler, self.app.path, recursive=True)
        try:
            self._observer.start()
        except OSError as error:
            if error.errno in (errno.ENOSPC,
                               errno.EMFILE) and 'inotify' in str(error):
                # We reached the inotify watch limit; fall back to a polling-based observer.
                self._observer = PollingObserver()
                self._observer.schedule(event_handler,
                                        self.app.path,
                                        recursive=True)
                self._observer.start()
            else:  # reraise unrelated error
                raise

    def _stop_observer(self):
        """Stop the directory observer thread.

        The ._observer thread is controlled by the ._monitor_thread.
        """
        assert self._observer is not None
        self._observer.stop()

    def start(self):
        """Watch the app repository for file system events.

        The app state is refreshed automatically for all events.
        """
        if self._started:
            raise RuntimeError(
                f"Instances of {type(self).__name__} can only be started once."
            )

        if self._monitor_thread is None:

            def check_path_exists_changed():
                is_dir = os.path.isdir(self.app.path)
                while not self._monitor_thread.stop_flag:
                    switched = is_dir != os.path.isdir(self.app.path)
                    if switched:
                        is_dir = not is_dir
                        self.app.refresh()

                    if is_dir:
                        if self._observer is None or not self._observer.is_alive():
                            self._start_observer()
                    elif self._observer and self._observer.is_alive():
                        self._stop_observer()

                    sleep(1)

                # stop-flag set, stopping observer...
                if self._observer:
                    self._observer.stop()

            self._monitor_thread = Thread(target=check_path_exists_changed)
            self._monitor_thread.stop_flag = False
            self._monitor_thread.start()

        self._started = True

    def stop(self):
        """Stop watching the app repository for file system events."""
        if self._monitor_thread is not None:
            self._monitor_thread.stop_flag = True

    def is_alive(self):
        """Return True if this watch is still alive."""
        return self._monitor_thread and self._monitor_thread.is_alive()

    def join(self, timeout=None):
        """Join the watch after stopping.

        This function will time out if a timeout argument is provided. Use the
        is_alive() function to determine whether the watch was stopped within
        the given timeout.
        """
        if self._monitor_thread is not None:
            self._monitor_thread.join(timeout=timeout)
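A short usage sketch for the watch above (assuming `app` is an `AiidaLabApp` instance):

watch = AiidaLabAppWatch(app)
watch.start()            # app refreshes now fire on file system events

# ... later, during teardown:
watch.stop()
watch.join(timeout=10)
if watch.is_alive():
    print("watch did not shut down within the timeout")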
Пример #53
0
def main(argv):
    """
    Build the docs and serve them with an HTTP server.
    """
    parser = argparse.ArgumentParser(description='Build and serve HTML Sphinx docs')

    parser.add_argument(
        '--port',
        help='Serve on this port, default 8000',
        type=int,
        default=8000)

    parser.add_argument(
        '--source',
        help='Directory of source Sphinx (reStructuredText) docs',
        type=os.path.realpath,
        default='docs/source')

    parser.add_argument(
        '--destination',
        help='Where to build the HTML output',
        type=os.path.realpath,
        default='docs/build/html')

    parser.add_argument(
        '--doctrees',
        help='Where the doctrees are built',
        type=os.path.realpath,
        default='docs/build/doctrees')

    options = parser.parse_args(argv)

    bound_build_docs = partial(build_docs, options.source, options.destination, options.doctrees)

    # Do the initial build
    bound_build_docs()

    # Watch the source directory for changes, build docs again if detected
    observer = Observer()
    observer.schedule(
        BuildDocsHandler(bound_build_docs),
        path=options.source, recursive=True)
    observer.start()

    # Set the root for the request handler, overriding Python stdlib current
    # working directory.
    DocsHTTPRequestHandler._root = options.destination

    server = SocketServer.TCPServer(
        ('', options.port),
        DocsHTTPRequestHandler)

    try:
        logger.info('Serving on localhost:{}'.format(options.port))
        server.serve_forever()
    except KeyboardInterrupt:
        sys.stdout.write('\n')
        logger.info('(stopping server)')
        observer.stop()
    finally:
        observer.join()

    logger.info('Server stopped, exiting')
    sys.exit(0)
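`BuildDocsHandler` is not shown in this snippet; a plausible minimal version simply calls the bound build function on every change (a sketch, not the project's actual handler):

from watchdog.events import FileSystemEventHandler

class BuildDocsHandler(FileSystemEventHandler):
    """Sketch: rebuild the docs whenever a source file changes."""
    def __init__(self, build_callback):
        self._build = build_callback

    def on_any_event(self, event):
        if not event.is_directory:
            self._build()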
Пример #54
0
class AutoOcrScheduler(object):

    SINGLE_FOLDER = 'single_folder'
    MIRROR_TREE = 'mirror_tree'

    OUTPUT_MODES = [SINGLE_FOLDER, MIRROR_TREE]

    def __init__(
            self,
            config_dir,
            input_dir,
            output_dir,
            output_mode,
            success_action=OcrTask.ON_SUCCESS_DO_NOTHING,
            archive_dir=None,
            notify_url='',
            process_existing_files=False,
            run_scheduler=True,
            polling_observer=False,
        ):
        self.logger = logger.getChild('scheduler')

        self.config_dir = local.path(config_dir)
        self.input_dir = local.path(input_dir)
        self.output_dir = local.path(output_dir)
        if self.input_dir == self.output_dir:
            raise AutoOcrSchedulerError('Invalid configuration. Input and output directories must not be the same to avoid recursive OCR invocation!')
        self.output_mode = output_mode.lower()
        if self.output_mode not in AutoOcrScheduler.OUTPUT_MODES:
            raise AutoOcrSchedulerError('Invalid output mode: {}. Must be one of: {}'.format(self.output_mode, ', '.join(AutoOcrScheduler.OUTPUT_MODES)))
        self.success_action = success_action.lower()
        if self.success_action not in OcrTask.SUCCESS_ACTIONS:
            raise AutoOcrSchedulerError('Invalid success action: {}. Must be one of {}'.format(self.success_action, ', '.join(OcrTask.SUCCESS_ACTIONS)))
        self.archive_dir = local.path(archive_dir) if archive_dir else None
        if self.success_action == OcrTask.ON_SUCCESS_ARCHIVE and not self.archive_dir:
            raise AutoOcrSchedulerError('Archive directory required for success action {}'.format(self.success_action))

        self.notify_url = notify_url
        self.current_tasks = {}
        self.walk_existing_task = None
        self.current_outputs = set()

        # Create a Threadpool to run OCR tasks on
        self.threadpool = ThreadPoolExecutor(max_workers=3)

        # Wire up an AutoOcrWatchdogHandler
        watchdog_handler = AutoOcrWatchdogHandler(self.on_file_touched, self.on_file_deleted)

        # Schedule watchdog to observe the input directory
        if run_scheduler:
            self.observer = PollingObserver() if polling_observer else Observer()
            self.observer.schedule(watchdog_handler, self.input_dir, recursive=True)
            self.observer.start()
            self.logger.warning('Watching %s', self.input_dir)
        else:
            self.observer = None
            self.logger.warning('Not watching %s', self.input_dir)

        # Process existing files in input directory, if requested
        if process_existing_files:
            self.walk_existing_task = self.threadpool.submit(self.walk_existing_files)

    def shutdown(self):
        # Shut down the feed of incoming watchdog events
        if self.observer:
            self.logger.debug('Shutting down filesystem watchdog...')
            self.observer.unschedule_all()
            self.observer.stop()

        # Cancel all outstanding cancelable tasks
        if self.walk_existing_task:
            self.logger.debug('Canceling walk existing files task...')
            self.walk_existing_task.cancel()
        self.logger.debug('Canceling all %d in-flight tasks...', len(self.current_tasks))
        tasks = [task for _, task in self.current_tasks.items()]
        for task in tasks:
            task.cancel()

        # Wait for the threadpool to clean up
        if self.threadpool:
            self.logger.debug('Shutting down threadpool...')
            self.threadpool.shutdown()
            self.threadpool = None

        # Wait for the watchdog to clean up
        if self.observer:
            self.logger.debug('Cleaning up filesystem watchdog...')
            self.observer.join()
            self.observer = None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.shutdown()
        return False

    def _map_output_path(self, input_path):
        if self.output_mode == AutoOcrScheduler.MIRROR_TREE:
            return self.output_dir / (input_path - self.input_dir)
        else:
            assert self.output_mode == AutoOcrScheduler.SINGLE_FOLDER
            output_path = self.output_dir / (input_path.name)
            unique = 1
            if output_path.exists() or output_path in self.current_outputs:
                suffix = '.{}.{}{}'.format(datetime.now().strftime('%Y%m%d'), unique, output_path.suffix)
                output_path = output_path.with_suffix(suffix)

            while output_path.exists() or output_path in self.current_outputs:
                unique = unique + 1
                output_path = output_path.with_suffix('.{}{}'.format(unique, output_path.suffix), depth=2)
            return output_path

    def _map_archive_path(self, input_path):
        return self.archive_dir / (input_path - self.input_dir)

    def _get_config_path(self, input_path):
        assert (input_path - self.input_dir)[0] != '..'
        config_path = input_path.parent / 'ocr.config'
        while True:
            if config_path.exists():
                return config_path
            if config_path.parent == self.input_dir:
                break
            config_path = config_path.parent.parent / 'ocr.config'

        config_path = self.config_dir / 'ocr.config'
        if config_path.exists():
            return config_path
        return None

    def queue_path(self, path):
        output_path = self._map_output_path(path)
        config_file = self._get_config_path(path)
        archive_file = self._map_archive_path(path)
        task = OcrTask(path,
                       output_path,
                       self.threadpool.submit,
                       self.on_task_done,
                       config_file=config_file,
                       success_action=self.success_action,
                       archive_path=archive_file,
                       notify_url=self.notify_url)
        self.current_tasks[path] = task
        self.current_outputs.add(output_path)

    def walk_existing_files(self):
        self.logger.debug('Enumerating existing input files...')
        def keep_file(file):
            return any([fnmatch.fnmatch(file, pattern) for pattern in AutoOcrWatchdogHandler.MATCH_PATTERNS])
        for file in self.input_dir.walk(filter=keep_file):
            self.on_file_touched(file)
        self.walk_existing_task = None

    def on_file_touched(self, path):
        if path in self.current_tasks:
            self.current_tasks[path].touch()
        else:
            self.queue_path(path)

    def on_file_deleted(self, path):
        if path in self.current_tasks:
            self.current_tasks[path].cancel()

    def on_task_done(self, task):
        self.current_outputs.remove(task.output_path)
        del self.current_tasks[task.input_path]

    def wait_for_idle(self):
        if self.walk_existing_task:
            self.logger.debug('Waiting for walk existing files to complete...')
            concurrent.futures.wait([self.walk_existing_task])
        while self.current_tasks:
            self.logger.debug('Waiting for %d tasks to complete...', len(self.current_tasks))
            concurrent.futures.wait([task.future for _, task in self.current_tasks.items()])
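`AutoOcrWatchdogHandler` lives elsewhere in the project; given how the scheduler wires it up (a touched-file callback, a deleted-file callback, and a `MATCH_PATTERNS` list), it might look roughly like this (illustrative sketch; the pattern list is an assumption):

from watchdog.events import PatternMatchingEventHandler

class AutoOcrWatchdogHandler(PatternMatchingEventHandler):
    """Sketch: forward matching file events to the scheduler's callbacks."""
    MATCH_PATTERNS = ['*.pdf']  # assumption; the real list lives in the project

    def __init__(self, on_file_touched, on_file_deleted):
        super().__init__(patterns=self.MATCH_PATTERNS, ignore_directories=True)
        self._touched = on_file_touched
        self._deleted = on_file_deleted

    def on_created(self, event):
        self._touched(event.src_path)

    def on_modified(self, event):
        self._touched(event.src_path)

    def on_deleted(self, event):
        self._deleted(event.src_path)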
Пример #55
0
class KeywordTable(object):
    """A SQLite database of keywords"""
    def __init__(self, dbfile=":memory:", poll=False):
        self.db = sqlite3.connect(dbfile, check_same_thread=False)
        self.log = logging.getLogger(__name__)
        self._create_db()
        #        self.log.warning("I'm warnin' ya!")

        # set up watchdog observer to monitor changes to
        # keyword files (or more correctly, to directories
        # of keyword files)
        self.observer = PollingObserver() if poll else Observer()
        self.observer.start()

    def add(self, name, monitor=True):
        """Add a folder, library (.py) or resource file (.robot, .tsv, .txt, .resource) to the database
        """

        if os.path.isdir(name):
            if (not os.path.basename(name).startswith(".")):
                self.add_folder(name)

        elif os.path.isfile(name):
            if ((self._looks_like_resource_file(name))
                    or (self._looks_like_libdoc_file(name))
                    or (self._looks_like_library_file(name))):
                self.add_file(name)
        else:
            # let's hope it's a library name!
            self.add_library(name)

    def on_change(self, path, event_type):
        """Respond to changes in the file system

        This method will be given the path to a file that
        has changed on disk. We need to reload the keywords
        from that file
        """
        # I can do all this work in a sql statement, but
        # for debugging it's easier to do it in stages.
        sql = """SELECT collection_id
                 FROM collection_table
                 WHERE path == ?
        """
        cursor = self._execute(sql, (path, ))
        results = cursor.fetchall()
        # there should always be exactly one result, but
        # there's no harm in using a loop to process the
        # single result
        for result in results:
            collection_id = result[0]
            # remove all keywords in this collection
            sql = """DELETE from keyword_table
                     WHERE collection_id == ?
            """
            cursor = self._execute(sql, (collection_id, ))
            self._load_keywords(collection_id, path=path)

    def _load_keywords(self, collection_id, path=None, libdoc=None):
        """Load a collection of keywords

           One of path or libdoc needs to be passed in...
        """
        if libdoc is None and path is None:
            raise (
                Exception("You must provide either a path or libdoc argument"))

        if libdoc is None:
            libdoc = LibraryDocumentation(path)

        if len(libdoc.keywords) > 0:
            for keyword in libdoc.keywords:
                self._add_keyword(collection_id, keyword.name, keyword.doc,
                                  keyword.args)

    def add_file(self, path):
        """Add a resource file or library file to the database"""
        libdoc = LibraryDocumentation(path)
        if len(libdoc.keywords) > 0:
            # if libdoc.doc.startswith("Documentation for resource file"):
            # bah! The file doesn't have an file-level documentation
            # and libdoc substitutes some placeholder text.
            # libdoc.doc = ""

            collection_id = self.add_collection(path, libdoc.name, libdoc.type,
                                                libdoc.doc, libdoc.version,
                                                libdoc.scope)
            self._load_keywords(collection_id, libdoc=libdoc)

    def add_library(self, name):
        """Add a library to the database

        This method is for adding a library by name (eg: "BuiltIn")
        rather than by a file.
        """
        libdoc = LibraryDocumentation(name)
        if len(libdoc.keywords) > 0:
            # FIXME: figure out the path to the library file
            collection_id = self.add_collection(None, libdoc.name, libdoc.type,
                                                libdoc.doc, libdoc.version,
                                                libdoc.scope)
            self._load_keywords(collection_id, libdoc=libdoc)

    def add_folder(self, dirname, watch=True):
        """Recursively add all files in a folder to the database

        By "all files" I mean, "all files that are resource files
        or library files". It will silently ignore files that don't
        look like they belong in the database. Pity the fool who
        uses non-standard suffixes.

        N.B. folders with names that begin with '.' will be skipped
        """

        ignore_file = os.path.join(dirname, ".rfhubignore")
        exclude_patterns = []
        try:
            with open(ignore_file, "r") as f:
                exclude_patterns = []
                for line in f.readlines():
                    line = line.strip()
                    if (re.match(r'^\s*#', line)): continue
                    if len(line.strip()) > 0:
                        exclude_patterns.append(line)
        except OSError:
            # no readable .rfhubignore; should probably warn the user?
            pass

        for filename in os.listdir(dirname):
            path = os.path.join(dirname, filename)
            (basename, ext) = os.path.splitext(filename.lower())

            try:
                if (os.path.isdir(path)):
                    if (not basename.startswith(".")):
                        if os.access(path, os.R_OK):
                            self.add_folder(path, watch=False)
                else:
                    if (ext in (".xml", ".robot", ".txt", ".py", ".tsv",
                                ".resource")):
                        if os.access(path, os.R_OK):
                            self.add(path)
            except Exception as e:
                # I really need to get the logging situation figured out.
                print("bummer:", str(e))

        # FIXME:
        # instead of passing a flag around, I should just keep track
        # of which folders we're watching, and not add watchers for
        # any subfolders. That will work better in the case where
        # the user accidentally starts up the hub giving the same
        # folder, or a folder and its children, on the command line...
        if watch:
            # add watcher on normalized path
            dirname = os.path.abspath(dirname)
            event_handler = WatchdogHandler(self, dirname)
            self.observer.schedule(event_handler, dirname, recursive=True)

    def add_collection(self,
                       path,
                       c_name,
                       c_type,
                       c_doc,
                       c_version="unknown",
                       c_scope="",
                       c_namedargs="yes",
                       c_doc_format="ROBOT"):
        """Insert data into the collection table"""
        if path is not None:
            # We want to store the normalized form of the path in the
            # database
            path = os.path.abspath(path)

        cursor = self.db.cursor()
        cursor.execute(
            """
            INSERT INTO collection_table
                (name, type, version, scope, namedargs, path, doc, doc_format)
            VALUES
                (?,?,?,?,?,?,?,?)
        """, (c_name, c_type, c_version, c_scope, c_namedargs, path, c_doc,
              c_doc_format))
        collection_id = cursor.lastrowid
        return collection_id

    def add_installed_libraries(
            self,
            extra_libs=["SeleniumLibrary", "SudsLibrary", "RequestsLibrary"]):
        """Add any installed libraries that we can find

        We do this by looking in the `libraries` folder where
        robot is installed. If you have libraries installed
        in a non-standard place, this won't pick them up.
        """

        libdir = os.path.dirname(robot.libraries.__file__)
        loaded = []
        for filename in os.listdir(libdir):
            if filename.endswith(".py") or filename.endswith(".pyc"):
                libname, ext = os.path.splitext(filename)
                if (libname.lower() not in loaded
                        and not self._should_ignore(libname)):

                    try:
                        self.add(libname)
                        loaded.append(libname.lower())
                    except Exception as e:
                        # need a better way to log this...
                        self.log.debug("unable to add library: " + str(e))

        # I hate how I implemented this, but I don't think there's
        # any way to find out which installed python packages are
        # robot libraries.
        for library in extra_libs:
            if (library.lower() not in loaded
                    and not self._should_ignore(library)):
                try:
                    self.add(library)
                    loaded.append(library.lower())
                except Exception as e:
                    self.log.debug("unable to add external library %s: %s" % \
                                   (library, str(e)))

    def get_collection(self, collection_id):
        """Get a specific collection"""
        sql = """SELECT collection.collection_id, collection.type,
                        collection.name, collection.path,
                        collection.doc,
                        collection.version, collection.scope,
                        collection.namedargs,
                        collection.doc_format
                 FROM collection_table as collection
                 WHERE collection_id == ? OR collection.name like ?
        """
        cursor = self._execute(sql, (collection_id, collection_id))
        # need to handle the case where we get more than one result...
        sql_result = cursor.fetchone()
        return {
            "collection_id": sql_result[0],
            "type": sql_result[1],
            "name": sql_result[2],
            "path": sql_result[3],
            "doc": sql_result[4],
            "version": sql_result[5],
            "scope": sql_result[6],
            "namedargs": sql_result[7],
            "doc_format": sql_result[8]
        }

    def get_collections(self, pattern="*", libtype="*"):
        """Returns a list of collection name/summary tuples"""

        sql = """SELECT collection.collection_id, collection.name, collection.doc,
                        collection.type, collection.path
                 FROM collection_table as collection
                 WHERE name like ?
                 AND type like ?
                 ORDER BY collection.name
              """

        cursor = self._execute(
            sql, (self._glob_to_sql(pattern), self._glob_to_sql(libtype)))
        sql_result = cursor.fetchall()

        return [{
            "collection_id": result[0],
            "name": result[1],
            "synopsis": result[2].split("\n")[0],
            "type": result[3],
            "path": result[4]
        } for result in sql_result]

    def get_keyword_data(self, collection_id):
        sql = """SELECT keyword.keyword_id, keyword.name, keyword.args, keyword.doc
                 FROM keyword_table as keyword
                 WHERE keyword.collection_id == ?
                 ORDER BY keyword.name
              """
        cursor = self._execute(sql, (collection_id, ))
        return cursor.fetchall()

    def get_keyword(self, collection_id, name):
        """Get a specific keyword from a library"""
        sql = """SELECT keyword.name, keyword.args, keyword.doc
                 FROM keyword_table as keyword
                 WHERE keyword.collection_id == ?
                 AND keyword.name like ?
              """
        cursor = self._execute(sql, (collection_id, name))
        # We're going to assume no library has duplicate keywords
        # While that in theory _could_ happen, it never _should_,
        # and you get what you deserve if it does.
        row = cursor.fetchone()
        if row is not None:
            return {
                "name": row[0],
                "args": json.loads(row[1]),
                "doc": row[2],
                "collection_id": collection_id
            }
        return {}

    def get_keyword_hierarchy(self, pattern="*"):
        """Returns all keywords that match a glob-style pattern

        The result is a list of dictionaries, sorted by collection
        name.

        The pattern matching is insensitive to case. The function
        returns a list of (library_name, keyword_name,
        keyword_synopsis tuples) sorted by keyword name

        """

        sql = """SELECT collection.collection_id, collection.name, collection.path,
                 keyword.name, keyword.doc
                 FROM collection_table as collection
                 JOIN keyword_table as keyword
                 WHERE collection.collection_id == keyword.collection_id
                 AND keyword.name like ?
                 ORDER by collection.name, collection.collection_id, keyword.name
             """
        cursor = self._execute(sql, (self._glob_to_sql(pattern), ))
        libraries = []
        current_library = None
        for row in cursor.fetchall():
            (c_id, c_name, c_path, k_name, k_doc) = row
            if c_id != current_library:
                current_library = c_id
                libraries.append({
                    "name": c_name,
                    "collection_id": c_id,
                    "keywords": [],
                    "path": c_path
                })
            libraries[-1]["keywords"].append({"name": k_name, "doc": k_doc})
        return libraries

    def search(self, pattern="*", mode="both"):
        """Perform a pattern-based search on keyword names and documentation

        The pattern matching is insensitive to case. The function
        returns a list of tuples of the form library_id, library_name,
        keyword_name, keyword_synopsis, sorted by library id,
        library name, and then keyword name

        If a pattern begins with "name:", only the keyword names will
        be searched. Otherwise, the pattern is searched for in both
        the name and keyword documentation.

        You can limit the search to a single library by specifying
        "in:" followed by the name of the library or resource
        file. For example, "screenshot in:SeleniumLibrary" will only
        search for the word 'screenshot' in the SeleniumLibrary.

        """
        pattern = self._glob_to_sql(pattern)

        COND = "(keyword.name like ? OR keyword.doc like ?)"
        args = [pattern, pattern]
        if mode == "name":
            COND = "(keyword.name like ?)"
            args = [
                pattern,
            ]

        sql = """SELECT collection.collection_id, collection.name, keyword.name, keyword.doc
                 FROM collection_table as collection
                 JOIN keyword_table as keyword
                 WHERE collection.collection_id == keyword.collection_id
                 AND %s
                 ORDER by collection.collection_id, collection.name, keyword.name
             """ % COND

        cursor = self._execute(sql, args)
        result = [(row[0], row[1], row[2], row[3].strip().split("\n")[0])
                  for row in cursor.fetchall()]
        return list(set(result))

    def get_keywords(self, pattern="*"):
        """Returns all keywords that match a glob-style pattern

        The pattern matching is insensitive to case. The function
        returns a list of (library_name, keyword_name,
        keyword_synopsis tuples) sorted by keyword name

        """

        sql = """SELECT collection.collection_id, collection.name,
                        keyword.name, keyword.doc, keyword.args
                 FROM collection_table as collection
                 JOIN keyword_table as keyword
                 WHERE collection.collection_id == keyword.collection_id
                 AND keyword.name like ?
                 ORDER by collection.name, keyword.name
             """
        pattern = self._glob_to_sql(pattern)
        cursor = self._execute(sql, (pattern, ))
        result = [(row[0], row[1], row[2], row[3], row[4])
                  for row in cursor.fetchall()]
        return list(sorted(set(result), key=itemgetter(2)))

    def reset(self):
        """Remove all data from the database, but leave the tables intact"""
        self._execute("DELETE FROM collection_table")
        self._execute("DELETE FROM keyword_table")

    def _looks_like_library_file(self, name):
        return name.endswith(".py")

    def _looks_like_libdoc_file(self, name):
        """Return true if an xml file looks like a libdoc file"""
        # inefficient since we end up reading the file twice,
        # but it's fast enough for our purposes, and prevents
        # us from doing a full parse of files that are obviously
        # not libdoc files
        if name.lower().endswith(".xml"):
            with open(name, "r") as f:
                # read the first few lines; if we don't see
                # what looks like libdoc data, return false
                data = f.read(200)
                index = data.lower().find("<keywordspec ")
                if index >= 0:
                    return True
        return False

    def _looks_like_resource_file(self, name):
        """Return true if the file has a keyword table but not a testcase table"""
        # inefficient since we end up reading the file twice,
        # but it's fast enough for our purposes, and prevents
        # us from doing a full parse of files that are obviously
        # not robot files

        if (re.search(r'__init__\.(txt|robot|html|tsv)$', name)):
            # These are initialize files, not resource files
            return False

        found_keyword_table = False
        if (name.lower().endswith(".robot") or name.lower().endswith(".txt")
                or name.lower().endswith(".tsv")
                or name.lower().endswith(".resource")):

            with open(name, "r") as f:
                data = f.read()
                for match in re.finditer(
                        r'^\*+\s*(Test Cases?|(?:User )?Keywords?)', data,
                        re.MULTILINE | re.IGNORECASE):
                    if (re.match(r'Test Cases?', match.group(1),
                                 re.IGNORECASE)):
                        # if there's a test case table, it's not a keyword file
                        return False

                    if (not found_keyword_table
                            and re.match(r'(User )?Keywords?', match.group(1),
                                         re.IGNORECASE)):
                        found_keyword_table = True
        return found_keyword_table

    def _should_ignore(self, name):
        """Return True if a given library name should be ignored

        This is necessary because not all files we find in the library
        folder are libraries. I wish there was a public robot API
        for "give me a list of installed libraries"...
        """
        _name = name.lower()
        return (_name.startswith("deprecated") or _name.startswith("_")
                or _name in ("remote", "reserved", "dialogs_py", "dialogs_ipy",
                             "dialogs_jy"))

    def _execute(self, *args):
        """Execute an SQL query

        This exists because I think it's tedious to get a cursor and
        then use a cursor.
        """
        cursor = self.db.cursor()
        cursor.execute(*args)
        return cursor

    def _add_keyword(self, collection_id, name, doc, args):
        """Insert data into the keyword table

        'args' should be a list, but since we can't store a list in an
        sqlite database we store it as JSON so we can convert it back
        to a list later.
        """
        argstring = json.dumps(args.argument_names)
        self.db.execute(
            """
            INSERT INTO keyword_table
                (collection_id, name, doc, args)
            VALUES
                (?,?,?,?)
        """, (collection_id, name, doc, argstring))

    def _create_db(self):

        if not self._table_exists("collection_table"):
            self.db.execute("""
                CREATE TABLE collection_table
                (collection_id INTEGER PRIMARY KEY AUTOINCREMENT,
                 name          TEXT COLLATE NOCASE,
                 type          COLLATE NOCASE,
                 version       TEXT,
                 scope         TEXT,
                 namedargs     TEXT,
                 path          TEXT,
                 doc           TEXT,
                 doc_format    TEXT)
            """)
            self.db.execute("""
                CREATE INDEX collection_index
                ON collection_table (name)
            """)

        if not self._table_exists("keyword_table"):
            self.db.execute("""
                CREATE TABLE keyword_table
                (keyword_id    INTEGER PRIMARY KEY AUTOINCREMENT,
                 name          TEXT COLLATE NOCASE,
                 collection_id INTEGER,
                 doc           TEXT,
                 args          TEXT)
            """)
            self.db.execute("""
                CREATE INDEX keyword_index
                ON keyword_table (name)
            """)

    def _glob_to_sql(self, string):
        """Convert glob-like wildcards to SQL wildcards

        * becomes %
        ? becomes _
        % becomes \%
        \\ remains \\
        \* remains \*
        \? remains \?

        This also adds a leading and trailing %, unless the pattern begins with
        ^ or ends with $
        """

        # What's with the chr(1), chr(2) and chr(3) nonsense? It's a
        # trick to hide \\, \* and \? from the %, ? and * substitutions.
        # The trick depends on the substitutions being done in order.
        # chr(1) through chr(3) were picked because those characters
        # almost certainly won't be in the input string.
        table = ((r'\\', chr(1)), (r'\*', chr(2)), (r'\?', chr(3)),
                 (r'%', r'\%'), (r'?', '_'), (r'*', '%'), (chr(1), r'\\'),
                 (chr(2), r'\*'), (chr(3), r'\?'))

        for (a, b) in table:
            string = string.replace(a, b)

        string = string[1:] if string.startswith("^") else "%" + string
        string = string[:-1] if string.endswith("$") else string + "%"

        return string

    def _table_exists(self, name):
        cursor = self.db.execute("""
            SELECT name FROM sqlite_master
            WHERE type='table' AND name='%s'
        """ % name)
        return len(cursor.fetchall()) > 0
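To make `_glob_to_sql` concrete, a few input/output pairs implied by its docstring:

kt = KeywordTable()  # in-memory database (also starts its observer thread)
print(kt._glob_to_sql("screenshot"))      # '%screenshot%'
print(kt._glob_to_sql("file?.txt"))       # '%file_.txt%'
print(kt._glob_to_sql("^Open Browser$"))  # 'Open Browser'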
Пример #56
0
def tail_like(path):
    observer = PollingObserver()
    handler = TailHandler(path)
    observer.schedule(handler, dirname(path))
    observer.start()
    current_path = path
    # seek to the end of the file
    current_pos = os.stat(path).st_size
    current_file = open(path, 'r')
    try:
        buffer = []
        old_unix_time = 0

        while True:

            current_file.seek(current_pos)
            data = ''

            for block in iter(lambda: current_file.read(32), ''):
                data += block

            current_pos = current_file.tell()

            if current_path != handler.path:
                print('change path')
                current_file.close()
                current_path = handler.path
                current_pos = os.stat(current_path).st_size
                current_file = open(current_path, 'r')

            if data == '':
                time.sleep(READ_INTERVAL)
                continue

            rows = data.split('\n')

            for row in rows:

                columns = row.split(',')
                unix_time = columns[0]

                if row == '':
                    pass
                elif unix_time != '' and old_unix_time == 0:
                    pass
                elif float(unix_time) > old_unix_time:
                    send_data(buffer)
                    old_unix_time = float(unix_time)
                    # print(buffer)
                    buffer = []
                    buffer.append(MakeTrajectoryData(columns))
                else:
                    buffer.append(MakeTrajectoryData(columns))

                if unix_time != '':
                    old_unix_time = float(unix_time)

            time.sleep(READ_INTERVAL)

    except KeyboardInterrupt:
        observer.stop()
    finally:
        handler.close()
    observer.join()
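`TailHandler` is not shown; since the loop follows `handler.path` to survive log rotation, a sketch could look like this (illustrative only):

from watchdog.events import FileSystemEventHandler

class TailHandler(FileSystemEventHandler):
    """Sketch: point .path at the newest file so the tail loop follows rotation."""
    def __init__(self, path):
        self.path = path

    def on_created(self, event):
        if not event.is_directory:
            self.path = event.src_path  # switch to the freshly rotated file

    def close(self):
        pass  # release any resources the handler holds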
Пример #57
0
    def start(self):
        gevent.spawn(self._cleanScheduledJobs, 3600 * 24)
        observer = Observer()
        handler = JumpscriptHandler(self)
        observer.schedule(handler, "jumpscripts", recursive=True)
        observer.start()
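Note that `observer` is a local variable here, so it can never be stopped later. A matching shutdown method becomes possible if `start()` keeps the reference on the instance (a sketch):

    def stop(self):
        # assumes start() stored the observer as self.observer
        self.observer.stop()
        self.observer.join()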
Пример #58
0
def run(parser, args):
    # new code block: change the reference path within the args.toml file to the args.mindex path
    d = toml.load(args.toml)

    print(d["conditions"]["reference"])
    args.tomlfile = args.toml
    args.toml = d
    print(args)

    # TODO: Move logging config to separate configuration file
    # set up logging to file
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)s::%(asctime)s::%(name)s::%(message)s',
                        filename=args.log_file,
                        filemode='w')

    # define a Handler that writes INFO messages or higher to the sys.stderr
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)

    # set a format which is simpler for console use
    formatter = logging.Formatter('%(name)-15s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)

    # add the handler to the root logger
    logging.getLogger('').addHandler(console)

    # Start by logging sys.argv and the parameters used
    logger = logging.getLogger("Manager")
    logger.info(" ".join(sys.argv))
    print_args(args, logger=logger)

    logger.info("Initialising iterAlign.")

    logger.info("Setting up FastQ monitoring.")

    #### Check if a run is active - if not, wait.

    args.simulation = True
    connection = None

    #set default message severity level.
    severity = 2

    if args.watch is None:
        args.simulation = False
        logger.info("Creating rpc connection for device {}.".format(args.device))
        try:
            connection, messageport = get_rpc_connection(args.device)
        except ValueError as e:
            print(e)
            sys.exit(1)

        #send_message_port("Iteralign Connected to MinKNOW", args.host, messageport)
        send_message(connection, "Iteralign Connected to MinKNOW.", Severity.WARN)

        logger.info("Loaded RPC")
        while parse_message(connection.acquisition.current_status())['status'] != "PROCESSING":
            time.sleep(1)
        ### Check if we know where data is being written to; if not, wait.
        args.watch = parse_message(connection.acquisition.get_acquisition_info())['config_summary'][
            'reads_directory']

    else:
        messageport = ""

    event_handler = FastqHandler(args, logging, messageport, connection)
    # This block handles the fastq
    observer = Observer()
    observer.schedule(event_handler, path=args.watch, recursive=True)
    observer.daemon = True

    try:

        observer.start()
        logger.info("FastQ Monitoring Running.")
        while True:
            time.sleep(1)

    except KeyboardInterrupt:

        logger.info("Exiting - Will take a few seconds to clean up!")

        observer.stop()
        observer.join()

        if args.keepfiles:
            logging.info("The 'keepfiles' argument was set, files generated by classifier have been retained")
        else:
            if os.path.isdir(args.path):
                for path, dirs, files in os.walk(args.path):
                    for f in files:
                        if f.startswith(args.prefix):
                            os.unlink(os.path.join(path, f))
                            logging.info("file removed: {}".format(f))

            if os.path.isdir("./"):
                for path, dirs, files in os.walk("./"):
                    for f in files:
                        if f.endswith(args.creport):
                            os.unlink(os.path.join(path, f))
                            logging.info("file removed: {}".format(f))

            logging.info("All files generated by classifier have been removed.")

        os._exit(0)
Пример #59
0
            'event_type': event.event_type
        }
        if event.event_type != events.EVENT_TYPE_DELETED:
            data['type'] = magic.from_file(event.src_path, mime=True)

        requests.post(
            'http://%s:%s/items' % (APP_HOST, APP_PORT),
            data=data
        )

    def on_modified(self, event):
        self.process(event)

    def on_created(self, event):
        self.process(event)

    def on_deleted(self, event):
        self.process(event)

if __name__ == '__main__':
    args = sys.argv[1:]
    observer = Observer()
    observer.schedule(MyHandler(), path=args[0] if args else '../watch_here')
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
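The top of this example is truncated; based on the visible body, the missing portion presumably defined the handler and the start of `process()` roughly as follows (a hedged reconstruction, not the original code):

import sys
import time

import magic
import requests
from watchdog import events
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

APP_HOST = 'localhost'  # assumption: defined in the truncated portion
APP_PORT = 8000         # assumption

class MyHandler(FileSystemEventHandler):
    def process(self, event):
        data = {
            # ...other fields were truncated from the original...
            'event_type': event.event_type
        }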
Пример #60
0
class Config:
    def __init__(self, args, callback=None):
        self._watchdog_observer = None
        self._path = args.config
        self._callback = callback
        self._config = {}

        # Load from file
        try:
            self.load_config_file()
        except FileNotFoundError:
            logger.info("No configuration file, creating a new one")
            self._config = CONF_SCHEMA({})

        # Overwrite with command line arguments
        args_keys = vars(args)
        for key in args_keys:
            if self._config.get(key) != args_keys[key]:
                self._config[key] = args_keys[key]

        self.save_config_file()

        self._watchdog_observer = Observer()
        watchdog_event_handler = FileSystemEventHandler()
        watchdog_event_handler.on_modified = lambda event: self.load_config_file()
        self._watchdog_observer.schedule(watchdog_event_handler, self._path)
        self._watchdog_observer.start()

    def load_config_file(self):
        """Load configuration from yaml file."""
        with open(self._path, "r") as infile:
            logger.debug("Loading configuration from <%s>", self._path)
            try:
                configuration = yaml.safe_load(infile)
                if not configuration:
                    logger.warning(
                        "Could not load a configuration from %s, creating a new one",
                        self._path,
                    )
                    configuration = {}
                self._config = CONF_SCHEMA(configuration)
                if self._callback:
                    self._callback()
            except vol.MultipleInvalid as error:
                logger.error("In configuration file %s: %s", self._path, error)
                quit(1)

    def save_config_file(self):
        """Save configuration back to yaml file."""
        cfg = self._config.pop(CONF_CONFIG, None)  # temporarily displace config file path
        try:
            with open(self._path, "w", encoding="utf8") as outfile:
                yaml.dump(self._config,
                          outfile,
                          default_flow_style=False,
                          allow_unicode=True)
        except Exception as err:
            logger.error("Could not save configuration: %s", err)
        finally:
            self._config[CONF_CONFIG] = cfg  # restore

    def __del__(self):
        """Release watchdog."""
        if self._watchdog_observer:
            self._watchdog_observer.stop()
            self._watchdog_observer.join()
        if self._config != {}:
            self.save_config_file()

    def __repr__(self):
        return repr(self._config)

    @property
    def mqtt_conf(self):
        return (
            self._config[CONF_MQTT_SERVER],
            self._config[CONF_MQTT_PORT],
            self._config[CONF_MQTT_BASE_TOPIC],
        )

    @property
    def dali_driver(self):
        return self._config[CONF_DALI_DRIVER]

    @property
    def ha_discovery_prefix(self):
        return self._config[CONF_HA_DISCOVERY_PREFIX]

    @property
    def log_level(self):
        return self._config[CONF_LOG_LEVEL]

    @property
    def log_color(self):
        return self._config[CONF_LOG_COLOR]

    @property
    def devices_names_file(self):
        return self._config[CONF_DEVICES_NAMES_FILE]
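A short usage sketch for `Config` (assuming an argparse namespace with a `config` attribute and the `CONF_*` constants and schema defined alongside the class):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--config", default="config.yaml")
args = parser.parse_args()

def on_reload():
    print("configuration file changed on disk")

cfg = Config(args, callback=on_reload)
server, port, base_topic = cfg.mqtt_conf  # unpack the MQTT settings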