Example #1
def watch(path, callback):
    """
    Start observing changes in the filesystem. See JasyEventHandler for the event callbacks.

    :param path: Path which will be observed
    :type path: string
    """
    
    if Observer is None:
        Console.error("You need to install Watchdog for supporting file system watchers")

    # Initialize file system observer
    observer = Observer()
    observer.schedule(JasyEventHandler(), ".", recursive=True)
    observer.start()

    Console.info("Started file system watcher for %s... [PID=%s]", path, os.getpid())
    Console.info("Use 'ulimit -n 1024' to increase number of possible open files")

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    Console.info("Stopped file system watcher for %s...", path)
    observer.join()
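Note: JasyEventHandler is defined elsewhere in the Jasy project and is not shown here. A minimal stand-in, assuming only the standard watchdog callbacks are needed (it logs through Python's logging module rather than Jasy's Console), might look like this:

import logging
from watchdog.events import FileSystemEventHandler

log = logging.getLogger(__name__)

class JasyEventHandler(FileSystemEventHandler):
    """Illustrative stand-in: log every change the observer reports."""

    def on_created(self, event):
        log.info("Created: %s", event.src_path)

    def on_modified(self, event):
        log.info("Modified: %s", event.src_path)

    def on_deleted(self, event):
        log.info("Deleted: %s", event.src_path)

    def on_moved(self, event):
        log.info("Moved: %s -> %s", event.src_path, event.dest_path)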
Example #2
def main():
    if (len(sys.argv) > 1 and os.path.exists(sys.argv[1]) and os.path.isfile(sys.argv[1])):
        filename = sys.argv[1]
    else:
        filename = gui.get_path("*.csv",defaultFile="data.csv")
    
    commonPath = os.path.abspath(os.path.split(filename)[0])
    outputFile = os.path.join(commonPath, "odmanalysis.csv")
    
    print "Now watching %s for changes" % filename
    handler = OMDCsvChunkHandler(filename,outputFile)
    observer = Observer()
    observer.schedule(handler, path=commonPath, recursive=False)
    handler.startPCChain()
    observer.start()

    try:
        while True:
            time.sleep(1)
                
    except (KeyboardInterrupt, SystemExit):
        print "Stopping..."
        observer.stop()
        time.sleep(1)
    observer.join()
Example #3
def generate_and_observe(args, event):
    while event.isSet():
        # Generate the presentation
        monitor_list = generate(args)
        print("Presentation generated.")

        # Make a list of involved directories
        directories = defaultdict(list)
        for file in monitor_list:
            directory, filename = os.path.split(file)
            directories[directory].append(filename)

        observer = Observer()
        handler = HovercraftEventHandler(monitor_list)
        for directory, files in directories.items():
            observer.schedule(handler, directory, recursive=False)

        observer.start()
        while event.wait(1):
            time.sleep(0.05)
            if handler.quit:
                break

        observer.stop()
        observer.join()
Example #4
def watch(path, callback):
    
    header("Build Daemon")
    
    if Observer is None:
        error("You need to install Watchdog for supporting file system watchers")

    # We need to pause the session to make room for other jasy executions
    session.pause()

    # Initialize file system observer
    observer = Observer()
    observer.schedule(JasyEventHandler(), ".", recursive=True)
    observer.start()

    info("Started file system watcher for %s... [PID=%s]", path, os.getpid())
    info("Use 'ulimit -n 1024' to increase number of possible open files")

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    info("Stopped file system watcher for %s...", path)
    observer.join()
Example #5
    def watch(self, source, write=True, package=None, run=False, force=False):
        """Watches a source and recompiles on change."""
        from watchdog.events import FileSystemEventHandler
        from watchdog.observers import Observer

        def recompile(path):
            if os.path.isfile(path) and os.path.splitext(path)[1] in code_exts:
                self.compile_path(path, write, package, run, force)

        class watcher(FileSystemEventHandler):
            def on_modified(_, event):
                recompile(event.src_path)
            def on_created(_, event):
                recompile(event.src_path)

        source = fixpath(source)

        self.console.show("Watching        "+showpath(source)+" ...")
        self.console.print("(press Ctrl-C to end)")

        observer = Observer()
        observer.schedule(watcher(), source, recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(.1)
        except KeyboardInterrupt:
            pass
        finally:
            observer.stop()
            observer.join()
Example #6
File: server.py Project: befks/odoo
class FSWatcher(object):
    def __init__(self):
        self.observer = Observer()
        for path in odoo.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
            if not event.is_directory:
                path = getattr(event, 'dest_path', event.src_path)
                if path.endswith('.py'):
                    try:
                        source = open(path, 'rb').read() + '\n'
                        compile(source, path, 'exec')
                    except SyntaxError:
                        _logger.error('autoreload: python code change detected, SyntaxError in %s', path)
                    else:
                        _logger.info('autoreload: python code updated, autoreload activated')
                        restart()

    def start(self):
        self.observer.start()
        _logger.info('AutoReload watcher running')

    def stop(self):
        self.observer.stop()
        self.observer.join()
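Note: FSWatcher above is not a FileSystemEventHandler subclass; watchdog only requires that the object passed to schedule() expose a dispatch(event) method, which the observer calls for every event, so passing self works by duck typing. A self-contained sketch of the same syntax-check idea written as an ordinary handler (an illustration only, not the Odoo code; it logs instead of restarting the server):

import logging
from watchdog.events import FileSystemEventHandler

log = logging.getLogger(__name__)

class PySyntaxHandler(FileSystemEventHandler):
    """Illustrative sketch: recheck Python syntax whenever a .py file changes."""

    def on_any_event(self, event):
        if event.is_directory or event.event_type == 'deleted':
            return
        # Moved events carry the new location in dest_path.
        path = getattr(event, 'dest_path', event.src_path)
        if not path.endswith('.py'):
            return
        try:
            with open(path, 'rb') as f:
                compile(f.read() + b'\n', path, 'exec')
        except (SyntaxError, OSError):
            log.error('Could not compile %s', path)
        else:
            log.info('%s compiles cleanly', path)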
Example #7
def main(): 
    global badExtensionCounter, failedFlag, pool, failedProcessCounter#, db
    
    sql_setup() # Set-up SQL Database/check to see if exists
    
    # Initiate File Path Handler
    observer = Observer()
    observer.schedule(MyHandler(), path=file_path, recursive=True)
    observer.start()
    
    cpuCount = multiprocessing.cpu_count() # Count all available CPUs
    print "\nTotal CPU Count: %d"%(cpuCount)
    pool = multiprocessing.Pool(4, worker,(processQueue,)) # Create 4 child processes to handle all queued elements
    active = multiprocessing.active_children() # All active child processes
    print "Total number of active child processes: %s\n"%(str(active))
    
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pool.terminate() # Stop all child processes
        pool.join() # Join the processes with parent and terminate
        active = multiprocessing.active_children() # All active child processes, list should be empty at this point.
        print "\nTotal number of active child processes: %s\n"%(str(active))
        shutdown() # Run shutdown sequence        
        observer.stop()
        observer.join()
        sys.exit(1)
Example #8
def main():
    # Fill all changes that occurred when track-changes.py wasn't running.
    if os.path.isdir("out"):
        shutil.rmtree("out", True)

    if not os.path.isdir("out"):
        os.mkdir("out")
 
    startup_changes.sync_offline_changes("posts", "out")

    print "Watching posts directory for changes... CTRL+C to quit."
    watch_directory = "posts"

    event_handler = MyHandler()

    # Run the watchdog.
    observer = Observer()
    observer.schedule(event_handler, watch_directory, True)
    observer.start()

    """
    Keep the script running or else python closes without stopping the observer
    thread and this causes an error.
    """
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
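Note: the sleep loop above (and in most of these examples) exists only to keep the main thread alive while the observer thread does the work. Since Observer is a threading.Thread subclass, an equivalent variant polls join() with a timeout instead of sleeping, much like Example #15 below:

# Equivalent keep-alive: poll the observer thread instead of sleeping.
try:
    while observer.is_alive():
        observer.join(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()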
Example #9
class WatchFile(object):
    def __init__(self, send_msg_func, *args, **kargs):
        self.path = kargs['path'] if 'path' in kargs else '.'
        self.suffix = kargs['suffix'] if 'suffix' in kargs else '*'  # a star matches any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
        self.send_msg_func = send_msg_func
        self.filename = self.zip_filename = ''

    def run(self):
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def get_data(self, filename):
        data = self._unpack(filename)
        data = str(data)
        print(data, type(data))
        self.send_msg_func(data)

    def _unpack(self, filename):
        # first rename the file to use a .zip suffix
        # may not work on Linux
        if system == 'Windows':
            filename = filename[2:] if filename.startswith('.\\') else filename
            filename = filename.lstrip()
            new_name = filename.split('.')[0] + '.zip'
            new_name = new_name[1:] if new_name.startswith('\\') else new_name
        elif system == 'Linux':
            new_name = filename

        print('Old name:', filename, ' New name:', new_name)

        self.filename = filename
        self.zip_filename = new_name
        # wait for the operating system to create the file
        time.sleep(3)
        os.rename(filename, new_name)
        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                file = zip_file.open(name, 'r')
                json_data = "".join(file.readlines())
        # change filename back to .sb2
        if new_name.endswith('.zip'):
            os.rename(new_name, filename)

        return self.get_cmd(json_data)

    def get_cmd(self, json_data):
        jsonfy_data = json.loads(json_data)
        child = jsonfy_data['children'][0]
        scripts = child['scripts']
        return scripts
Example #10
    def __init__(self, config):
        """
        Initialize the watcher, use the config passed from main
        """
        self.config = config


        # List of pending files
        self.pending_files = set()

        self.sync_timer = None

        # Setup our watchdog observer
        observer = Observer()
        observer.schedule(ChangeHandler(self.on_file_changed), path=config.directory, recursive=True)
        observer.start()

        logging.info("Starting change tracker, cmd: {}, dir: {}, delay: {}".format(config.sync_cmd,
                                                                                   config.directory,
                                                                                   config.delay))
        try:
            while True:
                time.sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
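Note: ChangeHandler and the on_file_changed callback are not shown in this example. Given the pending_files set, the sync_timer field, and config.delay, the handler presumably just forwards events to the callback; a minimal, speculative sketch (not the project's actual code):

from watchdog.events import FileSystemEventHandler

class ChangeHandler(FileSystemEventHandler):
    """Speculative sketch: forward every file event to a single callback."""

    def __init__(self, callback):
        super().__init__()
        self.callback = callback

    def on_any_event(self, event):
        if not event.is_directory:
            self.callback(event.src_path)

on_file_changed would then typically add the path to pending_files, cancel any pending sync_timer, and start a new threading.Timer(config.delay, ...) that runs config.sync_cmd once events go quiet, which is a standard way to debounce bursts of filesystem notifications.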
Example #11
class PropMTimeWatcher:
    def __init__(self, app_data_folder):
        self._app_data_folder = app_data_folder
        self._observer = Observer()
        self.schedule()

    def schedule(self):
        pref = PropMTimePreferences(self._app_data_folder)
        self._observer.unschedule_all()
        for path, watcher in pref.get_all_paths().items():
            if watcher:
                if os.path.exists(path):
                    event_handler = ModHandler(path, self._app_data_folder)
                    log.info('scheduling watcher : %s' % path)
                    self._observer.schedule(event_handler, path=path, recursive=True)
                else:
                    log.error('Error: "%s" does not exist.\n\nPlease edit the path.\n\nTo do this, click on the %s icon and select "Paths".' %
                              (path, __application_name__))
        self._observer.start()

    def request_exit(self):
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join(TIMEOUT)
        if self._observer.isAlive():
            log.error('observer still alive')
Example #12
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, extensions=[]):
    """
    Starts a server to render the specified file or directory
    containing a README.
    """
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog
    observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #13
def watch(path, handler=None, debug=True):
	import time
	from watchdog.observers import Observer
	from watchdog.events import FileSystemEventHandler

	class Handler(FileSystemEventHandler):
		def on_any_event(self, event):
			if debug:
				print "File {0}: {1}".format(event.event_type, event.src_path)

			if not handler:
				print "No handler specified"
				return

			handler(event.src_path, event.event_type)

	event_handler = Handler()
	observer = Observer()
	observer.schedule(event_handler, path, recursive=True)
	observer.start()
	try:
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		observer.stop()
	observer.join()
Example #14
def watch_project(markdown_fn, output_fn, template_fn, render_first=True):
    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            if event.src_path == os.path.abspath(output_fn):
                return
            print('Rendering slides...')
            process_slides(markdown_fn, output_fn, template_fn)

    if render_first == True:
        process_slides(markdown_fn, output_fn, template_fn)
        
    observer = Observer()
    event_handler = Handler()

    dirname = os.path.dirname(os.path.abspath(markdown_fn))
    
    observer.schedule(event_handler, path=dirname, recursive=True)
    print("Watching for events on {:s}...".format(dirname))
    observer.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #15
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    observer = Observer()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer)
    XModuleSassWatcher().register(observer)
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by Ctrl-C
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Example #16
class Modsync:
    _target = []
    _source = ''
    _observer = None

    def __init__(self):
        pass

    def setSource(self, source):
        self._source = source

    def setTarget(self, target_dir):
        self._target.append(target_dir)

    def getObserver(self):
        return self._observer

    def run(self):

        if not self._source:
            return 0

        self._observer = Observer()
        event_handler = ModsyncEventHandler(self._observer, self._source, self._target)
        self._observer.schedule(event_handler, self._source, recursive=True)
        self._observer.start()
        try:
            time.sleep(2)
            pass
        except KeyboardInterrupt:
            self._observer.stop()
        self._observer.join()
        return 0
Example #17
def main():
    if not config['play']['scan']:
        raise Exception('''
            Nothing to scan. Add a path in the config file.

            Example:

                play:
                    scan:
                        -
                            type: shows
                            path: /a/path/to/the/shows
            ''')
    obs = Observer()
    for s in config['play']['scan']:
        event_handler = Handler(
            scan_path=s['path'],
            type_=s['type'],
        )
        obs.schedule(
            event_handler,
            s['path'],
            recursive=True,
        )
    obs.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Example #18
class Sceduler:

  def __init__(self, config):

    fs = config.get('scheduler', 'fs', 0)
    dest = config.get('store', 'path', 0)
    self.ioqueue = Queue()
    self.iothread = Thread(target=self.ioprocess)
    self.iothread.daemon = True
    self.observer = Observer()
    self.event_handler = IoTask(self.ioqueue, fs, dest)
    self.observer.schedule(self.event_handler, fs, recursive=True)

  def ioprocess(self):
    while True:
      t = self.ioqueue.get()
      try:
        t.process()
      finally:
        self.ioqueue.task_done()

  def start(self):
    self.observer.start()
    self.iothread.start()

  def stop(self):
    self.observer.stop()
    self.iothread.stop()

  def join(self):
     self.observer.join()
     self.iothread.join()
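Note: standard threading.Thread objects have no stop() method, so Sceduler.stop() as written would raise AttributeError on self.iothread.stop(). One common fix, assuming the rest of the class stays as shown, is to push a sentinel onto the queue and let the worker exit when it sees it:

  def stop(self):
    self.observer.stop()
    self.ioqueue.put(None)      # sentinel: ask the worker thread to exit

  def ioprocess(self):
    while True:
      t = self.ioqueue.get()
      if t is None:             # sentinel seen: leave the worker loop
        self.ioqueue.task_done()
        break
      try:
        t.process()
      finally:
        self.ioqueue.task_done()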
Example #19
    def __init__(self, input_dir, templates_dir):

        paths = [input_dir, templates_dir]
        threads = []

        try:
            observer = Observer()
            event_handler = WatchEventHandler()

            for i in paths:
                targetPath = str(i)
                observer.schedule(event_handler, targetPath, recursive=True)
                threads.append(observer)

            observer.start()

            signal_watch_init = signal('watch_init')
            signal_watch_init.send(self)

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                wrangler._reporter.log("Stopping with grace and poise", "green")
                observer.stop()
            
            observer.join()
        except:
            return None
Example #20
def main():
    arguments = docopt(__doc__, version='Storyline HTTP v0.1')

    if arguments.get('--debug'):
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stderr)
        logger.addHandler(handler)

    app.config.from_object(__name__)
    app.debug = arguments.get('--debug')

    story_path = arguments.get('STORY_PATH', '.')

    global plot
    plot = storyfile.load_plot_from_path(story_path)

    observer = Observer()
    observer.schedule(LoggingEventHandler(), path=story_path, recursive=True)
    observer.schedule(Reloader(story_path), path=story_path, recursive=True)

    observer.start()
    try:
        app.run()
    finally:
        observer.stop()
        observer.join()
Example #21
def start_watchdog():
    event_handler = RankingHandler()
    observer      = Observer()
    log_handler   = LoggingEventHandler()
    log_observer  = Observer()
    try:
        observer.schedule(event_handler, path='./watch')
        observer.start()
        log_observer.schedule(log_handler, path='./watch')
        log_observer.start()
        logging.info("Watching Directory")
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Kill message sent. Aborting")
        observer.stop()
        log_observer.stop()
    except:
        logging.info("Unexpected error: %s" % sys.exc_info()[0])

        observer.stop()
        log_observer.stop()

        error_message(sys.exc_info()[0])

    observer.join()
    log_observer.join()
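Note: two observer threads are not required here; a single Observer accepts multiple schedule() calls, even for the same path, as Example #20 above shows. Reusing this example's handlers, an equivalent setup would be:

from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler

# One observer thread, two handlers watching the same path.
observer = Observer()
observer.schedule(RankingHandler(), path='./watch')   # RankingHandler as defined for this example
observer.schedule(LoggingEventHandler(), path='./watch')
observer.start()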
Example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--after', help="a command to run after")
    args = parser.parse_args()

    py_event_handler = TouchFileEventHandler(
        patterns=['*.py'],
        touch_file='index.rst'
    )
    rst_event_handler = MakeEventHandler(
        patterns=['*.rst'],
        make_target='html',
        after=args.after)

    observer = Observer()
    observer.schedule(py_event_handler, path='..', recursive=True)
    observer.schedule(rst_event_handler, path='.', recursive=True)

    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #23
def serve(site, director):
    """Run a simple web server that serve the output directory and watches for
    changes to the site. When something is changed, it should be generated.
    """
    # Override the log level to display some interactive messages with the
    # user. With the dev server running, there's no sense in being silent.
    logger.setLevel(logging.INFO)

    # Start the watchdog.
    event_handler = SiteHandler(director)
    observer = Observer()
    observer.schedule(event_handler, site.path, recursive=True)
    observer.start()

    # The simple HTTP server is pretty dumb and does not even take a path to
    # serve. The only way to serve the right path is to change the directory.
    outdir = director.outdir
    os.chdir(outdir)

    socketserver.TCPServer.allow_reuse_address = True
    httpd = socketserver.TCPServer(('', PORT), SimpleHTTPRequestHandler)

    logger.info(
        _('Serving {outdir} at http://localhost:{port}/.'
          '\nPress Ctrl-C to quit.').format(outdir=outdir, port=PORT))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        logger.info(_('\nBye.'))
        observer.stop()

    observer.join()
Example #24
def watch_directory(watch_dir, target_dir, condition=None):
    if condition:
        condition.acquire()

    watch_path = abspath(watch_dir)
    logger.info('Watch path: %s' % watch_path)

    target_path = abspath(target_dir)
    logger.info('Target path: %s' % target_path)

    handler = ModifiedHandler(watch_path, target_path)
    obs = Observer()

    obs.schedule(handler, watch_path, recursive=True)
    obs.start()

    if condition:
        condition.notify()
        condition.release()
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Example #25
def filemonitor(topdir, mode, jfs):
    errors = {}
    def saferun(cmd, *args):
        log.debug('running %s with args %s', cmd, args)
        try:
            return apply(cmd, args)
        except Exception as e:
            puts(colored.red('Ouch. Something\'s wrong with "%s":' % args[0]))
            log.exception('SAFERUN: Got exception when processing %s', args)
            errors.update( {args[0]:e} )
            return False

    if mode == 'archive':
        event_handler = ArchiveEventHandler(jfs, topdir)
    elif mode == 'sync':
        event_handler = SyncEventHandler(jfs, topdir)
        #event_handler = LoggingEventHandler()
    elif mode == 'share':
        event_handler = ShareEventHandler(jfs, topdir)
    observer = Observer()
    observer.schedule(event_handler, topdir, recursive=True)
    observer.start()
    try:
        puts(colored.green('Starting JottaCloud monitor'))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        puts(colored.red('JottaCloud monitor stopped'))
    observer.join()
Example #26
def go_watch():
    try:
        print 'Start watching %s' % PATH_TO_WATCH
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
        event_handler = LoggingEventHandler()
        observer = Observer()
        observer.schedule(event_handler, PATH_TO_WATCH, recursive=True)
        observer.start()
        event_handler.on_modified = sync_upload
        event_handler.on_deleted = sync_upload_delete
        event_handler.on_created = sync_upload_create
        event_handler.on_moved = sync_upload_move
        time_loop = 1
        try:
            while True:
                time.sleep(1)
                time_loop += 1
                if not time_loop % AUTO_SYNC_TIME:
                    print 'Auto sync every %s second' % AUTO_SYNC_TIME
                    if not observer.event_queue.unfinished_tasks:
                        sync_download()
                        check_dir_deleted()
                    print 'Auto check downloaded file or folder'
                    check_dir_deleted()
        except KeyboardInterrupt:
            print 'End watching.'
            observer.stop()
        observer.join()
    except Exception, e:
        print '*' * 10
        print e
        print '*' * 10
        return
Example #27
  def start(self):
    # Watch the source files for changes
    filewatch = Observer()
    filewatch.schedule(FilewatchHandler(parent=self,
        ignore_patterns=['*.swp', '*~']),
        self.src_dir,
        recursive=True)

    # Clean shutdown on ctrl+c
    def signal_handler(signal, frame):
      print
      print 'Shutting down...'
      self.stop_server()
      filewatch.stop()

    signal.signal(signal.SIGINT, signal_handler)

    self.rebuild()
    self.start_server()

    print 'Serving at port', self.port
    print 'Serving files from', self.final_build_dir
    print('Press Ctrl+C to stop')

    filewatch.start()
    signal.pause()
    filewatch.join(5000)
Example #28
def start_watchdog():
    event_handler = FileSystemEventHandler()
    observer      = Observer()
    observer.schedule(event_handler, path='../watch')
    observer.start()
    log_handler   = LoggingEventHandler()
    log_observer  = Observer()
    log_observer.schedule(log_handler, path='../watch')
    log_observer.start()
    try:
        logging.info("Watching Directory")
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Kill message sent. Aborting")
        observer.stop()
        log_observer.stop()
    except:
        logging.info("Unexpected error: %s" % sys.exc_info()[0])
        observer.stop()
        log_observer.stop()

        # Send Email
        msg = "Unexpected error: %s\nScript Failed. Please log in and restart manually" % sys.exc_info()[0]
        for receiver in toaddrs:
            server = smtplib.SMTP('smtp.gmail.com:587')
            server.starttls()
            server.login(username,password)
            server.sendmail(fromaddr, receiver, msg)
            server.quit()
    observer.join()
    log_observer.join()
Example #29
def main():
    if len(sys.argv) < 4:
        print "Usage: autorerun <directory_to_monitory> <pattern> <command> <command_args>"
        return
    
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
        
    directory = sys.argv[1]
    pattern = sys.argv[2]
    command = sys.argv[3:]
    
    event_handler = RestartSubProcessEvent(command, pattern)
    
    observer = Observer()
    observer.schedule(event_handler, directory, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(0.1)
    except KeyboardInterrupt:
        observer.stop()
    
    event_handler.kill()
    
    observer.join()
Example #30
    def compile(self):
        """
        Generate autodoc rst files from code docstrings and compile sphinx output
        """
        if self.options.get('watch', False):
            # If the watch folder is set, we will set up
            # an observer using watchdog and re-compile on code change

            from watchdog.observers import Observer

            observer = Observer()
            handler = CompileEventHandler(self)
            observer.schedule(handler, path=self.doc_path, recursive=True)
            observer.start()

            print "Watching folder @ {} for changes.".format(self.doc_path)

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                print "Stopping watch folder and Exiting."
                observer.stop()
            observer.join()

            print "Goodbye"

        else:

            print "Compiling autodoc rst and sphinx files"

            self._compile_all()

            print "Files compiled. Goodbye"
Example #31
def main(args):
    stream = ALPACA_STREAM(data_feed='sip')
    api = ALPACA_REST()
    fleet = {}

    # Move old signals so that we do not consider them
    MoveOldSignals(api)
    
    # checking for trades to execute!
    event_handler = MyHandler(fleet,api,stream)
    observer = Observer(timeout=1)
    observer.schedule(event_handler,  path='/Users/schae/testarea/finances/FinanceMonitor/Instructions/',  recursive=True)
    observer.start()

    symbols = args.symbols
    for symbol in symbols:
        if args.lot>0:
            algo = BullishAlgo(api, symbol, lot=args.lot, limit=args.limit, target=args.target, df=[])
            fleet[symbol] = algo

    # Trigger the loading of the trades
    event_handler.on_modified(True)

    async def on_bars(data):
        if data.symbol in fleet:
            fleet[data.symbol].on_bar(data)

    for symbol in symbols:
        print(symbol)
        sys.stdout.flush()
        #stream.subscribe_trades(on_bars, symbol)
        stream.subscribe_bars(on_bars, symbol)
    
    async def on_trade_updates(data):
        logger.info(f'trade_updates {data}')
        symbol = data.order['symbol']
        if symbol in fleet:
            fleet[symbol].on_order_update(data.event, data.order)
    
    stream.subscribe_trade_updates(on_trade_updates)
    
    async def periodic():
        while True:
            if not api.get_clock().is_open:
                logger.info('exit as market is not open')
                sys.exit(0)
            await asyncio.sleep(30)
            positions = api.list_positions()
            for symbol, algo in fleet.items():
                pos = [p for p in positions if p.symbol == symbol]
                algo.checkup(pos[0] if len(pos) > 0 else None)
    
    loop = asyncio.get_event_loop()
    while 1:
        try:
            loop.run_until_complete(asyncio.gather(stream._run_forever(),periodic()))
        except (ConnectionResetError,urllib3.exceptions.ProtocolError,requests.exceptions.ConnectionError,APIError,ValueError,AttributeError,RuntimeError,TimeoutError):
            print('Connection error. will try to restart')
            pass
    loop.close()
    observer.stop()
    observer.join()
Example #32
class LocalFS(FileSystem):
    def __init__(self, root):
        abs_root = os.path.abspath(root)
        os.makedirs(abs_root, exist_ok=True)
        super().__init__(abs_root)

        self._state = {}
        self._watchdog = Observer()
        self._watchdog.schedule(LocalFSEventHandler(self), abs_root, recursive=True)
        self._queue = Queue()

    def _abs_path(self, path):
        return os.path.abspath(os.path.join(self._root, path))

    def _rel_path(self, path):
        return os.path.relpath(path, start=self.root)

    def _to_file(self, abs_path):
        is_folder = os.path.isdir(abs_path)
        rel_path = self._rel_path(abs_path)
        return LocalFile(
            md5=_md5(abs_path) if not is_folder else os.stat(abs_path).st_ino,
            is_folder=is_folder,
            modified_date=datetime.fromtimestamp(round(os.path.getmtime(abs_path), 3))
            if not is_folder
            else None,
        )

    @property
    def state(self):
        if self._state:
            return self._state

        self._state = LocalFSState.from_file_list(self._list())
        self._watchdog.start()

        return self._state

    def get_changes(self):
        while True:
            yield self._queue.get()

    @atomic()
    def makedirs(self, path):
        LOGGER.debug(f"Creating local directory {path}")
        os.makedirs(self._abs_path(path), exist_ok=True)

    def read(self, path):
        try:
            return open(self._abs_path(path), "rb")
        except (FileNotFoundError, IOError):
            LOGGER.error(f"Cannot read file {self._abs_path(path)} from {self}")
            return None

    def search(self, path):
        return self.state[path]

    def _list(self):
        return [
            (self._rel_path(os.path.join(dp, f)), self._to_file(os.path.join(dp, f)))
            for dp, dn, filenames in os.walk(self.root)
            for f in dn + filenames
        ]

    def list(self):
        return iter(self.state)

    @atomic()
    def remove(self, path):
        LOGGER.debug(f"Removing local file at {path}")
        abs_path = self._abs_path(path)
        try:
            os.remove(abs_path)
        except IsADirectoryError:
            rmtree(abs_path)
        except FileNotFoundError:
            pass

    @atomic()
    def move(self, src: str, dst: str):
        try:
            LOGGER.debug(f"Moving local file {src} to {dst}")
            move(self._abs_path(src), self._abs_path(dst))
        except FileNotFoundError:
            pass

    @atomic()
    def write(self, stream, path, modified_date):
        abs_path = self._abs_path(path)

        folder = os.path.dirname(path)
        if not self.search(folder):
            self.makedirs(folder)

        with open(abs_path, "wb") as fout:
            copyfileobj(stream, fout)
            stream.close()

        mtime = datetime.timestamp(modified_date)
        os.utime(abs_path, (mtime, mtime))

    def conflict(self, path: str) -> str:
        head, tail = os.path.split(path)
        return os.path.join(
            head, f"conflict_{hex(int(time())).replace('0x', '')}_{tail}"
        )

    @atomic()
    def copy(self, src: str, dst: str):
        try:
            copy(self._abs_path(src), self._abs_path(dst))
        except FileNotFoundError:
            pass

    def __del__(self):
        self._watchdog.stop()
        self._watchdog.join()

        super().__del__()

    def __repr__(self):
        return f"{type(self).__name__}({self._root})"
Example #33
    def process(self, event):
        """
        event.event_type
            'modified' | 'created' | 'moved' | 'deleted'
        event.is_directory
            True | False
        event.src_path
            path/to/observed/file
        """

        print(event.src_path, event.event_type)  # print now only for debug

    def on_modified(self, event):
        self.process(event)


if __name__ == '__main__':
    args = sys.argv[1:]
    observer = Observer()
    observer.schedule(MyHandler(), path=args[0] if args else '.')
    observer.start()

    try:
        while True:
            time.sleep(1)
            # print('checking')
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
Example #34
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = "P:/Dokument/3"
    event_handler = LoggingEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=False)
    observer.start()
    try:
        while True:
            print("wach")
            time.sleep(1)
            print("schlafe")
    except KeyboardInterrupt:
        observer.stop()
        print("stopped")
    observer.join()  # indent into the except, or keep at the left margin? (see the note after this example)
    print("joined")
Example #35
class PortListerPlugin(octoprint.plugin.StartupPlugin, octoprint.plugin.AssetPlugin, octoprint.plugin.SettingsPlugin):
	def on_after_startup(self, *args, **kwargs):
		self._logger.info("Port Lister %s %s" % (repr(args), repr(kwargs)))
		event_handler = PortListEventHandler(self)
		self._observer = Observer()
		self._observer.schedule(event_handler, "/dev", recursive=False)
		self._observer.start()

	def on_port_created(self, port, *args, **kwargs):
		# if we're already connected ignore it
		if self._printer.is_closed_or_error():
			connection_options = get_connection_options()
			self._logger.info("on_port_created connection_options %s" % (repr(connection_options)))

			# is the new device in the port list? yes, tell the view model
			self._logger.info("Checking if %s is in %s" % (port, repr(connection_options["ports"])))
			if port in connection_options["ports"]:
				self._plugin_manager.send_plugin_message(self._plugin_name, port)

				# if autoconnect and the new port matches, try to connect
				if self._settings.global_get_boolean(["serial", "autoconnect"]):
					self._logger.info("autoconnect_delay %d", self._settings.get(["autoconnect_delay"]))
					Timer(self._settings.get(["autoconnect_delay"]), self.do_auto_connect, [port]).start()
				else:
					self._logger.info("Not autoconnecting because autoconnect is turned off.")
			else:
				self._logger.warning("Won't autoconnect because %s isn't in %s" % (port, repr(connection_options["ports"])))
		else:
			self._logger.warning("Not auto connecting because printer is not closed nor in error state.")

	def on_shutdown(self, *args, **kwargs):
		self._logger.info("Shutting down file system observer")
		self._observer.stop();
		self._observer.join()

	def do_auto_connect(self, port, *args, **kwargs):
		try:
			self._logger.info("do_auto_connect")
			(autoport, baudrate) = self._settings.global_get(["serial", "port"]), self._settings.global_get_int(["serial", "baudrate"])
			if not autoport:
				autoport = "AUTO"
			if not port:
				port = "AUTO"
			if autoport == "AUTO" or os.path.realpath(autoport) == os.path.realpath(port):
				self._logger.info("realpath match")
				printer_profile = self._printer_profile_manager.get_default()
				profile = printer_profile["id"] if "id" in printer_profile else "_default"
				if not self._printer.is_closed_or_error():
					self._logger.info("Not autoconnecting; printer already connected")
					return
				self._logger.info("Attempting to connect to %s at %d with profile %s" % (autoport, baudrate, repr(profile)))
				self._printer.connect(port=autoport, baudrate=baudrate, profile=profile)
			else:
				self._logger.info("realpath no match")
				self._logger.info("Skipping auto connect on %s because it isn't %s" % (os.path.realpath(port), os.path.realpath(autoport)))
		except:
			self._logger.error("Exception in do_auto_connect %s", get_exception_string())

	def get_settings_defaults(self, *args, **kwargs):
		return dict(autoconnect_delay=0)

	def get_assets(self, *args, **kwargs):
		return dict(js=["js/portlister.js"])

	def get_update_information(self, *args, **kwargs):
		return dict(
			portlister=dict(
				displayName="PortLister",
				displayVersion=self._plugin_version,

				# use github release method of version check
				type="github_release",
				user="******",
				repo="OctoPrint-PortLister",
				current=self._plugin_version,

				# update method: pip
				pip="https://github.com/mikekscholz/OctoPrint-PortLister/archive/{target_version}.zip"
			)
		)
Example #36
def root():
    initial_log = []
    global f_name_pos_map
    with open(log_filename) as f:
        for i in f.readlines():
            initial_log.append(i)
        f_name_pos_map[log_filename] = f.seek(0, 2)
    return render_template('static.html', initial_log=initial_log)


@app.route('/ping')
def handle_my_custom_event():
    print(request.args)
    socketio.emit('log_response', "testing response")
    return ""


log_observer = Observer()
log_observer.schedule(LogChangeHandler(patterns=["*.log"],
                                       ignore_patterns=["*.swp"],
                                       ignore_directories=True,
                                       case_sensitive=True),
                      "logs",
                      recursive=False)
log_observer.start()

if __name__ == '__main__':
    socketio.run(app, debug=True)
    log_observer.stop()
    log_observer.join()
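Note: LogChangeHandler is not shown, but its constructor arguments (patterns, ignore_patterns, ignore_directories, case_sensitive) match watchdog's PatternMatchingEventHandler, so it is presumably a subclass. A speculative minimal version that only notifies connected clients (the real handler probably streams the new log lines instead):

from watchdog.events import PatternMatchingEventHandler

class LogChangeHandler(PatternMatchingEventHandler):
    """Speculative sketch: notify socketio clients when a watched log changes."""

    def on_modified(self, event):
        # Only paths matching `patterns` (and not `ignore_patterns`) reach this callback.
        socketio.emit('log_response', event.src_path)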
Example #37
#
# def on_deleted(event):
#     print(event,'has been deleted')
#
# def on_moved(event):
#     print(event,'has been moved')

#my_event_handler.on_created = on_created()
my_event_handler.on_modified = Practice.FilePoller.FilePoller.on_modified

#my_event_handler.on_deleted = on_deleted
#my_event_handler.on_moved = on_moved

path = 'F:/Python/TextFiles/'
go_recursively = True
Obj_Observer = Observer()

Obj_Observer.schedule(my_event_handler, path, recursive=go_recursively)

Obj_Observer.start()

try:
    while True:
        #print('Sleep starts')
        time.sleep(10)
        #print('Sleep ends')
except KeyboardInterrupt:
    Obj_Observer.stop()
    Obj_Observer.join()
Example #38
    def interact(self, shell, keyboard_thread):
        event_handler = RerunSceneHandler(self.queue)
        file_observer = Observer()
        file_observer.schedule(event_handler, config["input_file"], recursive=True)
        file_observer.start()

        self.quit_interaction = False
        keyboard_thread_needs_join = shell.pt_app is not None
        assert self.queue.qsize() == 0

        last_time = time.time()
        while not (self.renderer.window.is_closing or self.quit_interaction):
            if not self.queue.empty():
                tup = self.queue.get_nowait()
                if tup[0].startswith("rerun"):
                    # Intentionally skip calling join() on the file thread to save time.
                    if not tup[0].endswith("keyboard"):
                        if shell.pt_app:
                            shell.pt_app.app.exit(exception=EOFError)
                        file_observer.unschedule_all()
                        raise RerunSceneException
                    keyboard_thread.join()

                    kwargs = tup[2]
                    if "from_animation_number" in kwargs:
                        config["from_animation_number"] = kwargs[
                            "from_animation_number"
                        ]
                    # # TODO: This option only makes sense if interactive_embed() is run at the
                    # # end of a scene by default.
                    # if "upto_animation_number" in kwargs:
                    #     config["upto_animation_number"] = kwargs[
                    #         "upto_animation_number"
                    #     ]

                    keyboard_thread.join()
                    file_observer.unschedule_all()
                    raise RerunSceneException
                elif tup[0].startswith("exit"):
                    # Intentionally skip calling join() on the file thread to save time.
                    if not tup[0].endswith("keyboard") and shell.pt_app:
                        shell.pt_app.app.exit(exception=EOFError)
                    keyboard_thread.join()
                    # Remove exit_keyboard from the queue if necessary.
                    while self.queue.qsize() > 0:
                        self.queue.get()
                    keyboard_thread_needs_join = False
                    break
                else:
                    method, args, kwargs = tup
                    getattr(self, method)(*args, **kwargs)
            else:
                self.renderer.animation_start_time = 0
                dt = time.time() - last_time
                last_time = time.time()
                self.renderer.render(self, dt, self.moving_mobjects)
                self.update_mobjects(dt)
                self.update_meshes(dt)
                self.update_self(dt)

        # Join the keyboard thread if necessary.
        if shell is not None and keyboard_thread_needs_join:
            shell.pt_app.app.exit(exception=EOFError)
            keyboard_thread.join()
            # Remove exit_keyboard from the queue if necessary.
            while self.queue.qsize() > 0:
                self.queue.get()

        file_observer.stop()
        file_observer.join()

        if self.dearpygui_imported and config["enable_gui"]:
            dpg.stop_dearpygui()

        if self.renderer.window.is_closing:
            self.renderer.window.destroy()
Example #39
class TangibleLandscapePlugin(wx.Dialog):
    def __init__(self, giface, parent):
        wx.Dialog.__init__(self, parent, title="Tangible Landscape", style=wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER)
        self.giface = giface
        self.parent = parent

        if not gscript.find_program('r.in.kinect'):
            self.giface.WriteError("ERROR: Module r.in.kinect not found.")

        self.settings = {}
        UserSettings.ReadSettingsFile(settings=self.settings)
        # for the first time
        if not 'tangible' in self.settings:
            self.settings['tangible'] = {'calibration': {'matrix': None},
                                         'analyses': {'file': None,
                                                      'contours': None,
                                                      'contours_step': 1}
                                         }
        self.calib_matrix = self.settings['tangible']['calibration']['matrix']

        self.delay = 0.3
        self.process = None
        self.observer = None
        self.timer = wx.Timer(self)
        self.changedInput = False
        self.filter = {'filter': False,
                       'counter': 0,
                       'threshold': 0.1,
                       'debug': False}
        # to be able to add params to runAnalyses from outside
        self.additionalParams4Analyses = {}

        self.notebook = wx.Notebook(self)
        self.scanning_panel = ScanningPanel(self.notebook, self.giface, self.settings['tangible'], scaniface=self)
        self.notebook.AddPage(self.scanning_panel, "Scanning")
        self.scan = self.settings['tangible']['scan']

        self.outputPanel = OutputPanel(self.notebook, self.giface, self.settings['tangible'])
        self.notebook.AddPage(self.outputPanel, "Output")
        self.scanning_panel.settingsChanged.connect(lambda: setattr(self, 'changedInput', True))
        analyses_panel = AnalysesPanel(self.notebook, self.giface, self.settings['tangible'], scaniface=self)
        self.notebook.AddPage(analyses_panel, "Analyses")
        analyses_panel.settingsChanged.connect(lambda: setattr(self, 'changedInput', True))
        self.outputPanel.settingsChanged.connect(lambda: setattr(self, 'changedInput', True))
        self.drawing_panel = DrawingPanel(self.notebook, self.giface, self.settings['tangible'])
        self.notebook.AddPage(self.drawing_panel, "Drawing")
        self.drawing_panel.Bind(EVT_UPDATE_GUI, self.OnUpdate)
        self.drawing_panel.settingsChanged.connect(lambda: setattr(self, 'changedInput', True))
        self.activities_panel = ActivitiesPanel(self.notebook, self.giface, self.settings['tangible'], scaniface=self)
        self.notebook.AddPage(self.activities_panel, "Activities")


        btnStart = wx.Button(self, label="Start")
        btnStop = wx.Button(self, label="Stop")
        btnPause = wx.Button(self, label="Pause")
        self.btnPause = btnPause
        btnScanOnce = wx.Button(self, label="Scan once")
        btnHelp = wx.Button(self, label="Help")
        btnClose = wx.Button(self, label="Close")
        self.status = wx.StaticText(self)

        # bind events
        btnStart.Bind(wx.EVT_BUTTON, lambda evt: self.Start())
        btnStop.Bind(wx.EVT_BUTTON, lambda evt: self.Stop())
        btnPause.Bind(wx.EVT_BUTTON, lambda evt: self.Pause())
        btnScanOnce.Bind(wx.EVT_BUTTON, self.ScanOnce)
        btnHelp.Bind(wx.EVT_BUTTON, self.OnHelp)
        btnClose.Bind(wx.EVT_BUTTON, self.OnClose)
        self.Layout()

        sizer = wx.BoxSizer(wx.VERTICAL)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.Add(btnStart, flag=wx.EXPAND | wx.ALL, border=5)
        hSizer.Add(btnStop, flag=wx.EXPAND | wx.ALL, border=5)
        hSizer.Add(btnPause, flag=wx.EXPAND | wx.ALL, border=5)
        hSizer.Add(btnScanOnce, flag=wx.EXPAND | wx.ALL, border=5)
        sizer.Add(hSizer, 0, wx.ALL | wx.EXPAND, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.Add(self.status, flag=wx.EXPAND | wx.LEFT, border=5)
        sizer.Add(hSizer)
        sizer.Add(self.notebook, 1, wx.ALL | wx.EXPAND, 5)
        hSizer = wx.BoxSizer(wx.HORIZONTAL)
        hSizer.AddStretchSpacer()
        hSizer.Add(btnHelp, flag=wx.EXPAND | wx.ALL, border=5)
        hSizer.Add(btnClose, flag=wx.EXPAND | wx.ALL, border=5)
        sizer.Add(hSizer, flag=wx.EXPAND)

        self.SetSizer(sizer)
        sizer.Fit(self)
        self.SetMinSize(self.GetBestSize())
        self.Layout()

        self.Bind(wx.EVT_TIMER, self.RestartIfNotRunning, self.timer)
        self.Bind(wx.EVT_CLOSE, self.OnClose)
        self.Bind(EVT_UPDATE_GUI, self.OnUpdate)
        self.Bind(EVT_ADD_LAYERS, self.OnAddLayers)
        self.Bind(EVT_REMOVE_LAYERS, self.OnRemoveLayers)
        self.Bind(EVT_CHECK_LAYERS, self.OnCheckLayers)

        self.pause = None
        self.resume_once = None

    def OnHelp(self, event):
        """Show help"""
        self.giface.Help(entry='g.gui.tangible', online=False)

    def OnClose(self, event):
        self.Stop()
        UserSettings.SaveToFile(self.settings)
        self.Destroy()

    def OnUpdate(self, event=None):
        for each in self.giface.GetAllMapDisplays():
            each.GetMapWindow().UpdateMap(delay=self.delay)

    def CalibrateModelBBox(self, event):
        if self.IsScanning():
            dlg = wx.MessageDialog(self, 'In order to calibrate, please stop scanning process first.',
                                   'Stop scanning',
                                   wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return
        params = {}
        if self.calib_matrix:
            params['calib_matrix'] = self.calib_matrix
        params['rotate'] = self.scan['rotation_angle']
        zrange = ','.join(self.scan['trim_nsewtb'].split(',')[4:])
        params['zrange'] = zrange
        res = gscript.parse_command('r.in.kinect', flags='m', overwrite=True, **params)
        if not res['bbox']:
            gscript.message(_("Failed to find model extent"))
        offsetcm = 2
        n, s, e, w = [int(round(float(each))) for each in res['bbox'].split(',')]
        self.scanning_panel.trim['n'].SetValue(str(n + offsetcm))
        self.scanning_panel.trim['s'].SetValue(str(abs(s) + offsetcm))
        self.scanning_panel.trim['e'].SetValue(str(e + offsetcm))
        self.scanning_panel.trim['w'].SetValue(str(abs(w) + offsetcm))

    def Calibrate(self, event):
        if self.IsScanning():
            dlg = wx.MessageDialog(self, 'In order to calibrate, please stop scanning process first.',
                                   'Stop scanning',
                                   wx.OK | wx.ICON_WARNING)
            dlg.ShowModal()
            dlg.Destroy()
            return
        dlg = wx.MessageDialog(self, 'In order to calibrate, please remove objects from the table.',
                                   'Calibration',
                                   wx.OK | wx.CANCEL | wx.ICON_INFORMATION)
        if dlg.ShowModal() != wx.ID_OK:
            dlg.Destroy()
            return
        dlg.Destroy()

        res = gscript.parse_command('r.in.kinect', flags='c', overwrite=True)
        if not (res['calib_matrix'] and len(res['calib_matrix'].split(',')) == 9):
            gscript.message(_("Failed to calibrate"))
            return
        else:
            self.giface.WriteCmdLog("Measured and corrected tilting of sensor: {angle} degrees".format(angle=res['angle_deviation']))
            if float(res['angle_deviation']) > 3:
                self.giface.WriteWarning("Angle deviation is too high, please level the sensor.")

        offsetcm = 1
        height = str(round(float(res['height']) * 100 - offsetcm, 1))
        self.scanning_panel.trim['b'].SetValue(height)
        nswetb = self.settings['tangible']['scan']['trim_nsewtb'].split(',')
        nswetb[-1] = height
        self.settings['tangible']['scan']['trim_nsewtb'] = ','.join(nswetb)
        self.settings['tangible']['calibration']['matrix'] = res['calib_matrix']
        UserSettings.SaveToFile(self.settings)

        # update
        self.calib_matrix = res['calib_matrix']

    def GatherParameters(self, editMode, continuous):
        """Create dict of input parameteres for r.in.kinect.
        Parameter editMode=True is needed when this dict is passed as stdin
        into r.in.kinect during scanning. Parameter continuous is needed when
        the scanning is supposed to run in loop and not just once"""
        params = {}
        if self.settings['tangible']['output']['scan']:
            params['output'] = self.settings['tangible']['output']['scan'] + 'tmp'
        # drawing
        if self.settings['tangible']['drawing']['active'] and self.settings['tangible']['drawing']['name']:
            params['draw_output'] = self.settings['tangible']['drawing']['name']
            params['draw'] = self.settings['tangible']['drawing']['type']
            params['draw_threshold'] = self.settings['tangible']['drawing']['threshold']
            # we don't want to scan when drawing
            if editMode:
                params['output'] = ""
            else:
                del params['output']
        elif editMode:
            params['draw_output'] = ""

        if self.calib_matrix:
            params['calib_matrix'] = self.calib_matrix
        if self.scan['elevation']:
            params['raster'] = self.scan['elevation']
        elif self.scan['region']:
            params['region'] = self.scan['region']
        if self.scan['trim_tolerance']:
            params['trim_tolerance'] = self.scan['trim_tolerance']

        # flags
        params['flags'] = ''
        if continuous:
            params['flags'] += 'l'
        if not editMode and not params['flags']:
            del params['flags']

        if self.settings['tangible']['analyses']['contours'] and 'output' in params:
            params['contours'] = self.settings['tangible']['analyses']['contours']
            params['contours_step'] = self.settings['tangible']['analyses']['contours_step']
        elif editMode:
            params['contours'] = ""
        # export PLY
        if 'output' in self.settings['tangible'] and self.settings['tangible']['output']['PLY'] and \
           self.settings['tangible']['output']['PLY_file'] and not self.settings['tangible']['drawing']['active']:
            params['ply'] = self.settings['tangible']['output']['PLY_file']
        elif editMode:
            params['ply'] = ""
        # export color
        if 'output' in self.settings['tangible'] and self.settings['tangible']['output']['color'] and \
           self.settings['tangible']['output']['color_name']:
            params['color_output'] = self.settings['tangible']['output']['color_name']
        elif editMode:
            params['color_output'] = ""

        trim_nsew = ','.join(self.scan['trim_nsewtb'].split(',')[:4])
        params['trim'] = trim_nsew
        params['smooth_radius'] = float(self.scan['smooth'])/1000
        if self.scan['interpolate']:
            method = 'interpolation'
        else:
            method = 'mean'
        params['method'] = method
        zrange = ','.join(self.scan['trim_nsewtb'].split(',')[4:])
        params['zrange'] = zrange
        params['rotate'] = self.scan['rotation_angle']
        params['resolution'] = float(self.scan['resolution'])/1000
        params['zexag'] = self.scan['zexag']
        params['numscan'] = self.scan['numscans']
        if self.process and self.process.poll() is None:  # still running
            if self.resume_once is True:
                params['resume_once'] = ''
                self.resume_once = None

            if self.pause is True:
                params['pause'] = ''
            elif self.pause is False:
                params['resume'] = ''

        return params

    def IsScanning(self):
        if self.process and self.process.poll() is None:
            return True
        return False

    def Scan(self, continuous):
        if self.process and self.process.poll() is None:
            return
        self.status.SetLabel("Scanning...")
        wx.SafeYield()
        params = self.GatherParameters(editMode=False, continuous=continuous)
        self.process = gscript.start_command('r.in.kinect', overwrite=True, quiet=True,
                                             stdin=PIPE, **params)
        return self.process

    def ScanOnce(self, event):
        # if already running, resume scanning one time
        if self.process and self.process.poll() is None:  # still running
            self.resume_once = True
            self.changedInput = True
        else:
            self.Scan(continuous=False)
            self.status.SetLabel("Importing scan...")
            self.process.wait()
            self.process = None
            run_analyses(settings=self.settings, analysesFile=self.settings['tangible']['analyses']['file'],
                         giface=self.giface, update=self.OnUpdate, eventHandler=self, scanFilter=self.filter)
            self.status.SetLabel("Done.")
            self.OnUpdate(None)

    def RestartIfNotRunning(self, event):
        """Mechanism to restart scanning if process ends or
        to update scanning properties during running r.in.kinect
        if scanning input changed"""
        if self.process and self.process.poll() is not None:
            if self.observer:
                try:
                    self.observer.stop()
                except TypeError:  # throws error on mac
                    pass
                self.observer.join()
                self.observer = None
            self.Start()
        if self.changedInput:
            self.changedInput = False
            if self.process and self.process.poll() is None:
                params = self.GatherParameters(editMode=True, continuous=True)
                new_input = ["{}={}".format(key, params[key]) for key in params]
                self.process.stdin.write('\n'.join(new_input) + '\n\n')
                # SIGUSR1 is the signal r.in.kinect looks for
                self.process.send_signal(signal.SIGUSR1)

    def Start(self):
        self.Scan(continuous=True)
        self.status.SetLabel("Real-time scanning is running now.")

        if self.observer:
            return
        gisenv = gscript.gisenv()
        mapsetPath = os.path.join(gisenv['GISDBASE'], gisenv['LOCATION_NAME'], gisenv['MAPSET'])
        path1 = os.path.join(mapsetPath, 'fcell')
        if not os.path.exists(path1):
            os.mkdir(os.path.join(mapsetPath, 'fcell'))
        path2 = os.path.join(mapsetPath, 'vector')
        if not os.path.exists(path2):
            os.mkdir(os.path.join(mapsetPath, 'vector'))
        paths = [path1, path2]
        handlers = [RasterChangeHandler(self.runImport, self.settings['tangible']['output']),
                    DrawingChangeHandler(self.runImportDrawing, self.settings['tangible']['drawing']['name'])]

        self.observer = Observer()
        for path, handler in zip(paths, handlers):
            self.observer.schedule(handler, path)

        self.observer.start()
        self.timer.Start(1000)

    def Stop(self):
        if self.process and self.process.poll() is None:  # still running
            self.process.terminate()
            self.process.wait()
            self.process = None
            if self.observer:
                try:
                    self.observer.stop()
                except TypeError:  # throws error on mac
                    pass
                self.observer.join()
                self.observer = None
        self.timer.Stop()
        self.status.SetLabel("Real-time scanning stopped.")
        self.pause = False
        self.btnPause.SetLabel("Pause")

    def Pause(self):
        if self.process and self.process.poll() is None:  # still running
            if not self.pause:
                self.pause = True
                self.btnPause.SetLabel("Resume")
            else:
                self.pause = False
                self.btnPause.SetLabel("Pause")
            self.changedInput = True

    def runImport(self):
        run_analyses(settings=self.settings, analysesFile=self.settings['tangible']['analyses']['file'],
                     giface=self.giface, update=self.OnUpdate, eventHandler=self, scanFilter=self.filter,
                     **self.additionalParams4Analyses)
        evt = updateGUIEvt(self.GetId())
        wx.PostEvent(self, evt)

    def runImportDrawing(self):
        self.drawing_panel.appendVector()
        run_analyses(settings=self.settings, analysesFile=self.settings['tangible']['analyses']['file'],
                     giface=self.giface, update=self.OnUpdate, eventHandler=self, scanFilter=self.filter,
                     **self.additionalParams4Analyses)
        evt = updateGUIEvt(self.GetId())
        wx.PostEvent(self, evt)

    def postEvent(self, receiver, event):
        wx.PostEvent(receiver, event)

    def OnAddLayers(self, event):
        ll = self.giface.GetLayerList()
        for each in event.layerSpecs:
            ll.AddLayer(**each)

    def OnRemoveLayers(self, event):
        ll = self.giface.GetLayerList()
        if not hasattr(ll, 'DeleteLayer'):
            print "Removing layers from layer Manager requires GRASS GIS version > 7.2"
            return
        for each in event.layers:
            ll.DeleteLayer(each)

    def OnCheckLayers(self, event):
        ll = self.giface.GetLayerList()
        if not hasattr(ll, 'CheckLayer'):
            print "Checking and unchecking layers in layer Manager requires GRASS GIS version > 7.2"
            return
        for each in event.layers:
            ll.CheckLayer(each, checked=event.checked)
Ejemplo n.º 40
0
class IngestionMonitor:
    def __init__(self, csv_file):
        self.logger = logging.getLogger("Monitor")

        # Process CSV files and get the specific routes and file masks.
        self.csv_file = csv_file
        self.routes = self.process_csv()

        self.logger.info("Creating observer for %s" % csv_file)
        self.observer = Observer()
        for mask in self.routes:
            # Attach a watcher for each file mask in the CSV file to the Observer.
            mask_path = '/'.join(mask.split('/')[:-1])
            if os.path.isdir(mask_path):
                event_handler = MaskRouteEventHandler(patterns=[mask], routes=self.routes[mask])
                self.observer.schedule(event_handler, mask_path, recursive=True)
            else:
                self.logger.warning("Directory not found: %s" % mask_path)

        if self.watchers == 0:
            self.logger.warning("No watchers set for this observer: %s" % self.csv_file)

    @property
    def watchers(self):
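        # Number of scheduled watches; relies on watchdog's private _watches attribute.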
        return len(self.observer._watches)

    def process_csv(self):
        try:
            reader = csv.DictReader(open(self.csv_file, "U"))
        except IOError:
            self.logger.error("%s not found." % self.csv_file)
            return {}  # return an empty mapping so the caller's iteration does not fail
        fieldnames = ['uframe_route', 'filename_mask', 'reference_designator', 'data_source']
        if not set(fieldnames).issubset(reader.fieldnames):
            self.logger.error((
                "%s does not have valid column headers. "
                "The following columns are required: %s") % (self.csv_file, ", ".join(fieldnames)))
            return {}  # keep the return type consistent so iterating over routes never fails

        def commented(row):
            ''' Check to see if the row is commented out. Any field that starts with # indicates a
                comment.'''
            return bool([v for v in row.itervalues() if v and v.startswith("#")])

        routes = {}

        # Load the queue with parameters from each row.
        for row in reader:
            if not commented(row):
                mask = row['filename_mask']
                parameters = {
                    f: row[f] for f in row
                    if f in ('uframe_route', 'reference_designator', 'data_source')
                }
                if mask in routes.keys():
                    routes[mask].append(parameters)
                else:
                    routes[mask] = [parameters]
       
        return {mask: routes[mask] for mask in routes}

    def start(self):
        self.logger.info("Starting %s watchers for %s" % (self.watchers, self.csv_file))
        self.observer.start()

    def stop(self):
        self.observer.stop()
        self.observer.join()
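
# A minimal usage sketch (not part of the original example): run the monitor
# until interrupted. It assumes the module-level imports used by the class
# above plus `import time`; 'ingestion_routes.csv' is a hypothetical file.
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    monitor = IngestionMonitor('ingestion_routes.csv')
    monitor.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        monitor.stop()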
Ejemplo n.º 41
0
def main():
    args = parser.parse_args()

    if not os.path.exists(args.input_dir):
        print("Error: Invalid input folder %s" % args.input_dir)
        exit(-1)
    if not os.path.exists(args.output_dir):
        print("Error: Invalid output folder %s" % args.output_dir)
        exit(-1)

    with torch.no_grad():
        config, resmodel = get_model1()
        #config, inresmodel = get_model2()
        #config, incepv3model = get_model3()
        config, rexmodel = get_model4()
        net1 = resmodel.net
        #net2 = inresmodel.net
        #net3 = incepv3model.net
        net4 = rexmodel.net

    checkpoint = torch.load('denoise_res_015.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        resmodel.load_state_dict(checkpoint['state_dict'])
    else:
        resmodel.load_state_dict(checkpoint)

    #checkpoint = torch.load('denoise_inres_014.ckpt')
    #if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
    #inresmodel.load_state_dict(checkpoint['state_dict'])
    #else:
    #inresmodel.load_state_dict(checkpoint)

    #checkpoint = torch.load('denoise_incepv3_012.ckpt')
    #if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
    #incepv3model.load_state_dict(checkpoint['state_dict'])
    #else:
    #incepv3model.load_state_dict(checkpoint)

    checkpoint = torch.load('denoise_rex_001.ckpt')
    if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
        rexmodel.load_state_dict(checkpoint['state_dict'])
    else:
        rexmodel.load_state_dict(checkpoint)

    if not args.no_gpu:
        #inresmodel = inresmodel.cuda()
        resmodel = resmodel.cuda()
        #incepv3model = incepv3model.cuda()
        rexmodel = rexmodel.cuda()
    #inresmodel.eval()
    resmodel.eval()
    #incepv3model.eval()
    rexmodel.eval()

    # inceptionresnetv2 for random padding
    model = inceptionresnetv2(num_classes=1001,
                              pretrained='imagenet+background')
    model = model.cuda()
    model.eval()

    # Load kmean
    kmean = auxkmean(64, 10)
    kmean.importmodel()
    # watch the input dir for defense
    observer = Observer()
    event_handler = FileEventHandler(batch_size=args.batch_size,
                                     input_dir=args.input_dir,
                                     net1=net1,
                                     net4=net4,
                                     model=model,
                                     itr=args.itr,
                                     output_dir=args.output_dir,
                                     no_gpu=args.no_gpu,
                                     kmean=kmean)

    observer.schedule(event_handler, args.input_dir, recursive=True)
    observer.start()

    print("watchdog start...")

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()

    print("\nwatchdog stoped!")
Ejemplo n.º 42
0
class CommandAuto(Command):
    """Automatic rebuilds for Nikola."""

    name = "auto"
    has_server = True
    doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
    dns_sd = None
    delta_last_rebuild = datetime.timedelta(milliseconds=100)
    web_runner = None  # type: web.AppRunner

    cmd_options = [
        {
            'name': 'port',
            'short': 'p',
            'long': 'port',
            'default': 8000,
            'type': int,
            'help': 'Port number',
        },
        {
            'name': 'address',
            'short': 'a',
            'long': 'address',
            'type': str,
            'default': '127.0.0.1',
            'help': 'Address to bind',
        },
        {
            'name': 'browser',
            'short': 'b',
            'long': 'browser',
            'type': bool,
            'help': 'Start a web browser',
            'default': False,
        },
        {
            'name': 'ipv6',
            'short': '6',
            'long': 'ipv6',
            'default': False,
            'type': bool,
            'help': 'Use IPv6',
        },
        {
            'name': 'no-server',
            'long': 'no-server',
            'default': False,
            'type': bool,
            'help': 'Disable the server, automate rebuilds only'
        },
        {
            'name': 'process',
            'short': 'n',
            'long': 'process',
            'default': 0,
            'type': int,
            'help': 'Number of subprocesses (nikola build argument)'
        },
        {
            'name': 'parallel-type',
            'short': 'P',
            'long': 'parallel-type',
            'default': 'process',
            'type': str,
            'help': "Parallelization mode ('process' or 'thread', nikola build argument)"
        },
    ]

    def _execute(self, options, args):
        """Start the watcher."""
        self.sockets = []
        self.rebuild_queue = asyncio.Queue()
        self.reload_queue = asyncio.Queue()
        self.last_rebuild = datetime.datetime.now()
        self.is_rebuilding = False

        if aiohttp is None and Observer is None:
            req_missing(['aiohttp', 'watchdog'], 'use the "auto" command')
        elif aiohttp is None:
            req_missing(['aiohttp'], 'use the "auto" command')
        elif Observer is None:
            req_missing(['watchdog'], 'use the "auto" command')

        if sys.argv[0].endswith('__main__.py'):
            self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']
        else:
            self.nikola_cmd = [sys.argv[0], 'build']

        if self.site.configuration_filename != 'conf.py':
            self.nikola_cmd.append('--conf=' +
                                   self.site.configuration_filename)

        if options and options.get('process'):
            self.nikola_cmd += [
                '--process={}'.format(options['process']),
                '--parallel-type={}'.format(options['parallel-type'])
            ]

        port = options and options.get('port')
        self.snippet = '''<script>document.write('<script src="http://'
            + (location.host || 'localhost').split(':')[0]
            + ':{0}/livereload.js?snipver=1"></'
            + 'script>')</script>
        </head>'''.format(port)

        # Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered
        watched = set(['templates/'] +
                      [get_theme_path(name) for name in self.site.THEMES])
        for item in self.site.config['post_pages']:
            watched.add(os.path.dirname(item[0]))
        for item in self.site.config['FILES_FOLDERS']:
            watched.add(item)
        for item in self.site.config['GALLERY_FOLDERS']:
            watched.add(item)
        for item in self.site.config['LISTINGS_FOLDERS']:
            watched.add(item)
        for item in self.site.config['IMAGE_FOLDERS']:
            watched.add(item)
        for item in self.site._plugin_places:
            watched.add(item)
        # Nikola itself (useful for developers)
        watched.add(pkg_resources.resource_filename('nikola', ''))

        out_folder = self.site.config['OUTPUT_FOLDER']
        if not os.path.exists(out_folder):
            makedirs(out_folder)

        if options and options.get('browser'):
            browser = True
        else:
            browser = False

        if options['ipv6']:
            dhost = '::'
        else:
            dhost = '0.0.0.0'

        host = options['address'].strip('[').strip(']') or dhost

        # Prepare asyncio event loop
        # Required for subprocessing to work
        loop = asyncio.get_event_loop()

        # Set debug setting
        loop.set_debug(self.site.debug)

        # Server can be disabled (Issue #1883)
        self.has_server = not options['no-server']

        if self.has_server:
            loop.run_until_complete(self.set_up_server(host, port, out_folder))

        # Run an initial build so we are up-to-date. The server is running, but we are not watching yet.
        loop.run_until_complete(self.run_initial_rebuild())

        self.wd_observer = Observer()
        # Watch output folders and trigger reloads
        if self.has_server:
            self.wd_observer.schedule(NikolaEventHandler(
                self.reload_page, loop),
                                      out_folder,
                                      recursive=True)

        # Watch input folders and trigger rebuilds
        for p in watched:
            if os.path.exists(p):
                self.wd_observer.schedule(NikolaEventHandler(
                    self.queue_rebuild, loop),
                                          p,
                                          recursive=True)

        # Watch config file (a bit of a hack, but we need a directory)
        _conf_fn = os.path.abspath(self.site.configuration_filename
                                   or 'conf.py')
        _conf_dn = os.path.dirname(_conf_fn)
        self.wd_observer.schedule(ConfigEventHandler(_conf_fn,
                                                     self.queue_rebuild, loop),
                                  _conf_dn,
                                  recursive=False)
        self.wd_observer.start()

        win_sleeper = None
        # https://bugs.python.org/issue23057 (fixed in Python 3.8)
        if sys.platform == 'win32' and sys.version_info < (3, 8):
            win_sleeper = asyncio.ensure_future(windows_ctrlc_workaround())

        if not self.has_server:
            self.logger.info("Watching for changes...")
            # Run the event loop forever (no server mode).
            try:
                # Run rebuild queue
                loop.run_until_complete(self.run_rebuild_queue())

                loop.run_forever()
            except KeyboardInterrupt:
                pass
            finally:
                if win_sleeper:
                    win_sleeper.cancel()
                self.wd_observer.stop()
                self.wd_observer.join()
            loop.close()
            return

        if options['ipv6'] or '::' in host:
            server_url = "http://[{0}]:{1}/".format(host, port)
        else:
            server_url = "http://{0}:{1}/".format(host, port)
        self.logger.info("Serving on {0} ...".format(server_url))

        if browser:
            # Some browsers fail to load 0.0.0.0 (Issue #2755)
            if host == '0.0.0.0':
                server_url = "http://127.0.0.1:{0}/".format(port)
            self.logger.info(
                "Opening {0} in the default web browser...".format(server_url))
            webbrowser.open(server_url)

        # Run the event loop forever and handle shutdowns.
        try:
            # Run rebuild queue
            rebuild_queue_fut = asyncio.ensure_future(self.run_rebuild_queue())
            reload_queue_fut = asyncio.ensure_future(self.run_reload_queue())

            self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            self.logger.info("Server is shutting down.")
            if win_sleeper:
                win_sleeper.cancel()
            if self.dns_sd:
                self.dns_sd.Reset()
            rebuild_queue_fut.cancel()
            reload_queue_fut.cancel()
            loop.run_until_complete(self.web_runner.cleanup())
            self.wd_observer.stop()
            self.wd_observer.join()
        loop.close()

    async def set_up_server(self, host: str, port: int,
                            out_folder: str) -> None:
        """Set up aiohttp server and start it."""
        webapp = web.Application()
        webapp.router.add_get('/livereload.js', self.serve_livereload_js)
        webapp.router.add_get('/robots.txt', self.serve_robots_txt)
        webapp.router.add_route('*', '/livereload', self.websocket_handler)
        resource = IndexHtmlStaticResource(True, self.snippet, '', out_folder)
        webapp.router.register_resource(resource)
        webapp.on_shutdown.append(self.remove_websockets)

        self.web_runner = web.AppRunner(webapp)
        await self.web_runner.setup()
        website = web.TCPSite(self.web_runner, host, port)
        await website.start()

    async def run_initial_rebuild(self) -> None:
        """Run an initial rebuild."""
        await self._rebuild_site()
        # If there are any clients, have them reload the root.
        await self._send_reload_command(self.site.config['INDEX_FILE'])

    async def queue_rebuild(self, event) -> None:
        """Rebuild the site."""
        # Move events have a dest_path, some editors like gedit use a
        # move on larger save operations for write protection
        event_path = event.dest_path if hasattr(
            event, 'dest_path') else event.src_path
        if sys.platform == 'win32':
            # Windows hidden files support
            is_hidden = os.stat(
                event_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN
        else:
            is_hidden = False
        has_hidden_component = any(
            p.startswith('.') for p in event_path.split(os.sep))
        if (is_hidden or has_hidden_component or '__pycache__' in event_path
                or event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak', '~'))
                or event.is_directory
            ):  # Skip on folders, these are usually duplicates
            return

        self.logger.debug('Queuing rebuild from {0}'.format(event_path))
        await self.rebuild_queue.put((datetime.datetime.now(), event_path))

    async def run_rebuild_queue(self) -> None:
        """Run rebuilds from a queue (Nikola can only build in a single instance)."""
        while True:
            date, event_path = await self.rebuild_queue.get()
            if date < (self.last_rebuild + self.delta_last_rebuild):
                self.logger.debug(
                    "Skipping rebuild from {0} (within delta)".format(
                        event_path))
                continue
            await self._rebuild_site(event_path)

    async def _rebuild_site(self,
                            event_path: typing.Optional[str] = None) -> None:
        """Rebuild the site."""
        self.is_rebuilding = True
        self.last_rebuild = datetime.datetime.now()
        if event_path:
            self.logger.info('REBUILDING SITE (from {0})'.format(event_path))
        else:
            self.logger.info('REBUILDING SITE')

        p = await asyncio.create_subprocess_exec(*self.nikola_cmd,
                                                 stderr=subprocess.PIPE)
        exit_code = await p.wait()
        out = (await p.stderr.read()).decode('utf-8')

        if exit_code != 0:
            self.logger.error("Rebuild failed\n" + out)
            await self.send_to_websockets({'command': 'alert', 'message': out})
        else:
            self.logger.info("Rebuild successful\n" + out)

        self.is_rebuilding = False

    async def run_reload_queue(self) -> None:
        """Send reloads from a queue to limit CPU usage."""
        while True:
            p = await self.reload_queue.get()
            self.logger.info('REFRESHING: {0}'.format(p))
            await self._send_reload_command(p)
            if self.is_rebuilding:
                await asyncio.sleep(REBUILDING_REFRESH_DELAY)
            else:
                await asyncio.sleep(IDLE_REFRESH_DELAY)

    async def _send_reload_command(self, path: str) -> None:
        """Send a reload command."""
        await self.send_to_websockets({
            'command': 'reload',
            'path': path,
            'liveCSS': True
        })

    async def reload_page(self, event) -> None:
        """Reload the page."""
        # Move events have a dest_path, some editors like gedit use a
        # move on larger save operations for write protection
        if event:
            event_path = event.dest_path if hasattr(
                event, 'dest_path') else event.src_path
        else:
            event_path = self.site.config['OUTPUT_FOLDER']
        p = os.path.relpath(event_path,
                            os.path.abspath(
                                self.site.config['OUTPUT_FOLDER'])).replace(
                                    os.sep, '/')
        await self.reload_queue.put(p)

    async def serve_livereload_js(self, request):
        """Handle requests to /livereload.js and serve the JS file."""
        return FileResponse(LRJS_PATH)

    async def serve_robots_txt(self, request):
        """Handle requests to /robots.txt."""
        return Response(body=b'User-Agent: *\nDisallow: /\n',
                        content_type='text/plain',
                        charset='utf-8')

    async def websocket_handler(self, request):
        """Handle requests to /livereload and initiate WebSocket communication."""
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        self.sockets.append(ws)

        while True:
            msg = await ws.receive()

            self.logger.debug("Received message: {0}".format(msg))
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = msg.json()
                if message['command'] == 'hello':
                    response = {
                        'command': 'hello',
                        'protocols': [
                            'http://livereload.com/protocols/official-7',
                        ],
                        'serverName': 'Nikola Auto (livereload)',
                    }
                    await ws.send_json(response)
                elif message['command'] != 'info':
                    self.logger.warning(
                        "Unknown command in message: {0}".format(message))
            elif msg.type in (aiohttp.WSMsgType.CLOSED,
                              aiohttp.WSMsgType.CLOSING):
                break
            elif msg.type == aiohttp.WSMsgType.CLOSE:
                self.logger.debug("Closing WebSocket")
                await ws.close()
                break
            elif msg.type == aiohttp.WSMsgType.ERROR:
                self.logger.error(
                    'WebSocket connection closed with exception {0}'.format(
                        ws.exception()))
                break
            else:
                self.logger.warning(
                    "Received unknown message: {0}".format(msg))

        self.sockets.remove(ws)
        self.logger.debug("WebSocket connection closed: {0}".format(ws))

        return ws

    async def remove_websockets(self, app) -> None:
        """Remove all websockets."""
        for ws in self.sockets:
            await ws.close()
        self.sockets.clear()

    async def send_to_websockets(self, message: dict) -> None:
        """Send a message to all open WebSockets."""
        to_delete = []
        for ws in self.sockets:
            if ws.closed:
                to_delete.append(ws)
                continue

            try:
                await ws.send_json(message)
                if ws._close_code:
                    await ws.close()
                    to_delete.append(ws)
            except RuntimeError as e:
                if 'closed' in e.args[0]:
                    self.logger.warning(
                        "WebSocket {0} closed uncleanly".format(ws))
                    to_delete.append(ws)
                else:
                    raise

        for ws in to_delete:
            self.sockets.remove(ws)
Ejemplo n.º 43
0
class _MultiFileWatcher(object):
    """Watches multiple files."""

    _singleton = None

    @classmethod
    def get_singleton(cls):
        """Return the singleton _MultiFileWatcher object.

        Instantiates one if necessary.
        """
        if cls._singleton is None:
            LOGGER.debug("No singleton. Registering one.")
            _MultiFileWatcher()

        return _MultiFileWatcher._singleton

    # Don't allow constructor to be called more than once.
    def __new__(cls):
        """Constructor."""
        if _MultiFileWatcher._singleton is not None:
            raise RuntimeError("Use .get_singleton() instead")
        return super(_MultiFileWatcher, cls).__new__(cls)

    def __init__(self):
        """Constructor."""
        _MultiFileWatcher._singleton = self

        # Map of folder_to_watch -> _FolderEventHandler.
        self._folder_handlers = {}

        # Used for mutation of _folder_handlers dict
        self._lock = threading.Lock()

        # The Observer object from the Watchdog module. Since this class is
        # only instantiated once, we only have a single Observer in Streamlit,
        # and it's in charge of watching all paths we're interested in.
        self._observer = Observer()
        self._observer.start()  # Start observer thread.

    def watch_file(self, file_path, callback):
        """Start watching a file.

        Parameters
        ----------
        file_path : str
            The full path of the file to watch.

        callback : callable
            The function to execute when the file is changed.

        """
        folder_path = os.path.abspath(os.path.dirname(file_path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                folder_handler = _FolderEventHandler()
                self._folder_handlers[folder_path] = folder_handler

                folder_handler.watch = self._observer.schedule(folder_handler,
                                                               folder_path,
                                                               recursive=False)

            folder_handler.add_file_change_listener(file_path, callback)

    def stop_watching_file(self, file_path, callback):
        """Stop watching a file.

        Parameters
        ----------
        file_path : str
            The full path of the file to stop watching.

        callback : callable
            The function to execute when the file is changed.

        """
        folder_path = os.path.abspath(os.path.dirname(file_path))

        with self._lock:
            folder_handler = self._folder_handlers.get(folder_path)

            if folder_handler is None:
                LOGGER.debug(
                    "Cannot stop watching path, because it is already not being "
                    "watched. %s",
                    folder_path,
                )
                return

            folder_handler.remove_file_change_listener(file_path, callback)

            if not folder_handler.is_watching_files():
                # Sometimes watchdog's FileSystemEventHandler does not have
                # a .watch property. It's unclear why -- may be due to a
                # race condition.
                if hasattr(folder_handler, "watch"):
                    self._observer.unschedule(folder_handler.watch)
                del self._folder_handlers[folder_path]

    def close(self):
        """Close this _MultiFileWatcher object forever."""
        with self._lock:
            if len(self._folder_handlers) != 0:
                self._folder_handlers = {}
                LOGGER.debug(
                    "Stopping observer thread even though there is a non-zero "
                    "number of event observers!")
            else:
                LOGGER.debug("Stopping observer thread")

            self._observer.stop()
            self._observer.join(timeout=5)
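
# A minimal usage sketch (not part of the original class): '/tmp/example_script.py'
# is a hypothetical path and the callback signature is an assumption.
if __name__ == '__main__':
    def _report_change(*args, **kwargs):
        LOGGER.debug("Watched file changed (args=%s, kwargs=%s)", args, kwargs)

    watcher = _MultiFileWatcher.get_singleton()
    watcher.watch_file('/tmp/example_script.py', _report_change)
    # Later, when the file no longer needs to be watched:
    # watcher.stop_watching_file('/tmp/example_script.py', _report_change)
    # watcher.close()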
Ejemplo n.º 44
0
        os.mkdir(folder_name)  #makes new dir
        print("Directory ", folder_name, " Created ")
    new_name = folder_destination + '/' + folder_name + '/' + filename  #new location of file
    return new_name


class Myhandler(FileSystemEventHandler):
    i = 1

    def on_modified(self, event):
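        # On any modification event, sweep the whole tracked folder and move
        # every file into the destination computed by folder().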
        for filename in os.listdir(folder_to_track):  #iterates in directory
            src = folder_to_track + '/' + filename  #source Directory
            new_destination = folder(filename)  #new directory
            os.rename(src, new_destination)


folder_to_track = 'C:/Users/usert/Downloads/sorter2'  #source Directory
folder_destination = 'C:/Users/usert/Downloads'  #target directory (main dir)
os.chdir(folder_destination)  #changes working dir
event_handler = Myhandler()
observer = Observer()  #use a lowercase name so the Observer class is not shadowed
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()  #creates a new thread

try:
    while True:
        time.sleep(10)  #keeps the main thread alive
except KeyboardInterrupt:
    observer.stop()  #stops if interrupted
observer.join()  #waits for the observer thread to finish
Ejemplo n.º 45
0
class WhenChanged(FileSystemEventHandler):
    # files to exclude from being watched
    exclude = re.compile(r'|'.join(r'(.+/)?' + a for a in [
        # Vim swap files
        r'\..*\.sw[px]*$',
        # file creation test file 4913
        r'4913$',
        # backup files
        r'.~$',
        # git directories
        r'\.git/?',
        # __pycache__ directories
        r'__pycache__/?',
    ]))

    def __init__(self,
                 files,
                 command,
                 recursive=False,
                 run_once=False,
                 run_at_start=False):
        self.files = files
        paths = {}
        for f in files:
            paths[os.path.realpath(f)] = f
        self.paths = paths
        self.command = command
        self.recursive = recursive
        self.run_once = run_once
        self.run_at_start = run_at_start
        self.last_run = 0

        self.observer = Observer(timeout=0.1)

        for p in self.paths:
            if os.path.isdir(p):
                # Add directory
                self.observer.schedule(self, p, recursive=True)
            else:
                # Add parent directory
                p = os.path.dirname(p)
                self.observer.schedule(self, p)

    def run_command(self, thefile):
        if self.run_once:
            if os.path.exists(
                    thefile) and os.path.getmtime(thefile) < self.last_run:
                return
        new_command = []
        for item in self.command:
            new_command.append(item.replace('%f', thefile))
        subprocess.call(new_command, shell=(len(new_command) == 1))
        self.last_run = time.time()

    def is_interested(self, path):
        if self.exclude.match(path):
            return False

        if path in self.paths:
            return True

        path = os.path.dirname(path)
        if path in self.paths:
            return True

        if self.recursive:
            while os.path.dirname(path) != path:
                path = os.path.dirname(path)
                if path in self.paths:
                    return True

        return False

    def on_change(self, path):
        if self.is_interested(path):
            self.run_command(path)

    def on_created(self, event):
        if self.observer.__class__.__name__ == 'InotifyObserver':
            # inotify also generates modified events for created files
            return

        if not event.is_directory:
            self.on_change(event.src_path)

    def on_modified(self, event):
        if not event.is_directory:
            self.on_change(event.src_path)

    def on_moved(self, event):
        if not event.is_directory:
            self.on_change(event.dest_path)

    def run(self):
        if self.run_at_start:
            self.run_command('/dev/null')

        self.observer.start()
        try:
            while True:
                time.sleep(60 * 60)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()
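
# A minimal usage sketch (not part of the original class): 'src/' and the
# command are hypothetical; the module-level imports used by the class above
# (os, re, time, subprocess and the watchdog Observer) are assumed.
if __name__ == '__main__':
    wc = WhenChanged(files=['src/'], command=['make', 'test'],
                     recursive=True, run_at_start=True)
    wc.run()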
Ejemplo n.º 46
0
class WatchdogReloaderLoop(ReloaderLoop):
    def __init__(self, *args, **kwargs) -> None:
        from watchdog.observers import Observer
        from watchdog.events import PatternMatchingEventHandler

        super().__init__(*args, **kwargs)
        trigger_reload = self.trigger_reload

        class EventHandler(PatternMatchingEventHandler):  # type: ignore
            def on_any_event(self, event):
                trigger_reload(event.src_path)

        reloader_name = Observer.__name__.lower()

        if reloader_name.endswith("observer"):
            reloader_name = reloader_name[:-8]

        self.name = f"watchdog ({reloader_name})"
        self.observer = Observer()
        # Extra patterns can be non-Python files, match them in addition
        # to all Python files in default and extra directories. Ignore
        # __pycache__ since a change there will always have a change to
        # the source file (or initial pyc file) as well. Ignore Git and
        # Mercurial internal changes.
        extra_patterns = [p for p in self.extra_files if not os.path.isdir(p)]
        self.event_handler = EventHandler(
            patterns=["*.py", "*.pyc", "*.zip", *extra_patterns],
            ignore_patterns=[
                "*/__pycache__/*",
                "*/.git/*",
                "*/.hg/*",
                *self.exclude_patterns,
            ],
        )
        self.should_reload = False

    def trigger_reload(self, filename: str) -> None:
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def __enter__(self) -> ReloaderLoop:
        self.watches: t.Dict[str, t.Any] = {}
        self.observer.start()
        return super().__enter__()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.observer.stop()
        self.observer.join()

    def run(self) -> None:
        while not self.should_reload:
            self.run_step()
            time.sleep(self.interval)

        sys.exit(3)

    def run_step(self) -> None:
        to_delete = set(self.watches)

        for path in _find_watchdog_paths(self.extra_files,
                                         self.exclude_patterns):
            if path not in self.watches:
                try:
                    self.watches[path] = self.observer.schedule(
                        self.event_handler, path, recursive=True)
                except OSError:
                    # Clear this path from the list of watches. We don't want
                    # the same error message showing again in the next
                    # iteration.
                    self.watches[path] = None

            to_delete.discard(path)

        for path in to_delete:
            watch = self.watches.pop(path, None)

            if watch is not None:
                self.observer.unschedule(watch)
Ejemplo n.º 47
0
class sr_post(sr_instances):

    # =============
    # check
    # =============

    def check(self):
        self.logger.debug("%s check" % self.program_name)

        if self.config_name == None and self.action != 'foreground': return

        # singleton

        if self.nbr_instances != 1:
            self.logger.error("number of instance must be one")

        # ===============
        # FIXME remove 2018 :  temporary checks and fake subclass
        self.temporary_stuff()
        # ===============

        if self.post_broker == None:
            self.logger.error("post_broker required")

        if self.post_exchange == None:
            self.post_exchange = 'xs_%s' % self.post_broker.username
            if self.post_exchange_suffix:
                self.post_exchange += '_' + self.post_exchange_suffix

        if self.post_base_url == None:
            self.logger.error("post_base_url required")
        elif self.post_base_url.startswith('file:'):
            self.post_base_url = 'file:'

        # if accept_unmatch was not set, accept whatever not rejected

        if self.accept_unmatch == None: self.accept_unmatch = True

        # permanent message headers fields

        if self.to_clusters == None:
            self.to_clusters = self.post_broker.hostname

        # inflight

        try:
            self.inflight = int(self.inflight)
        except:
            pass

        # merge these 2 events

        self.create_modify = 'create' in self.events or 'modify' in self.events

    # =============
    # close
    # =============

    def close(self):
        self.logger.debug("%s close" % self.program_name)

        for plugin in self.on_stop_list:
            if not plugin(self): break

        if self.post_hc:
            self.post_hc.close()
            self.post_hc = None

        if hasattr(self, 'cache') and self.cache:
            self.cache.save()
            self.cache.close()

        if self.sleep > 0 and len(self.obs_watched):
            for ow in self.obs_watched:
                self.observer.unschedule(ow)
            self.observer.stop()

        if self.restore_queue != None:
            self.publisher.restore_clear()

    # =============
    # connect
    # =============

    def connect(self):
        self.logger.debug("%s connect" % self.program_name)

        # =============
        # create message if needed
        # =============

        self.msg = sr_message(self)

        # =============
        # posting
        # =============

        loop = True
        if self.sleep <= 0: loop = False

        self.post_hc = HostConnect(logger=self.logger)
        self.post_hc.set_pika(self.use_pika)
        self.post_hc.set_url(self.post_broker)
        self.post_hc.loop = loop
        self.post_hc.connect()

        self.publisher = Publisher(self.post_hc)
        self.publisher.build()

        self.logger.info("Output AMQP broker(%s) user(%s) vhost(%s)" % \
                        (self.post_broker.hostname,self.post_broker.username,self.post_broker.path) )

        # =============
        # setup message publish
        # =============

        self.msg.user = self.post_broker.username
        self.msg.publisher = self.publisher
        self.msg.pub_exchange = self.post_exchange
        self.msg.post_exchange_split = self.post_exchange_split

        self.logger.info("Output AMQP exchange(%s)" % self.msg.pub_exchange)

        # =============
        # amqp resources
        # =============

        self.declare_exchanges()

        # =============
        # retransmit/restore_queue
        # =============

        if self.restore_queue != None:
            self.publisher.restore_set(self)
            self.msg.pub_exchange = self.publisher.restore_exchange
            self.msg.post_exchange_split = 0

    # =============
    # help
    # =============

    def help(self):
        print("\nUsage: %s -u <url> -pb <post_broker> ... [OPTIONS]\n" %
              self.program_name)
        print("version: %s \n" % sarra.__version__)
        print("OPTIONS:")
        print(
            "-pb|post_broker   <broker>          default:amqp://guest:guest@localhost/"
        )
        print("-c|config   <config_file>")
        print("-pbd <post_base_dir>   default:None")
        print(
            "-e   <events>          default:create|delete|follow|link|modify\n"
        )
        print("-pe  <post_exchange>        default:xs_\"broker.username\"")
        print("-h|--help\n")
        print(
            "-parts [0|1|sz]        0-computed blocksize (default), 1-whole files (no partitioning), sz-fixed blocksize"
        )
        print("-to  <name1,name2,...> defines target clusters, default: ALL")
        print("-tp  <topic_prefix>    default:v02.post")
        print("-sub <subtopic>        default:'path.of.file'")
        print("-rn  <rename>          default:None")
        print("-sum <sum>             default:d")
        print("-caching               default:enable caching")
        print("-reset                 default:enable reset")
        print("-path <path1... pathN> default:required")
        print("-on_post <script>      default:None")
        print("DEBUG:")
        print("-debug")
        print("-r  : randomize chunk posting")
        print("-rr : reconnect between chunks\n")

    # =============
    # on_add     (for SimpleEventHandler module)
    # =============

    def on_add(self, event, src, dst):
        #self.logger.debug("%s %s %s" % ( event, src, dst ) )
        self.new_events['%s %s' % (src, dst)] = (event, src, dst)

    # =============
    # on_created (for SimpleEventHandler)
    # =============

    def on_created(self, event):
        self.on_add('create', event.src_path, None)

    # =============
    # on_deleted (for SimpleEventHandler)
    # =============

    def on_deleted(self, event):
        self.on_add('delete', event.src_path, None)

    # =============
    # on_modified (for SimpleEventHandler)
    # =============

    def on_modified(self, event):
        self.on_add('modify', event.src_path, None)

    # =============
    # on_moved (for SimpleEventHandler)
    # =============

    def on_moved(self, event):
        self.on_add('move', event.src_path, event.dest_path)

    # =============
    # __on_post__ posting of message
    # =============

    def __on_post__(self):
        #self.logger.debug("%s __on_post__" % self.program_name)

        # invoke on_post when provided

        for plugin in self.on_post_list:
            if not plugin(self): return False

        ok = True

        if self.outlet == 'json':
            json_line = json.dumps(
                [self.msg.topic, self.msg.headers, self.msg.notice],
                sort_keys=True) + '\n'
            print("%s" % json_line)

        elif self.outlet == 'url':
            print("%s" % '/'.join(self.msg.notice.split()[1:3]))

        else:
            ok = self.msg.publish()

        # publish counter

        self.publish_count += 1

        return ok

    # =============
    # __on_watch__
    # =============

    def __on_watch__(self):

        # invoke user defined on_message when provided

        for plugin in self.on_watch_list:
            if not plugin(self): return False

        return True

    # =============
    # __on_part__
    # =============

    def __on_part__(self):

        # invoke user defined on_part when provided

        for plugin in self.on_part_list:
            if not plugin(self): return False

        return True

    # =============
    # overwrite defaults
    # =============

    def overwrite_defaults(self):
        self.logger.debug("%s overwrite_defaults" % self.program_name)

        self.post_hc = None

        self.obs_watched = []
        self.watch_handler = None

        self.inl = []
        self.new_events = OrderedDict()
        self.left_events = OrderedDict()

        self.blocksize = 200 * 1024 * 1024

    # =============
    # path inflight
    # =============

    def path_inflight(self, path, lstat):
        #self.logger.debug("path_inflight %s" % path )

        if not isinstance(self.inflight, int):
            #self.logger.debug("ok inflight unused")
            return False

        if lstat == None:
            #self.logger.debug("ok lstat None")
            return False

        age = time.time() - lstat[stat.ST_MTIME]
        if age < self.inflight:
            self.logger.debug("%d vs (inflight setting) %d seconds. Too New!" %
                              (age, self.inflight))
            return True

        return False

    # =============
    # path renamed
    # =============

    def path_renamed(self, path):

        newname = path

        # rename path given with no filename

        if self.rename:
            newname = self.rename
            if self.rename[-1] == '/':
                newname += os.path.basename(path)

        # strip 'N' heading directories

        if self.strip > 0:
            strip = self.strip
            if path[0] == '/': strip = strip + 1
            # if we strip too much... keep the filename
            token = path.split('/')
            try:
                token = token[strip:]
            except:
                token = [os.path.basename(path)]
            newname = '/' + '/'.join(token)

        if newname == path: return None

        return newname

    # =============
    # path rejected
    # =============

    def path_rejected(self, path):
        #self.logger.debug("path_rejected %s" % path )

        if not self.post_base_url:
            self.post_base_url = 'file:/'

        if self.masks == []: return False

        self.post_relpath = path
        if self.post_base_dir:
            self.post_relpath = path.replace(self.post_base_dir, '')

        urlstr = self.post_base_url + '/' + self.post_relpath

        if self.realpath_filter and not self.realpath_post:
            if os.path.exists(path):
                fltr_post_relpath = os.path.realpath(path)
                if sys.platform == 'win32':
                    fltr_post_relpath = fltr_post_relpath.replace('\\', '/')

                if self.post_base_dir:
                    fltr_post_relpath = fltr_post_relpath.replace(
                        self.post_base_dir, '')
                urlstr = self.post_base_url + '/' + fltr_post_relpath

        if not self.isMatchingPattern(urlstr, self.accept_unmatch):
            self.logger.debug("%s Rejected by accept/reject options" % urlstr)
            return True

        self.logger.debug("%s not rejected" % urlstr)
        return False

    # =============
    # post_delete
    # =============

    def post_delete(self, path, key=None, value=None):
        #self.logger.debug("post_delete %s (%s,%s)" % (path,key,value) )

        # accept this file

        if self.path_rejected(path): return False

        # post_init (message)
        self.post_init(path, None)

        # sumstr
        hash = sha512()
        hash.update(bytes(os.path.basename(path), encoding='utf-8'))
        sumstr = 'R,%s' % hash.hexdigest()

        # partstr
        partstr = None

        # caching
        if self.caching:
            new_post = self.cache.check(str(sumstr), path, partstr)
            self.cache.delete_path(path)  # dont keep delete in cache
            if not new_post:
                self.logger.debug("already posted as deleted %s %s" %
                                  (path, sumstr))
                return False

        # completing headers

        self.msg.headers['sum'] = sumstr

        # used when moving a file

        if key != None:
            self.msg.headers[key] = value
            if key == 'newname' and self.post_base_dir:
                self.msg.new_dir = os.path.dirname(value)
                self.msg.new_file = os.path.basename(value)
                self.msg.headers[key] = value.replace(self.post_base_dir, '')

        # post message

        ok = self.__on_post__()

        return ok

    # =============
    # post_file
    # =============

    def post_file(self, path, lstat, key=None, value=None):
        #self.logger.debug("post_file %s" % path )

        # accept this file

        if self.path_rejected(path): return False

        # check if it is a part file

        if path.endswith('.' + self.msg.part_ext):
            return self.post_file_part(path, lstat)

        # This variable means that part_file_assemble plugin is loaded and will handle posting the original file (being assembled)

        elif hasattr(self, 'suppress_posting_partial_assembled_file'):
            return False

        # check the value of blocksize

        fsiz = lstat[stat.ST_SIZE]
        blksz = self.set_blocksize(self.blocksize, fsiz)

        # if we should send the file in parts

        if blksz > 0 and blksz < fsiz:
            return self.post_file_in_parts(path, lstat)

        # post_init (message)
        self.post_init(path, lstat)

        # partstr

        partstr = '1,%d,1,0,0' % fsiz
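        # Whole (unpartitioned) file: flag '1', blocksize equal to the file size,
        # 1 block, remainder 0, current block 0 (field layout inferred from the
        # partstr built in post_file_in_parts below).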

        # xattr turned off  PS 20180423
        # xattr ... check if sum is set in extended fileattributes

        sumstr = ''

        #try :
        #       attr = xattr.xattr(path)
        #       if 'user.sr_sum' in attr :
        #          self.logger.debug("sum set by xattr")
        #          sumstr = (attr['user.sr_sum'].decode("utf-8")).split()[1]
        #except: pass
        sumstr = self.compute_sumstr(path, fsiz, sumstr)

        # caching

        if self.caching:
            new_post = self.cache.check(str(sumstr), self.post_relpath,
                                        partstr)
            if new_post: self.logger.info("caching %s" % path)
            else:
                self.logger.debug("already posted %s" % path)
                return False

        # complete  message

        self.msg.headers['parts'] = partstr
        self.msg.headers['sum'] = sumstr

        # used when moving a file

        if key != None:
            self.msg.headers[key] = value
            if key == 'oldname' and self.post_base_dir:
                self.msg.headers[key] = value.replace(self.post_base_dir, '')

        # post message

        ok = self.__on_post__()

        return ok

    def compute_sumstr(self, path, fsiz, sumstr=''):

        sumflg = self.sumflg

        if sumflg[:2] == 'z,' and len(sumflg) > 2:
            sumstr = sumflg

        else:

            if not sumflg[0] in ['0', 'd', 'n', 's', 'z']: sumflg = 'd'

            self.set_sumalgo(sumflg)
            sumalgo = self.sumalgo
            sumalgo.set_path(path)

            # compute checksum

            if sumflg in ['d', 's']:

                fp = open(path, 'rb')
                i = 0
                while i < fsiz:
                    buf = fp.read(self.bufsize)
                    if not buf: break
                    sumalgo.update(buf)
                    i += len(buf)
                fp.close()

            # setting sumstr

            checksum = sumalgo.get_value()
            sumstr = '%s,%s' % (sumflg, checksum)

        return sumstr

        # xattr turned off PS 20180424
        # setting extended attributes
        #self.logger.debug("xattr set for time and sum")
        #sr_attr = self.msg.time + ' ' + sumstr
        #attr['user.sr_sum' ] = bytes( sr_attr, encoding='utf-8')

    # =============
    # post_file_in_parts
    # =============

    def post_file_in_parts(self, path, lstat):
        #self.logger.debug("post_file_in_parts %s" % path )

        # post_init (message)
        self.post_init(path, lstat)

        # check the value of blocksize

        fsiz = lstat[stat.ST_SIZE]
        chunksize = self.set_blocksize(self.blocksize, fsiz)

        # count blocks and remainder

        block_count = int(fsiz / chunksize)
        remainder = fsiz % chunksize
        if remainder > 0: block_count = block_count + 1

        # default sumstr

        sumstr = self.sumflg

        # loop on chunks

        blocks = list(range(0, block_count))
        if self.randomize:
            random.shuffle(blocks)
            #blocks = [8, 3, 1, 2, 9, 6, 0, 7, 4, 5] # Testing
            self.logger.info('Sending partitions in the following order: ' +
                             str(blocks))

        for i in blocks:

            # setting sumalgo for that part

            sumflg = self.sumflg

            if sumflg[:2] == 'z,' and len(sumflg) > 2:
                sumstr = sumflg

            else:
                sumflg = self.sumflg
                if not self.sumflg[0] in ['0', 'd', 'n', 's', 'z']:
                    sumflg = 'd'
                self.set_sumalgo(sumflg)
                sumalgo = self.sumalgo
                sumalgo.set_path(path)

            # compute block stuff

            current_block = i

            offset = current_block * chunksize
            length = chunksize

            last = current_block == block_count - 1
            if last and remainder > 0:
                length = remainder

            # set partstr

            partstr = 'i,%d,%d,%d,%d' %\
                      (chunksize,block_count,remainder,current_block)

            # compute checksum if needed

            if not self.sumflg in ['0', 'n', 'z']:
                bufsize = self.bufsize
                if length < bufsize: bufsize = length

                fp = open(path, 'rb')
                if offset != 0: fp.seek(offset, 0)
                t = 0
                while t < length:
                    buf = fp.read(bufsize)
                    if not buf: break
                    sumalgo.update(buf)
                    t += len(buf)
                fp.close()

                checksum = sumalgo.get_value()
                sumstr = '%s,%s' % (sumflg, checksum)

            # caching

            if self.caching:
                new_post = self.cache.check(str(sumstr), self.post_relpath,
                                            partstr)
                if new_post:
                    self.logger.info("caching %s (%s)" % (path, partstr))
                else:
                    self.logger.debug("already posted %s (%s)" %
                                      (path, partstr))
                    continue

            # complete  message

            self.msg.headers['parts'] = partstr
            self.msg.headers['sum'] = sumstr

            # post message

            ok = self.__on_post__()
            if not ok:
                self.logger.error('Something went wrong while posting: %s' %
                                  self.msg.notice[2])

        return True

    # =============
    # post_file_part
    # =============

    def post_file_part(self, path, lstat):

        # post_init (message)
        self.post_init(path, lstat)

        # verify suffix

        ok, log_msg, suffix, partstr, sumstr = self.msg.verify_part_suffix(
            path)

        # something went wrong

        if not ok:
            self.logger.debug("file part extension but %s for file %s" %
                              (log_msg, path))
            return False

        # check rename: see if it has the right part suffix (if present)
        if 'rename' in self.msg.headers and not suffix in self.msg.headers[
                'rename']:
            self.msg.headers['rename'] += suffix

        # caching

        if self.caching:
            new_post = self.cache.check(str(sumstr), path, partstr)
            if new_post: self.logger.info("caching %s" % path)
            else:
                self.logger.debug("already posted %s" % path)
                return False

        # complete  message

        self.msg.headers['parts'] = partstr
        self.msg.headers['sum'] = sumstr

        # post message and trigger part plugins
        ok = self.__on_part__()

        if ok: ok = self.__on_post__()

        return ok

    # =============
    # post_init
    # =============

    def post_init(self, path, lstat=None, key=None, value=None):

        self.msg.new_dir = os.path.dirname(path)
        self.msg.new_file = os.path.basename(path)

        # relpath
        self.post_relpath = path
        if self.post_base_dir:
            self.post_relpath = path.replace(self.post_base_dir, '')

        # exchange
        self.msg.exchange = self.post_exchange

        # topic
        self.msg.set_topic(self.topic_prefix, self.post_relpath)
        if self.subtopic:
            self.msg.set_topic_usr(self.topic_prefix, self.subtopic)

        # notice
        self.msg.set_notice(self.post_base_url, self.post_relpath)

        # rename
        rename = self.path_renamed(self.post_relpath)

        # headers

        self.msg.headers = {}

        self.msg.trim_headers()

        if self.to_clusters != None:
            self.msg.headers['to_clusters'] = self.to_clusters
        if self.cluster != None:
            self.msg.headers['from_cluster'] = self.cluster
        if self.source != None: self.msg.headers['source'] = self.source
        if rename != None: self.msg.headers['rename'] = rename
        if key != None: self.msg.headers[key] = value

        if lstat == None: return

        self.msg.headers['mtime'] = timeflt2str(lstat.st_mtime)
        self.msg.headers['atime'] = timeflt2str(lstat.st_atime)
        self.msg.headers['mode'] = "%o" % (lstat[stat.ST_MODE] & 0o7777)

    # =============
    # post_link
    # =============

    def post_link(self, path, key=None, value=None):
        #self.logger.debug("post_link %s" % path )

        # accept this file

        if self.path_rejected(path): return False

        # post_init (message)
        self.post_init(path, None)

        # resolve link

        link = os.readlink(path)

        # partstr

        partstr = None

        # sumstr

        hash = sha512()
        hash.update(bytes(link, encoding='utf-8'))
        sumstr = 'L,%s' % hash.hexdigest()
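        # Hedged illustration (not in the original source): a link pointing at
        # '/tmp/target' yields sumstr == 'L,' + sha512(b'/tmp/target').hexdigest(),
        # i.e. the checksum covers the link target, not any file contents.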

        # caching

        if self.caching:
            new_post = self.cache.check(str(sumstr), self.post_relpath,
                                        partstr)
            if new_post: self.logger.info("caching %s" % path)
            else:
                self.logger.debug("already posted %s" % path)
                return False

        # complete headers

        self.msg.headers['link'] = link
        self.msg.headers['sum'] = sumstr

        # used when moving a file

        if key != None: self.msg.headers[key] = value

        # post message

        ok = self.__on_post__()

        return ok

    # =============
    # post_move
    # =============

    def post_move(self, src, dst):
        #self.logger.debug("post_move %s %s" % (src,dst) )

        # watchdog quirk: './' gets inserted into directory paths ... strip it

        src = src.replace('/./', '/')
        dst = dst.replace('/./', '/')

        if os.path.islink(dst) and self.realpath_post:
            dst = os.path.realpath(dst)
            if sys.platform == 'win32':
                dst = dst.replace('\\', '/')

        # file

        if os.path.isfile(dst):
            ok = self.post_delete(src, 'newname', dst)
            ok = self.post_file(dst, os.stat(dst), 'oldname', src)
            return True

        # link

        if os.path.islink(dst):
            ok = self.post_delete(src, 'newname', dst)
            ok = self.post_link(dst, 'oldname', src)
            return True

        # directory
        if os.path.isdir(dst):
            for x in os.listdir(dst):

                dst_x = dst + '/' + x
                src_x = src + '/' + x

                ok = self.post_move(src_x, dst_x)

            # directory list to delete at end
            self.move_dir_lst.append((src, dst))

        return True

    # =============
    # post1file
    # =============

    def post1file(self, path, lstat):

        done = True

        # watchdog quirk: './' gets inserted into directory paths ... strip it

        path = path.replace('/./', '/')

        # always use / as separator for paths being posted.
        if os.sep != '/':  # windows
            path = path.replace(os.sep, '/')

        # path is a link

        if os.path.islink(path):
            ok = self.post_link(path)

            if self.follow_symlinks:
                link = os.readlink(path)
                try:
                    rpath = os.path.realpath(link)
                    if sys.platform == 'win32':
                        rpath = rpath.replace('\\', '/')

                except:
                    return done

                lstat = None
                if os.path.exists(rpath): lstat = os.stat(rpath)

                ok = self.post1file(rpath, lstat)

            return done

        # path deleted

        if lstat == None:
            ok = self.post_delete(path)
            return done

        # path is a file

        if os.path.isfile(path):
            ok = self.post_file(path, lstat)
            return done

        # at this point it is a create,modify directory

        return done

    # =============
    # post1move
    # =============

    def post1move(self, src, dst):
        #self.logger.debug("post1move %s %s" % (src,dst) )

        self.move_dir_lst = []

        ok = self.post_move(src, dst)

        for tup in self.move_dir_lst:
            src, dst = tup
            #self.logger.debug("deleting moved directory %s" % src )
            ok = self.post_delete(src, 'newname', dst)

        return True

    # =============
    # process event
    # =============

    def process_event(self, event, src, dst):
        #self.logger.debug("process_event %s %s %s " % (event,src,dst) )

        done = True
        later = False

        # delete

        if event == 'delete':
            if event in self.events:
                ok = self.post1file(src, None)
            return done

        # move

        if event == 'move':
            if self.create_modify:
                ok = self.post1move(src, dst)
            return done

        # create or modify

        # directory : skipped, its content is watched

        if os.path.isdir(src): return done

        # link (os.path.exists returns False, lstat = None)

        if os.path.islink(src):
            if 'link' in self.events:
                ok = self.post1file(src, None)
            return done

        # file : must exist
        #       (it may have been deleted since the event was caught)

        if not os.path.exists(src): return done

        # file : must be old enough

        lstat = os.stat(src)
        if self.path_inflight(src, lstat): return later

        # post it

        if self.create_modify:
            ok = self.post1file(src, lstat)

        return done

    def post_pulse(self):
        self.logger.info("post_pulse message")

        self.connect()

        # build message

        self.msg.topic = 'v02.pulse'

        self.msg.set_time()
        self.msg.notice = '%s' % self.msg.time

        if self.pulse_message:
            self.msg.topic += '.message'
            self.msg.notice += ' ' + self.pulse_message
        else:
            self.msg.topic += '.tick'

        self.msg.headers = {}
        self.msg.trim_headers()

        # pulse on all exchanges
        # because of its topic, it should not impact any process
        # that does not consider topic v02.pulse

        lst_dict = run_rabbitmqadmin(self.post_broker, "list exchanges name",
                                     self.logger)

        ex = []
        for edict in lst_dict:
            exchange = edict['name']
            if exchange == '': continue
            if exchange[0] != 'x': continue
            if exchange == 'xreport': continue
            # deprecated exchanges
            if exchange == 'xlog': continue
            if exchange[0:3] == 'xl_': continue
            if exchange[0:3] == 'xr_': continue
            ex.append(exchange)
            self.msg.pub_exchange = exchange
            self.msg.message_ttl = self.message_ttl
            self.msg.publish()

        self.close()

    # =============
    # set_blocksize ... taken directly from the C code
    # =============

    def set_blocksize(self, bssetting, fsiz):

        tfactor = 50 * 1024 * 1024

        if bssetting == 0:  ## autocompute
            if fsiz > 100 * tfactor: return 10 * tfactor
            elif fsiz > 10 * tfactor: return int((fsiz + 9) / 10)
            elif fsiz > tfactor: return int((fsiz + 2) / 3)
            else: return fsiz

        elif bssetting == 1:  ## send file as one piece.
            return fsiz

        else:  ## partstr=i
            return bssetting
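
        # Hedged illustration (not in the original source) of the autocompute
        # branch above (bssetting == 0, tfactor = 50 MiB):
        #   fsiz  10 GiB -> 500 MiB chunks (10 * tfactor)
        #   fsiz   1 GiB -> roughly fsiz / 10 per chunk
        #   fsiz 100 MiB -> roughly fsiz / 3 per chunk
        #   fsiz  10 MiB -> a single chunk covering the whole file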

    # =============
    # wakeup
    # =============

    def wakeup(self):
        #self.logger.debug("wakeup")

        # FIXME: Tiny potential for events to be dropped during copy.
        #     these lists might need to be replaced with watchdog event queues.
        #     left for later work. PS-20170105
        #     more details: https://github.com/gorakhargosh/watchdog/issues/392

        # on_watch

        ok = self.__on_watch__()
        if not ok: return

        # pile up left events to process

        self.left_events.update(self.new_events)
        self.new_events = OrderedDict()

        # work with a copy events and keep done events (to delete them)

        self.done_events = []
        self.cur_events = OrderedDict()
        self.cur_events.update(self.left_events)

        # nothing to do

        if len(self.cur_events) <= 0: return

        # loop on all events

        for key in self.cur_events:
            event, src, dst = self.cur_events[key]
            done = self.process_event(event, src, dst)
            if done: self.left_events.pop(key)

        # heartbeat
        self.heartbeat_check()

    # =============
    # walk
    # =============

    def walk(self, src):
        self.logger.debug("walk %s" % src)

        # how to proceed with symlink

        if os.path.islink(src) and self.realpath_post:
            src = os.path.realpath(src)
            if sys.platform == 'win32':
                src = src.replace('\\', '/')

        # Walk the src directory. The walk is depth first, so a lot of time can pass
        # between the *listdir* call and when a file is actually visited if there are
        # subdirectories to descend into first; hence the existence check after
        # listdir (this crashed in flow_tests of > 20,000 files).
        for x in os.listdir(src):
            path = src + '/' + x
            if os.path.isdir(path):
                self.walk(path)
                continue

            # add path created
            if os.path.exists(path):
                self.post1file(path, os.stat(path))

    # =============
    # original walk_priming
    # =============

    def walk_priming(self, p):
        """
         Find all the subdirectories of the given path, start watches on them. 
         deal with symbolically linked directories correctly
        """
        if os.path.islink(p):
            realp = os.path.realpath(p)
            if sys.platform == 'win32':
                realp = realp.replace('\\', '/')

            self.logger.info("sr_watch %s is a link to directory %s" %
                             (p, realp))
            if self.realpath_post:
                d = realp
            else:
                d = p + '/' + '.'
        else:
            d = p

        try:
            fs = os.stat(d)
            dir_dev_id = '%s,%s' % (fs.st_dev, fs.st_ino)
            if dir_dev_id in self.inl: return True
        except:
            self.logger.warning("could not stat %s" % d)

        if os.access(d, os.R_OK | os.X_OK):
            try:
                ow = self.observer.schedule(self.watch_handler,
                                            d,
                                            recursive=True)
                self.obs_watched.append(ow)
                self.inl[dir_dev_id] = (ow, d)
                self.logger.info(
                    "sr_watch priming watch (instance=%d) scheduled for: %s " %
                    (len(self.obs_watched), d))
            except:
                self.logger.warning(
                    "sr_watch priming watch: %s failed, deferred." % d)

                # add path created
                self.on_add('create', p, None)
                return True

        else:
            self.logger.warning(
                "sr_watch could not schedule priming watch of: %s (EPERM) deferred."
                % d)

            # add path created
            self.on_add('create', p, None)
            return True

        return True

    # =============
    # watch_dir
    # =============

    def watch_dir(self, sld):
        self.logger.debug("watch_dir %s" % sld)

        if self.force_polling:
            self.logger.info(
                "sr_watch polling observer overriding default (slower but more reliable.)"
            )
            self.observer = PollingObserver()
        else:
            self.logger.info(
                "sr_watch optimal observer for platform selected (best when it works)."
            )
            self.observer = Observer()

        self.obs_watched = []

        self.watch_handler = SimpleEventHandler(self)
        self.walk_priming(sld)

        self.logger.info(
            "sr_watch priming walk done, but not yet active. Starting...")
        self.observer.start()
        self.logger.info("sr_watch now active on %s posting to exchange: %s" %
                         (sld, self.post_exchange))
        self.walk(sld)

    # =============
    # watch_loop
    # =============

    def watch_loop(self):
        self.logger.debug("watch_loop")

        last_time = time.time()
        while True:
            self.wakeup()
            now = time.time()
            elapse = now - last_time
            if elapse < self.sleep: time.sleep(self.sleep - elapse)
            last_time = now

        self.observer.join()

    # ==================================================
    # FIXME in 2018?  get rid of code from HERE TOP

    def temporary_stuff(self):

        # enforcing post_broker

        if self.post_broker == None:
            if self.broker != None:
                self.post_broker = self.broker
                self.logger.warning("use post_broker to set broker")

        # enforcing post_exchange

        if self.post_exchange == None:
            if self.exchange != None:
                self.post_exchange = self.exchange
                self.logger.warning("use post_exchange to set exchange")

        # verify post_base_dir

        if self.post_base_dir == None:
            if self.post_document_root != None:
                self.post_base_dir = self.post_document_root
                self.logger.warning(
                    "use post_base_dir instead of post_document_root")
            elif self.document_root != None:
                self.post_base_dir = self.document_root
                self.logger.warning(
                    "use post_base_dir instead of document_root")

        # faking having a subclass poster from which post is called

        addmodule = namedtuple('AddModule', ['post'])
        self.poster = addmodule(self.post_url)

        if self.poster.post == self.post_url:
            self.logger.debug("MY POSTER TRICK DID WORK !!!")

    def post_url(self,post_exchange,url,to_clusters,\
                      partstr=None,sumstr=None,rename=None,filename=None, \
                      mtime=None,atime=None,mode=None,link=None):

        self.logger.warning(
            "deprecated use of self.poster.post(post_exchange,url...")
        self.logger.warning(
            "should be using self.post1file or self.post_file...")

        post_relpath = url.path
        urlstr = url.geturl()
        post_base_url = urlstr.replace(post_relpath, '')

        # apply accept/reject

        if self.realpath_filter and not self.realpath_post:
            path = post_relpath
            if self.post_base_dir: path = self.post_base_dir + '/' + path
            if os.path.exists(path):
                fltr_post_relpath = os.path.realpath(path)
                if sys.platform == 'win32':
                    fltr_post_relpath = fltr_post_relpath.replace('\\', '/')

                if self.post_base_dir:
                    fltr_post_relpath = fltr_post_relpath.replace(
                        self.post_base_dir, '')
                urlstr = self.post_base_url + '/' + fltr_post_relpath

        if not self.isMatchingPattern(urlstr, self.accept_unmatch):
            self.logger.debug("post of %s Rejected by accept/reject options" %
                              urlstr)
            return True  # need to return true because this isn't a failure.

        # if caching is enabled make sure it was not already posted

        if self.caching:
            new_post = self.cache.check(str(sumstr), post_relpath, partstr)
            if new_post:

                # delete
                if sumstr.startswith('R,'):
                    self.cache.delete_path(post_relpath)

                # link - never store them, message contains whole payload.
                elif sumstr.startswith('L,'):
                    self.cache.delete_path(post_relpath)

                else:
                    self.logger.info("caching %s" % post_relpath)

            # modified, or repost
            else:
                self.logger.debug("skipped already posted %s %s %s" %
                                  (post_relpath, partstr, sumstr))
                return True

        # set message exchange
        self.msg.exchange = post_exchange

        # set message topic
        self.msg.set_topic(self.topic_prefix, post_relpath)
        if self.subtopic != None:
            self.msg.set_topic_usr(self.topic_prefix, self.subtopic)

        # set message notice
        self.msg.set_notice(post_base_url, post_relpath)

        # set message headers
        self.msg.headers = {}

        self.msg.headers['to_clusters'] = to_clusters

        if partstr != None: self.msg.headers['parts'] = partstr
        if sumstr != None: self.msg.headers['sum'] = sumstr
        if rename != None: self.msg.headers['rename'] = rename
        if mtime != None: self.msg.headers['mtime'] = mtime
        if atime != None: self.msg.headers['atime'] = atime
        if mode != None: self.msg.headers['mode'] = "%o" % (mode & 0o7777)
        if link != None: self.msg.headers['link'] = link

        if self.cluster != None:
            self.msg.headers['from_cluster'] = self.cluster
        if self.source != None: self.msg.headers['source'] = self.source

        self.msg.trim_headers()

        ok = self.__on_post__()

        return ok

    # FIXME in 2018?  get rid of code to HERE BOTTOM
    # ==================================================

    # =============
    # run
    # =============

    def run(self):
        self.logger.info("%s run partflg=%s, sum=%s, caching=%s " % \
              ( self.program_name, self.partflg, self.sumflg, self.caching ))
        self.logger.info("%s realpath_post=%s follow_links=%s force_polling=%s"  % \
              ( self.program_name, self.realpath_post, self.follow_symlinks, self.force_polling ) )

        self.connect()

        # caching
        if self.caching:
            self.cache = sr_cache(self)
            self.cache_stat = True
            if self.reset:
                self.cache.close(unlink=True)
            self.cache.open()
            if not hasattr(self, 'heartbeat_cache_installed'
                           ) or not self.heartbeat_cache_installed:
                self.execfile("on_heartbeat", 'hb_cache')
                self.on_heartbeat_list.append(self.on_heartbeat)
                self.heartbeat_cache_installed = True

        pbd = self.post_base_dir

        for plugin in self.on_start_list:
            if not plugin(self): break

        for d in self.postpath:
            self.logger.debug("postpath = %s" % d)
            if pbd and not d.startswith(pbd): d = pbd + '/' + d

            if self.sleep > 0:
                self.watch_dir(d)
                continue

            if os.path.isdir(d):
                self.walk(d)
            elif os.path.islink(d):
                self.post1file(d, None)
            elif os.path.isfile(d):
                self.post1file(d, os.stat(d))
            else:
                self.logger.error("could not post %s (exists %s)" %
                                  (d, os.path.exists(d)))

        if self.sleep > 0: self.watch_loop()

        self.close()

    def reload(self):
        self.logger.info("%s reload" % self.program_name)
        self.close()
        self.configure()
        self.run()

    def start(self):
        self.logger.info("%s %s startup" %
                         (self.program_name, self.config_name))
        self.log_settings()
        self.run()

    def stop(self):
        self.logger.info("%s stop" % self.program_name)
        self.close()
        os._exit(0)

    def cleanup(self):
        self.logger.info("%s %s cleanup" %
                         (self.program_name, self.config_name))

        if self.post_broker:
            self.post_hc = HostConnect(logger=self.logger)
            self.post_hc.set_pika(self.use_pika)
            self.post_hc.set_url(self.post_broker)
            self.post_hc.connect()
            self.declare_exchanges(cleanup=True)

        # caching

        if hasattr(self, 'cache') and self.cache:
            self.cache.close(unlink=True)
            self.cache = None

        self.close()

    def declare(self):
        self.logger.info("%s %s declare" %
                         (self.program_name, self.config_name))

        # on posting host
        if self.post_broker:
            self.post_hc = HostConnect(logger=self.logger)
            self.post_hc.set_pika(self.use_pika)
            self.post_hc.set_url(self.post_broker)
            self.post_hc.connect()
            self.declare_exchanges()

        self.close()

    def declare_exchanges(self, cleanup=False):

        # restore_queue mode has no post_exchange

        if not self.post_exchange: return

        # define post exchange (splitted ?)

        exchanges = []

        if self.post_exchange_split != 0:
            for n in list(range(self.post_exchange_split)):
                exchanges.append(self.post_exchange + "%02d" % n)
        else:
            exchanges.append(self.post_exchange)

        # do exchanges

        for x in exchanges:
            if cleanup: self.post_hc.exchange_delete(x)
            else: self.post_hc.exchange_declare(x)
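
        # Hedged illustration (not in the original source): with
        # post_exchange = 'xs_pix' and post_exchange_split = 4, the exchanges
        # declared (or deleted) are xs_pix00, xs_pix01, xs_pix02 and xs_pix03.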

    def setup(self):
        self.logger.info("%s %s setup" % (self.program_name, self.config_name))

        # on posting host
        if self.post_broker:
            self.post_hc = HostConnect(logger=self.logger)
            self.post_hc.set_pika(self.use_pika)
            self.post_hc.set_url(self.post_broker)
            self.post_hc.connect()
            self.declare_exchanges()

        self.close()
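
Below is a minimal, self-contained sketch (not part of the class above) of the
watch_dir()/watch_loop() pattern it implements, assuming only the watchdog
package; the handler, path and sleep interval are illustrative stand-ins.

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from watchdog.observers.polling import PollingObserver


class QueueingHandler(FileSystemEventHandler):
    """Collect raw events; a periodic loop drains them, as wakeup() does above."""

    def __init__(self):
        super().__init__()
        self.pending = []

    def on_any_event(self, event):
        self.pending.append(event)


def watch_loop(path, sleep=5, force_polling=False):
    observer = PollingObserver() if force_polling else Observer()
    handler = QueueingHandler()
    observer.schedule(handler, path, recursive=True)
    observer.start()
    try:
        while True:
            last_time = time.time()
            for event in handler.pending:
                print(event.event_type, event.src_path)
            handler.pending = []
            elapse = time.time() - last_time
            if elapse < sleep:
                time.sleep(sleep - elapse)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()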
Ejemplo n.º 48
0
        print("saving to file")
        # print('File moved!')
    else:
        print('File already processed! File: ' + filepath)
        if (cfManager.retrieve_delete_duplicate_files() == True):
            if (os.path.exists(filepath)):
                print('ACTUALLY DELETING THE FILE!!!')
                os.remove(filepath)


my_event_handler.on_created = on_created
my_event_handler.on_deleted = on_deleted
my_event_handler.on_moved = on_moved
my_event_handler.on_modified = on_modified

cfManager = configManager.configManager()

path = cfManager.retrieve_source_directory()
fileMover.create_directory(path)
go_recursively = True
my_observer = Observer()
my_observer.schedule(my_event_handler, path, recursive=go_recursively)

my_observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    my_observer.stop()
my_observer.join()
Ejemplo n.º 49
0
                                             new_params["photoset"])

    @staticmethod
    def parse_filepath(file_path):
        """
        Returns a dictionary containing the photoset title and photo title of a given filepath
        """
        parsed = file_path.split(os.sep)
        photo_parsed = os.path.splitext(parsed[-1])
        return {
            "photoset": parsed[-2],
            "photo": photo_parsed[0],
            "ext": photo_parsed[1]
        }


if __name__ == "__main__":
    FLICKRBOX = Flickrbox(sync=True)

    OBSERVER = Observer()
    OBSERVER.schedule(FlickrboxEventHandler(FLICKRBOX),
                      FLICKRBOX.path,
                      recursive=True)
    OBSERVER.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        OBSERVER.stop()
    OBSERVER.join()
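
A brief, hedged usage sketch of the parse_filepath() helper above (the path is
illustrative, and the handler class name is assumed from the __main__ block):

# On a POSIX system:
#   FlickrboxEventHandler.parse_filepath('/home/user/Flickrbox/Vacation/beach.jpg')
# returns:
#   {'photoset': 'Vacation', 'photo': 'beach', 'ext': '.jpg'}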
Ejemplo n.º 50
0
class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):
    """SQLite-backed event log storage.

    Users should not directly instantiate this class; it is instantiated by internal machinery when
    ``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.

    This is the default event log storage when none is specified in the ``dagster.yaml``.

    To explicitly specify SQLite for event log storage, you can add a block such as the following
    to your ``dagster.yaml``:

    .. code-block:: YAML

        event_log_storage:
          module: dagster.core.storage.event_log
          class: SqliteEventLogStorage
          config:
            base_dir: /path/to/dir

    The ``base_dir`` param tells the event log storage where on disk to store the databases. To
    improve concurrent performance, event logs are stored in a separate SQLite database for each
    run.
    """
    def __init__(self, base_dir, inst_data=None):
        """Note that idempotent initialization of the SQLite database is done on a per-run_id
        basis in the body of connect, since each run is stored in a separate database."""
        self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))
        mkdir_p(self._base_dir)

        self._obs = None

        self._watchers = defaultdict(dict)
        self._inst_data = check.opt_inst_param(inst_data, "inst_data",
                                               ConfigurableClassData)

        # Used to ensure that each run ID attempts to initialize its DB the first time it connects,
        # ensuring that the database will be created if it doesn't exist
        self._initialized_dbs = set()

        # Ensure that multiple threads (like the event log watcher) interact safely with each other
        self._db_lock = threading.Lock()

        if not os.path.exists(self.path_for_shard(INDEX_SHARD_NAME)):
            conn_string = self.conn_string_for_shard(INDEX_SHARD_NAME)
            engine = create_engine(conn_string, poolclass=NullPool)
            self._initdb(engine)
            self.reindex_events()
            self.reindex_assets()

        super().__init__()

    def upgrade(self):
        all_run_ids = self.get_all_run_ids()
        print(  # pylint: disable=print-call
            f"Updating event log storage for {len(all_run_ids)} runs on disk..."
        )
        alembic_config = get_alembic_config(__file__)
        if all_run_ids:
            for run_id in tqdm(all_run_ids):
                with self.run_connection(run_id) as conn:
                    run_alembic_upgrade(alembic_config, conn, run_id)

        print("Updating event log storage for index db on disk...")  # pylint: disable=print-call
        with self.index_connection() as conn:
            run_alembic_upgrade(alembic_config, conn, "index")

        self._initialized_dbs = set()

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return {"base_dir": StringSource}

    @staticmethod
    def from_config_value(inst_data, config_value):
        return SqliteEventLogStorage(inst_data=inst_data, **config_value)

    def get_all_run_ids(self):
        all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))
        return [
            os.path.splitext(os.path.basename(filename))[0]
            for filename in all_filenames if
            os.path.splitext(os.path.basename(filename))[0] != INDEX_SHARD_NAME
        ]

    def path_for_shard(self, run_id):
        return os.path.join(self._base_dir,
                            "{run_id}.db".format(run_id=run_id))

    def conn_string_for_shard(self, shard_name):
        check.str_param(shard_name, "shard_name")
        return create_db_conn_string(self._base_dir, shard_name)

    def _initdb(self, engine):
        alembic_config = get_alembic_config(__file__)

        retry_limit = 10

        while True:
            try:

                with engine.connect() as connection:
                    db_revision, head_revision = check_alembic_revision(
                        alembic_config, connection)

                    if not (db_revision and head_revision):
                        SqlEventLogStorageMetadata.create_all(engine)
                        engine.execute("PRAGMA journal_mode=WAL;")
                        stamp_alembic_rev(alembic_config, connection)

                break
            except (db.exc.DatabaseError, sqlite3.DatabaseError,
                    sqlite3.OperationalError) as exc:
                # This is SQLite-specific handling for concurrency issues that can arise when
                # multiple processes (e.g. the dagit process and user code process) contend with
                # each other to init the db. When we hit the following errors, we know that another
                # process is on the case and we should retry.
                err_msg = str(exc)

                if not ("table asset_keys already exists" in err_msg
                        or "table secondary_indexes already exists" in err_msg
                        or "table event_logs already exists" in err_msg
                        or "database is locked" in err_msg
                        or "table alembic_version already exists" in err_msg or
                        "UNIQUE constraint failed: alembic_version.version_num"
                        in err_msg):
                    raise

                if retry_limit == 0:
                    raise
                else:
                    logging.info(
                        "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "
                        "retrying ({retry_limit} retries left). Exception: {str_exc}"
                        .format(retry_limit=retry_limit, str_exc=err_msg))
                    time.sleep(0.2)
                    retry_limit -= 1

    @contextmanager
    def _connect(self, shard):
        with self._db_lock:
            check.str_param(shard, "shard")

            conn_string = self.conn_string_for_shard(shard)
            engine = create_engine(conn_string, poolclass=NullPool)

            if not shard in self._initialized_dbs:
                self._initdb(engine)
                self._initialized_dbs.add(shard)

            conn = engine.connect()

            try:
                with handle_schema_errors(
                        conn,
                        get_alembic_config(__file__),
                        msg="SqliteEventLogStorage for shard {shard}".format(
                            shard=shard),
                ):
                    yield conn
            finally:
                conn.close()
            engine.dispose()

    def run_connection(self, run_id=None):
        return self._connect(run_id)

    def index_connection(self):
        return self._connect(INDEX_SHARD_NAME)

    def store_event(self, event):
        """
        Overridden method to replicate asset events in a central assets.db sqlite shard, enabling
        cross-run asset queries.

        Args:
            event (EventLogEntry): The event to store.
        """
        check.inst_param(event, "event", EventLogEntry)
        insert_event_statement = self.prepare_insert_event(event)
        run_id = event.run_id

        with self.run_connection(run_id) as conn:
            conn.execute(insert_event_statement)

        if event.is_dagster_event and event.dagster_event.asset_key:
            check.invariant(
                event.dagster_event_type
                == DagsterEventType.ASSET_MATERIALIZATION or
                event.dagster_event_type == DagsterEventType.ASSET_OBSERVATION,
                "Can only store asset materializations and observations in index database",
            )
            # mirror the event in the cross-run index database
            with self.index_connection() as conn:
                conn.execute(insert_event_statement)

            if event.dagster_event.is_step_materialization:
                # Currently, only materializations are stored in the asset catalog.
                # We will store observations after adding a column migration to
                # store latest asset observation timestamp in the asset key table.
                self.store_asset(event)

    def get_event_records(
        self,
        event_records_filter: Optional[EventRecordsFilter] = None,
        limit: Optional[int] = None,
        ascending: bool = False,
    ) -> Iterable[EventLogRecord]:
        """Overridden method to enable cross-run event queries in sqlite.

        The record id in sqlite does not auto increment cross runs, so instead of fetching events
        after record id, we only fetch events whose runs updated after update_timestamp.
        """
        check.opt_inst_param(event_records_filter, "event_records_filter",
                             EventRecordsFilter)
        check.opt_int_param(limit, "limit")
        check.bool_param(ascending, "ascending")

        is_asset_query = event_records_filter and (
            event_records_filter.event_type
            == DagsterEventType.ASSET_MATERIALIZATION
            or event_records_filter.event_type
            == DagsterEventType.ASSET_OBSERVATION)
        if is_asset_query:
            # asset materializations and observations get mirrored into the index shard, so no
            # custom run shard-aware cursor logic needed
            return super(SqliteEventLogStorage, self).get_event_records(
                event_records_filter=event_records_filter,
                limit=limit,
                ascending=ascending)

        query = db.select(
            [SqlEventLogStorageTable.c.id, SqlEventLogStorageTable.c.event])
        if event_records_filter and event_records_filter.asset_key:
            asset_details = next(
                iter(self._get_assets_details([event_records_filter.asset_key
                                               ])))
        else:
            asset_details = None

        if not event_records_filter or not (isinstance(
                event_records_filter.after_cursor, RunShardedEventsCursor)):
            warnings.warn("""
                Called `get_event_records` on a run-sharded event log storage with a query that
                is not run aware (e.g. not using a RunShardedEventsCursor).  This likely has poor
                performance characteristics.  Consider adding a RunShardedEventsCursor to your query
                or switching your instance configuration to use a non-run sharded event log storage
                (e.g. PostgresEventLogStorage, ConsolidatedSqliteEventLogStorage)
            """)

        query = self._apply_filter_to_query(
            query=query,
            event_records_filter=event_records_filter,
            asset_details=asset_details,
            apply_cursor_filters=
            False,  # run-sharded cursor filters don't really make sense
        )
        if limit:
            query = query.limit(limit)
        if ascending:
            query = query.order_by(SqlEventLogStorageTable.c.timestamp.asc())
        else:
            query = query.order_by(SqlEventLogStorageTable.c.timestamp.desc())

        # workaround for the run-shard sqlite to enable cross-run queries: get a list of run_ids
        # whose events may qualify the query, and then open run_connection per run_id at a time.
        run_updated_after = (
            event_records_filter.after_cursor.run_updated_after
            if event_records_filter and isinstance(
                event_records_filter.after_cursor, RunShardedEventsCursor) else
            None)
        run_records = self._instance.get_run_records(
            filters=PipelineRunsFilter(updated_after=run_updated_after),
            order_by="update_timestamp",
            ascending=ascending,
        )

        event_records = []
        for run_record in run_records:
            run_id = run_record.pipeline_run.run_id
            with self.run_connection(run_id) as conn:
                results = conn.execute(query).fetchall()

            for row_id, json_str in results:
                try:
                    event_record = deserialize_json_to_dagster_namedtuple(
                        json_str)
                    if not isinstance(event_record, EventLogEntry):
                        logging.warning(
                            "Could not resolve event record as EventLogEntry for id `{}`."
                            .format(row_id))
                        continue
                    else:
                        event_records.append(
                            EventLogRecord(storage_id=row_id,
                                           event_log_entry=event_record))
                    if limit and len(event_records) >= limit:
                        break
                except seven.JSONDecodeError:
                    logging.warning(
                        "Could not parse event record id `{}`.".format(row_id))

            if limit and len(event_records) >= limit:
                break

        return event_records[:limit]

    def delete_events(self, run_id):
        with self.run_connection(run_id) as conn:
            self.delete_events_for_run(conn, run_id)

        # delete the mirrored event in the cross-run index database
        with self.index_connection() as conn:
            self.delete_events_for_run(conn, run_id)

    def wipe(self):
        # should delete all the run-sharded dbs as well as the index db
        for filename in (glob.glob(os.path.join(self._base_dir, "*.db")) +
                         glob.glob(os.path.join(self._base_dir, "*.db-wal")) +
                         glob.glob(os.path.join(self._base_dir, "*.db-shm"))):
            os.unlink(filename)

        self._initialized_dbs = set()

    def _delete_mirrored_events_for_asset_key(self, asset_key):
        with self.index_connection() as conn:
            conn.execute(SqlEventLogStorageTable.delete().where(  # pylint: disable=no-value-for-parameter
                db.or_(
                    SqlEventLogStorageTable.c.asset_key ==
                    asset_key.to_string(),
                    SqlEventLogStorageTable.c.asset_key == asset_key.to_string(
                        legacy=True),
                )))

    def wipe_asset(self, asset_key):
        # default implementation will update the event_logs in the sharded dbs, and the asset_key
        # table in the asset shard, but will not remove the mirrored event_log events in the asset
        # shard
        super(SqliteEventLogStorage, self).wipe_asset(asset_key)
        self._delete_mirrored_events_for_asset_key(asset_key)

    def watch(self, run_id, start_cursor, callback):
        if not self._obs:
            self._obs = Observer()
            self._obs.start()

        watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback,
                                                 start_cursor)
        self._watchers[run_id][callback] = (
            watchdog,
            self._obs.schedule(watchdog, self._base_dir, True),
        )

    def end_watch(self, run_id, handler):
        if handler in self._watchers[run_id]:
            event_handler, watch = self._watchers[run_id][handler]
            self._obs.remove_handler_for_watch(event_handler, watch)
            del self._watchers[run_id][handler]

    def dispose(self):
        if self._obs:
            self._obs.stop()
            self._obs.join(timeout=15)
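
A short, hedged sketch (illustrative paths, not taken from the source) of the
on-disk layout implied by path_for_shard() and get_all_run_ids() above:

# With base_dir = '/var/dagster/event_logs', one SQLite shard is kept per run:
#   /var/dagster/event_logs/<INDEX_SHARD_NAME>.db   <- cross-run index shard
#   /var/dagster/event_logs/<run_id>.db             <- one shard per run
# get_all_run_ids() then returns every '<run_id>' stem, excluding the index shard.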
Ejemplo n.º 51
0
def watch(build_commands=None, patterns=None, ignore_patterns=None):
    """TODO(keir) docstring"""

    _LOG.info('Starting Pigweed build watcher')

    # If no build directory was specified, search the tree for GN build
    # directories and try to build them all. In the future this may cause
    # slow startup, but for now this is fast enough.
    if not build_commands:
        build_commands = []
        _LOG.info('Searching for GN build dirs...')
        gn_args_files = glob.glob('**/args.gn', recursive=True)
        for gn_args_file in gn_args_files:
            gn_build_dir = pathlib.Path(gn_args_file).parent
            if gn_build_dir.is_dir():
                build_commands.append(BuildCommand(gn_build_dir))

    # Make sure we found something; if not, bail.
    if not build_commands:
        _die("No build dirs found. Did you forget to 'gn gen out'?")

    # Verify that the build output directories exist.
    for i, build_target in enumerate(build_commands, 1):
        if not build_target.build_dir.is_dir():
            _die("Build directory doesn't exist: %s", build_target)
        else:
            _LOG.info('Will build [%d/%d]: %s', i, len(build_commands),
                      build_target)

    _LOG.debug('Patterns: %s', patterns)

    # TODO(keir): Change the watcher to selectively watch some
    # subdirectories, rather than watching everything under a single path.
    #
    # The problem with the current approach is that Ninja's building
    # triggers many events, which are needlessly sent to this script.
    path_of_directory_to_watch = '.'

    # Try to make a short display path for the watched directory that has
    # "$HOME" instead of the full home directory. This is nice for users
    # who have deeply nested home directories.
    path_to_log = pathlib.Path(path_of_directory_to_watch).resolve()
    try:
        path_to_log = path_to_log.relative_to(pathlib.Path.home())
        path_to_log = f'$HOME/{path_to_log}'
    except ValueError:
        # The directory is somewhere other than inside the users home.
        path_to_log = path_of_directory_to_watch

    # Ignore the user-specified patterns.
    ignore_patterns = (ignore_patterns.split(_WATCH_PATTERN_DELIMITER)
                       if ignore_patterns else [])

    ignore_dirs = ['.presubmit', '.python3-env']

    env = pw_cli.env.pigweed_environment()
    if env.PW_EMOJI:
        charset = _EMOJI_CHARSET
    else:
        charset = _ASCII_CHARSET

    event_handler = PigweedBuildWatcher(
        patterns=patterns.split(_WATCH_PATTERN_DELIMITER),
        ignore_patterns=ignore_patterns,
        build_commands=build_commands,
        ignore_dirs=ignore_dirs,
        charset=charset,
    )

    try:
        # It can take a while to configure the filesystem watcher, so have the
        # message reflect that with the "...". Run inside the try: block to
        # gracefully handle the user Ctrl-C'ing out during startup.
        _LOG.info('Attaching filesystem watcher to %s/...', path_to_log)
        observer = Observer()
        observer.schedule(
            event_handler,
            path_of_directory_to_watch,
            recursive=True,
        )
        observer.start()

        event_handler.debouncer.press('Triggering initial build...')

        while observer.is_alive():
            observer.join(1)
    # Ctrl-C on Unix generates KeyboardInterrupt
    # Ctrl-Z on Windows generates EOFError
    except (KeyboardInterrupt, EOFError):
        _exit_due_to_interrupt()

    _LOG.critical('Should never get here')
    observer.join()
Ejemplo n.º 52
0
    def run(self):
        global timeout, w, tuples, regexes, json_pending, last_push, config
        fp = {}
        if osname == "linux":
            w = watcher.AutoWatcher()
            for path in config.get('Analyzer', 'paths').split(","):
                try:
                    print("Recursively monitoring " + path.strip() + "...")
                    w.add_all(path.strip(), inotify.IN_ALL_EVENTS)
                except OSError as err:
                    pass

            if not w.num_watches():
                print("No paths to analyze, nothing to do!")
                sys.exit(1)

            poll = select.poll()
            poll.register(w, select.POLLIN)

            timeout = None

            threshold = watcher.Threshold(w, 256)

            inodes = {}
            inodes_path = {}
            xes = connect_es(config)
            while True:
                events = poll.poll(timeout)
                nread = 0
                if threshold() or not events:
                    #print('reading,', threshold.readable(), 'bytes available')
                    for evt in w.read(0):
                        nread += 1

                        # The last thing to do to improve efficiency here would be
                        # to coalesce similar events before passing them up to a
                        # higher level.

                        # For example, it's overwhelmingly common to have a stream
                        # of inotify events contain a creation, followed by
                        # multiple modifications of the created file.

                        # Recognising this pattern (and others) and coalescing
                        # these events into a single creation event would reduce
                        # the number of trips into our app's presumably more
                        # computationally expensive upper layers.
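                        # A hedged sketch (not in the original code) of such
                        # coalescing: keep only the first event seen per path
                        # before handing the batch to the upper layers, e.g.
                        #   coalesced = {}
                        #   for e in w.read(0):
                        #       coalesced.setdefault(e.fullpath, e)
                        #   events = coalesced.values()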
                        masks = inotify.decode_mask(evt.mask)
                        #print(masks)
                        path = evt.fullpath
                        #print(repr(evt.fullpath), ' | '.join(masks))
                        try:
                            if not u'IN_ISDIR' in masks:

                                if (u'IN_MOVED_FROM'
                                        in masks) and (path in filehandles):
                                    print(
                                        "File moved, closing original handle")
                                    try:
                                        filehandles[path].close()
                                    except Exception as err:
                                        print(err)
                                    del filehandles[path]
                                    inode = inodes_path[path]
                                    del inodes[inode]

                                elif (not u'IN_DELETE' in masks) and (
                                        not path in filehandles) and (
                                            path.find(".gz") == -1):
                                    try:
                                        print("Opening " + path)
                                        idata = os.stat(path)
                                        inode = idata.st_ino
                                        if not inode in inodes:
                                            filehandles[path] = open(path, "r")
                                            print("Started watching " + path)
                                            filehandles[path].seek(0, 2)
                                            inodes[inode] = path
                                            inodes_path[path] = inode

                                    except Exception as err:
                                        print(err)
                                        try:
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]

                                # First time we've discovered this file?
                                if u'IN_CLOSE_NOWRITE' in masks and not path in filehandles:
                                    pass

                                # New file created in a folder we're watching??
                                elif u'IN_CREATE' in masks:
                                    pass

                                # File truncated?
                                elif u'IN_CLOSE_WRITE' in masks and path in filehandles:
                                    #    print(path + " truncated!")
                                    filehandles[path].seek(0, 2)

                                # File contents modified?
                                elif u'IN_MODIFY' in masks and path in filehandles:
                                    #      print(path + " was modified")
                                    rd = 0
                                    data = ""
                                    #print("Change in " + path)
                                    try:
                                        while True:
                                            line = filehandles[path].readline()
                                            if not line:
                                                #filehandles[path].seek(0,2)
                                                break
                                            else:
                                                rd += len(line)
                                                data += line
                                        #print("Read %u bytes from %s" % (rd, path))
                                        parseLine(path, data)
                                    except Exception as err:
                                        try:
                                            print("Could not utilize " + path +
                                                  ", closing.." + err)
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]

                                # File deleted? (close handle)
                                elif u'IN_DELETE' in masks:
                                    if path in filehandles:
                                        print("Closed " + path)
                                        try:
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]
                                        print("Stopped watching " + path)

                                else:
                                    pass

                        except Exception as err:
                            print(err)

                for x in json_pending:
                    if (time.time() >
                        (last_push[x] + 15)) or len(json_pending[x]) >= 50:
                        if not x in fp:
                            fp[x] = True
                            print("First push for " + x + "!")
                        t = NodeThread()
                        t.assign(json_pending[x], x, xes)
                        t.start()
                        json_pending[x] = []
                        last_push[x] = time.time()

                if nread:
                    #print('plugging back in')
                    timeout = None
                    poll.register(w, select.POLLIN)
                else:
                    #print('unplugging,', threshold.readable(), 'bytes available')
                    timeout = 1000
                    poll.unregister(w)

        if osname == "freebsd":
            xes = connect_es(config)
            observer = Observer()
            for path in paths:
                observer.schedule(BSDHandler(), path, recursive=True)
                print("Recursively monitoring " + path.strip() + "...")
            observer.start()
            try:
                while True:
                    for x in json_pending:
                        if len(json_pending[x]) > 0 and (
                            (time.time() > (last_push[x] + 15))
                                or len(json_pending[x]) >= 50):
                            if not x in fp:
                                fp[x] = True
                                print("First push for " + x + "!")
                            t = NodeThread()
                            t.assign(json_pending[x], x, xes)
                            t.start()
                            json_pending[x] = []
                            last_push[x] = time.time()
                    time.sleep(0.5)

            except KeyboardInterrupt:
                observer.stop()
            observer.join()
Ejemplo n.º 53
0
class MediaMiddleware(MiddlewareMixin):
    """
    Middleware for serving and browser-side caching of media files.

    This MUST be your *first* entry in MIDDLEWARE_CLASSES. Otherwise, some
    other middleware might add ETags or otherwise manipulate the caching
    headers which would result in the browser doing unnecessary HTTP
    roundtrips for unchanged media.
    """
    MAX_AGE = 60 * 60 * 24 * 365

    def __init__(self, get_response=None):
        super(MediaMiddleware, self).__init__(get_response)
        self._observer_started = False
        self._cleaned_up = False

        # Go no further if not in dev mode
        if not MEDIA_DEV_MODE:
            return

        # v1.14 register ourselves for cleanup on exit
        global _middleware_instance
        _middleware_instance = self

        # Need an initial refresh to prevent errors on the first request
        refresh_dev_names()

        # Monitor static files for changes (v1.13 - when not unit testing)
        if not UNIT_TESTING:
            self.filesystem_event_handler = RefreshingEventHandler()
            self.filesystem_observer = Observer()
            for static_dir in get_media_dirs():
                self.filesystem_observer.schedule(
                    self.filesystem_event_handler,
                    path=static_dir,
                    recursive=True)
            self.filesystem_observer.start()
            self._observer_started = True

    def __del__(self):
        self.cleanup()

    def cleanup(self):
        if not self._cleaned_up:
            if hasattr(self, 'filesystem_observer'):
                self.filesystem_observer.unschedule_all()
                # Only try to stop if __init__ ran a successful start()
                if self._observer_started:
                    self.filesystem_observer.stop()
                    self.filesystem_observer.join()
        self._cleaned_up = True

    def process_request(self, request):
        if not MEDIA_DEV_MODE:
            return
        if not request.path.startswith(DEV_MEDIA_URL):
            return

        filename = request.path[len(DEV_MEDIA_URL):]
        try:
            backend = get_backend(filename)
        except KeyError:
            raise Http404(
                'The mediagenerator could not find the media file "%s"' %
                filename)
        with _refresh_names_lock:  # Don't serve while still refreshing
            content, mimetype = backend.get_dev_output(filename)
        if not mimetype:
            mimetype = 'application/octet-stream'
        if isinstance(content, unicode):
            content = content.encode('utf-8')
        if mimetype.startswith('text/') or mimetype in TEXT_MIME_TYPES:
            mimetype += '; charset=utf-8'
        response = HttpResponse(content, content_type=mimetype)
        response['Content-Length'] = len(content)

        # Cache manifest files MUST NEVER be cached or you'll be unable to update
        # your cached app!!!
        if response['Content-Type'] != 'text/cache-manifest' and \
                response.status_code == 200:
            patch_cache_control(response, public=True, max_age=self.MAX_AGE)
            response['Expires'] = http_date(time.time() + self.MAX_AGE)
        return response
Ejemplo n.º 54
0
class Upshot(NSObject):
    """OS X status bar icon."""
    image_paths = {
        'icon16': 'icon16.png',
        'icon16-off': 'icon16-off.png',
    }
    images = {}
    statusitem = None
    observer = None  # Screenshot directory observer.
    menuitems = {}  # Shortcut to our menuitems.

    def applicationDidFinishLaunching_(self, notification):
        if not DROPBOX_DIR:  # Oh-oh.
            alert(
                'Unable to detect Dropbox folder',
                'UpShot requires Dropbox, for now. Please install it, then '
                'try again.', ['OK'])
            self.quit_(self)

        if not os.path.exists(PUBLIC_DIR):  # No public folder?
            pressed = alert(
                'Unable to detect Public Dropbox folder',
                'UpShot requires a Dropbox Public folder. You seem to have '
                'Dropbox, but no Public folder.\n\n'
                'Since October 2012, Dropbox will only create a public '
                'folder for you if you opt in to it.\n\n'
                'Please do so before using UpShot.',
                ['Learn How to Create a Dropbox Public Folder', 'Quit UpShot'])
            if pressed == NSAlertFirstButtonReturn:
                # Open Dropbox opt-in
                sw = NSWorkspace.sharedWorkspace()
                sw.openURL_(NSURL.URLWithString_(DROPBOX_PUBLIC_INFO))
            self.quit_(self)

        self.build_menu()
        # Go do something useful.
        if utils.get_pref('dropboxid'):
            self.startListening_()
        else:
            self.stopListening_()
            DropboxDetect.DropboxDetectWindowController.showWindow()

    def build_menu(self):
        """Build the OS X status bar menu."""
        # Create the statusbar item
        statusbar = NSStatusBar.systemStatusBar()
        self.statusitem = statusbar.statusItemWithLength_(
            NSVariableStatusItemLength)

        # Set statusbar icon and color/grayscale mode.
        for tag, img in self.image_paths.items():
            self.images[tag] = NSImage.alloc().initByReferencingFile_(img)
            self.images[tag].setTemplate_(
                utils.get_pref('iconset') == 'grayscale')
        self.statusitem.setImage_(self.images['icon16'])

        self.statusitem.setHighlightMode_(1)
        self.statusitem.setToolTip_('Upshot Screenshot Sharing')

        # Build menu.
        self.menu = NSMenu.alloc().init()

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Browse Screenshots', 'openShareDir:', '')
        self.menu.addItem_(m)
        self.menuitems['opensharedir'] = m

        self.menu.addItem_(NSMenuItem.separatorItem())

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Start Screenshot Sharing', 'startListening:', '')
        m.setHidden_(True)  # Sharing is on by default.
        self.menu.addItem_(m)
        self.menuitems['start'] = m

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Pause Screenshot Sharing', 'stopListening:', '')
        self.menu.addItem_(m)
        self.menuitems['stop'] = m

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            "Need to detect Dropbox ID. Open Preferences!", '', '')
        m.setHidden_(True)  # We hopefully don't need this.
        self.menu.addItem_(m)
        self.menuitems['needpref'] = m

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Preferences...', 'openPreferences:', '')
        self.menu.addItem_(m)
        self.menuitems['preferences'] = m

        self.menu.addItem_(NSMenuItem.separatorItem())

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Open UpShot Project Website', 'website:', '')
        self.menu.addItem_(m)
        self.menuitems['website'] = m

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'About UpShot', 'about:', '')
        self.menu.addItem_(m)
        self.menuitems['about'] = m

        self.menu.addItem_(NSMenuItem.separatorItem())

        m = NSMenuItem.alloc().initWithTitle_action_keyEquivalent_(
            'Quit UpShot', 'quit:', '')
        self.menu.addItem_(m)
        self.menuitems['quit'] = m

        self.statusitem.setMenu_(self.menu)

    def update_menu(self):
        """Update status bar menu based on app status."""
        if self.statusitem is None:
            return

        # Apply iconset
        self.images['icon16'].setTemplate_(
            utils.get_pref('iconset') == 'grayscale')

        running = (self.observer is not None)
        self.statusitem.setImage_(
            self.images['icon16' if running else 'icon16-off'])

        if utils.get_pref('dropboxid'):  # Runnable.
            self.menuitems['stop'].setHidden_(not running)
            self.menuitems['start'].setHidden_(running)
            self.menuitems['needpref'].setHidden_(True)
        else:  # Need settings.
            self.menuitems['start'].setHidden_(True)
            self.menuitems['stop'].setHidden_(True)
            self.menuitems['needpref'].setHidden_(False)

    def openShareDir_(self, sender=None):
        """Open the share directory in Finder."""
        sw = NSWorkspace.sharedWorkspace()
        sw.openFile_(SHARE_DIR)

    def about_(self, sender=None):
        """Open standard About dialog."""
        app = NSApplication.sharedApplication()
        app.activateIgnoringOtherApps_(True)
        app.orderFrontStandardAboutPanel_(sender)

    def website_(self, sender=None):
        """Open the UpShot homepage in a browser."""
        sw = NSWorkspace.sharedWorkspace()
        sw.openURL_(NSURL.URLWithString_(HOMEPAGE_URL))

    def openPreferences_(self, sender=None):
        Preferences.PreferencesWindowController.showWindow()

    def startListening_(self, sender=None):
        """Start listening for changes to the screenshot dir."""
        event_handler = ScreenshotHandler()
        self.observer = Observer()
        self.observer.schedule(event_handler, path=SCREENSHOT_DIR)
        self.observer.start()
        self.update_menu()
        log.debug('Listening for screen shots to be added to: %s' %
                  (SCREENSHOT_DIR))

        growl = Growler.alloc().init()
        growl.notify('UpShot started', 'and listening for screenshots!')

    def stopListening_(self, sender=None):
        """Stop listening to changes ot the screenshot dir."""
        if self.observer is not None:
            self.observer.stop()
            self.observer.join()
            self.observer = None
            log.debug('Stop listening for screenshots.')

            growl = Growler.alloc().init()
            growl.notify('UpShot paused',
                         'Not listening for screenshots for now!')
        self.update_menu()

    def restart_(self, sender=None):
        self.stopListening_()
        self.startListening_()

    def quit_(self, sender=None):
        """Default quit event."""
        log.debug('Terminating.')
        self.stopListening_()
        NSApplication.sharedApplication().terminate_(sender)
Ejemplo n.º 55
0
class UnveillanceFSEHandler(FileSystemEventHandler):
    def __init__(self):
        self.watcher_pid_file = os.path.join(MONITOR_ROOT, "watcher.pid.txt")
        self.watcher_log_file = os.path.join(MONITOR_ROOT, "watcher.log.txt")

        self.annex_observer = Observer()
        self.netcat_queue = []
        self.cleanup_upload_lock = False

        FileSystemEventHandler.__init__(self)

    def checkForDuplicate(self, _id, from_file=None):
        if from_file is not None and _id is None:
            _id = self.get_new_hash(from_file)

        url = "%s/documents/?_id=%s" % (buildServerURL(), _id)
        try:
            r = requests.get(url, verify=False)
        except Exception as e:
            if DEBUG:
                print e
            return None

        try:
            r = json.loads(r.content)
            if 'data' in r.keys():
                return r['data']
        except Exception as e:
            if DEBUG:
                print e

        return None

    def cleanupUploads(self):
        # THIS ANNOYS ME.
        # self.cleanup_upload_lock = True
        if DEBUG:
            print "starting watcher cleanup cron job"

    def addToNetcatQueue(self, netcat_stub, send_now=True):
        if netcat_stub['save_as'] not in [
                ns['save_as'] for ns in self.netcat_queue
        ]:
            self.netcat_queue.append(netcat_stub)

        return self.uploadToAnnex(netcat_stub)

    def get_new_hash(self, file):
        if type(file) in [str, unicode]:
            new_hash = hashEntireFile(file)
        else:
            new_hash = hashEntireStream(file)

        if DEBUG:
            print "NEW HASH: %s" % new_hash

        return new_hash

    def uploadToAnnex(self, netcat_stub):
        use_git_annex = False
        this_dir = os.getcwd()
        os.chdir(ANNEX_DIR)

        if type(netcat_stub['file']) in [str, unicode]:
            if GIT_ANNEX is not None:
                use_git_annex = True
                if DEBUG:
                    print "GIT ANNEX ATTACHED TO INSTANCE."

            if use_git_annex:
                with settings(warn_only=True):
                    # has this stub been uploaded?
                    is_absorbed = local(
                        "%s metadata \"%s\" --json --get=uv_uploaded" %
                        (GIT_ANNEX, netcat_stub['save_as']),
                        capture=True)

                    if DEBUG:
                        print "%s absorbed? (uv_uploaded = %s type = %s)" % (
                            netcat_stub['save_as'], is_absorbed,
                            type(is_absorbed))

                    if is_absorbed == "" or "False":
                        is_absorbed = False
                    elif is_absorbed == "True":
                        is_absorbed = True
                    else:
                        is_absorbed = False
            else:
                is_absorbed = False
        else:
            is_absorbed = False

        if is_absorbed:
            if DEBUG:
                print "%s IS absorbed (uv_uploaded = %s)" % (
                    netcat_stub['save_as'], is_absorbed)

            os.chdir(this_dir)
            return None

        new_hash = self.get_new_hash(netcat_stub['file'])

        possible_duplicate = self.checkForDuplicate(new_hash)
        if possible_duplicate is not None:

            if DEBUG:
                print "Document already exists in Annex and will not be uploaded!  Here it is:"
                print possible_duplicate

            p = UnveillanceFabricProcess(register_upload_attempt,
                                         {'_id': possible_duplicate['_id']})
            p.join()

            os.chdir(this_dir)
            self.netcat_queue.remove(netcat_stub)

            possible_duplicate = self.checkForDuplicate(
                possible_duplicate['_id'])
            possible_duplicate.update({
                'uploaded': False,
                'duplicate_attempt': True
            })
            return possible_duplicate

        with settings(warn_only=True):
            new_save_as = generateMD5Hash(content=new_hash,
                                          salt=local("whoami", capture=True))

        if type(netcat_stub['file']) in [str, unicode]:
            new_file = netcat_stub['file'].replace(netcat_stub['save_as'],
                                                   new_save_as)

            with settings(warn_only=True):
                local("mv \"%s\" %s" % (netcat_stub['file'], new_file))

                if use_git_annex:
                    local("%s metadata %s --json --set=uv_file_alias=\"%s\"" %
                          (GIT_ANNEX, new_file, netcat_stub['save_as']))

            netcat_stub['file'] = new_file

        netcat_stub['alias'] = netcat_stub['save_as']
        netcat_stub['save_as'] = new_save_as
        success_tag = False

        # look up to see if this file is already in the annex

        with settings(warn_only=True):
            if type(netcat_stub['file']) in [str, unicode] and use_git_annex:
                local("%s add %s" % (GIT_ANNEX, netcat_stub['save_as']))

            p = UnveillanceFabricProcess(netcat, netcat_stub)
            p.join()

            if p.error is None and p.output is not None:
                success_tag = True

            if DEBUG:
                print "NETCAT RESULT: (type=%s, success=%s)" % (type(
                    p.output), success_tag)
                print "NETCAT ERROR (none is good!): (type=%s)" % type(p.error)

            if p.output is not None and DEBUG:
                for o in p.output:
                    print "\n%s\n" % o

            if p.error is not None and DEBUG:
                print "ERROR:"
                print p.error

            if type(netcat_stub['file']) in [str, unicode] and use_git_annex:
                local("%s metadata \"%s\" --json --set=uv_uploaded=%s" %
                      (GIT_ANNEX, netcat_stub['save_as'], str(success_tag)))

            self.netcat_queue.remove(netcat_stub)

        os.chdir(this_dir)
        return {'uploaded': success_tag, '_id': new_hash}

    def on_created(self, event):
        use_git_annex = False

        if GIT_ANNEX is not None:
            if DEBUG:
                print "GIT ANNEX ATTACHED TO INSTANCE."

            use_git_annex = True

        if event.event_type != "created":
            return

        if event.src_path == os.path.join(ANNEX_DIR, ".git"):
            print "is git..."
            print event.src_path
            return

        if re.match(re.compile("%s/.*" % os.path.join(ANNEX_DIR, ".git")),
                    event.src_path) is not None:
            return

        if re.match(r'.*\.DS_Store', event.src_path) is not None:
            return

        sleep(3)

        filename = event.src_path.split("/")[-1]
        never_upload = False

        if use_git_annex:
            with settings(warn_only=True):
                # has this stub been uploaded?
                never_upload = local(
                    "%s metadata \"%s\" --json --get=uv_never_upload" %
                    (GIT_ANNEX, filename),
                    capture=True)

                if DEBUG:
                    print "%s valid? (uv_never_upload = %s type = %s)" % ( \
                     filename, never_upload, type(never_upload))

                if never_upload == "True":
                    never_upload = True
                elif never_upload == "":
                    never_upload = False

        print "NEVER UPLOAD? %s" % never_upload
        if never_upload:
            return

        netcat_stub = None
        try:
            netcat_stub = [
                ns for ns in self.netcat_queue if ns['save_as'] == filename
            ][0]
        except Exception as e:
            if DEBUG: print "NO NETCAT STUB FOUND FOR %s" % filename

        #if DEBUG: print "NEW EVENT:\ntype: %s\nis dir: %s\npath: %s\n" % (event.event_type, event.is_directory, event.src_path)

        if netcat_stub is None:
            netcat_stub = {
                'file': event.src_path,
                'save_as': filename,
                'importer_source': "file_added"
            }

            self.addToNetcatQueue(netcat_stub)

        sleep(5)

    def on_message(self, message):
        print "MESSAGE: %s" % message

    def on_open(self, info):
        print "ON OPEN"

    def on_close(self):
        print "ON CLOSE"

    def startAnnexObserver(self):
        print "STARTING OBSERVER on %s" % ANNEX_DIR

        startDaemon(self.watcher_log_file, self.watcher_pid_file)
        self.annex_observer.schedule(self, ANNEX_DIR, recursive=True)
        self.annex_observer.start()

        while True:
            sleep(1)
        '''
			if not self.cleanup_upload_lock:
				t = Thread(target=self.cleanupUploads) 
				t.start()
		'''

    def stopAnnexObserver(self):
        print "STOPPING OBSERVER"
        self.annex_observer.stop()
        self.annex_observer.join()

        stopDaemon(self.watcher_pid_file)
Ejemplo n.º 56
0
class SupysonicWatcher:
    def __init__(self, config):
        self.__delay = config.DAEMON["wait_delay"]
        self.__handler = SupysonicWatcherEventHandler(
            config.BASE["scanner_extensions"])

        self.__folders = {}
        self.__queue = None
        self.__observer = None

    def add_folder(self, folder):
        if isinstance(folder, Folder):
            path = folder.path
        elif isinstance(folder, str):
            path = folder
        else:
            raise TypeError("Expecting string or Folder, got " +
                            str(type(folder)))

        logger.info("Scheduling watcher for %s", path)
        watch = self.__observer.schedule(self.__handler, path, recursive=True)
        self.__folders[path] = watch

    def remove_folder(self, folder):
        if isinstance(folder, Folder):
            path = folder.path
        elif isinstance(folder, str):
            path = folder
        else:
            raise TypeError("Expecting string or Folder, got " +
                            str(type(folder)))

        logger.info("Unscheduling watcher for %s", path)
        self.__observer.unschedule(self.__folders[path])
        del self.__folders[path]
        self.__queue.unschedule_paths(path)

    def start(self):
        self.__queue = ScannerProcessingQueue(self.__delay)
        self.__observer = Observer()
        self.__handler.queue = self.__queue

        with db_session:
            for folder in Folder.select(lambda f: f.root):
                self.add_folder(folder)

        logger.info("Starting watcher")
        self.__queue.start()
        self.__observer.start()

    def stop(self):
        logger.info("Stopping watcher")
        if self.__observer is not None:
            self.__observer.stop()
            self.__observer.join()
        if self.__queue is not None:
            self.__queue.stop()
            self.__queue.join()

        self.__observer = None
        self.__queue = None
        self.__handler.queue = None

    @property
    def running(self):
        return (self.__queue is not None and self.__observer is not None
                and self.__queue.is_alive() and self.__observer.is_alive())
Ejemplo n.º 57
0
class FSHandler:
    path = os.getenv("SERVER_ROUTE")
    patterns = "*"
    ignore_patterns = ""
    ignore_directories = False
    case_sensitive = True
    option = ''
    current_file = ''
    file_information = ''

    def __init__(self, socket, host, port, active_connections):
        self.socket = socket
        self.host = host
        self.port = port
        self.active_connections = active_connections
        self.my_event_handler = PatternMatchingEventHandler(
            self.patterns, self.ignore_patterns, self.ignore_directories,
            self.case_sensitive)
        self.configure()
        self.create_observer()

    def configure(self):
        self.my_event_handler.on_created = self.on_created
        self.my_event_handler.on_deleted = self.on_deleted
        self.my_event_handler.on_modified = self.on_modified
        self.my_event_handler.on_moved = self.on_moved

    def create_observer(self):
        try:
            go_recursively = True
            self.my_observer = Observer()
            self.my_observer.schedule(self.my_event_handler,
                                      self.path,
                                      recursive=go_recursively)
        except Exception as e:
            print("Error creating observer:", e)

    def start_observer(self):
        self.my_observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.my_observer.stop()
            self.my_observer.join()

    def on_created(self, event):
        try:
            client_message = "action,created," + event.src_path.split("\\")[-1]
            if self.option == 'created' and self.current_file == event.src_path:
                print()
            else:
                self.option = 'created'
                self.current_file = event.src_path
                for connection in self.active_connections:
                    connection.send(bytes(client_message, 'latin1'))
        except Exception:
            print("Permission error")

    def on_deleted(self, event):
        client_message = "action,deleted," + event.src_path.split("\\")[-1]
        if self.option == 'deleted' and self.current_file == event.src_path:
            print()
        else:
            self.option = 'deleted'
            self.current_file = event.src_path
            for connection in self.active_connections:
                connection.send(bytes(client_message, 'latin1'))

    def send_info_data(self, filename):
        # Stream the file to every active connection in 1024-byte chunks, then
        # send the literal "__##Finish__##" marker so receivers know the
        # transfer is complete.
        if os.path.isfile(filename):
            file = open(filename, 'rb')
            content = file.read(1024)
            while content:
                for connection in self.active_connections:
                    connection.send(content)
                content = file.read(1024)
            try:
                for connection in self.active_connections:
                    connection.send(bytes("__##Finish__##", "latin1"))
            except Exception:
                for connection in self.active_connections:
                    connection.send(bytes("__##Finish__##", "latin1"))
            file.close()

    def on_modified(self, event):
        if self.option == 'modified' and self.current_file == event.src_path:
            print()
        else:
            if os.path.isfile(event.src_path) == True:
                self.option = 'modified'
                self.current_file = event.src_path
                client_message = 'action,modified,' + event.src_path.split(
                    "\\")[-1]
                for connection in self.active_connections:
                    connection.send(bytes(client_message, 'latin1'))
                self.send_info_data(event.src_path)

    def on_moved(self, event):
        client_message = "action,moved," + event.src_path.split(
            "\\")[-1] + "," + event.dest_path.split("\\")[-1]
        if self.option == 'moved' and self.current_file == event.src_path:
            print()
        else:
            self.option = 'moved'
            self.current_file = event.dest_path
            for connection in self.active_connections:
                connection.send(bytes(client_message, 'latin1'))
            self.send_info_data(event.dest_path)
Ejemplo n.º 58
0
class AutoOcrScheduler(object):

    SINGLE_FOLDER = 'single_folder'
    MIRROR_TREE = 'mirror_tree'

    OUTPUT_MODES = [SINGLE_FOLDER, MIRROR_TREE]

    def __init__(self,
                 config_dir,
                 input_dir,
                 output_dir,
                 output_mode,
                 success_action=OcrTask.ON_SUCCESS_DO_NOTHING,
                 archive_dir=None,
                 process_existing_files=False):
        self.logger = logger.getChild('scheduler')

        self.config_dir = local.path(config_dir)
        self.input_dir = local.path(input_dir)
        self.output_dir = local.path(output_dir)
        if self.input_dir == self.output_dir:
            raise AutoOcrSchedulerError(
                'Invalid configuration. Input and output directories must not be the same to avoid recursive OCR invocation!'
            )
        self.output_mode = output_mode.lower()
        if self.output_mode not in AutoOcrScheduler.OUTPUT_MODES:
            raise AutoOcrSchedulerError(
                'Invalid output mode: {}. Must be one of: {}'.format(
                    self.output_mode,
                    ', '.join(AutoOcrScheduler.OUTPUT_MODES)))
        self.success_action = success_action.lower()
        if self.success_action not in OcrTask.SUCCESS_ACTIONS:
            raise AutoOcrSchedulerError(
                'Invalid success action: {}. Must be one of {}'.format(
                    self.success_action, ', '.join(OcrTask.SUCCESS_ACTIONS)))
        self.archive_dir = local.path(archive_dir) if archive_dir else None
        if self.success_action == OcrTask.ON_SUCCESS_ARCHIVE and not self.archive_dir:
            raise AutoOcrSchedulerError(
                'Archive directory required for success action {}'.format(
                    self.success_action))

        self.current_tasks = {}
        self.current_outputs = set()

        # Create a Threadpool to run OCR tasks on
        self.threadpool = ThreadPoolExecutor(max_workers=3)

        # Wire up an AutoOcrWatchdogHandler
        watchdog_handler = AutoOcrWatchdogHandler(self.on_file_touched,
                                                  self.on_file_deleted)

        # Schedule watchdog to observe the input directory
        self.observer = Observer()
        self.observer.schedule(watchdog_handler,
                               self.input_dir,
                               recursive=True)
        self.observer.start()
        self.logger.warn('Watching %s', self.input_dir)

        # Process existing files in input directory, if requested
        if process_existing_files:
            self.threadpool.submit(self.walk_existing_files)

    def shutdown(self):
        # Shut down the feed of incoming watchdog events
        if self.observer:
            self.logger.debug('Shutting down filesystem watchdog...')
            self.observer.unschedule_all()
            self.observer.stop()

        # Cancel all outstanding cancelable tasks
        self.logger.debug('Canceling all %d in-flight tasks...',
                          len(self.current_tasks))
        tasks = [task for _, task in self.current_tasks.items()]
        for task in tasks:
            task.cancel()

        # Wait for the threadpool to clean up
        if self.threadpool:
            self.logger.debug('Shutting down threadpool...')
            self.threadpool.shutdown()
            self.threadpool = None

        # Wait for the watchdog to clean up
        if self.observer:
            self.logger.debug('Cleaning up filesystem watchdog...')
            self.observer.join()
            self.observer = None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.shutdown()
        return False

    def _map_output_path(self, input_path):
        if self.output_mode == AutoOcrScheduler.MIRROR_TREE:
            return self.output_dir / (input_path - self.input_dir)
        else:
            assert self.output_mode == AutoOcrScheduler.SINGLE_FOLDER
            output_path = self.output_dir / (input_path.name)
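            # Uniquify collisions with existing files or in-flight outputs by
            # appending the date and an incrementing counter before the extension.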
            unique = 1
            if output_path.exists() or output_path in self.current_outputs:
                suffix = '.{}.{}{}'.format(datetime.now().strftime('%Y%m%d'),
                                           unique, output_path.suffix)
                output_path = output_path.with_suffix(suffix)

            while output_path.exists() or output_path in self.current_outputs:
                unique = unique + 1
                output_path = output_path.with_suffix('.{}{}'.format(
                    unique, output_path.suffix),
                                                      depth=2)
            return output_path

    def _map_archive_path(self, input_path):
        return self.archive_dir / (input_path - self.input_dir)

    def _get_config_path(self, input_path):
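        # Walk up from the input file's directory towards input_dir looking for an
        # 'ocr.config'; fall back to the copy in the global config_dir, if any.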
        assert (input_path - self.input_dir)[0] != '..'
        config_path = input_path.parent / 'ocr.config'
        while True:
            if config_path.exists():
                return config_path
            if config_path.parent == self.input_dir:
                break
            config_path = config_path.parent.parent / 'ocr.config'

        config_path = self.config_dir / 'ocr.config'
        if config_path.exists():
            return config_path
        return None

    def queue_path(self, path):
        output_path = self._map_output_path(path)
        config_file = self._get_config_path(path)
        archive_file = self._map_archive_path(path)
        task = OcrTask(path,
                       output_path,
                       self.threadpool.submit,
                       self.on_task_done,
                       config_file=config_file,
                       success_action=self.success_action,
                       archive_path=archive_file)
        self.current_tasks[path] = task
        self.current_outputs.add(output_path)

    def walk_existing_files(self):
        self.logger.debug('Enumerating existing input files...')

        def keep_file(file):
            return any([
                fnmatch.fnmatch(file, pattern)
                for pattern in AutoOcrWatchdogHandler.MATCH_PATTERNS
            ])

        for file in self.input_dir.walk(filter=keep_file):
            self.on_file_touched(file)

    def on_file_touched(self, path):
        if path in self.current_tasks:
            self.current_tasks[path].touch()
        else:
            self.queue_path(path)

    def on_file_deleted(self, path):
        if path in self.current_tasks:
            self.current_tasks[path].cancel()

    def on_task_done(self, task):
        self.current_outputs.remove(task.output_path)
        del self.current_tasks[task.input_path]
Ejemplo n.º 59
0
class DirectEdit(Worker):
    localScanFinished = pyqtSignal()
    directEditUploadCompleted = pyqtSignal(str)
    openDocument = pyqtSignal(str, int)
    editDocument = pyqtSignal(str, int)
    directEditLockError = pyqtSignal(str, str, str)
    directEditConflict = pyqtSignal(str, Path, str)
    directEditError = pyqtSignal(str, list)
    directEditForbidden = pyqtSignal(str, str, str)
    directEditReadonly = pyqtSignal(str)
    directEditStarting = pyqtSignal(str, str)
    directEditLocked = pyqtSignal(str, str, datetime)

    def __init__(self, manager: "Manager", folder: Path) -> None:
        super().__init__()

        self._manager = manager
        self._folder = folder
        self.url = Options.protocol_url
        self.lock = Lock()

        self.autolock = self._manager.autolock_service
        self.use_autolock = self._manager.get_direct_edit_auto_lock()
        self._event_handler: Optional[DriveFSEventHandler] = None
        self._metrics = {"edit_files": 0}
        self._observer: Optional[Observer] = None
        self.local = LocalClient(self._folder)
        self._upload_queue: Queue = Queue()
        self._lock_queue: Queue = Queue()
        self._error_queue = BlacklistQueue()
        self._stop = False
        self.watchdog_queue: Queue = Queue()

        self.thread.started.connect(self.run)
        self.autolock.orphanLocks.connect(self._autolock_orphans)
        self._manager.directEdit.connect(self.edit)

        # Notification signals
        self.directEditLockError.connect(
            self._manager.notification_service._directEditLockError
        )
        self.directEditStarting.connect(
            self._manager.notification_service._directEditStarting
        )
        self.directEditForbidden.connect(
            self._manager.notification_service._directEditForbidden
        )
        self.directEditReadonly.connect(
            self._manager.notification_service._directEditReadonly
        )
        self.directEditLocked.connect(
            self._manager.notification_service._directEditLocked
        )
        self.directEditUploadCompleted.connect(
            self._manager.notification_service._directEditUpdated
        )

    @pyqtSlot(object)
    def _autolock_orphans(self, locks: List[Path]) -> None:
        log.debug(f"Orphans lock: {locks!r}")
        for lock in locks:
            if self._folder in lock.parents:
                log.info(f"Should unlock {lock!r}")
                if not lock.exists():
                    self.autolock.orphan_unlocked(lock)
                    continue

                ref = self.local.get_path(lock)
                self._lock_queue.put((ref, "unlock_orphan"))

    def autolock_lock(self, src_path: Path) -> None:
        ref = self.local.get_path(src_path)
        self._lock_queue.put((ref, "lock"))

    def autolock_unlock(self, src_path: Path) -> None:
        ref = self.local.get_path(src_path)
        self._lock_queue.put((ref, "unlock"))

    def start(self) -> None:
        self._stop = False
        super().start()

    def stop(self) -> None:
        super().stop()
        self._stop = True

    def stop_client(self, message: str = None) -> None:
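        # Transfer callback handed to Remote.download()/get_blob(): raising
        # ThreadInterrupt here aborts an in-flight transfer once stop() was requested.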
        if self._stop:
            raise ThreadInterrupt()

    def _is_valid_folder_name(
        self, name: str, pattern: Pattern = re.compile(f"^{DOC_UID_REG}_")
    ) -> bool:
        """
        Return True if the given *name* is a valid document UID followed by the xpath.
        As we cannot guess the xpath used, we just check the name starts with "UID_".
        Example: 19bf2a19-e95b-4cfb-8fd7-b45e1d7d022f_file-content
        """
        # Prevent TypeError when the given name is None
        if not name:
            return False

        return bool(pattern.match(name))

    @tooltip("Clean up folder")
    def _cleanup(self) -> None:
        """
        - Unlock any remaining doc that has not been unlocked
        - Upload forgotten changes
        - Remove obsolete folders
        """

        if not self.local.exists(ROOT):
            self._folder.mkdir(exist_ok=True)
            return

        def purge(rel_path: Path) -> None:
            """Helper to skip errors while deleting a folder and its content."""
            path = self.local.abspath(rel_path)
            log.debug(f"Removing {path!r}")
            shutil.rmtree(path, ignore_errors=True)

        log.info("Cleanup DirectEdit folder")

        for child in self.local.get_children_info(ROOT):
            # We need a folder
            if not child.folderish:
                log.debug(f"Skipping clean-up of {child.path!r} (not a folder)")
                continue

            # We also need a valid folder name
            if not self._is_valid_folder_name(child.name):
                log.debug(f"Skipping clean-up of {child.path!r} (invalid folder name)")
                continue

            children = self.local.get_children_info(child.path)
            if not children:
                purge(child.path)
                continue

            ref = children[0].path
            try:
                details = self._extract_edit_info(ref)
            except NotFound:
                # Engine is not known anymore
                purge(child.path)
                continue

            try:
                # Don't update if the digests are the same
                info = self.local.get_info(ref)
                current_digest = info.get_digest(digest_func=details.digest_func)
                if current_digest != details.digest:
                    log.warning(
                        "Document has been modified and "
                        "not synchronized, add to upload queue"
                    )
                    self._upload_queue.put(ref)
                    continue
            except Exception:
                log.exception("Unhandled clean-up error")
                continue

            # Place to handle reopening of an interrupted DirectEdit
            purge(child.path)

    def __get_engine(self, url: str, user: str = None) -> Optional["Engine"]:
        if not url:
            return None

        url = simplify_url(url)
        for engine in self._manager.engines.values():
            bind = engine.get_binder()
            server_url = bind.server_url.rstrip("/")
            if server_url == url and (not user or user == bind.username):
                return engine

        # Some backends are case-insensitive
        if not user:
            return None

        user = user.lower()
        for engine in self._manager.engines.values():
            bind = engine.get_binder()
            server_url = simplify_url(bind.server_url)
            if server_url == url and user == bind.username.lower():
                return engine

        return None

    def _get_engine(
        self, server_url: str, doc_id: str = None, user: str = None
    ) -> Optional["Engine"]:
        engine = self.__get_engine(server_url, user=user)

        if not engine:
            values = [force_decode(user) if user else "Unknown", server_url, APP_NAME]
            log.warning(
                f"No engine found for user {user!r} on server {server_url!r}, "
                f"doc_id={doc_id!r}"
            )
            self.directEditError.emit("DIRECT_EDIT_CANT_FIND_ENGINE", values)
        elif engine.has_invalid_credentials():
            # Ping the user again in case it is not obvious
            engine.invalidAuthentication.emit()
            engine = None

        return engine

    def _download(
        self,
        engine: "Engine",
        info: NuxeoDocumentInfo,
        file_path: Path,
        file_out: Path,
        blob: Blob,
        xpath: str,
        url: str = None,
    ) -> Path:
        # Close to the processor method - should we try to refactor?
        pair = None
        kwargs: Dict[str, Any] = {}

        if blob.digest:
            # The digest is available in the Blob, use it and disable parameters check
            # as 'digest' is not a recognized param for the Blob.Get operation.
            kwargs["digest"] = blob.digest
            kwargs["check_params"] = False

            pair = engine.dao.get_valid_duplicate_file(blob.digest)

        if pair:
            existing_file_path = engine.local.abspath(pair.local_path)
            try:
                # copyfile() is used to prevent metadata copy
                shutil.copyfile(existing_file_path, file_out)
            except FileNotFoundError:
                pair = None
            else:
                log.info(
                    f"Local file matches remote digest {blob.digest!r}, "
                    f"copied it from {existing_file_path!r}"
                )
                if pair.is_readonly():
                    log.info(f"Unsetting readonly flag on copied file {file_out!r}")
                    unset_path_readonly(file_out)

        if not pair:
            if url:
                try:
                    engine.remote.download(
                        quote(url, safe="/:"),
                        file_path,
                        file_out,
                        blob.digest,
                        callback=self.stop_client,
                        is_direct_edit=True,
                        engine_uid=engine.uid,
                    )
                finally:
                    engine.dao.remove_transfer("download", file_path)
            else:
                engine.remote.get_blob(
                    info,
                    xpath=xpath,
                    file_out=file_out,
                    callback=self.stop_client,
                    **kwargs,
                )

        return file_out

    def _get_info(self, engine: "Engine", doc_id: str) -> Optional[NuxeoDocumentInfo]:
        try:
            doc = engine.remote.fetch(
                doc_id, headers={"fetch-document": "lock"}, enrichers=["permissions"]
            )
        except Forbidden:
            msg = (
                f" Access to the document {doc_id!r} on server {engine.hostname!r}"
                f" is forbidden for user {engine.remote_user!r}"
            )
            log.warning(msg)
            self.directEditForbidden.emit(doc_id, engine.hostname, engine.remote_user)
            return None
        except Unauthorized:
            engine.set_invalid_credentials()
            return None
        except NotFound:
            values = [doc_id, engine.hostname]
            self.directEditError.emit("DIRECT_EDIT_NOT_FOUND", values)
            return None

        doc.update(
            {
                "root": engine.remote.base_folder_ref,
                "repository": engine.remote.client.repository,
            }
        )
        info = NuxeoDocumentInfo.from_dict(doc)

        if info.is_version:
            self.directEditError.emit(
                "DIRECT_EDIT_VERSION", [info.version, info.name, info.uid]
            )
            return None

        if info.lock_owner and info.lock_owner != engine.remote_user:
            # Retrieve the user full name, will be cached
            owner = engine.get_user_full_name(info.lock_owner)

            log.info(
                f"Doc {info.name!r} was locked by {owner} ({info.lock_owner}) "
                f"on {info.lock_created}, edit not allowed"
            )
            self.directEditLocked.emit(info.name, owner, info.lock_created)
            return None
        elif info.permissions and "Write" not in info.permissions:
            log.info(f"Doc {info.name!r} is readonly for you, edit not allowed")
            self.directEditReadonly.emit(info.name)
            return None

        return info

    def _prepare_edit(
        self, server_url: str, doc_id: str, user: str = None, download_url: str = None
    ) -> Optional[Path]:
        start_time = current_milli_time()
        engine = self._get_engine(server_url, doc_id=doc_id, user=user)
        if not engine:
            return None

        # Avoid keeping any link with the engine; remote docs are not cached,
        # so we can do that
        info = self._get_info(engine, doc_id)
        if not info:
            return None

        url = None
        url_info: Dict[str, str] = {}
        if download_url:
            import re

            urlmatch = re.match(
                r"([^\/]+\/){3}(?P<xpath>.+)\/(?P<filename>[^\?]*).*",
                download_url,
                re.I,
            )
            if urlmatch:
                url_info = urlmatch.groupdict()

            url = server_url
            if not url.endswith("/"):
                url += "/"
            url += download_url

        xpath = url_info.get("xpath")
        if not xpath and info.doc_type == "Note":
            xpath = "note:note"
        elif not xpath or xpath == "blobholder:0":
            xpath = "file:content"
        blob = info.get_blob(xpath)
        if not blob:
            log.warning(
                f"No blob associated with xpath {xpath!r} for file {info.path!r}"
            )
            return None

        filename = blob.name
        self.directEditStarting.emit(engine.hostname, filename)

        # Create local structure
        folder_name = safe_filename(f"{doc_id}_{xpath}")
        dir_path = self._folder / folder_name
        dir_path.mkdir(exist_ok=True)

        log.info(f"Editing {filename!r}")
        file_path = dir_path / filename
        tmp_folder = self._folder / f"{doc_id}.dl"
        tmp_folder.mkdir(parents=True, exist_ok=True)
        file_out = tmp_folder / filename

        try:
            # Download the file
            tmp_file = self._download(
                engine, info, file_path, file_out, blob, xpath, url=url
            )
            if tmp_file is None:
                log.warning("Download failed")
                return None
        except CONNECTION_ERROR:
            log.warning("Unable to perform DirectEdit", exc_info=True)
            return None

        # Set the remote_id
        dir_path = self.local.get_path(dir_path)
        self.local.set_remote_id(dir_path, doc_id)
        self.local.set_remote_id(dir_path, server_url, name="nxdirectedit")

        if user:
            self.local.set_remote_id(dir_path, user, name="nxdirectedituser")

        if xpath:
            self.local.set_remote_id(dir_path, xpath, name="nxdirecteditxpath")

        if blob.digest:
            self.local.set_remote_id(dir_path, blob.digest, name="nxdirecteditdigest")
            # Set digest algorithm if not sent by the server
            digest_algorithm = blob.digest_algorithm
            if not digest_algorithm:
                digest_algorithm = get_digest_algorithm(blob.digest)
            if not digest_algorithm:
                raise UnknownDigest(blob.digest)
            self.local.set_remote_id(
                dir_path,
                digest_algorithm.encode("utf-8"),
                name="nxdirecteditdigestalgorithm",
            )
        self.local.set_remote_id(dir_path, filename, name="nxdirecteditname")

        safe_rename(tmp_file, file_path)

        timing = current_milli_time() - start_time
        self.openDocument.emit(filename, timing)
        return file_path

    @pyqtSlot(str, str, str, str)
    def edit(
        self, server_url: str, doc_id: str, user: str = None, download_url: str = None
    ) -> None:
        if not Feature.direct_edit:
            self.directEditError.emit("DIRECT_EDIT_NOT_ENABLED", [])
            return

        log.info(f"Direct Editing doc {doc_id!r} on {server_url!r}")
        try:
            # Download the file
            file_path = self._prepare_edit(
                server_url, doc_id, user=user, download_url=download_url
            )
            log.debug(f"Direct Edit preparation returned file path {file_path!r}")

            # Launch it
            if file_path:
                self._manager.open_local_file(file_path)
        except OSError as e:
            if e.errno == errno.EACCES:
                # Open file anyway
                if e.filename is not None:
                    self._manager.open_local_file(e.filename)
            else:
                raise e

    def _extract_edit_info(self, ref: Path) -> DirectEditDetails:
        dir_path = ref.parent
        server_url = self.local.get_remote_id(dir_path, name="nxdirectedit")
        if not server_url:
            raise NotFound()

        user = self.local.get_remote_id(dir_path, name="nxdirectedituser")
        engine = self._get_engine(server_url, user=user)
        if not engine:
            raise NotFound()

        uid = self.local.get_remote_id(dir_path)
        if not uid:
            raise NotFound()

        digest_algorithm = self.local.get_remote_id(
            dir_path, name="nxdirecteditdigestalgorithm"
        )
        digest = self.local.get_remote_id(dir_path, name="nxdirecteditdigest")
        if not (digest and digest_algorithm):
            raise NotFound()

        xpath = self.local.get_remote_id(dir_path, name="nxdirecteditxpath")
        editing = self.local.get_remote_id(dir_path, name="nxdirecteditlock") == "1"

        details = DirectEditDetails(
            uid=uid,
            engine=engine,
            digest_func=digest_algorithm,
            digest=digest,
            xpath=xpath,
            editing=editing,
        )
        log.debug(f"DirectEdit {details}")
        return details

    def force_update(self, ref: Path, digest: str) -> None:
        dir_path = ref.parent
        self.local.set_remote_id(
            dir_path, digest.encode("utf-8"), name="nxdirecteditdigest"
        )
        self._upload_queue.put(ref)

    def _lock(self, remote: Remote, uid: str) -> bool:
        """Lock a document."""
        try:
            remote.lock(uid)
        except HTTPError as exc:
            if exc.status in (codes.CONFLICT, codes.INTERNAL_SERVER_ERROR):
                # CONFLICT if NXP-24359 is part of the current server HF
                # else INTERNAL_SERVER_ERROR is raised on double lock.
                username = re.findall(r"Document already locked by (.+):", exc.message)
                if username:
                    if username[0] == remote.user_id:
                        # Already locked by the same user
                        log.debug("You already locked that document!")
                        return False
                    else:
                        # Already locked by someone else
                        raise DocumentAlreadyLocked(username[0])
            raise exc
        else:
            # Document locked!
            return True

    def _handle_lock_queue(self) -> None:
        errors = []

        while "items":
            try:
                item = self._lock_queue.get_nowait()
            except Empty:
                break

            ref, action = item
            log.debug(f"Handling DirectEdit lock queue: action={action}, ref={ref!r}")
            uid = ""

            try:
                details = self._extract_edit_info(ref)
                uid = details.uid
                remote = details.engine.remote
                if action == "lock":
                    self._lock(remote, uid)
                    self.local.set_remote_id(ref.parent, b"1", name="nxdirecteditlock")
                    # Emit the lock signal only when the lock is really set
                    self._send_lock_status(ref)
                    self.autolock.documentLocked.emit(ref.name)
                    continue

                try:
                    remote.unlock(uid)
                except NotFound:
                    purge = True
                else:
                    purge = False

                if purge or action == "unlock_orphan":
                    path = self.local.abspath(ref)
                    log.debug(f"Remove orphan: {path!r}")
                    self.autolock.orphan_unlocked(path)
                    shutil.rmtree(path, ignore_errors=True)
                    continue

                self.local.remove_remote_id(ref.parent, name="nxdirecteditlock")
                # Emit the signal only when the unlock is done
                self._send_lock_status(ref)
                self.autolock.documentUnlocked.emit(ref.name)
            except ThreadInterrupt:
                raise
            except NotFound:
                log.debug(f"Document {ref!r} no more exists")
            except DocumentAlreadyLocked as exc:
                log.warning(f"Document {ref!r} already locked by {exc.username}")
                self.directEditLockError.emit(action, ref.name, uid)
            except CONNECTION_ERROR:
                # Try again in 30s
                log.warning(
                    f"Connection error while trying to {action} document {ref!r}",
                    exc_info=True,
                )
                errors.append(item)
            except Exception:
                log.exception(f"Cannot {action} document {ref!r}")
                self.directEditLockError.emit(action, ref.name, uid)

        # Requeue errors
        for item in errors:
            self._lock_queue.put(item)

    def _send_lock_status(self, ref: str) -> None:
        manager = self._manager
        for engine in manager.engines.values():
            dao = engine.dao
            state = dao.get_normal_state_from_remote(ref)
            if state:
                path = engine.local_folder / state.local_path
                manager.osi.send_sync_status(state, path)

    def _handle_upload_queue(self) -> None:
        while "items":
            try:
                ref = self._upload_queue.get_nowait()
            except Empty:
                break

            os_path = self.local.abspath(ref)

            if os_path.is_dir():
                # The upload file is a folder?!
                # It *may* happen when the user DirectEdit'ed a ZIP file,
                # the OS opened it and automatically decompressed it in-place.
                log.debug(f"Skipping DirectEdit queue ref {ref!r} (folder)")
                continue

            log.debug(f"Handling DirectEdit queue ref: {ref!r}")

            details = self._extract_edit_info(ref)
            xpath = details.xpath
            engine = details.engine
            remote = engine.remote

            if not xpath:
                xpath = "file:content"
                log.info(f"DirectEdit on {ref!r} has no xpath, defaulting to {xpath!r}")

            try:
                # Don't update if the digests are the same
                info = self.local.get_info(ref)
                current_digest = info.get_digest(digest_func=details.digest_func)
                if not current_digest or current_digest == details.digest:
                    continue

                start_time = current_milli_time()
                log.debug(
                    f"Local digest: {current_digest} is different from the recorded "
                    f"one: {details.digest} - modification detected for {ref!r}"
                )

                if not details.editing:
                    # Check the remote hash to prevent data loss
                    remote_info = remote.get_info(details.uid)
                    if remote_info.is_version:
                        log.warning(
                            f"Unable to process DirectEdit on {remote_info.name} "
                            f"({details.uid}) because it is a version."
                        )
                        continue
                    remote_blob = remote_info.get_blob(xpath) if remote_info else None
                    if remote_blob and remote_blob.digest != details.digest:
                        log.debug(
                            f"Remote digest: {remote_blob.digest} is different from the "
                            f"recorded  one: {details.digest} - conflict detected for {ref!r}"
                        )
                        self.directEditConflict.emit(ref.name, ref, remote_blob.digest)
                        continue

                log.info(f"Uploading file {os_path!r}")

                if xpath == "note:note":
                    kwargs: Dict[str, Any] = {"applyVersioningPolicy": True}
                    cmd = "NuxeoDrive.AttachBlob"
                else:
                    kwargs = {"xpath": xpath, "void_op": True}
                    cmd = "Blob.AttachOnDocument"

                remote.upload(
                    os_path,
                    command=cmd,
                    document=remote.check_ref(details.uid),
                    engine_uid=engine.uid,
                    is_direct_edit=True,
                    **kwargs,
                )

                # Update hash value
                dir_path = ref.parent
                self.local.set_remote_id(
                    dir_path, current_digest, name="nxdirecteditdigest"
                )
                timing = current_milli_time() - start_time
                self.directEditUploadCompleted.emit(os_path.name)
                self.editDocument.emit(ref.name, timing)
            except ThreadInterrupt:
                raise
            except NotFound:
                # Not found on the server, just skip it
                pass
            except Forbidden:
                msg = (
                    "Upload queue error:"
                    f" Access to the document {ref!r} on server {engine.hostname!r}"
                    f" is forbidden for user {engine.remote_user!r}"
                )
                log.warning(msg)
                self.directEditForbidden.emit(
                    str(ref), engine.hostname, engine.remote_user
                )
            except CONNECTION_ERROR:
                # Try again in 30s
                log.warning(f"Connection error while uploading {ref!r}", exc_info=True)
                self._error_queue.push(ref)
            except Exception as e:
                if (
                    isinstance(e, HTTPError)
                    and e.status == 500
                    and "Cannot set property on a version" in e.message
                ):
                    log.warning(
                        f"Unable to process DirectEdit on {ref} "
                        f"({details}) because it is a version."
                    )
                    continue
                # Try again in 30s
                log.exception(f"DirectEdit unhandled error for ref {ref!r}")
                self._error_queue.push(ref)

    def _handle_queues(self) -> None:
        # Lock any document
        self._handle_lock_queue()

        # Unqueue any errors
        for item in self._error_queue.get():
            self._upload_queue.put(item.path)

        # Handle the upload queue
        self._handle_upload_queue()

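        # Drain the filesystem events gathered by the watchdog observer since the last pass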
        while not self.watchdog_queue.empty():
            evt = self.watchdog_queue.get()
            try:
                self.handle_watchdog_event(evt)
            except ThreadInterrupt:
                raise
            except Exception:
                log.exception("Watchdog error")

    def _execute(self) -> None:
        try:
            self._cleanup()
            self._setup_watchdog()

            while True:
                self._interact()
                try:
                    self._handle_queues()
                except NotFound:
                    continue
                except ThreadInterrupt:
                    raise
                except Exception:
                    log.exception("Unhandled DirectEdit error")
                sleep(0.5)
        except ThreadInterrupt:
            raise
        finally:
            with self.lock:
                self._stop_watchdog()

    def get_metrics(self) -> Metrics:
        metrics = super().get_metrics()
        if self._event_handler:
            metrics["fs_events"] = self._event_handler.counter
        return {**metrics, **self._metrics}

    @tooltip("Setup watchdog")
    def _setup_watchdog(self) -> None:
        log.info(f"Watching FS modification on {self._folder!r}")
        self._event_handler = DriveFSEventHandler(self)
        self._observer = Observer()
        self._observer.schedule(self._event_handler, str(self._folder), recursive=True)
        self._observer.start()

    def _stop_watchdog(self) -> None:
        log.info("Stopping the FS observer thread")
        try:
            self._observer.stop()
            self._observer.join()
        except Exception:
            log.warning("Cannot stop the FS observer")
        finally:
            self._observer = None

    @tooltip("Handle watchdog event")
    def handle_watchdog_event(self, evt: FileSystemEvent) -> None:
        src_path = normalize_event_filename(evt.src_path)

        # Event on the folder by itself
        if src_path.is_dir():
            return

        if self.local.is_temp_file(src_path):
            return

        log.info(f"Handling watchdog event [{evt.event_type}] on {evt.src_path!r}")

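        # Many editors save by writing a temporary file and moving it over the original,
        # so for "moved" events the destination path is the file we care about.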
        if evt.event_type == "moved":
            src_path = normalize_event_filename(evt.dest_path)

        ref = self.local.get_path(src_path)
        dir_path = self.local.get_path(src_path.parent)
        name = self.local.get_remote_id(dir_path, name="nxdirecteditname")

        if not name:
            return

        editing = self.local.get_remote_id(dir_path, name="nxdirecteditlock") == "1"

        if force_decode(name) != src_path.name:
            if _is_lock_file(src_path.name):
                if evt.event_type == "created" and self.use_autolock and not editing:
                    """
                    [Windows 10] The original file is not modified until
                    we specifically click on the save button. Instead, it
                    applies changes to the temporary file.
                    So the auto-lock does not happen because there is no
                    'modified' event on the original file.
                    Here we try to address that by checking the lock state
                    and using the lock if not already done.
                    """
                    # Recompute the path from 'dir/temp_file' -> 'dir/file'
                    path = src_path.parent / name
                    self.autolock.set_autolock(path, self)
                elif evt.event_type == "deleted":
                    # Free the xattr so that _cleanup() can do its work
                    self.local.remove_remote_id(dir_path, name="nxdirecteditlock")
            return

        if self.use_autolock and not editing:
            self.autolock.set_autolock(src_path, self)

        if evt.event_type != "deleted":
            self._upload_queue.put(ref)
Ejemplo n.º 60
0
    if len(sys.argv) == 2:
        # create the event handler
        patterns = ["*.json"]  # files we want to handle (json to insert into the DB and snort logs to convert to json)
        ignore_patterns = None
        ignore_directories = True
        case_sensitive = True
        manejador = PatternMatchingEventHandler(patterns, ignore_patterns,
                                                ignore_directories,
                                                case_sensitive)

        # these functions are invoked when the corresponding event is fired
        manejador.on_created = on_created
        manejador.on_modified = on_modified

        # create the observer
        path = sys.argv[1]
        recursivo = True
        monitor = Observer()
        monitor.schedule(manejador, path, recursive=recursivo)

        # start monitoring
        monitor.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            monitor.stop()
            monitor.join()
    else:
        print("Uso: [insertJson.py] [path logs snort]")