Example #1
    def add_path(self, name, path):
        if name == 'default':
            self._base = path
        self.paths[name] = path
        if self.instant_reload:
            observer = Observer()
            observer.schedule(self.reload_handler, path=path, recursive=True)
            observer.start()
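Example 1 starts a new Observer for every registered path and keeps no reference to it, so the watcher threads can never be stopped. A minimal sketch of the surrounding class it implies; the names _ReloadHandler, PathRegistry, and close are hypothetical, added only for illustration:

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class _ReloadHandler(FileSystemEventHandler):
    """Hypothetical stand-in for the reload_handler the snippet assumes."""
    def on_any_event(self, event):
        print("change detected: %s" % event.src_path)


class PathRegistry(object):
    def __init__(self, instant_reload=True):
        self.paths = {}
        self.instant_reload = instant_reload
        self.reload_handler = _ReloadHandler()
        self._observers = []  # keep references so the threads can be stopped

    def add_path(self, name, path):
        if name == 'default':
            self._base = path
        self.paths[name] = path
        if self.instant_reload:
            observer = Observer()
            observer.schedule(self.reload_handler, path=path, recursive=True)
            observer.start()
            self._observers.append(observer)

    def close(self):
        for observer in self._observers:
            observer.stop()
            observer.join()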
Example #2
def code():
    # file monitor server
    observer = Observer()

    # monitor .py and .yml files
    patterns = ["*.py", "*demo.yml"]  # the leading '*' is required and must come first
    restart_processor = ServerStarter(
        [
            {"cmd": "rm -rf %s/*.log" % os.path.join(workspace, "log"), "is_daemon": False},
            {"cmd": "./run.py run", "network_port": (config["simple_server"]["port"],)},
        ]
    )
    monitor = SourceCodeMonitor(restart_processor, patterns)
    observer.schedule(monitor, program_dir, recursive=True)
    observer.schedule(monitor, http_api.__path__[0], recursive=True)

    # # rebuild the minified CSS/JS files when a source file changes
    # patterns = ['*.css', '*.js', '*static.yml']     # the leading '*' is required and must come first
    # monitor = SourceCodeMonitor(BuildCssJsProcessor(program_dir, static), patterns, None, 500)
    # observer.schedule(monitor, program_dir, recursive=True)

    # start monitoring
    observer.start()
    try:
        time.sleep(31536000)  # one year
    except KeyboardInterrupt:
        observer.stop()
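SourceCodeMonitor and ServerStarter are project-specific classes that Example 2 does not show. Assuming SourceCodeMonitor is essentially a pattern-filtered handler that triggers a restart callback on any change, a minimal stand-in might look like this (the class name and constructor are hypothetical):

from watchdog.events import PatternMatchingEventHandler

class SourceCodeMonitorSketch(PatternMatchingEventHandler):
    """Hypothetical stand-in: trigger a restart when a matching file changes."""
    def __init__(self, restart, patterns):
        PatternMatchingEventHandler.__init__(self, patterns=patterns,
                                             ignore_directories=True)
        self._restart = restart

    def on_any_event(self, event):
        self._restart()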
Example #3
def main():
    if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
        filename = sys.argv[1]
    else:
        filename = gui.get_path("*.csv", defaultFile="data.csv")
    
    commonPath = os.path.abspath(os.path.split(filename)[0])
    outputFile = os.path.join(commonPath, "odmanalysis.csv")
    
    print "Now watching %s for changes" % filename
    handler = OMDCsvChunkHandler(filename,outputFile)
    observer = Observer()
    observer.schedule(handler, path=commonPath, recursive=False)
    handler.startPCChain()
    observer.start()

    try:
        while True:
            time.sleep(1)
                
    except (KeyboardInterrupt, SystemExit):
        print "Stopping..."
        observer.stop()
        time.sleep(1)
    observer.join()
Example #4
def main():
    arguments = docopt(__doc__, version='Storyline HTTP v0.1')

    if arguments.get('--debug'):
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stderr)
        logger.addHandler(handler)

    app.config.from_object(__name__)
    app.debug = arguments.get('--debug')

    story_path = arguments.get('STORY_PATH', '.')

    global plot
    plot = storyfile.load_plot_from_path(story_path)

    observer = Observer()
    observer.schedule(LoggingEventHandler(), path=story_path, recursive=True)
    observer.schedule(Reloader(story_path), path=story_path, recursive=True)

    observer.start()
    try:
        app.run()
    finally:
        observer.stop()
        observer.join()
Example #5
    def watch(self, source, write=True, package=None, run=False, force=False):
        """Watches a source and recompiles on change."""
        from watchdog.events import FileSystemEventHandler
        from watchdog.observers import Observer

        def recompile(path):
            if os.path.isfile(path) and os.path.splitext(path)[1] in code_exts:
                self.compile_path(path, write, package, run, force)

        class watcher(FileSystemEventHandler):
            def on_modified(_, event):
                recompile(event.src_path)
            def on_created(_, event):
                recompile(event.src_path)

        source = fixpath(source)

        self.console.show("Watching        "+showpath(source)+" ...")
        self.console.print("(press Ctrl-C to end)")

        observer = Observer()
        observer.schedule(watcher(), source, recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(.1)
        except KeyboardInterrupt:
            pass
        finally:
            observer.stop()
            observer.join()
Example #6
def main(): 
    global badExtensionCounter, failedFlag, pool, failedProcessCounter  # , db
    
    sql_setup() # Set up the SQL database / check whether it already exists
    
    # Initiate File Path Handler
    observer = Observer()
    observer.schedule(MyHandler(), path=file_path, recursive=True)
    observer.start()
    
    cpuCount = multiprocessing.cpu_count() # Count all available CPUs
    print("\nTotal CPU count: %d" % cpuCount)
    pool = multiprocessing.Pool(4, worker, (processQueue,)) # Create 4 child processes to handle all queued elements
    active = multiprocessing.active_children() # All active child processes
    print("Total number of active child processes: %s\n" % str(active))
    
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pool.terminate() # Stop all child processes
        pool.join() # Join the processes with parent and terminate
        active = multiprocessing.active_children() # All active child processes, list should be empty at this point.
        print "\nTotal number of active child processes: %s\n"%(str(active))
        shutdown() # Run shutdown sequence        
        observer.stop()
        observer.join()
        sys.exit(1)
Example #7
def generate_and_observe(args, event):
    while event.isSet():
        # Generate the presentation
        monitor_list = generate(args)
        print("Presentation generated.")

        # Make a list of involved directories
        directories = defaultdict(list)
        for file in monitor_list:
            directory, filename = os.path.split(file)
            directories[directory].append(filename)

        observer = Observer()
        handler = HovercraftEventHandler(monitor_list)
        for directory, files in directories.items():
            observer.schedule(handler, directory, recursive=False)

        observer.start()
        while event.wait(1):
            time.sleep(0.05)
            if handler.quit:
                break

        observer.stop()
        observer.join()
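HovercraftEventHandler is not shown; the loop above only relies on it exposing a quit flag that flips once a monitored file changes. A minimal stand-in under that assumption (name and constructor hypothetical):

from watchdog.events import FileSystemEventHandler

class HovercraftEventHandlerSketch(FileSystemEventHandler):
    """Hypothetical stand-in: set `quit` when a monitored file is modified."""
    def __init__(self, monitor_list):
        self._monitored = set(monitor_list)
        self.quit = False

    def on_modified(self, event):
        if event.src_path in self._monitored:
            self.quit = True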
Example #8
class WatchFile(object):
    def __init__(self, send_msg_func, *args, **kargs):
        self.path = kargs['path'] if 'path' in kargs else '.'
        self.suffix = kargs['suffix'] if 'suffix' in kargs else '*'  # '*' matches any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
        self.send_msg_func = send_msg_func
        self.filename = self.zip_filename = ''

    def run(self):
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def get_data(self, filename):
        data = self._unpack(filename)
        data = str(data)
        print(data, type(data))
        self.send_msg_func(data)

    def _unpack(self, filename):
        # first rename the file so it has a .zip suffix
        # may not work on Linux
        if system == 'Windows':
            filename = filename[2:] if filename.startswith('.\\') else filename
            filename = filename.lstrip()
            new_name = filename.split('.')[0] + '.zip'
            new_name = new_name[1:] if new_name.startswith('\\') else new_name
        elif system == 'Linux':
            new_name = filename

        print('Old name:', filename, ' New name:', new_name)

        self.filename = filename
        self.zip_filename = new_name
        # wait for the operating system to finish creating the file
        time.sleep(3)
        os.rename(filename, new_name)
        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                file = zip_file.open(name, 'r')
                json_data = "".join(file.readlines())
        # change filename back to .sb2
        if new_name.endswith('.zip'):
            os.rename(new_name, filename)

        return self.get_cmd(json_data)

    def get_cmd(self, json_data):
        jsonfy_data = json.loads(json_data)
        child = jsonfy_data['children'][0]
        scripts = child['scripts']
        return scripts
Example #9
File: server.py Project: befks/odoo
class FSWatcher(object):
    def __init__(self):
        self.observer = Observer()
        for path in odoo.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
            if not event.is_directory:
                path = getattr(event, 'dest_path', event.src_path)
                if path.endswith('.py'):
                    try:
                        source = open(path, 'rb').read() + '\n'
                        compile(source, path, 'exec')
                    except SyntaxError:
                        _logger.error('autoreload: python code change detected, SyntaxError in %s', path)
                    else:
                        _logger.info('autoreload: python code updated, autoreload activated')
                        restart()

    def start(self):
        self.observer.start()
        _logger.info('AutoReload watcher running')

    def stop(self):
        self.observer.stop()
        self.observer.join()
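Example 9 passes self to observer.schedule() and overrides dispatch() directly instead of subclassing FileSystemEventHandler: the observer only requires its handler to expose a dispatch(event) method. A minimal self-contained illustration of the same pattern (watched path chosen arbitrarily):

import time
from watchdog.observers import Observer

class DispatchOnly(object):
    def dispatch(self, event):
        # Called by the observer for every event; no on_* routing happens here.
        print(event.event_type, event.src_path)

observer = Observer()
observer.schedule(DispatchOnly(), '.', recursive=True)
observer.start()
try:
    time.sleep(5)
finally:
    observer.stop()
    observer.join()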
Example #10
class PropMTimeWatcher:
    def __init__(self, app_data_folder):
        self._app_data_folder = app_data_folder
        self._observer = Observer()
        self.schedule()

    def schedule(self):
        pref = PropMTimePreferences(self._app_data_folder)
        self._observer.unschedule_all()
        for path, watcher in pref.get_all_paths().items():
            if watcher:
                if os.path.exists(path):
                    event_handler = ModHandler(path, self._app_data_folder)
                    log.info('scheduling watcher : %s' % path)
                    self._observer.schedule(event_handler, path=path, recursive=True)
                else:
                    log.error('Error: "%s" does not exist.\n\nPlease edit the path.\n\nTo do this, click on the %s icon and select "Paths".' %
                              (path, __application_name__))
        self._observer.start()

    def request_exit(self):
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join(TIMEOUT)
        if self._observer.is_alive():
            log.error('observer still alive')
Example #11
def main():
    # Sync any changes that occurred while track-changes.py wasn't running.
    if os.path.isdir("out"):
        shutil.rmtree("out", True)

    if not os.path.isdir("out"):
        os.mkdir("out")
 
    startup_changes.sync_offline_changes("posts", "out")

    print "Watching posts directory for changes... CTRL+C to quit."
    watch_directory = "posts"

    event_handler = MyHandler()

    # Run the watchdog.
    observer = Observer()
    observer.schedule(event_handler, watch_directory, True)
    observer.start()

    """
    Keep the script running or else python closes without stopping the observer
    thread and this causes an error.
    """
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
Example #12
    def __init__(self, config):
        """
        Initialize the watcher, use the config passed from main
        """
        self.config = config


        # List of pending files
        self.pending_files = set()

        self.sync_timer = None

        # Setup our watchdog observer
        observer = Observer()
        observer.schedule(ChangeHandler(self.on_file_changed), path=config.directory, recursive=True)
        observer.start()

        logging.info("Starting change tracker, cmd: {}, dir: {}, delay: {}".format(config.sync_cmd,
                                                                                   config.directory,
                                                                                   config.delay))
        try:
            while True:
                time.sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
Example #13
def stream_video(video_path):

    global VIDEO_BITRATE
    global AUDIO_BITRATE

    create_working_directory()

    head, tail = os.path.split(video_path)
    name = tail.split('.')[0]

    nonce = '-' + ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))

    if '.avi' in video_path:
        args = "-i %s  -vcodec h264 -b %s -acodec libfaac -ab %s -f hls ./.kickflip/%s.m3u8"
        args = args % (video_path, VIDEO_BITRATE, AUDIO_BITRATE, name+nonce)
    else:
        args = "-i %s -f hls -codec copy ./.kickflip/%s.m3u8"
        args = args % (video_path, name+nonce)

    observer = Observer()
    observer.schedule(SegmentHandler(), path='./.kickflip')

    observer.start()
    time.sleep(3) # This is a f*****g hack.
    process = envoy.run('ffmpeg ' + args)
    observer.stop()

    upload_file(video_path)
    return ''
Example #14
def watch(path, handler=None, debug=True):
	import time
	from watchdog.observers import Observer
	from watchdog.events import FileSystemEventHandler

	class Handler(FileSystemEventHandler):
		def on_any_event(self, event):
			if debug:
				print "File {0}: {1}".format(event.event_type, event.src_path)

			if not handler:
				print "No handler specified"
				return

			handler(event.src_path, event.event_type)

	event_handler = Handler()
	observer = Observer()
	observer.schedule(event_handler, path, recursive=True)
	observer.start()
	try:
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		observer.stop()
	observer.join()
Example #15
class ActivityCheck(Thread):

    def __init__(self, period, path, mailhost, fromaddr, toaddrs):
        Thread.__init__(self)
        self.period = int(period)
        self.path = path
        self.activity = False
        self.last_time = datetime.datetime.now()
        self.message_sent = False
        self.subject = 'WARNING : ' + HOSTNAME + ' : ' + 'telecaster monitor activity'
        self.logger = EmailLogger(mailhost, fromaddr, toaddrs, self.subject)
        self.event_handler = ActivityEventHandler(ignore_patterns=IGNORE_PATTERNS)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, path, recursive=True)
        self.observer.start()

    def run(self):        
        while True:
            if not self.event_handler.activity:
                now = datetime.datetime.now()
                delta = now - self.last_time
                if delta.total_seconds() > LOG_MAX_PERIOD or not self.message_sent:
                    self.logger.logger.error('The monitor is NOT recording anymore in ' + self.path + ' ! ')
                    self.last_time = now
                    self.message_sent = True
            else:
                self.event_handler.activity = False
            time.sleep(self.period)

    def stop(self):
        self.observer.stop()
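ActivityEventHandler is project code; the thread above only needs it to expose a boolean activity flag that is set whenever a filesystem event arrives. A stand-in under that assumption, built on PatternMatchingEventHandler (which accepts ignore_patterns):

from watchdog.events import PatternMatchingEventHandler

class ActivityEventHandlerSketch(PatternMatchingEventHandler):
    """Hypothetical stand-in: record that some filesystem activity happened."""
    def __init__(self, ignore_patterns=None):
        PatternMatchingEventHandler.__init__(self, ignore_patterns=ignore_patterns)
        self.activity = False

    def on_any_event(self, event):
        self.activity = True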
Example #16
def main():

    pandoc_path = which("pandoc")
    if not pandoc_path:
        print("pandoc executable must be in the path to be used by pandoc-watch!")
        exit()

    config = Configuration.Instance()

    parseOptions()

    config.setDirContentAndTime(getDirectoryWatchedElements())

    print "Starting pandoc watcher ..."

    while True:
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, os.getcwd(), recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt as err:
            print(str(err))
            observer.stop()

        print "Stopping pandoc watcher ..."

        exit()
Example #17
def _watch(root=u'.', dest=u'_site', pattern=u'**/*.html', exclude=u'_*/**'):

    try:
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
    except ImportError:
        msg = "The build --watch feature requires watchdog. \n"\
            + "Please install it with 'easy_install watchdog'."
        print(msg)
        return None

    class handler(FileSystemEventHandler):
        def on_any_event(self, event):
            exclude_path = os.path.join(os.getcwd(), exclude)
            if not utils.matches_pattern(exclude_path, event.src_path):
                build_files(root=root,
                            dest=dest,
                            pattern=pattern,
                            exclude=exclude)

    observer = Observer()
    observer.schedule(handler(), root, recursive=True)
    observer.start()

    print("Watching '{0}' ...".format(root))

    return observer
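Unlike most examples here, _watch returns the running observer instead of blocking, so the caller owns the stop/join. A sketch of the intended usage, assuming the function above (it returns None when watchdog is not installed):

import time

observer = _watch(root=u'.', dest=u'_site')
if observer is not None:
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()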
Example #18
    def __init__(self, zkconn, root_node_path, conf):
        super(ZkFarmJoiner, self).__init__()
        self.update_remote_timer = None
        self.update_local_timer = None

        self.zkconn = zkconn
        self.conf = conf
        self.node_path = "%s/%s" % (root_node_path, self.myip())

        # force the hostname info key
        info = conf.read()
        info["hostname"] = gethostname()
        conf.write(info)

        zkconn.create(self.node_path, serialize(conf.read()), zc.zk.OPEN_ACL_UNSAFE, EPHEMERAL)

        observer = Observer()
        observer.schedule(self, path=conf.file_path, recursive=True)
        observer.start()

        zkconn.get(self.node_path, self.node_watcher)

        while True:
            with self.cv:
                self.wait()
Example #19
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, extensions=[]):
    """
    Starts a server to render the specified file or directory
    containing a README.
    """
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog
    observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #20
def watch_project(markdown_fn, output_fn, template_fn, render_first=True):
    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            if event.src_path == os.path.abspath(output_fn):
                return
            print('Rendering slides...')
            process_slides(markdown_fn, output_fn, template_fn)

    if render_first:
        process_slides(markdown_fn, output_fn, template_fn)
        
    observer = Observer()
    event_handler = Handler()

    dirname = os.path.dirname(os.path.abspath(markdown_fn))
    
    observer.schedule(event_handler, path=dirname, recursive=True)
    print("Watching for events on {:s}...".format(dirname))
    observer.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #21
class AlertEventHandler(FileSystemEventHandler):
    def __init__(self, sensor_uuid, alert_file, hpc):
        """
        Initializes a filesystem watcher that will watch
        the specified file for changes.
        `alert_file` is the absolute path of the snort alert file.
        `hpc` is the hpfeeds client
        """
        self.sensor_uuid = sensor_uuid
        self.alert_file = alert_file
        self.hpc = hpc
        logger.info('connected to hpfeeds broker {}'.format(hpc.brokername))

        self.observer = Observer()
        self.observer.schedule(self, os.path.dirname(alert_file), False)
        
    @property
    def latest_alert_date(self):
        return safe_unpickle('alert_date.pkl')

    @latest_alert_date.setter
    def latest_alert_date(self, date):
        safe_pickle('alert_date.pkl', date)

    def on_any_event(self, event):
        if (not event.event_type == 'deleted') and (event.src_path == self.alert_file):
            alerts = Alert.from_log(self.sensor_uuid, self.alert_file, self.latest_alert_date)
            if alerts:
                logger.info("submitting {} alerts to {}".format(len(alerts), self.hpc.brokername))
                alerts.sort(key=lambda e: e.date)
                self.latest_alert_date = alerts[-1].date            
                
                for alert in alerts:
                    self.hpc.publish("snort.alerts", alert.to_json())
Example #22
def main():
    if not config['play']['scan']:
        raise Exception('''
            Nothing to scan. Add a path in the config file.

            Example:

                play:
                    scan:
                        -
                            type: shows
                            path: /a/path/to/the/shows
            ''')
    obs = Observer()
    for s in config['play']['scan']:
        event_handler = Handler(
            scan_path=s['path'],
            type_=s['type'],
        )
        obs.schedule(
            event_handler,
            s['path'],
            recursive=True,
        )
    obs.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Example #23
    def __init__(self, input_dir, templates_dir):

        paths = [input_dir, templates_dir]
        threads = []

        try:
            observer = Observer()
            event_handler = WatchEventHandler()

            for i in paths:
                targetPath = str(i)
                observer.schedule(event_handler, targetPath, recursive=True)
                threads.append(observer)

            observer.start()

            signal_watch_init = signal('watch_init')
            signal_watch_init.send(self)

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                wrangler._reporter.log("Stopping with grace and poise", "green")
                observer.stop()
            
            observer.join()
        except:
            return None
Example #24
class Modsync:
    _target = []
    _source = ''
    _observer = None

    def __init__(self):
        pass

    def setSource(self, source):
        self._source = source

    def setTarget(self, target_dir):
        self._target.append(target_dir)

    def getObserver(self):
        return self._observer

    def run(self):

        if not self._source:
            return 0

        self._observer = Observer()
        event_handler = ModsyncEventHandler(self._observer, self._source, self._target)
        self._observer.schedule(event_handler, self._source, recursive=True)
        self._observer.start()
        try:
            time.sleep(2)
            pass
        except KeyboardInterrupt:
            self._observer.stop()
        self._observer.join()
        return 0
Example #25
class Sceduler:

  def __init__(self, config):

    fs = config.get('scheduler', 'fs', 0)
    dest = config.get('store', 'path', 0)
    self.ioqueue = Queue()
    self.iothread = Thread(target=self.ioprocess)
    self.iothread.daemon = True
    self.observer = Observer()
    self.event_handler = IoTask(self.ioqueue, fs, dest)
    self.observer.schedule(self.event_handler, fs, recursive=True)

  def ioprocess(self):
    while True:
      t = self.ioqueue.get()
      try:
        t.process()
      finally:
        self.ioqueue.task_done()

  def start(self):
    self.observer.start()
    self.iothread.start()

  def stop(self):
    self.observer.stop()
    self.iothread.stop()

  def join(self):
     self.observer.join()
     self.iothread.join()
Example #26
	def start(filename, format, callback=None, verbose=False):
		observer = Observer()
		fm = FileMonitor(observer,filename,format,callback,verbose)
		fm._handle()
		observer.schedule(fm, path=os.path.dirname(filename), recursive=False)
		observer.start()
		return fm
Example #27
class RoleBasedAuthorizationProvider(AbstractAuthorizationProvider,
                                     FileSystemEventHandler):

    def __init__(self, role_loader, roles_config_file_path):
        self.lgr = logging.getLogger(FLASK_SECUREST_LOGGER_NAME)
        self.role_loader = role_loader
        self.permissions_by_roles = None
        self.roles_config_file_path = os.path.abspath(roles_config_file_path)
        self.observer = Observer()
        self.observer.schedule(self,
                               path=os.path.dirname(
                                   self.roles_config_file_path),
                               recursive=False)
        self.load_roles_config()
        self.observer.start()

    def load_roles_config(self):
        try:
            with open(self.roles_config_file_path, 'r') as config_file:
                self.permissions_by_roles = yaml.safe_load(config_file.read())
                self.lgr.info('Loading of roles configuration ended '
                              'successfully')
        except (yaml.parser.ParserError, IOError) as e:
            err = 'Failed parsing {role_config_file} file. Error: {error}.' \
                .format(role_config_file=self.roles_config_file_path, error=e)
            self.lgr.warning(err)
            raise ValueError(err)

    def on_modified(self, event):
        if os.path.abspath(event.src_path) == self.roles_config_file_path:
            self.load_roles_config()

    def authorize(self):
        target_endpoint = rest_security.get_endpoint()
        target_method = rest_security.get_http_method()
        roles = self.role_loader.get_roles()
        return self._is_allowed(target_endpoint, target_method, roles) and \
            not self._is_denied(target_endpoint, target_method, roles)

    def _is_allowed(self, target_endpoint, target_method, user_roles):
        return self._evaluate_permission_by_type(target_endpoint,
                                                 target_method, user_roles,
                                                 'allow')

    def _is_denied(self, target_endpoint, target_method, user_roles):
        return self._evaluate_permission_by_type(target_endpoint,
                                                 target_method, user_roles,
                                                 'deny')

    def _evaluate_permission_by_type(self, target_endpoint, target_method,
                                     user_roles, permission_type):
        for role in user_roles:
            role_permissions = self.permissions_by_roles.get(role,
                                                             {'allow': {},
                                                              'deny': {}})
            relevant_permissions = role_permissions.get(permission_type, {})
            if _is_permission_matching(target_endpoint, target_method,
                                       relevant_permissions):
                return True
        return False
Example #28
def start_watchdog():
    event_handler = RankingHandler()
    observer      = Observer()
    log_handler   = LoggingEventHandler()
    log_observer  = Observer()
    try:
        observer.schedule(event_handler, path='./watch')
        observer.start()
        log_observer.schedule(log_handler, path='./watch')
        log_observer.start()
        logging.info("Watching Directory")
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Kill message sent. Aborting")
        observer.stop()
        log_observer.stop()
    except:
        logging.info("Unexpected error: %s" % sys.exc_info()[0])

        observer.stop()
        log_observer.stop()

        error_message(sys.exc_info()[0])

    observer.join()
    log_observer.join()
Example #29
def send_watchdog(self):

    """
    Method of ``Mail``.
    Send mail when a new file is created.
    Alter this method if a different mail-sending condition is needed.
    """

    #r_pipe,w_pipe = Pipe()
    queue = Queue.Queue()
    event = Event()
    #event_handler = GetNameEventHandler(w_pipe,event)
    #send_mail_thread = SendMailThread(r_pipe,event,self)
    event_handler = GetNameEventHandler(queue,event)
    send_mail_thread = SendMailThread(queue,event,self)
    send_mail_thread.start()

    observer = Observer()
    path = self.config['BOOK_PATH']
    observer.schedule(event_handler, path, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        event.set()
        observer.stop()
        #NOTICE:kill threads by force, need to change
        sys.exit()
Example #30
class FileEventHandler(FileSystemEventHandler):
	def __init__(self):
		FileSystemEventHandler.__init__(self)
	
	def on_created(self,event):
		global quiz
		if event.src_path.split('/')[-1] == 'findQuiz':
			with open('question.hortor.net/question/bat/findQuiz', encoding='utf-8') as f:
				quiz=read_question(f)
		elif event.src_path.split('/')[-1] == 'choose':
			sql.sql_write(quiz)
		elif event.src_path.split('/')[-1] == 'fightResult':
			print('Round over')
			time.sleep(3)
			adbshell.back()
			time.sleep(3)
			adbshell.tap('start')

if __name__ == "__main__":
	observer = Observer()
	event_handler = FileEventHandler()
	observer.schedule(event_handler,'question.hortor.net/question/bat/',True)
	print('----- Quiz bot running; start your ranked match -----')
	observer.start()
	try:
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		observer.stop()
	observer.join()
Example #31
        elif (filename.endswith('.pdf')):
            folder = 'Books'
        elif (filename.endswith('.mp4')):
            folder = 'Videos'
        else:
            folder = 'General'
        create_folder_if_not_exists(folderToTrack + "/" + folder)
        dist = folderToTrack + "/" + folder + "/" + filename
        try:
            shutil.move(src, dist)
        except Exception as e:
            print("An error happened")
            print(e)


myHandler = FileSystemEventHandler()
myHandler.on_created = on_created

observer = Observer()
observer.schedule(myHandler, folderToTrack, recursive=True)
observer.start()

try:
    print("Observing")
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()

observer.join()
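The snippet begins partway through an on_created callback, so the function header and the folderToTrack definition are missing. A hypothetical reconstruction of the definitions it assumes, for illustration only:

import os

folderToTrack = os.path.expanduser("~/Downloads")  # hypothetical watch directory

def create_folder_if_not_exists(path):
    # Create the destination subfolder on first use.
    if not os.path.isdir(path):
        os.makedirs(path)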
Example #32
if __name__ == "__main__":

    FLAGS = gflags.FLAGS
    gflags.DEFINE_string("state", ".",
                         "Path to state. Used to update metrics.")
    gflags.DEFINE_string(
        "root", "./runs", "Path to log directory. Will "
        "track changes to any file in this directory.")
    gflags.DEFINE_string("env", "main", "The env group for visdom.")
    gflags.DEFINE_string("suffix", ".metric", "Suffix indicating metric file.")
    gflags.DEFINE_boolean("verbose", False, "")
    FLAGS(sys.argv)

    if FLAGS.verbose:
        logger.setLevel(logging.DEBUG)
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s [DEBUG] - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')

    event_handler = VisdomEventHandler(FLAGS.root, FLAGS.suffix, FLAGS.env)
    observer = Observer()
    observer.schedule(event_handler, FLAGS.root, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #33
class Handler(FileSystemEventHandler):
    def on_any_event(self, event):
        for filename in os.listdir(settings.get('source')):
            # Check the file extension
            extension = filename.split(".")[-1]
            # If this type is known, then
            if len(extension) > 1 and (extension.lower() in target):
                file = str(settings.get('source')) + filename
                new_path = settings.get(target.get(extension.lower())) + filename
                try:
                    os.rename(file, new_path)
                except Exception:
                    pass


# Start watching everything
handle = Handler()
observer = Observer()
observer.schedule(handle, settings.get('source'), recursive=True)
observer.start()

# Keep the script alive; events are handled on the observer thread
try:
    while True:
        time.sleep(1000)
except KeyboardInterrupt:
    observer.stop()

observer.join()
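settings and target are defined elsewhere in the original script. Plausible shapes for both, labeled hypothetical, so the handler above can be read in context (note the trailing slash on 'source': the handler joins paths by string concatenation):

# Hypothetical configuration the handler above assumes:
settings = {
    'source': '/home/user/Downloads/',
    'images': '/home/user/Pictures/',
    'docs': '/home/user/Documents/',
}
target = {
    'jpg': 'images',
    'png': 'images',
    'pdf': 'docs',
}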
Example #34
    if len(sys.argv) == 2 and sys.argv[1] == 'kinect_calibrate':
        calibrate_kinect_mode = True

    if fake_mode:
        from kinect import fakenect as fa
        nect = fa.Fakenect()
    else:
        from kinect import kinect as ki
        nect = ki.Kinect()

    window = tkinter.Tk()
    window.attributes('-fullscreen', True)
    window.bind('<Escape>', lambda e: window.destroy())
    sandbox = sb.Sandbox(config, nect, rd.Renderer(config))

    if calibrate_beamer_mode:
        sandbox.calibrate_beamer(window)
    elif calibrate_kinect_mode:
        sandbox.calibrate_kinect(window)
    else:
        sandbox.execute(window)


if __name__ == '__main__':
    event_handler = ConfigChangeHandler()
    observer = Observer()
    observer.schedule(event_handler, path='config/', recursive=False)
    observer.start()
    main()
Example #35
            self.__close()
            self.__open()
            self.__read()

    def on_created(self, event):
        if event.src_path == self.filename and not event.is_directory:
            logging.info("{0} created, open it".format(self.filename))
            self.__close()
            self.__open()
            self.__read()

    def on_modified(self, event):
        if event.src_path == self.filename:
            logging.info("{0} modified".format(event.src_path))
            self.__read()


if __name__ == '__main__':
    import sys
    observer = Observer()
    watcher = Watcher(sys.argv[1], None)
    w2 = Watcher(sys.argv[2], None)
    observer.schedule(watcher, watcher.dirname, recursive=False)
    observer.schedule(w2, w2.dirname, recursive=False)
    observer.start()
    try:
        observer.join()
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #36
        with open("processing/test.csv", "w"):
            pass
        with open("processing/test1.csv", "w"):
            pass

        global changed
        changed = False


#########################################################################################################

if __name__ == "__main__":

    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler, path='task_folder', recursive=False)
    observer.start()

    models.append(('LR', LogisticRegression()))
    models.append(('LDA', LinearDiscriminantAnalysis()))
    models.append(('KNN', KNeighborsClassifier()))
    models.append(('CART', DecisionTreeClassifier()))
    models.append(('NB', GaussianNB()))
    models.append(('SVM', SVC()))
    names = [
        'metal1', 'metal2', 'metal3', 'metal4', 'metal5', 't1', 'c1', 't2',
        'c2', 'rate'
    ]
    names2 = ['metal1', 'metal2', 'metal3', 'metal4', 'metal5', 'rate']

    while True:
Example #37
            message = "file "
            message += f"{event.src_path} moved to {event.dest_path}"
            print(message)
            sendTasklist.append(createDatapack("move", event.src_path, event.dest_path))

    event_handler = FileSystemEventHandler()
    event_handler.on_created = on_created
    event_handler.on_deleted = on_deleted
    event_handler.on_modified = on_modified
    event_handler.on_moved = on_moved
    my_observer = Observer()
    print("mpslsh", mountPointSlash)
    while not os.path.exists(mountPointSlash):
        time.sleep(0.1)
    time.sleep(1) # must wait for juicefs to finish mounting
    my_observer.schedule(event_handler, mountPointSlash, recursive=True)
    my_observer.start()
    print("Watchdog observer established")

    async def login():
        wsClient = await websockets.connect(wsUrl)
        await wsClient.send(wsAuth)
        return wsClient

    async def wsSender(wsClient):
        while True:
            while len(sendTasklist) == 0:
                await asyncio.sleep(0.1)
            await wsClient.send(sendTasklist.popleft())

    async def wsReceiver(wsClient):
Example #38
        "py": "Python",
    }

    for ext in ("dwg", "dxf"):
        known_types[ext] = "AutoCAD"

    ext_mapper = ExtensionMapper(known_types, "other", api_url, name_regex)
    handler = NewFileHander(
        ext_mapper,
        args.destination,
        excluded,
        delay=args.delay,
        ln_duration=args.ln_duration,
    )
    observer = Observer()
    observer.schedule(handler, path=args.source, recursive=args.recursive)
    observer.start()

    if args.sort_old:
        if args.recursive:
            files_to_sort = [
                os.path.join(dp, f)
                for dp, dn, fn in os.walk(os.path.expanduser(args.source))
                for f in fn
            ]
        else:
            files_to_sort = [
                os.path.join(args.source, f)
                for f in os.listdir(os.path.expanduser(args.source))
            ]
            files_to_sort = [f for f in files_to_sort if os.path.isfile(f)]
Example #39
# coding: utf-8

import sys
import time

from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer

import logging
logging.basicConfig(level=logging.DEBUG)


class MyEventHandler(PatternMatchingEventHandler):
    def on_any_event(self, event):
        logging.debug(event)


event_handler = MyEventHandler(patterns=['*.py', '*.pyc'],
                               ignore_patterns=['version.py'],
                               ignore_directories=True)
observer = Observer()
observer.schedule(event_handler, sys.argv[1], recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
finally:
    observer.stop()
    observer.join()
Example #40
            print("file created:{0}".format(event.src_path))

    def on_deleted(self, event):
        if event.is_directory:
            print("directory deleted:{0}".format(event.src_path))
        else:
            print("file deleted:{0}".format(event.src_path))

    def on_modified(self, event):
        print(
            "-----------------------Step on_modified--------------------------"
        )
        if event.is_directory:
            print("directory modified:{0}".format(event.src_path))
        else:
            print("file modified:{0}".format(event.src_path))


if __name__ == "__main__":
    dpath = "G:\\黄大宝python\\测试目录"
    observer = Observer()
    event_handler = FileEventHandler()
    observer.schedule(event_handler, dpath, True)
    observer.start()
    print('------------------------------------------')
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #41
print("Model loaded.")


class ReloadModelHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        # Reload model
        print("Model update detected. Loading new model.")
        time.sleep(MODEL_UPDATE_LAG_IN_SECONDS)
        restoreVars()
        loadModel()


# Setup watchdog
observer = Observer()
observer.schedule(ReloadModelHandler(), path=MODEL_DIR, recursive=False)
observer.start()


def classify(text):
    text_series = pd.Series([text])
    predict_x = np.array(list(vocab_processor.transform(text_series)))
    print(predict_x)

    y_predicted = [
        p['class'] for p in classifier.predict(predict_x, as_iterable=True)
    ]
    print(y_predicted[0])
    topic = news_classes.class_map[str(y_predicted[0])]
    return topic
Example #42
            print("directory created:{0}".format(event.src_path))
        else:
            print("file created:{0}".format(event.src_path))

    def on_deleted(self, event):
        if event.is_directory:
            print("directory deleted:{0}".format(event.src_path))
        else:
            print("file deleted:{0}".format(event.src_path))

    def on_modified(self, event):
        if event.is_directory:
            print("directory modified:{0}".format(event.src_path))
        else:
            print("file modified:{0}".format(event.src_path))


if __name__ == "__main__":
    observer = Observer()
    event_handler = FileEventHandler()
    # Bind the watch path to the event handler
    observer.schedule(event_handler, '../Data/', True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    # Wait for the observer thread
    observer.join()
Example #43
    def run(self):
        event_handler = FileChange()
        observer = Observer()
        observer.schedule(event_handler, path=self.path, recursive=True)
        observer.start()
Example #44
def start_observer(event_handler: LogFileHandler, logs_path: str) -> Observer:
    observer = Observer()
    observer.schedule(event_handler, logs_path, recursive=False)
    observer.start()
    return observer
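start_observer hands the observer's lifecycle back to the caller. A sketch of a matching handler and shutdown; the LogFileHandler body and the log path are hypothetical:

import time
from watchdog.events import FileSystemEventHandler

class LogFileHandler(FileSystemEventHandler):
    """Hypothetical stand-in: react when the watched log file changes."""
    def on_modified(self, event):
        if not event.is_directory:
            print("log updated:", event.src_path)

observer = start_observer(LogFileHandler(), "/var/log/myapp")  # hypothetical path
try:
    time.sleep(60)
finally:
    observer.stop()
    observer.join()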
Example #45
    l1_proc = Level1(CONFIG.get('Paths', 'tm_archive'),
                     CONFIG.get('Paths', 'fits_archive'))
    l1_files = l1_proc.process_fits_files(files=l0_files)
    logger.debug(l1_files)


if __name__ == '__main__':
    tstart = time.perf_counter()
    observer = Observer()
    path = Path('/home/shane/tm')
    soop_path = Path(CONFIG.get('Paths', 'soop_files'))
    logging_handler = LoggingEventHandler(logger=logger)
    tm_handler = GFTSFileHandler(process_tm, TM_REGEX)

    # TODO: should be a dedicated path from the config
    soop_manager = SOOPManager(soop_path)
    soop_handler = GFTSFileHandler(soop_manager.add_soop_file_to_index,
                                   SOOPManager.SOOP_FILE_REGEX)

    observer.schedule(soop_handler, soop_manager.data_root, recursive=False)
    observer.schedule(logging_handler, path, recursive=True)
    observer.schedule(tm_handler, path, recursive=True)

    observer.start()
    try:
        while True:
            time.sleep(100)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
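GFTSFileHandler pairs a callback with a filename regex. Assuming exactly that behavior, a compact stand-in (name and details hypothetical):

import re
from watchdog.events import FileSystemEventHandler

class GFTSFileHandlerSketch(FileSystemEventHandler):
    """Hypothetical stand-in: invoke `func` for created paths matching `regex`."""
    def __init__(self, func, regex):
        self._func = func
        self._regex = re.compile(regex)

    def on_created(self, event):
        if not event.is_directory and self._regex.search(event.src_path):
            self._func(event.src_path)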
Example #46
class SqliteEventLogStorage(SqlEventLogStorage, ConfigurableClass):
    """SQLite-backed event log storage.

    Users should not directly instantiate this class; it is instantiated by internal machinery when
    ``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.

    This is the default event log storage when none is specified in the ``dagster.yaml``.

    To explicitly specify SQLite for event log storage, you can add a block such as the following
    to your ``dagster.yaml``:

    .. code-block:: YAML

        run_storage:
          module: dagster.core.storage.event_log
          class: SqliteEventLogStorage
          config:
            base_dir: /path/to/dir

    The ``base_dir`` param tells the event log storage where on disk to store the databases. To
    improve concurrent performance, event logs are stored in a separate SQLite database for each
    run.
    """
    def __init__(self, base_dir, inst_data=None):
        """Note that idempotent initialization of the SQLite database is done on a per-run_id
        basis in the body of connect, since each run is stored in a separate database."""
        self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))
        mkdir_p(self._base_dir)

        self._watchers = defaultdict(dict)
        self._obs = Observer()
        self._obs.start()
        self._inst_data = check.opt_inst_param(inst_data, "inst_data",
                                               ConfigurableClassData)

    def upgrade(self):
        all_run_ids = self.get_all_run_ids()
        print(  # pylint: disable=print-call
            "Updating event log storage for {n_runs} runs on disk...".format(
                n_runs=len(all_run_ids)))
        alembic_config = get_alembic_config(__file__)
        for run_id in tqdm(all_run_ids):
            with self.connect(run_id) as conn:
                run_alembic_upgrade(alembic_config, conn, run_id)

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return {"base_dir": str}

    @staticmethod
    def from_config_value(inst_data, config_value):
        return SqliteEventLogStorage(inst_data=inst_data, **config_value)

    def get_all_run_ids(self):
        all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))
        return [
            os.path.splitext(os.path.basename(filename))[0]
            for filename in all_filenames
        ]

    def path_for_run_id(self, run_id):
        return os.path.join(self._base_dir,
                            "{run_id}.db".format(run_id=run_id))

    def conn_string_for_run_id(self, run_id):
        check.str_param(run_id, "run_id")
        return create_db_conn_string(self._base_dir, run_id)

    def _initdb(self, engine):

        alembic_config = get_alembic_config(__file__)

        try:
            SqlEventLogStorageMetadata.create_all(engine)
            engine.execute("PRAGMA journal_mode=WAL;")
            stamp_alembic_rev(alembic_config, engine)
        except (db.exc.DatabaseError, sqlite3.DatabaseError,
                sqlite3.OperationalError) as exc:
            # This is SQLite-specific handling for concurrency issues that can arise when, e.g.,
            # the root nodes of a pipeline execute simultaneously on Airflow with SQLite storage
            # configured and contend with each other to init the db. When we hit the following
            # errors, we know that another process is on the case and it's safe to continue:
            err_msg = str(exc)
            if not ("table event_logs already exists" in err_msg
                    or "database is locked" in err_msg
                    or "table alembic_version already exists" in err_msg
                    or "UNIQUE constraint failed: alembic_version.version_num"
                    in err_msg):
                raise
            else:
                logging.info(
                    "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "
                    "swallowing {str_exc}".format(str_exc=err_msg))

    @contextmanager
    def connect(self, run_id=None):
        check.str_param(run_id, "run_id")

        conn_string = self.conn_string_for_run_id(run_id)
        engine = create_engine(conn_string, poolclass=NullPool)

        if not os.path.exists(self.path_for_run_id(run_id)):
            self._initdb(engine)

        conn = engine.connect()
        try:
            with handle_schema_errors(
                    conn,
                    get_alembic_config(__file__),
                    msg="SqliteEventLogStorage for run {run_id}".format(
                        run_id=run_id),
            ):
                yield conn
        finally:
            conn.close()
        engine.dispose()

    def wipe(self):
        for filename in (glob.glob(os.path.join(self._base_dir, "*.db")) +
                         glob.glob(os.path.join(self._base_dir, "*.db-wal")) +
                         glob.glob(os.path.join(self._base_dir, "*.db-shm"))):
            os.unlink(filename)

    def watch(self, run_id, start_cursor, callback):
        watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback,
                                                 start_cursor)
        self._watchers[run_id][callback] = (
            watchdog,
            self._obs.schedule(watchdog, self._base_dir, True),
        )

    def end_watch(self, run_id, handler):
        if handler in self._watchers[run_id]:
            event_handler, watch = self._watchers[run_id][handler]
            self._obs.remove_handler_for_watch(event_handler, watch)
            del self._watchers[run_id][handler]
Example #47
class ConsolidatedSqliteEventLogStorage(AssetAwareSqlEventLogStorage,
                                        ConfigurableClass):
    """SQLite-backed consolidated event log storage intended for test cases only.

    Users should not directly instantiate this class; it is instantiated by internal machinery when
    ``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.

    To explicitly specify the consolidated SQLite for event log storage, you can add a block such as
    the following to your ``dagster.yaml``:

    .. code-block:: YAML

        run_storage:
          module: dagster.core.storage.event_log
          class: ConsolidatedSqliteEventLogStorage
          config:
            base_dir: /path/to/dir

    The ``base_dir`` param tells the event log storage where on disk to store the database.
    """
    def __init__(self, base_dir, inst_data=None):
        self._base_dir = check.str_param(base_dir, "base_dir")
        self._conn_string = create_db_conn_string(base_dir,
                                                  SQLITE_EVENT_LOG_FILENAME)
        self._secondary_index_cache = {}
        self._inst_data = check.opt_inst_param(inst_data, "inst_data",
                                               ConfigurableClassData)
        self._watchdog = None
        self._watchers = defaultdict(dict)
        self._obs = Observer()
        self._obs.start()

        if not os.path.exists(self.get_db_path()):
            self._init_db()

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return {"base_dir": StringSource}

    @staticmethod
    def from_config_value(inst_data, config_value):
        return ConsolidatedSqliteEventLogStorage(inst_data=inst_data,
                                                 **config_value)

    def _init_db(self):
        mkdir_p(self._base_dir)
        engine = create_engine(self._conn_string, poolclass=NullPool)
        SqlEventLogStorageMetadata.create_all(engine)
        engine.execute("PRAGMA journal_mode=WAL;")
        alembic_config = get_alembic_config(__file__)
        connection = engine.connect()
        db_revision, head_revision = check_alembic_revision(
            alembic_config, connection)
        if not (db_revision and head_revision):
            stamp_alembic_rev(alembic_config, engine)

    @contextmanager
    def connect(self, run_id=None):
        engine = create_engine(self._conn_string, poolclass=NullPool)
        conn = engine.connect()
        try:
            with handle_schema_errors(
                    conn,
                    get_alembic_config(__file__),
                    msg="ConsolidatedSqliteEventLogStorage requires migration",
            ):
                yield conn
        finally:
            conn.close()

    def get_db_path(self):
        return os.path.join(self._base_dir,
                            "{}.db".format(SQLITE_EVENT_LOG_FILENAME))

    def upgrade(self):
        alembic_config = get_alembic_config(__file__)
        with self.connect() as conn:
            run_alembic_upgrade(alembic_config, conn)

    def has_secondary_index(self, name, run_id=None):
        if name not in self._secondary_index_cache:
            self._secondary_index_cache[name] = super(
                ConsolidatedSqliteEventLogStorage,
                self).has_secondary_index(name, run_id)
        return self._secondary_index_cache[name]

    def enable_secondary_index(self, name, run_id=None):
        super(ConsolidatedSqliteEventLogStorage,
              self).enable_secondary_index(name)
        if name in self._secondary_index_cache:
            del self._secondary_index_cache[name]

    def watch(self, run_id, start_cursor, callback):
        if not self._watchdog:
            self._watchdog = ConsolidatedSqliteEventLogStorageWatchdog(self)

        watch = self._obs.schedule(self._watchdog, self._base_dir, True)
        cursor = start_cursor if start_cursor is not None else -1
        self._watchers[run_id][callback] = (cursor, watch)

    def on_modified(self):
        keys = [(run_id, callback)
                for run_id, callback_dict in self._watchers.items()
                for callback, _ in callback_dict.items()]
        for run_id, callback in keys:
            cursor, watch = self._watchers[run_id][callback]

            # fetch events
            events = self.get_logs_for_run(run_id, cursor)

            # update cursor
            self._watchers[run_id][callback] = (cursor + len(events), watch)

            for event in events:
                status = callback(event)
                if (status == PipelineRunStatus.SUCCESS
                        or status == PipelineRunStatus.FAILURE
                        or status == PipelineRunStatus.CANCELED):
                    self.end_watch(run_id, callback)

    def end_watch(self, run_id, handler):
        if run_id in self._watchers:
            _cursor, watch = self._watchers[run_id][handler]
            self._obs.remove_handler_for_watch(self._watchdog, watch)
            del self._watchers[run_id][handler]
Example #48
        if getext(event.src_path) == '.wav':
            print('%s has been modified.' % event.src_path)
            cmd = trans_cmd + event.src_path
            print(cmd.split())
            res = subprocess.check_call(cmd.split())
        if getext(event.src_path) == '.txt':
            print("voice.txt modified")
            cmd = tweet_cmd
            print(cmd.split())
            res = subprocess.check_call(cmd.split())

    def on_deleted(self, event):
        if event.is_directory:
            return
        if getext(event.src_path) == '.wav':
            print('%s has been deleted.' % event.src_path)


if __name__ == '__main__':
    while 1:
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, BASEDIR, recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
Example #49
class RunManager(object):
    """Manages a run's process, wraps its I/O, and synchronizes its files.
    """
    def __init__(self,
                 api,
                 run,
                 project=None,
                 tags=[],
                 cloud=True,
                 job_type="train",
                 output=True,
                 port=None):
        self._api = api
        self._run = run
        self._cloud = cloud
        self._port = port

        self._project = project if project else api.settings("project")
        self._tags = tags
        self._watch_dir = self._run.dir

        self._config = run.config
        self.job_type = job_type
        self.url = self._run.get_url(api)

        # We lock this when the backend is down so Watchdog will keep track of all
        # the file events that happen. Then, when the backend comes back up, we unlock
        # it so all the outstanding events will get handled properly. Watchdog's queue
        # only keeps at most one event per file.
        # Counterintuitively, we use the "reader" locking to guard writes to the W&B
        # backend, and the "writer" locking to indicate that the backend is down. That
        # way, users of the W&B API won't block each other, but can all be
        # blocked by grabbing a "writer" lock.
        self._file_event_lock = wandb.rwlock.RWLock()
        # It starts acquired. We release it when we want to allow the events to happen.
        # (ie. after the Run is successfully created)
        self._file_event_lock.writer_enters()

        self._event_handlers = {}

        self._handler = PatternMatchingEventHandler()
        self._handler.on_created = self._on_file_created
        self._handler.on_modified = self._on_file_modified
        self._handler._patterns = [
            os.path.join(self._watch_dir, os.path.normpath('*'))
        ]
        # Ignore hidden files/folders and output.log because we stream it specially
        self._handler._ignore_patterns = [
            '*/.*', '*.tmp',
            os.path.join(self._run.dir, OUTPUT_FNAME)
        ]

        self._observer = Observer()
        self._observer.schedule(self._handler, self._watch_dir, recursive=True)

        self._stats = stats.Stats()
        # Calling .start() on _meta and _system_stats will spin a thread that reports system stats every 30 seconds
        self._system_stats = stats.SystemStats(run, api)
        self._meta = meta.Meta(api, self._run.dir)
        self._meta.data["jobType"] = job_type
        if self._run.program:
            self._meta.data["program"] = self._run.program
        self._file_pusher = file_pusher.FilePusher(self._push_function)

        self._socket = wandb_socket.Client(self._port)

        logger.debug("Initialized sync for %s/%s", self._project, self._run.id)

        if self._cloud:
            self._observer.start()

            self._api.save_patches(self._watch_dir)

            if output:
                wandb.termlog("Syncing %s" % self.url)
                wandb.termlog("Run `wandb off` to turn off syncing.")
                wandb.termlog("Local directory: %s" % os.path.relpath(run.dir))

            self._api.get_file_stream_api().set_file_policy(
                OUTPUT_FNAME, CRDedupeFilePolicy())

    """ FILE SYNCING / UPLOADING STUFF """

    # TODO: limit / throttle the number of adds / pushes
    def _on_file_created(self, event):
        logger.info('file/dir created: %s', event.src_path)
        if os.path.isdir(event.src_path):
            return None
        save_name = os.path.relpath(event.src_path, self._watch_dir)
        self._file_event_lock.await_readable()
        self._get_handler(event.src_path, save_name).on_created()

    def _on_file_modified(self, event):
        logger.info('file/dir modified: %s', event.src_path)
        if os.path.isdir(event.src_path):
            return None
        save_name = os.path.relpath(event.src_path, self._watch_dir)
        self._file_event_lock.await_readable()
        self._get_handler(event.src_path, save_name).on_modified()

    def _get_handler(self, file_path, save_name):
        if not os.path.split(save_name)[0] == "media" and save_name not in [
                'wandb-history.jsonl', 'wandb-events.jsonl',
                'wandb-summary.json'
        ]:
            # Don't show stats on media files
            self._stats.update_file(file_path)
        if save_name not in self._event_handlers:
            if save_name == 'wandb-history.jsonl':
                self._event_handlers[
                    'wandb-history.jsonl'] = FileEventHandlerTextStream(
                        file_path, 'wandb-history.jsonl', self._api)
            elif save_name == 'wandb-events.jsonl':
                self._event_handlers[
                    'wandb-events.jsonl'] = FileEventHandlerTextStream(
                        file_path, 'wandb-events.jsonl', self._api)
            # Don't try to stream tensorboard files for now.
            # elif 'tfevents' in save_name:
            #    # TODO: This is hard-coded, but we want to give users control
            #    # over streaming files (or detect them).
            #    self._api.get_file_stream_api().set_file_policy(save_name,
            #                                                    BinaryFilePolicy())
            #    self._event_handlers[save_name] = FileEventHandlerBinaryStream(
            #        file_path, save_name, self._api)
            # Overwrite handler (non-deferred) has a bug, wherein if the file is truncated
            # during upload, the request to Google hangs (at least, this is my working
            # theory). So for now we defer uploading everything til the end of the run.
            # TODO: send wandb-summary during run. One option is to copy to a temporary
            # file before uploading.
            elif save_name == config.FNAME:
                self._event_handlers[save_name] = FileEventHandlerConfig(
                    file_path, save_name, self._api, self._file_pusher,
                    self._run)
            elif save_name == 'wandb-summary.json':
                # Load the summary into the syncer process for meta etc to work
                self._run.summary.load()
                self._api.get_file_stream_api().set_file_policy(
                    save_name, OverwriteFilePolicy())
                self._event_handlers[save_name] = FileEventHandlerSummary(
                    file_path, save_name, self._api, self._file_pusher,
                    self._run)
            elif save_name.startswith('media/'):
                # Save media files immediately
                self._event_handlers[save_name] = FileEventHandlerOverwrite(
                    file_path, save_name, self._api, self._file_pusher)
            else:
                self._event_handlers[
                    save_name] = FileEventHandlerOverwriteDeferred(
                        file_path, save_name, self._api, self._file_pusher)
        return self._event_handlers[save_name]

    def _finish_handlers(self):
        # TODO: there was a case where _event_handlers was getting modified in the loop.
        for handler in list(self._event_handlers.values()):
            handler.finish()

    def _push_function(self, save_name, path):
        with open(path, 'rb') as f:
            self._api.push(self._project, {save_name: f},
                           run=self._run.id,
                           progress=lambda _, total: self._stats.
                           update_progress(path, total))

    """ RUN MANAGEMENT STUFF """

    def mirror_stdout_stderr(self):
        """Simple STDOUT and STDERR mirroring used by _init_jupyter"""
        # TODO: Ideally we could start collecting logs without pushing
        fs_api = self._api.get_file_stream_api()
        io_wrap.SimpleTee(
            sys.stdout,
            streaming_log.TextStreamPusher(fs_api,
                                           OUTPUT_FNAME,
                                           prepend_timestamp=True))
        io_wrap.SimpleTee(
            sys.stderr,
            streaming_log.TextStreamPusher(fs_api,
                                           OUTPUT_FNAME,
                                           prepend_timestamp=True,
                                           line_prepend='ERROR'))

    def _get_stdout_stderr_streams(self):
        """Sets up STDOUT and STDERR streams. Only call this once."""
        if six.PY2 or "buffer" not in dir(sys.stdout):
            stdout = sys.stdout
            stderr = sys.stderr
        else:  # we write binary so grab the raw I/O objects in python 3
            try:
                stdout = sys.stdout.buffer.raw
                stderr = sys.stderr.buffer.raw
            except AttributeError:
                # The testing environment and potentially others may have screwed with their
                # io so we fallback to raw stdout / err
                stdout = sys.stdout.buffer
                stderr = sys.stderr.buffer

        output_log_path = os.path.join(self._run.dir, OUTPUT_FNAME)
        self._output_log = WriteSerializingFile(open(output_log_path, 'wb'))

        stdout_streams = [stdout, self._output_log]
        stderr_streams = [stderr, self._output_log]

        if self._cloud:
            # Tee stdout/stderr into our TextOutputStream, which will push lines to the cloud.
            fs_api = self._api.get_file_stream_api()
            self._stdout_stream = streaming_log.TextStreamPusher(
                fs_api,
                OUTPUT_FNAME,
                prepend_timestamp=True,
                lock_function=self._file_event_lock.reader_enters)
            self._stderr_stream = streaming_log.TextStreamPusher(
                fs_api,
                OUTPUT_FNAME,
                line_prepend='ERROR',
                prepend_timestamp=True,
                lock_function=self._file_event_lock.reader_enters)

            stdout_streams.append(self._stdout_stream)
            stderr_streams.append(self._stderr_stream)

        return stdout_streams, stderr_streams

    def _close_stdout_stderr_streams(self, exitcode):
        self._output_log.f.close()
        self._output_log = None

        # Close output-capturing stuff. This also flushes anything left in the buffers.
        if self._stdout_tee.tee_file is not None:
            # we don't have tee_file's in headless mode
            self._stdout_tee.tee_file.close()
            # TODO(adrian): we should close these even in headless mode
            # but in python 2 the read thread doesn't stop on its own
            # for some reason
            self._stdout_tee.close_join()
        if self._stderr_tee.tee_file is not None:
            self._stderr_tee.tee_file.close()
            self._stderr_tee.close_join()

        if self._cloud:
            # not set in dry run mode
            self._stdout_stream.close()
            self._stderr_stream.close()
            self._api.get_file_stream_api().finish(exitcode)
            # Ensures we get a new file stream thread
            self._api._file_stream_api = None

    def _setup_resume(self, resume_status):
        # write the tail of the history file
        try:
            history_tail = json.loads(resume_status['historyTail'])
            jsonlfile.write_jsonl_file(
                os.path.join(self._run.dir, wandb_run.HISTORY_FNAME),
                history_tail)
        except ValueError:
            print("warning: couldn't load recent history")

        # write the tail of the events file
        try:
            events_tail = json.loads(resume_status['eventsTail'])
            jsonlfile.write_jsonl_file(
                os.path.join(self._run.dir, wandb_run.EVENTS_FNAME),
                events_tail)
        except ValueError:
            print("warning: couldn't load recent events")

        # Note: these calls need to happen after writing the files above. Because the access
        # to self._run.events below triggers events to initialize, but we need the previous
        # events to be written before that happens.

        # output.log
        self._api.get_file_stream_api().set_file_policy(
            OUTPUT_FNAME, CRDedupeFilePolicy(resume_status['logLineCount']))

        # history
        self._api.get_file_stream_api().set_file_policy(
            wandb_run.HISTORY_FNAME,
            DefaultFilePolicy(
                start_chunk_id=resume_status['historyLineCount']))
        self._event_handlers[
            wandb_run.HISTORY_FNAME] = FileEventHandlerTextStream(
                self._run.history.fname,
                wandb_run.HISTORY_FNAME,
                self._api,
                seek_end=True)

        # events
        self._api.get_file_stream_api().set_file_policy(
            wandb_run.EVENTS_FNAME,
            DefaultFilePolicy(start_chunk_id=resume_status['eventsLineCount']))
        self._event_handlers[
            wandb_run.EVENTS_FNAME] = FileEventHandlerTextStream(
                self._run.events.fname,
                wandb_run.EVENTS_FNAME,
                self._api,
                seek_end=True)

    def init_run(self, env=None):
        self._system_stats.start()
        self._meta.start()
        self._api.get_file_stream_api().start()
        if self._cloud:
            storage_id = None
            if self._run.resume != 'never':
                resume_status = self._api.run_resume_status(
                    project=self._api.settings("project"),
                    entity=self._api.settings("entity"),
                    name=self._run.id)
                if resume_status is None and self._run.resume == 'must':
                    raise LaunchError(
                        "resume='must' but run (%s) doesn't exist" %
                        self._run.id)
                if resume_status:
                    print('Resuming run: %s' % self._run.get_url(self._api))
                    self._setup_resume(resume_status)
                    storage_id = resume_status['id']

            if not self._upsert_run(False, storage_id, env):
                self._upsert_run_thread = threading.Thread(
                    target=self._upsert_run, args=(True, storage_id, env))
                self._upsert_run_thread.daemon = True
                self._upsert_run_thread.start()

    def shutdown(self, exitcode=0):
        """Stops system stats, streaming handlers, and uploads files without output, used by wandb.monitor"""
        self._system_stats.shutdown()
        self._meta.shutdown()
        self._finish_handlers()
        self._file_pusher.shutdown()
        self._api.get_file_stream_api().finish(exitcode)
        # Ensures we get a new file stream thread
        self._api._file_stream_api = None

    def _upsert_run(self, retry, storage_id, env):
        """Upsert the Run (ie. for the first time with all its attributes)

        Arguments:
            retry: (bool) Whether to retry if the connection fails (ie. if the backend is down).
                False is useful so we can start running the user process even when the W&B backend
                is down, and let syncing finish later.
        Returns:
            True if the upsert succeeded, False if it failed because the backend is down.
        Throws:
            LaunchError on other failures
        """
        if retry:
            num_retries = None
        else:
            num_retries = 0  # no retries because we want to let the user process run even if the backend is down

        try:
            upsert_result = self._run.save(id=storage_id,
                                           num_retries=num_retries,
                                           job_type=self.job_type,
                                           api=self._api)
        except wandb.api.CommError as e:
            # TODO: Get rid of str contains check
            if self._run.resume == 'never' and 'exists' in str(e):
                raise LaunchError("resume='never' but run (%s) exists" %
                                  self._run.id)
            else:
                if isinstance(e.exc, (requests.exceptions.HTTPError,
                                      requests.exceptions.Timeout,
                                      requests.exceptions.ConnectionError)):
                    wandb.termerror(
                        'Failed to connect to W&B. Retrying in the background.'
                    )
                    return False

                raise LaunchError(
                    'Launch exception: {}, see {} for details.  To disable wandb set WANDB_MODE=dryrun'
                    .format(e, util.get_log_file_path()))

        self._run.set_environment(environment=env)

        # unblock file syncing and console streaming, which need the Run to have a .storage_id
        self._file_event_lock.writer_leaves()

        return True

    def run_user_process(self, program, args, env):
        """Launch a user process, capture its output, and sync its files to the backend.

        This returns after the process has ended and syncing is done.
        Captures ctrl-c's, signals, etc.
        """
        stdout_streams, stderr_streams = self._get_stdout_stderr_streams()

        if sys.platform == "win32":
            # PTYs don't work in windows so we use pipes.
            self._stdout_tee = io_wrap.Tee.pipe(*stdout_streams)
            self._stderr_tee = io_wrap.Tee.pipe(*stderr_streams)
            # Seems like the following actually isn't necessary on Windows
            # TODO(adrian): we may need to do the following if we use pipes instead of PTYs
            # because Python on Unix doesn't like writing UTF-8 to files
            # tell child python interpreters we accept utf-8
            # env['PYTHONIOENCODING'] = 'UTF-8'
        else:
            self._stdout_tee = io_wrap.Tee.pty(*stdout_streams)
            self._stderr_tee = io_wrap.Tee.pty(*stderr_streams)

        self._stdout_stream.write_string(
            " ".join(psutil.Process(os.getpid()).cmdline()) + "\n\n")

        command = [program] + list(args)
        runner = util.find_runner(program)
        if runner:
            command = runner + command
        command = ' '.join(six.moves.shlex_quote(arg) for arg in command)

        try:
            self.proc = subprocess.Popen(
                command,
                env=env,
                stdout=self._stdout_tee.tee_file,
                stderr=self._stderr_tee.tee_file,
                shell=True,
            )
        except (OSError, IOError):
            raise Exception('Could not find program: %s' % command)

        self._sync_etc()

    def wrap_existing_process(self,
                              pid,
                              stdout_read_fd,
                              stderr_read_fd,
                              port=None):
        """Do syncing, etc. for an already-running process.

        This returns after the process has ended and syncing is done.
        Captures ctrl-c's, signals, etc.
        """
        stdout_read_file = os.fdopen(stdout_read_fd, 'rb')
        stderr_read_file = os.fdopen(stderr_read_fd, 'rb')
        stdout_streams, stderr_streams = self._get_stdout_stderr_streams()
        self._stdout_tee = io_wrap.Tee(stdout_read_file, *stdout_streams)
        self._stderr_tee = io_wrap.Tee(stderr_read_file, *stderr_streams)

        self.proc = Process(pid)

        try:
            self.init_run()
        except LaunchError as e:
            wandb.termerror(str(e))
            self._socket.launch_error()
            return

        # Signal the main process that we're all hooked up
        self._socket.ready()

        self._sync_etc(headless=True)

    def _sync_etc(self, headless=False):
        # Ignore SIGQUIT (ctrl-\). The child process will handle it, and we'll
        # exit when the child process does.
        #
        # We disable these signals after running the process so the child doesn't
        # inherit this behaviour.
        try:
            signal.signal(signal.SIGQUIT, signal.SIG_IGN)
        except AttributeError:  # SIGQUIT doesn't exist on windows
            pass

        if self._api.update_available:
            wandb.termlog(
                "An update is available!  To upgrade, please run:\n $ pip install wandb --upgrade"
            )
        # Add a space before user output
        wandb.termlog()

        if env.get_show_run():
            webbrowser.open_new_tab(self._run.get_url(self._api))

        exitcode = None
        try:
            while True:
                res = bytearray()
                try:
                    res = self._socket.recv(2)
                except socket.timeout:
                    pass
                if len(res) == 2 and res[0] == 2:
                    exitcode = res[1]
                    break
                elif len(res) > 0:
                    wandb.termerror(
                        "Invalid message received from child process: %s" %
                        str(res))
                    break
                else:
                    exitcode = self.proc.poll()
                    if exitcode is not None:
                        break
                    time.sleep(1)
        except KeyboardInterrupt:
            exitcode = 255
            if headless:
                wandb.termlog('Ctrl-c pressed.')
            else:
                wandb.termlog(
                    'Ctrl-c pressed; waiting for program to end. Press ctrl-c again to kill it.'
                )
                try:
                    while self.proc.poll() is None:
                        time.sleep(0.1)
                except KeyboardInterrupt:
                    pass

                if self.proc.poll() is None:
                    wandb.termlog('Program still alive. Killing it.')
                    try:
                        self.proc.kill()
                    except OSError:
                        pass
        """TODO(adrian): garbage that appears in the logs sometimes

        Exception ignored in: <bound method Popen.__del__ of <subprocess.Popen object at 0x111adce48>>
        Traceback (most recent call last):
          File "/Users/adrian/.pyenv/versions/3.6.0/Python.framework/Versions/3.6/lib/python3.6/subprocess.py", line 760, in __del__
        AttributeError: 'NoneType' object has no attribute 'warn'
        """

        if exitcode is None:
            exitcode = 254
            wandb.termlog(
                'Killing program failed; syncing files anyway. Press ctrl-c to abort syncing.'
            )
        else:
            if exitcode == 0:
                wandb.termlog('Program ended.')
            else:
                wandb.termlog(
                    'Program failed with code %d. Press ctrl-c to abort syncing.'
                    % exitcode)
        #termlog('job (%s) Process exited with code: %s' % (program, exitcode))

        self._meta.data["exitcode"] = exitcode
        if exitcode == 0:
            self._meta.data["state"] = "finished"
        elif exitcode == 255:
            self._meta.data["state"] = "killed"
        else:
            self._meta.data["state"] = "failed"

        self._meta.shutdown()
        self._system_stats.shutdown()

        if exitcode != 0 and time.time() - START_TIME < 30:
            wandb.termlog("Process crashed early, not syncing files")
            sys.exit(exitcode)

        # TODO: these can be slow to complete
        self._close_stdout_stderr_streams(exitcode)

        # If we're not syncing to the cloud, we're done
        if not self._cloud:
            sys.exit(exitcode)

        # Show run summary/history
        self._run.summary.load()
        summary = self._run.summary._summary
        if len(summary):
            wandb.termlog('Run summary:')
            max_len = max([len(k) for k in summary.keys()])
            format_str = '  {:>%s} {}' % max_len
            for k, v in summary.items():
                # arrays etc. might be too large. for now we just don't print them
                if isinstance(v, six.string_types):
                    if len(v) >= 20:
                        v = v[:20] + '...'
                    wandb.termlog(format_str.format(k, v))
                elif isinstance(v, numbers.Number):
                    wandb.termlog(format_str.format(k, v))

        self._run.history.load()
        history_keys = self._run.history.keys()
        if len(history_keys):
            wandb.termlog('Run history:')
            max_len = max([len(k) for k in history_keys])
            for key in history_keys:
                vals = util.downsample(self._run.history.column(key), 40)
                if any((not isinstance(v, numbers.Number) for v in vals)):
                    continue
                line = sparkline.sparkify(vals)
                format_str = u'  {:>%s} {}' % max_len
                wandb.termlog(format_str.format(key, line))

        if self._run.has_examples:
            wandb.termlog('Saved %s examples' % self._run.examples.count())

        wandb.termlog('Waiting for final file modifications.')
        # This is a heuristic delay to catch files that were written just before
        # the end of the script.
        # TODO: ensure we catch all saved files.
        # TODO(adrian): do we need this?
        time.sleep(2)
        try:
            # avoid hanging if we crashed before the observer was started
            if self._observer.is_alive():
                self._observer.stop()
                self._observer.join()
        # TODO: py2 TypeError: PyCObject_AsVoidPtr called with null pointer
        except TypeError:
            pass
        # TODO: py3 SystemError: <built-in function stop> returned a result with an error set
        except SystemError:
            pass

        self._finish_handlers()
        self._file_pusher.finish()

        wandb.termlog('Syncing files in %s:' %
                      os.path.relpath(self._watch_dir))
        for file_path in self._stats.files():
            wandb.termlog('  %s' % os.path.relpath(file_path, self._watch_dir))
        step = 0
        spinner_states = ['-', '\\', '|', '/']
        stop = False
        self._stats.update_all_files()
        while True:
            if not self._file_pusher.is_alive():
                stop = True
            summary = self._stats.summary()
            line = (
                ' %(completed_files)s of %(total_files)s files,'
                ' %(uploaded_bytes).03f of %(total_bytes).03f bytes uploaded\r'
                % summary)
            line = spinner_states[step % 4] + line
            step += 1
            wandb.termlog(line, newline=False)
            if stop:
                break
            time.sleep(0.25)
            #print('FP: ', self._file_pusher._pending, self._file_pusher._jobs)
        # clear progress line.
        wandb.termlog(' ' * 79)

        # Check md5s of uploaded files against what's on the file system.
        # TODO: We're currently using the list of uploaded files as our source
        #     of truth, but really we should use the files on the filesystem
        #     (ie if we missed a file this wouldn't catch it).
        # This polls the server, because there is a delay between when the file
        # is done uploading and when the datastore gets updated with new
        # metadata via pubsub.
        wandb.termlog('Verifying uploaded files... ', newline=False)
        error = False
        mismatched = None
        for delay_base in range(4):
            mismatched = []
            download_urls = self._api.download_urls(self._project,
                                                    run=self._run.id)
            for fname, info in download_urls.items():
                if fname in ('wandb-history.h5', OUTPUT_FNAME):
                    continue
                local_path = os.path.join(self._watch_dir, fname)
                local_md5 = util.md5_file(local_path)
                if local_md5 != info['md5']:
                    mismatched.append((local_path, local_md5, info['md5']))
            if not mismatched:
                break
            wandb.termlog('  Retrying after %ss' % (delay_base**2))
            time.sleep(delay_base**2)

        if mismatched:
            print('')
            error = True
            for local_path, local_md5, remote_md5 in mismatched:
                wandb.termerror(
                    '%s (%s) did not match uploaded file (%s) md5' %
                    (local_path, local_md5, remote_md5))
        else:
            print('verified!')

        if error:
            wandb.termerror('Sync failed %s' % self.url)
        else:
            wandb.termlog('Synced %s' % self.url)
        sys.exit(exitcode)
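
The comment on `_file_event_lock` in RunManager's constructor describes an event-gating pattern: file events are held back until the run exists on the backend, then released all at once. A simplified sketch of that idea using a plain threading.Event (the real RWLock additionally lets unblocked handlers proceed concurrently):

import threading

backend_ready = threading.Event()   # starts unset, i.e. "locked"


def on_file_event(path):
    # Corresponds to _file_event_lock.await_readable(): block until the
    # run exists on the backend, then sync.
    backend_ready.wait()
    print('syncing', path)


def mark_run_created():
    # Corresponds to _file_event_lock.writer_leaves(): unblock all
    # pending file events at once.
    backend_ready.set()
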
Ejemplo n.º 50
0
class GitWatcher(object):
    def __init__(self,
                 path=None,
                 events_timeout=1,
                 notifier=None,
                 notify_callback=None):
        self.event_handler = MyEventHandler(ignore_patterns=IGNORE_PATTERNS,
                                            parent=self)
        self.path = path
        self.events_timeout = events_timeout
        self.observer = Observer()
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        self.events_list = []
        self.stopped = False
        self.git_handler = GitDirectoryHandler(path=self.path, parent=self)
        self._thread = Thread(target=self._run)
        self.notifier = notifier
        self.notify_callback = notify_callback
        if self.notifier:
            self.notifier.parent = self

    def on_notify(self, message):
        print(message)
        if self.notify_callback:
            self.notify_callback('Syncing with the server')
        self.git_handler.update_from_remote()
        if self.notify_callback:
            self.notify_callback('Server sync complete')

    def on_push(self):
        self.notifier.send_notify('hello')
        if self.notify_callback:
            self.notify_callback('Uploading files')

    def on_any_event(self, event):
        self.events_list.append({
            'event': event,
            'timestamp': datetime.datetime.now()
        })

    def _run(self):
        while not self.stopped:
            time.sleep(0.1)
            if len(self.events_list) > 0:
                last_event = self.events_list[-1]
                if (datetime.datetime.now() - last_event['timestamp']
                    ).total_seconds() > self.events_timeout:
                    self.process_events()

    @staticmethod
    def touch(path):
        with open(path, 'w'):
            pass

    @staticmethod
    def get_empty_file_path(path):
        return os.path.join(path, '.empty')

    def handle_empty_directory(self, path):
        dir_list = os.listdir(path)
        if len(dir_list) == 0:
            self.touch(self.get_empty_file_path(path))
        elif os.path.isfile(
                self.get_empty_file_path(path)) and len(dir_list) > 1:
            os.unlink(self.get_empty_file_path(path))

    def process_events(self):
        modified_paths = {}
        for event in self.events_list:
            if event['event'].src_path in modified_paths:
                if (EVENTS_PRIORITIES.get(event['event'].event_type, 100) >
                        EVENTS_PRIORITIES.get(
                            modified_paths[event['event'].src_path].event_type,
                            0)):
                    modified_paths[event['event'].src_path] = event['event']
            else:
                modified_paths[event['event'].src_path] = event['event']
        modified_files = []
        for k, v in modified_paths.items():
            print(k, v)
            if v.is_directory and os.path.isdir(k):
                self.handle_empty_directory(k)
            if os.path.basename(k) == '.empty' or (v.is_directory and
                                                   v.event_type == 'modified'):
                continue
            modified_files.append('[{} {}]'.format(
                EVENTS_ABBR.get(v.event_type), os.path.relpath(k, self.path)))
        self.git_handler.process_changes()

        self.events_list = []

    def start(self):
        self._thread.start()

    def stop(self):
        self.stopped = True
        self._thread.join()
        self.observer.stop()
        self.observer.join()
        if self.notifier:
            self.notifier.stop()

    def __del__(self):
        self.observer.stop()
        self.observer.join()
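
GitWatcher batches filesystem events and only processes them once no new event has arrived for `events_timeout` seconds. That debounce pattern can be sketched generically; the names here are illustrative, not from the original:

import datetime

from watchdog.events import FileSystemEventHandler


class DebouncedHandler(FileSystemEventHandler):
    '''Collect events; fire a callback after a quiet period.'''

    def __init__(self, callback, timeout=1.0):
        self.callback = callback    # Called with the batched events
        self.timeout = timeout      # Quiet period in seconds
        self.events = []

    def on_any_event(self, event):
        self.events.append((event, datetime.datetime.now()))

    def poll(self):
        '''Call periodically; fires the callback once events settle.'''
        if not self.events:
            return
        last_ts = self.events[-1][1]
        if (datetime.datetime.now() - last_ts).total_seconds() > self.timeout:
            batch, self.events = self.events, []
            self.callback([e for e, _ in batch])

GitWatcher runs the equivalent of poll() on a background thread every 0.1 seconds.
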
Ejemplo n.º 51
0
app = Flask(__name__)

image_path = "./image_test"
config_path = "./config/config.ini"
config = Config.read_config(config_path)


@app.route("/")
def top():
    return render_template("index.html")


@app.route("/config")
def config():
    return render_template("config.html")


@app.route("/save_config", methods=["POST"])
def save_config():
    return render_template("index.html")


if __name__ == "__main__":
    print("on app")
    change_handler = ChangeHandler()
    observer = Observer()
    observer.schedule(change_handler, image_path, recursive=True)
    observer.start()
    app.run(host="127.0.0.1", port=5000)
Ejemplo n.º 52
0
                {'id': xmlid,
                'name': '%s.xml' % toolname,
                'src': 'hda', 'file_type':'xml', 'visible':'true'},
                {'id': lintid,
                'name': '%s Planemo lint report' % toolname,
                'src': 'hda', 'file_type':'txt', 'visible':'true'}],
            'name': '%s test reports and tested toolshed archive' % toolname}
        resdict = gi.histories.create_dataset_collection(hid, newcoll)
        logging.info('Create_results added collection with results')


if __name__ == "__main__":
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s - %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S",
        # filename='/export/galaxy/toolwatcher.log'),
        # filemode="w",
    )
    event_handler = ToolHandler(watchme=WATCHME,
                                planemo_galaxy_root=PLANEMO_GALAXY_ROOT,
                                conda_prefix=CONDA_PREFIX,
                                patterns=WATCH_PATTERN,
                                galaxy_root=GALAXY_ROOT)
    observer = Observer()
    observer.schedule(event_handler, path=WATCHME, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Ejemplo n.º 53
0

class MyHandler(FileSystemEventHandler):
    file_format_correct = True

    # file has been created
    def on_created(self, event):
        if not os.path.isdir(event.src_path):
            self.file_format_correct = handle_bill_move(event)

    # if a file is modified
    def on_modified(self, event):
        if self.file_format_correct and not os.path.isdir(event.src_path):
            self.file_format_correct = handle_bill_move(event)


event_handler = MyHandler()

# start the observer and keep it running until you press Ctrl-C
observer = Observer()
observer.schedule(event_handler, track_folder, recursive=True)
observer.start()
write_log("Watcher started!")

try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
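
The snippet relies on handle_bill_move, track_folder, and write_log, which are not shown. A hypothetical sketch of what they might look like, assuming bills are PDFs moved into an archive folder:

import os
import shutil

track_folder = os.path.expanduser('~/Downloads/bills')   # assumed location


def write_log(message):
    print(message)   # stand-in; the original may write to a log file


def handle_bill_move(event):
    '''Move a new bill into an archive folder; False signals a bad format.'''
    name = os.path.basename(event.src_path)
    if not name.lower().endswith('.pdf'):   # illustrative format check
        return False
    archive = os.path.join(track_folder, 'archive')
    os.makedirs(archive, exist_ok=True)
    shutil.move(event.src_path, os.path.join(archive, name))
    return True
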
Ejemplo n.º 54
0
class Library(RegexMatchingEventHandler):
    def __init__(self, **kwargs):
        """
        Initializes a new instance of the :class:`Library` class.
        """
        self._supported = kwargs.get('supported', ['.mp3', '.ogg'])
        self._base_path = os.path.join(os.path.expanduser(kwargs.get('base_path', '~/Music')), '')
        self._libraries = list()
        self._current_library = 0
        self._current_song = -1
        self.on_changed = lambda *a, **kw: None

        regexes = [
            r'{root_dir}'.format(
                root_dir=os.path.abspath(os.path.join(self._base_path, os.pardir)).replace('/', r'\/')
            )
        ]
        super(Library, self).__init__(regexes=regexes)

    def __enter__(self):
        """Starts the song library"""
        self._rescan_library()
        self._observer = Observer()
        parent = os.path.abspath(os.path.join(self._base_path, os.pardir))
        self._observer.schedule(self, path=parent, recursive=True)
        self._observer.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Stops the song library"""
        self._observer.stop()
        self._observer.join()

    @property
    def library(self):
        """Gets the current selected song library"""
        return self._current_library

    @library.setter
    def library(self, value):
        """Selects a song library"""
        if not self._libraries:
            return

        self._current_library = value % len(self._libraries)
        self._current_song = -1

    @property
    def song(self):
        """Gets the current selected song of the current song library"""
        if self._current_library >= len(self._libraries):
            return
        if self._current_song >= len(self._libraries[self._current_library]):
            return
        return self._libraries[self._current_library][self._current_song]

    def next(self):
        """Selects the next song in the current song library"""
        if not self._libraries:
            return
        self._current_song = (self._current_song + 1) % len(self._libraries[self._current_library])

    def _rescan_library(self):
        """Scans the root directory for song libraries"""
        self._libraries.clear()

        logger.info('loading audio library %s', self._base_path)
        if not os.path.isdir(self._base_path):
            logger.info('audio library is empty')
            self.on_changed()
            return

        for index, directory in enumerate(sorted(os.listdir(self._base_path))):
            directory = os.path.join(self._base_path, directory)
            if not os.path.isdir(directory):
                continue

            logger.info('adding songs from directory %s', directory)
            self._libraries.append(sorted(
                [os.path.join(directory, file)
                 for file in os.listdir(directory)
                 if os.path.isfile(os.path.join(directory, file))
                 and os.path.splitext(file.lower())[1] in self._supported]
            ))

        self.library = self._current_library
        self.on_changed()

    def on_any_event(self, event):
        if isinstance(event, (DirCreatedEvent, DirModifiedEvent, DirDeletedEvent)):
            self._rescan_library()

        elif isinstance(event, FileCreatedEvent):
            self._on_file_created(str(event.src_path))

        elif isinstance(event, FileMovedEvent):
            self._on_file_removed(str(event.src_path))
            self._on_file_created(str(event.dest_path))

    def _on_file_removed(self, file_path):
        changed = False
        for library in self._libraries[:]:
            if file_path in library:
                logger.info('remove song from directory %s', file_path)
                library.remove(file_path)
                changed = True
                if not library:
                    self._libraries.remove(library)

        if changed:
            self.on_changed()

    def _on_file_created(self, file_path):
        dir_name, file_name = os.path.split(file_path)
        if not dir_name.startswith(self._base_path):
            return

        library = [i for i, x in enumerate(self._libraries) if x[0].startswith(dir_name)]
        if len(library) > 1:
            logger.warning("can't add song, multiple libraries match: %s", file_path)
            return

        if len(library) == 0:
            logger.info('add new library: %s', dir_name)
            self._libraries.append([file_path])
            self._libraries = sorted(self._libraries)
            self.on_changed()
            return

        index = library[0]
        if file_path not in self._libraries[index]:
            logger.info('add song to library: %s', file_path)
            library = self._libraries[index]
            library.append(file_path)
            self._libraries[index] = sorted(library)
            self.on_changed()

    def __str__(self):
        return '%s' % self._libraries
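
Library doubles as its own event handler (it subclasses RegexMatchingEventHandler and schedules `self`) and ties the observer's lifetime to a `with` block. A condensed sketch of that pattern:

from watchdog.events import RegexMatchingEventHandler
from watchdog.observers import Observer


class WatchedFolder(RegexMatchingEventHandler):
    '''Watch a folder for the lifetime of a `with` block.'''

    def __init__(self, path):
        super(WatchedFolder, self).__init__(regexes=[r'.*'])
        self.path = path

    def __enter__(self):
        self._observer = Observer()
        self._observer.schedule(self, self.path, recursive=True)
        self._observer.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._observer.stop()
        self._observer.join()

    def on_any_event(self, event):
        print(event.event_type, event.src_path)


# Usage:
# with WatchedFolder('/path/to/music'):
#     input('watching; press Enter to stop\n')
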
Ejemplo n.º 55
0
        self.ch = slack_channel

    def on_created(self, event):
        # slack.chat.post_message('self.ch', event.src_path)
        slack.files.upload(event.src_path, channels=self.ch)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--token', help='api_token from slack')
    parser.add_argument('--ch', help='channel from slack')
    parser.add_argument('--path', help='directory to monitor')

    args = parser.parse_args()
    print(
        'Monitoring directory {} \n Slack token {} \n Slack channel {}'.format(
            args.path, args.token, args.ch))
    slack = Slacker(args.token)

    event_handler = MyHandler(args.ch)
    observer = Observer()
    observer.schedule(event_handler, path=args.path, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Ejemplo n.º 56
0
import sys
import time
import logging
from watchdog.observers import Observer
from watchdog.events import LoggingEventHandler

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')
    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    event_handler = LoggingEventHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
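
The same skeleton works with a filtering handler; this variant reacts only to *.py modifications using PatternMatchingEventHandler, a standard watchdog class:

import time

from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer


class PyChangeHandler(PatternMatchingEventHandler):
    def __init__(self):
        super(PyChangeHandler, self).__init__(patterns=['*.py'],
                                              ignore_directories=True)

    def on_modified(self, event):
        print('modified:', event.src_path)


if __name__ == "__main__":
    observer = Observer()
    observer.schedule(PyChangeHandler(), '.', recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()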

Ejemplo n.º 57
0
    def _execute(self, options, args):
        """Start the watcher."""
        self.logger = get_logger('auto', STDERR_HANDLER)
        LRSocket.logger = self.logger

        if WebSocket is object and watchdog is None:
            req_missing(['ws4py', 'watchdog'], 'use the "auto" command')
        elif WebSocket is object:
            req_missing(['ws4py'], 'use the "auto" command')
        elif watchdog is None:
            req_missing(['watchdog'], 'use the "auto" command')

        self.cmd_arguments = ['nikola', 'build']
        if self.site.configuration_filename != 'conf.py':
            self.cmd_arguments.append('--conf=' +
                                      self.site.configuration_filename)

        # Run an initial build so we are up-to-date
        subprocess.call(self.cmd_arguments)

        port = options and options.get('port')
        self.snippet = '''<script>document.write('<script src="http://'
            + (location.host || 'localhost').split(':')[0]
            + ':{0}/livereload.js?snipver=1"></'
            + 'script>')</script>
        </head>'''.format(port)

        # Do not duplicate entries -- otherwise, multiple rebuilds are triggered
        watched = set(['templates/'] +
                      [get_theme_path(name) for name in self.site.THEMES])
        for item in self.site.config['post_pages']:
            watched.add(os.path.dirname(item[0]))
        for item in self.site.config['FILES_FOLDERS']:
            watched.add(item)
        for item in self.site.config['GALLERY_FOLDERS']:
            watched.add(item)
        for item in self.site.config['LISTINGS_FOLDERS']:
            watched.add(item)
        for item in self.site._plugin_places:
            watched.add(item)
        # Nikola itself (useful for developers)
        watched.add(pkg_resources.resource_filename('nikola', ''))

        out_folder = self.site.config['OUTPUT_FOLDER']
        if options and options.get('browser'):
            browser = True
        else:
            browser = False

        if options['ipv6']:
            dhost = '::'
        else:
            dhost = None

        host = options['address'].strip('[').strip(']') or dhost

        # Server can be disabled (Issue #1883)
        self.has_server = not options['no-server']

        # Instantiate global observer
        observer = Observer()
        if self.has_server:
            # Watch output folders and trigger reloads
            observer.schedule(OurWatchHandler(self.do_refresh),
                              out_folder,
                              recursive=True)

        # Watch input folders and trigger rebuilds
        for p in watched:
            if os.path.exists(p):
                observer.schedule(OurWatchHandler(self.do_rebuild),
                                  p,
                                  recursive=True)

        # Watch config file (a bit of a hack, but we need a directory)
        _conf_fn = os.path.abspath(self.site.configuration_filename
                                   or 'conf.py')
        _conf_dn = os.path.dirname(_conf_fn)
        observer.schedule(ConfigWatchHandler(_conf_fn, self.do_rebuild),
                          _conf_dn,
                          recursive=False)

        try:
            self.logger.info("Watching files for changes...")
            observer.start()
        except KeyboardInterrupt:
            pass

        parent = self

        class Mixed(WebSocketWSGIApplication):
            """A class that supports WS and HTTP protocols on the same port."""
            def __call__(self, environ, start_response):
                if environ.get('HTTP_UPGRADE') is None:
                    return parent.serve_static(environ, start_response)
                return super(Mixed, self).__call__(environ, start_response)

        if self.has_server:
            ws = make_server(host,
                             port,
                             server_class=WSGIServer,
                             handler_class=WebSocketWSGIRequestHandler,
                             app=Mixed(handler_cls=LRSocket))
            ws.initialize_websockets_manager()
            self.logger.info("Serving HTTP on {0} port {1}...".format(
                host, port))
            if browser:
                if options['ipv6'] or '::' in host:
                    server_url = "http://[{0}]:{1}/".format(host, port)
                else:
                    server_url = "http://{0}:{1}/".format(host, port)

                self.logger.info(
                    "Opening {0} in the default web browser...".format(
                        server_url))
                # Yes, this is racy
                webbrowser.open('http://{0}:{1}'.format(host, port))

            try:
                self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
                ws.serve_forever()
            except KeyboardInterrupt:
                self.logger.info("Server is shutting down.")
                if self.dns_sd:
                    self.dns_sd.Reset()
                # This is a hack, but something is locking up in a futex
                # and exit() doesn't work.
                os.kill(os.getpid(), 15)
        else:
            # Workaround: can’t have nothing running (instant exit)
            #    but also can’t join threads (no way to exit)
            # The joys of threading.
            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                self.logger.info("Shutting down.")
                # This is a hack, but something is locking up in a futex
                # and exit() doesn't work.
                os.kill(os.getpid(), 15)
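
As the command above shows, a single Observer can carry many schedules with different handlers and paths. A minimal sketch of that layout; the directory names are placeholders:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class CallbackHandler(FileSystemEventHandler):
    def __init__(self, callback):
        self.callback = callback

    def on_any_event(self, event):
        self.callback(event)


observer = Observer()
# Placeholder directories; watching a nonexistent path fails, hence the
# os.path.exists() guard in the Nikola code above.
observer.schedule(CallbackHandler(lambda e: print('rebuild:', e.src_path)),
                  'content', recursive=True)
observer.schedule(CallbackHandler(lambda e: print('refresh:', e.src_path)),
                  'output', recursive=True)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()
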
Ejemplo n.º 58
0
class MakeMKV_Watchdog(FileSystemEventHandler):
    def __init__(self, *args, **kwargs):
        super().__init__()
        self.log = logging.getLogger(__name__)
        self.log.info('Starting up...')

        # Get the optional fileExt keyword and normalize it to a tuple
        fileExt = kwargs.pop('fileExt', None)
        if fileExt is None:
            self.fileExt = ('.mkv',)        # Default to '.mkv'
        elif isinstance(fileExt, str):
            self.fileExt = (fileExt,)       # Wrap a single extension in a tuple
        elif isinstance(fileExt, list):
            self.fileExt = tuple(fileExt)   # Convert a list of extensions to a tuple
        else:
            self.fileExt = fileExt          # Assume it is already a tuple

        self.converter = VideoConverter(**kwargs)
        self.Queue = Queue()        # Queue for sending files to the converting thread
        self.Observer = Observer()  # Watchdog Observer
        for arg in args:            # Watch each input directory, recursively
            self.Observer.schedule(self, arg, recursive=True)
            for file in self._getDirListing(arg):   # Enqueue any files already present
                self.Queue.put(file)

        self.Observer.start()       # Start the observer

        self.__runThread = Thread(target=self.__run)    # Thread that dequeues files and converts them
        self.__runThread.start()    # Start the thread

    def on_created(self, event):
        '''Handle events when a file is created.'''
        if event.is_directory:
            return                                  # Ignore directories
        if event.src_path.endswith(self.fileExt):
            self.Queue.put(event.src_path)          # Enqueue the new file for conversion
            self.log.debug('New file added to queue : {}'.format(event.src_path))

    def on_moved(self, event):
        '''Handle events when a file is moved.'''
        if event.is_directory:
            return                                  # Ignore directories
        if event.dest_path.endswith(self.fileExt):
            self.Queue.put(event.dest_path)         # Enqueue the moved file for conversion
            self.log.debug('New file added to queue : {}'.format(event.dest_path))

    def join(self):
        '''
        Wait for the watchdog Observer to finish.
        The Observer is stopped when _sigintEvent or _sigtermEvent is set.
        '''
        self.Observer.join()

    def _getDirListing(self, dir):
        '''
        Get a list of files in a directory (non-recursive) whose names end
        with one of the watched extensions (self.fileExt).

        Inputs:
            dir : Path of directory to search
        Outputs:
            Returns list of file paths
        '''
        files = []
        for file in os.listdir(dir):
            if file.endswith(self.fileExt):
                path = os.path.join(dir, file)
                if os.path.isfile(path):
                    files.append(path)
        return files

    def _checkSize(self, file):
        '''
        Block until the size of `file` stops changing, i.e. the file has
        finished being copied or moved into the watched directory.
        '''
        self.log.debug('Waiting for file to finish being created')
        prev = -1                         # Previous file size
        curr = os.path.getsize(file)      # Current file size
        while prev != curr:               # While the size is still changing
            time.sleep(SLEEP)             # Sleep a few seconds
            prev = curr                   # Previous size becomes current size
            curr = os.path.getsize(file)  # Update current size

    def __run(self, **kwargs):
        '''
        Thread target that dequeues video file paths and converts them.
        '''
        while isRunning():          # While the kill event is NOT set
            try:
                # Block for up to TIMEOUT seconds waiting for a file
                file = self.Queue.get(timeout=TIMEOUT)
            except Exception:       # Queue was empty; loop and re-check the kill event
                continue

            # Wait to make sure the file has finished copying/moving
            self._checkSize(file)

            try:
                out_file = self.converter.transcode(file)   # Convert the file
            except Exception:
                self.log.exception('Failed to convert file')
            else:
                if out_file is not None and isRunning():
                    plexMediaScanner(
                        'scan', 'refresh',
                        section='TV Shows' if self.converter.metaData.isEpisode
                        else 'Movies')

            self.Queue.task_done()

        self.log.info('MakeMKV watchdog stopped!')
        self.Observer.stop()
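
_checkSize encodes a useful trick: watchdog fires on_created as soon as a file appears, which may be long before a large copy has finished, so the size is polled until it stops changing. The same idea as a standalone helper:

import os
import time


def wait_until_stable(path, interval=2.0):
    '''Block until the size of `path` stops changing between polls.'''
    prev = -1
    curr = os.path.getsize(path)
    while prev != curr:
        time.sleep(interval)
        prev = curr
        curr = os.path.getsize(path)
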
Ejemplo n.º 59
0
    #System
    '.bak': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.cab': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.cfg': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.cpl': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.cur': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.dll': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.dmp': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.drv': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.icns': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.ico': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.ini': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.lnk': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.msi': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.sys': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
    '.tmp': "/Users/emreakayoglu/Downloads/TEST/Text/Other/System",
}

folder_to_track = '/Users/emreakayoglu/Desktop/TEST'
folder_destination = '/Users/emreakayoglu/Downloads/TEST'
event_handler = MyHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_track, recursive=True)
observer.start()

try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
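
The MyHandler this sorter schedules is not shown; presumably it looks up each file's extension in the mapping above and moves the file to the matching folder. A hedged sketch, with extension_paths as an assumed name for that mapping:

import os
import shutil

from watchdog.events import FileSystemEventHandler


class MyHandler(FileSystemEventHandler):
    def on_modified(self, event):
        # `extension_paths` is an assumed name for the dict shown above
        for filename in os.listdir(folder_to_track):
            src = os.path.join(folder_to_track, filename)
            if not os.path.isfile(src):
                continue
            ext = os.path.splitext(filename)[1].lower()
            dest_dir = extension_paths.get(ext, folder_destination)
            os.makedirs(dest_dir, exist_ok=True)
            shutil.move(src, os.path.join(dest_dir, filename))
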
Ejemplo n.º 60
-1
    def mainLoop(self):

        logging.debug("Monitor: started main loop.")
        self.session = self.dm.Session()
        self.library = Library(self.dm.Session)
        
        observer = Observer()
        self.eventHandler = MonitorEventHandler(self)
        for path in self.paths:
            if os.path.exists(path):
                observer.schedule(self.eventHandler, path, recursive=True)
        observer.start()
        
        while True:
            try:
                (msg, args) = self.queue.get(block=True, timeout=1)
            except Exception:   # queue.Empty: no message this cycle
                msg = None

            # dispatch messages
            if msg == "scan":
                self.dofullScan(self.paths)

            if msg == "events":
                self.doEventProcessing(args)
            
            #time.sleep(1)
            if self.quit:
                break
            
        self.session.close()
        self.session = None
        observer.stop()
        logging.debug("Monitor: stopped main loop.")