Code Example #1
File: env.py Project: syrusakbary/assetsy
    def add_path(self, name, path):
        if name == 'default':
            self._base = path
        self.paths[name] = path
        if self.instant_reload:
            observer = Observer()
            observer.schedule(self.reload_handler, path=path, recursive=True)
            observer.start()
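
A note on this example: every call to add_path starts another Observer thread that is never stopped. A single observer can carry many watches, so a variant along these lines (a sketch, assuming an observer created and started once in __init__) avoids the thread-per-path cost:

    def add_path(self, name, path):
        if name == 'default':
            self._base = path
        self.paths[name] = path
        if self.instant_reload:
            # schedule() may be called repeatedly on an already-running observer
            self._observer.schedule(self.reload_handler, path=path, recursive=True)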
Code Example #2
File: run.py Project: remyzane/flask-http-api
def code():
    # file monitor server
    observer = Observer()

    # py yml file monitor
    patterns = ["*.py", "*demo.yml"]  # '*' is necessary, and must in the first.
    restart_processor = ServerStarter(
        [
            {"cmd": "rm -rf %s/*.log" % os.path.join(workspace, "log"), "is_daemon": False},
            {"cmd": "./run.py run", "network_port": (config["simple_server"]["port"],)},
        ]
    )
    monitor = SourceCodeMonitor(restart_processor, patterns)
    observer.schedule(monitor, program_dir, recursive=True)
    observer.schedule(monitor, http_api.__path__[0], recursive=True)

    # # rebuild the css and js min files whenever a source file changes
    # patterns = ['*.css', '*.js', '*static.yml']     # the '*' wildcard is required and must come first
    # monitor = SourceCodeMonitor(BuildCssJsProcessor(program_dir, static), patterns, None, 500)
    # observer.schedule(monitor, program_dir, recursive=True)

    # start monitoring
    observer.start()
    try:
        time.sleep(31536000)  # one year
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
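
The year-long time.sleep above is only there to park the main thread. Since the observer is itself a thread, joining it with a timeout is a common equivalent idiom (a sketch):

observer.start()
try:
    while observer.is_alive():
        observer.join(1)  # wake periodically so KeyboardInterrupt is handled promptly
except KeyboardInterrupt:
    observer.stop()
observer.join()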
Code Example #3
File: ODMWatchdog.py Project: jkokorian/ODMAnalysis
def main():
    if (len(sys.argv) > 1 and os.path.exists(sys.argv[1]) and os.path.isfile(sys.argv[1])):
        filename = sys.argv[1]
    else:
        filename = gui.get_path("*.csv", defaultFile="data.csv")
    
    commonPath = os.path.abspath(os.path.split(filename)[0])
    outputFile = os.path.join(commonPath, "odmanalysis.csv")
    
    print "Now watching %s for changes" % filename
    handler = OMDCsvChunkHandler(filename,outputFile)
    observer = Observer()
    observer.schedule(handler, path=commonPath, recursive=False)
    handler.startPCChain()
    observer.start()

    try:
        while True:
            time.sleep(1)

    except (KeyboardInterrupt, SystemExit):
        print("Stopping...")
        observer.stop()
        time.sleep(1)
    observer.join()
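
Example #3 watches the file's parent directory with recursive=False because watchdog observers watch directories, not individual files. Reacting to just one file means filtering inside the handler; a minimal sketch (the SingleFileHandler name is illustrative, not from the project):

import os
from watchdog.events import FileSystemEventHandler

class SingleFileHandler(FileSystemEventHandler):
    def __init__(self, target):
        self.target = os.path.abspath(target)

    def on_modified(self, event):
        # ignore sibling files in the same directory
        if not event.is_directory and os.path.abspath(event.src_path) == self.target:
            print("changed: %s" % event.src_path)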
Code Example #4
File: http.py Project: eykd/storyline
def main():
    arguments = docopt(__doc__, version='Storyline HTTP v0.1')

    if arguments.get('--debug'):
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stderr)
        logger.addHandler(handler)

    app.config.from_object(__name__)
    app.debug = arguments.get('--debug')

    story_path = arguments.get('STORY_PATH', '.')

    global plot
    plot = storyfile.load_plot_from_path(story_path)

    observer = Observer()
    observer.schedule(LoggingEventHandler(), path=story_path, recursive=True)
    observer.schedule(Reloader(story_path), path=story_path, recursive=True)

    observer.start()
    try:
        app.run()
    finally:
        observer.stop()
        observer.join()
Code Example #5
File: command.py Project: Cortlandd/coconut
    def watch(self, source, write=True, package=None, run=False, force=False):
        """Watches a source and recompiles on change."""
        from watchdog.events import FileSystemEventHandler
        from watchdog.observers import Observer

        def recompile(path):
            if os.path.isfile(path) and os.path.splitext(path)[1] in code_exts:
                self.compile_path(path, write, package, run, force)

        class watcher(FileSystemEventHandler):
            def on_modified(_, event):
                recompile(event.src_path)
            def on_created(_, event):
                recompile(event.src_path)

        source = fixpath(source)

        self.console.show("Watching        "+showpath(source)+" ...")
        self.console.print("(press Ctrl-C to end)")

        observer = Observer()
        observer.schedule(watcher(), source, recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(0.1)
        except KeyboardInterrupt:
            pass
        finally:
            observer.stop()
            observer.join()
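
Editors typically emit a burst of created/modified events for a single save, so recompile above can fire several times in quick succession. A time-window debounce is one common remedy; a sketch (the 0.5-second window is an arbitrary choice):

import time
from watchdog.events import FileSystemEventHandler

class DebouncedHandler(FileSystemEventHandler):
    def __init__(self, callback, window=0.5):
        self.callback = callback
        self.window = window
        self._last = {}  # path -> timestamp of the last handled event

    def on_any_event(self, event):
        if event.is_directory:
            return
        now = time.time()
        if now - self._last.get(event.src_path, 0) >= self.window:
            self._last[event.src_path] = now
            self.callback(event.src_path)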
Code Example #6
def main(): 
    global badExtensionCounter, failedFlag, pool, failedProcessCounter#, db
    
    sql_setup() # Set up the SQL database / check whether it already exists
    
    # Initiate File Path Handler
    observer = Observer()
    observer.schedule(MyHandler(), path=file_path, recursive=True)
    observer.start()
    
    cpuCount = multiprocessing.cpu_count() # Count all available CPUs
    print("\nTotal CPU Count: %d" % cpuCount)
    pool = multiprocessing.Pool(4, worker, (processQueue,)) # Create 4 child processes to handle all queued elements
    active = multiprocessing.active_children() # All active child processes
    print("Total number of active child processes: %s\n" % str(active))
    
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pool.terminate() # Stop all child processes
        pool.join() # Join the processes with parent and terminate
        active = multiprocessing.active_children() # All active child processes; list should be empty at this point
        print("\nTotal number of active child processes: %s\n" % str(active))
        shutdown() # Run shutdown sequence
        observer.stop()
        observer.join()
        sys.exit(1)
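
Example #6 pairs the observer with a multiprocessing.Pool fed from a shared queue, so the handler only enqueues paths and the workers do the heavy lifting. A minimal sketch of what that handler side might look like (MyHandler and processQueue are the example's names; the body is an assumption):

from watchdog.events import FileSystemEventHandler

class MyHandler(FileSystemEventHandler):
    def on_created(self, event):
        if not event.is_directory:
            # keep the callback cheap: hand the path to the worker processes
            processQueue.put(event.src_path)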
Code Example #7
File: __init__.py Project: ramiro/hovercraft
def generate_and_observe(args, event):
    while event.isSet():
        # Generate the presentation
        monitor_list = generate(args)
        print("Presentation generated.")

        # Make a list of involved directories
        directories = defaultdict(list)
        for file in monitor_list:
            directory, filename = os.path.split(file)
            directories[directory].append(filename)

        observer = Observer()
        handler = HovercraftEventHandler(monitor_list)
        for directory, files in directories.items():
            observer.schedule(handler, directory, recursive=False)

        observer.start()
        while event.wait(1):
            time.sleep(0.05)
            if handler.quit:
                break

        observer.stop()
        observer.join()
Code Example #8
File: watch_file.py Project: rli9/slam
class WatchFile(object):
    def __init__(self, send_msg_func, *args, **kargs):
        self.path = kargs.get('path', '.')
        self.suffix = kargs.get('suffix', '*')  # '*' matches any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
        self.send_msg_func = send_msg_func
        self.filename = self.zip_filename = ''

    def run(self):
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def get_data(self, filename):
        data = self._unpack(filename)
        data = str(data)
        print(data, type(data))
        self.send_msg_func(data)

    def _unpack(self, filename):
        # first rename the file with a .zip suffix
        # may not work on Linux
        if system == 'Windows':
            filename = filename[2:] if filename.startswith('.\\') else filename
            filename = filename.lstrip()
            new_name = filename.split('.')[0] + '.zip'
            new_name = new_name[1:] if new_name.startswith('\\') else new_name
        elif system == 'Linux':
            new_name = filename

        print('Old name:', filename, ' New name:', new_name)

        self.filename = filename
        self.zip_filename = new_name
        # wait for the operating system to finish creating the file
        time.sleep(3)
        os.rename(filename, new_name)
        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                with zip_file.open(name, 'r') as f:
                    json_data = f.read().decode('utf-8')
        # change filename back to .sb2
        if new_name.endswith('.zip'):
            os.rename(new_name, filename)

        return self.get_cmd(json_data)

    def get_cmd(self, json_data):
        jsonfy_data = json.loads(json_data)
        child = jsonfy_data['children'][0]
        scripts = child['scripts']
        return scripts
Code Example #9
File: server.py Project: befks/odoo
class FSWatcher(object):
    def __init__(self):
        self.observer = Observer()
        for path in odoo.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
            if not event.is_directory:
                path = getattr(event, 'dest_path', event.src_path)
                if path.endswith('.py'):
                    try:
                        source = open(path, 'rb').read() + b'\n'
                        compile(source, path, 'exec')
                    except SyntaxError:
                        _logger.error('autoreload: python code change detected, SyntaxError in %s', path)
                    else:
                        _logger.info('autoreload: python code updated, autoreload activated')
                        restart()

    def start(self):
        self.observer.start()
        _logger.info('AutoReload watcher running')

    def stop(self):
        self.observer.stop()
        self.observer.join()
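
Example #9 schedules self rather than a FileSystemEventHandler subclass: the observer ultimately calls the handler's dispatch(event), so any object providing that method can be scheduled, and FSWatcher overrides it to do its own filtering. A stripped-down sketch of the same trick:

from watchdog.observers import Observer

class SelfDispatching(object):
    def __init__(self, path):
        self.observer = Observer()
        # any object with a dispatch(event) method can be scheduled
        self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        print(event.event_type, event.src_path)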
Code Example #10
File: watcher.py Project: jamesabel/propmtime
class PropMTimeWatcher:
    def __init__(self, app_data_folder):
        self._app_data_folder = app_data_folder
        self._observer = Observer()
        self.schedule()

    def schedule(self):
        pref = PropMTimePreferences(self._app_data_folder)
        self._observer.unschedule_all()
        for path, watcher in pref.get_all_paths().items():
            if watcher:
                if os.path.exists(path):
                    event_handler = ModHandler(path, self._app_data_folder)
                    log.info('scheduling watcher : %s' % path)
                    self._observer.schedule(event_handler, path=path, recursive=True)
                else:
                    log.error('Error: "%s" does not exist.\n\nPlease edit the path.\n\nTo do this, click on the %s icon and select "Paths".' %
                              (path, __application_name__))
        self._observer.start()

    def request_exit(self):
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join(TIMEOUT)
        if self._observer.is_alive():
            log.error('observer still alive')
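
One caveat with Example #10: schedule() ends with self._observer.start(), and a threading.Thread can only be started once, so calling schedule() a second time would raise RuntimeError. A guard along these lines (a sketch; the helper name is illustrative) sidesteps that:

def ensure_started(observer):
    # threading.Thread.start() raises RuntimeError on a second call,
    # so only start the observer if it isn't already running
    if not observer.is_alive():
        observer.start()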
Code Example #11
def main():
    # Fill all changes that occurred when track-changes.py wasn't running.
    if os.path.isdir("out"):
        shutil.rmtree("out", True)

    if not os.path.isdir("out"):
        os.mkdir("out")
 
    startup_changes.sync_offline_changes("posts", "out")

    print "Watching posts directory for changes... CTRL+C to quit."
    watch_directory = "posts"

    event_handler = MyHandler()

    # Run the watchdog.
    observer = Observer()
    observer.schedule(event_handler, watch_directory, True)
    observer.start()

    """
    Keep the script running or else python closes without stopping the observer
    thread and this causes an error.
    """
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
Code Example #12
File: watcher.py Project: shlomiatar/dev-dir-sync
    def __init__(self, config):
        """
        Initialize the watcher, use the config passed from main
        """
        self.config = config

        # List of pending files
        self.pending_files = set()

        self.sync_timer = None

        # Setup our watchdog observer
        observer = Observer()
        observer.schedule(ChangeHandler(self.on_file_changed), path=config.directory, recursive=True)
        observer.start()

        logging.info("Starting change tracker, cmd: {}, dir: {}, delay: {}".format(config.sync_cmd,
                                                                                   config.directory,
                                                                                   config.delay))
        try:
            while True:
                time.sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
Code Example #13
def stream_video(video_path):

    global VIDEO_BITRATE
    global AUDIO_BITRATE

    create_working_directory()

    head, tail = os.path.split(video_path)
    name = tail.split('.')[0]

    nonce = '-' + ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))

    if '.avi' in video_path:
        args = "-i %s  -vcodec h264 -b %s -acodec libfaac -ab %s -f hls ./.kickflip/%s.m3u8"
        args = args % (video_path, VIDEO_BITRATE, AUDIO_BITRATE, name+nonce)
    else:
        args = "-i %s -f hls -codec copy ./.kickflip/%s.m3u8"
        args = args % (video_path, name+nonce)

    observer = Observer()
    observer.schedule(SegmentHandler(), path='./.kickflip')

    observer.start()
    time.sleep(3)  # crude hack: give the observer time to start before ffmpeg begins writing segments
    process = envoy.run('ffmpeg ' + args)
    observer.stop()
    observer.join()

    upload_file(video_path)
    return ''
Code Example #14
File: __init__.py Project: sbktechnology/frappe
def watch(path, handler=None, debug=True):
	import time
	from watchdog.observers import Observer
	from watchdog.events import FileSystemEventHandler

	class Handler(FileSystemEventHandler):
		def on_any_event(self, event):
			if debug:
				print("File {0}: {1}".format(event.event_type, event.src_path))

			if not handler:
				print("No handler specified")
				return

			handler(event.src_path, event.event_type)

	event_handler = Handler()
	observer = Observer()
	observer.schedule(event_handler, path, recursive=True)
	observer.start()
	try:
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		observer.stop()
	observer.join()
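
Example #14 adapts a plain handler(path, event_type) callback to watchdog's class-based interface, so callers never subclass anything. Usage is just (a sketch):

def on_change(path, event_type):
    print("{0}: {1}".format(event_type, path))

watch(".", handler=on_change)  # blocks until Ctrl-C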
Code Example #15
class ActivityCheck(Thread):

    def __init__(self, period, path, mailhost, fromaddr, toaddrs):
        Thread.__init__(self)
        self.period = int(period)
        self.path = path
        self.activity = False
        self.last_time = datetime.datetime.now()
        self.message_sent = False
        self.subject = 'WARNING : ' + HOSTNAME + ' : ' + 'telecaster monitor activity'
        self.logger = EmailLogger(mailhost, fromaddr, toaddrs, self.subject)
        self.event_handler = ActivityEventHandler(ignore_patterns=IGNORE_PATTERNS)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, path, recursive=True)
        self.observer.start()

    def run(self):        
        while True:
            if not self.event_handler.activity:
                now = datetime.datetime.now()
                delta = now - self.last_time
                if delta.total_seconds() > LOG_MAX_PERIOD or not self.message_sent:
                    self.logger.logger.error('The monitor is NOT recording anymore in ' + self.path + ' ! ')
                    self.last_time = now
                    self.message_sent = True
            else:
                self.event_handler.activity = False
            time.sleep(self.period)

    def stop(self):
        self.observer.stop()
Code Example #16
File: pandocwatch.py Project: Nukesor/uni
def main():

    pandoc_path = which("pandoc")
    if not pandoc_path :
        print "pandoc executable must be in the path to be used by pandoc-watch!"
        exit()

    config = Configuration.Instance()

    parseOptions()

    config.setDirContentAndTime(getDirectoryWatchedElements())

    print "Starting pandoc watcher ..."

    while True:
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, os.getcwd(), recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt as err:
            print(str(err))
            observer.stop()

        print("Stopping pandoc watcher ...")

        exit()
Code Example #17
File: generator.py Project: shaunstanislaus/tags
def _watch(root=u'.', dest=u'_site', pattern=u'**/*.html', exclude=u'_*/**'):

    try:
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
    except ImportError:
        msg = "The build --watch feature requires watchdog. \n"\
            + "Please install it with 'easy_install watchdog'."
        print(msg)
        return None

    class handler(FileSystemEventHandler):
        def on_any_event(self, event):
            exclude_path = os.path.join(os.getcwd(), exclude)
            if not utils.matches_pattern(exclude_path, event.src_path):
                build_files(root=root,
                            dest=dest,
                            pattern=pattern,
                            exclude=exclude)

    observer = Observer()
    observer.schedule(handler(), root, recursive=True)
    observer.start()

    print("Watching '{0}' ...".format(root))

    return observer
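
Unlike most examples on this page, _watch returns the started observer instead of blocking, leaving the keep-alive loop and shutdown to the caller, roughly like this (a usage sketch):

import time

observer = _watch(root=u'.', dest=u'_site')
if observer is not None:
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()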
Code Example #18
File: watcher.py Project: sectioneight/zkfarmer
    def __init__(self, zkconn, root_node_path, conf):
        super(ZkFarmJoiner, self).__init__()
        self.update_remote_timer = None
        self.update_local_timer = None

        self.zkconn = zkconn
        self.conf = conf
        self.node_path = "%s/%s" % (root_node_path, self.myip())

        # force the hostname info key
        info = conf.read()
        info["hostname"] = gethostname()
        conf.write(info)

        zkconn.create(self.node_path, serialize(conf.read()), zc.zk.OPEN_ACL_UNSAFE, EPHEMERAL)

        observer = Observer()
        observer.schedule(self, path=conf.file_path, recursive=True)
        observer.start()

        zkconn.get(self.node_path, self.node_watcher)

        while True:
            with self.cv:
                self.cv.wait()
Code Example #19
File: watcher.py Project: carsongee/pytest-watch
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, extensions=[]):
    """
    Starts a server to render the specified file or directory
    containing a README.
    """
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog
    observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Code Example #20
File: watch.py Project: AaronRanAn/slidedeck
def watch_project(markdown_fn, output_fn, template_fn, render_first=True):
    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            if event.src_path == os.path.abspath(output_fn):
                return
            print('Rendering slides...')
            process_slides(markdown_fn, output_fn, template_fn)

    if render_first:
        process_slides(markdown_fn, output_fn, template_fn)
        
    observer = Observer()
    event_handler = Handler()

    dirname = os.path.dirname(os.path.abspath(markdown_fn))
    
    observer.schedule(event_handler, path=dirname, recursive=True)
    print("Watching for events on {:s}...".format(dirname))
    observer.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Code Example #21
File: assets.py Project: andela-ijubril/edx-platform
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    observer = Observer()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer)
    XModuleSassWatcher().register(observer)
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when running as a separate process, the main thread needs to loop
        # in order to allow for shutdown by Ctrl-C
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
Code Example #22
File: scan_watch.py Project: thomaserlang/seplis
def main():
    if not config['play']['scan']:
        raise Exception('''
            Nothing to scan. Add a path in the config file.

            Example:

                play:
                    scan:
                        -
                            type: shows
                            path: /a/path/to/the/shows
            ''')
    obs = Observer()
    for s in config['play']['scan']:
        event_handler = Handler(
            scan_path=s['path'],
            type_=s['type'],
        )
        obs.schedule(
            event_handler,
            s['path'],
            recursive=True,
        )
    obs.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Code Example #23
File: Watcher.py Project: kidd001/Wrangler.py
    def __init__(self, input_dir, templates_dir):

        paths = [input_dir, templates_dir]
        threads = []

        try:
            observer = Observer()
            event_handler = WatchEventHandler()

            for i in paths:
                targetPath = str(i)
                observer.schedule(event_handler, targetPath, recursive=True)
                threads.append(observer)

            observer.start()

            signal_watch_init = signal('watch_init')
            signal_watch_init.send(self)

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                wrangler._reporter.log("Stopping with grace and poise", "green")
                observer.stop()
            
            observer.join()
        except Exception:
            return None
Code Example #24
File: modsync.py Project: walterdolce/modsync
class Modsync:
    def __init__(self):
        # instance attributes: a class-level mutable list here would be
        # shared across every instance
        self._target = []
        self._source = ''
        self._observer = None

    def setSource(self, source):
        self._source = source

    def setTarget(self, target_dir):
        self._target.append(target_dir)

    def getObserver(self):
        return self._observer

    def run(self):

        if not self._source:
            return 0

        self._observer = Observer()
        event_handler = ModsyncEventHandler(self._observer, self._source, self._target)
        self._observer.schedule(event_handler, self._source, recursive=True)
        self._observer.start()
        try:
            time.sleep(2)
            pass
        except KeyboardInterrupt:
            self._observer.stop()
        self._observer.join()
        return 0
Code Example #25
File: dcapsync.py Project: kofemann/dcapsync
class Sceduler:

  def __init__(self, config):

    fs = config.get('scheduler', 'fs', 0)
    dest = config.get('store', 'path', 0)
    self.ioqueue = Queue()
    self.iothread = Thread(target=self.ioprocess)
    self.iothread.daemon = True
    self.observer = Observer()
    self.event_handler = IoTask(self.ioqueue, fs, dest)
    self.observer.schedule(self.event_handler, fs, recursive=True)

  def ioprocess(self):
    while True:
      t = self.ioqueue.get()
      try:
        t.process()
      finally:
        self.ioqueue.task_done()

  def start(self):
    self.observer.start()
    self.iothread.start()

  def stop(self):
    self.observer.stop()
    # Thread objects have no stop(); the daemon io thread exits with the process

  def join(self):
    self.observer.join()
    self.iothread.join()
Code Example #26
File: simplefilemon.py Project: ixzkn/macropad2
	def start(filename, format, callback=None, verbose=False):
		observer = Observer()
		fm = FileMonitor(observer,filename,format,callback,verbose)
		fm._handle()
		observer.schedule(fm, path=os.path.dirname(filename), recursive=False)
		observer.start()
		return fm
Code Example #27
class RoleBasedAuthorizationProvider(AbstractAuthorizationProvider,
                                     FileSystemEventHandler):

    def __init__(self, role_loader, roles_config_file_path):
        self.lgr = logging.getLogger(FLASK_SECUREST_LOGGER_NAME)
        self.role_loader = role_loader
        self.permissions_by_roles = None
        self.roles_config_file_path = os.path.abspath(roles_config_file_path)
        self.observer = Observer()
        self.observer.schedule(self,
                               path=os.path.dirname(
                                   self.roles_config_file_path),
                               recursive=False)
        self.load_roles_config()
        self.observer.start()

    def load_roles_config(self):
        try:
            with open(self.roles_config_file_path, 'r') as config_file:
                self.permissions_by_roles = yaml.safe_load(config_file.read())
                self.lgr.info('Loading of roles configuration ended '
                              'successfully')
        except (yaml.parser.ParserError, IOError) as e:
            err = 'Failed parsing {role_config_file} file. Error: {error}.' \
                .format(role_config_file=self.roles_config_file_path, error=e)
            self.lgr.warning(err)
            raise ValueError(err)

    def on_modified(self, event):
        if os.path.abspath(event.src_path) == self.roles_config_file_path:
            self.load_roles_config()

    def authorize(self):
        target_endpoint = rest_security.get_endpoint()
        target_method = rest_security.get_http_method()
        roles = self.role_loader.get_roles()
        return self._is_allowed(target_endpoint, target_method, roles) and \
            not self._is_denied(target_endpoint, target_method, roles)

    def _is_allowed(self, target_endpoint, target_method, user_roles):
        return self._evaluate_permission_by_type(target_endpoint,
                                                 target_method, user_roles,
                                                 'allow')

    def _is_denied(self, target_endpoint, target_method, user_roles):
        return self._evaluate_permission_by_type(target_endpoint,
                                                 target_method, user_roles,
                                                 'deny')

    def _evaluate_permission_by_type(self, target_endpoint, target_method,
                                     user_roles, permission_type):
        for role in user_roles:
            role_permissions = self.permissions_by_roles.get(role,
                                                             {'allow': {},
                                                              'deny': {}})
            relevant_permissions = role_permissions.get(permission_type, {})
            if _is_permission_matching(target_endpoint, target_method,
                                       relevant_permissions):
                return True
        return False
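
The reload-on-modify pattern of Example #27 can also be written with watchdog's PatternMatchingEventHandler, which filters events by glob pattern before any callback runs. A sketch, assuming config_path is absolute and reload_config is the caller's loader:

import os
from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer

def watch_config(config_path, reload_config):
    # only events whose path matches the config file reach on_modified
    handler = PatternMatchingEventHandler(patterns=[config_path],
                                          ignore_directories=True)
    handler.on_modified = lambda event: reload_config()
    observer = Observer()
    observer.schedule(handler, path=os.path.dirname(config_path))
    observer.start()
    return observer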
Code Example #28
File: watcher.py Project: willzfarmer/twilight
def start_watchdog():
    event_handler = RankingHandler()
    observer      = Observer()
    log_handler   = LoggingEventHandler()
    log_observer  = Observer()
    try:
        observer.schedule(event_handler, path='./watch')
        observer.start()
        log_observer.schedule(log_handler, path='./watch')
        log_observer.start()
        logging.info("Watching Directory")
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logging.info("Kill message sent. Aborting")
        observer.stop()
        log_observer.stop()
    except Exception:
        logging.info("Unexpected error: %s" % sys.exc_info()[0])

        observer.stop()
        log_observer.stop()

        error_message(sys.exc_info()[0])

    observer.join()
    log_observer.join()
Code Example #29
File: sendmail.py Project: kiterub/send2kindle
def send_watchdog(self):
    """
    Method of ``Mail``.
    Send mail when new file created.
    Alter this method if other condition of sending mail needed.
    """

    #r_pipe,w_pipe = Pipe()
    queue = Queue.Queue()
    event = Event()
    #event_handler = GetNameEventHandler(w_pipe,event)
    #send_mail_thread = SendMailThread(r_pipe,event,self)
    event_handler = GetNameEventHandler(queue,event)
    send_mail_thread = SendMailThread(queue,event,self)
    send_mail_thread.start()

    observer = Observer()
    path = self.config['BOOK_PATH']
    observer.schedule(event_handler, path, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        event.set()
        observer.stop()
        # NOTE: this kills the threads by force; needs a cleaner shutdown
        sys.exit()
Code Example #30
File: __main__.py Project: ptrifonov/easywall
class Main(object):
    """
    TODO: Doku
    """
    def __init__(self) -> None:
        self.cfg = Config(CONFIG_PATH)

        loglevel = self.cfg.get_value("LOG", "level")
        to_stdout = self.cfg.get_value("LOG", "to_stdout")
        to_files = self.cfg.get_value("LOG", "to_files")
        logpath = self.cfg.get_value("LOG", "filepath")
        logfile = self.cfg.get_value("LOG", "filename")
        self.log = Log(str(loglevel), bool(to_stdout), bool(to_files),
                       str(logpath), str(logfile))

        info("starting easywall")

        self.rules_handler = RulesHandler()
        self.rules_handler.ensure_files_exist()
        self.easywall = Easywall(self.cfg)
        self.event_handler = ModifiedHandler(self.apply)
        self.observer = Observer()
        self.stop_flag = False

        info("easywall has been started")

    def apply(self, filename: str) -> None:
        """
        TODO: Doku
        """
        info("starting apply process from easywall")
        delete_file_if_exists(filename)
        self.easywall.apply()

    def start_observer(self) -> None:
        """
        this function is called to keep the main process running
        if someone is pressing ctrl + C the software will initiate the stop process
        """
        self.observer.schedule(self.event_handler, ".")
        self.observer.start()

        try:
            while not self.stop_flag:
                sleep(2)
        except KeyboardInterrupt:
            info("KeyboardInterrupt received, starting shutdown")
        finally:
            self.shutdown()

    def shutdown(self) -> None:
        """
        the function stops all threads and shuts the software down gracefully
        """
        info("starting shutdown")

        self.observer.stop()
        delete_file_if_exists(".acceptance")
        self.observer.join()

        info("shutdown completed")
        self.log.close_logging()
Code Example #31
File: watch.py Project: GravityOpenSource/magnetic
    def handle(self, args):
        event_handler = EventHandler()
        observer = Observer()
        observer.schedule(event_handler, watch_path, recursive=True)
        observer.daemon = False  # setDaemon() is deprecated
        observer.start()
Code Example #32
File: main.py Project: davidhuser/dhis2-docs-1
def main():
    # setup command line arguments
    parser = argparse.ArgumentParser(description='Preprocessor for Markdown'
                                     ' files.')

    parser.add_argument('FILENAME',
                        help='Input file name (or directory if '
                        'watching)')

    # Argument for watching directory and subdirectory to process .mdpp files
    parser.add_argument('-w',
                        '--watch',
                        action='store_true',
                        help='Watch '
                        'current directory and subdirectories for changing '
                        '.mdpp files and process in local directory. File '
                        'output name is same as file input name.')

    parser.add_argument('-o',
                        '--output',
                        help='Output file name. If no '
                        'output file is specified, writes output to stdout.')
    parser.add_argument('-e',
                        '--exclude',
                        help='List of modules to '
                        'exclude, separated by commas. Available modules: ' +
                        ', '.join(MarkdownPP.modules.keys()))
    args = parser.parse_args()

    # If watch flag is on, watch dirs instead of processing individual file
    if args.watch:
        # Get full directory path to print
        p = os.path.abspath(args.FILENAME)
        print("Watching: " + p + " (and subdirectories)")

        # Custom watchdog event handler specific for .mdpp files
        event_handler = MarkdownPPFileEventHandler()
        observer = Observer()
        # pass event handler, directory, and flag to recurse subdirectories
        observer.schedule(event_handler, args.FILENAME, recursive=True)
        observer.start()

        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()

    else:
        mdpp = open(args.FILENAME, 'r')
        if args.output:
            md = open(args.output, 'w')
        else:
            md = sys.stdout

        modules = list(MarkdownPP.modules)

        if args.exclude:
            for module in args.exclude.split(','):
                if module in modules:
                    modules.remove(module)
                else:
                    print('Cannot exclude ', module, ' - no such module')

        MarkdownPP.MarkdownPP(input=mdpp, output=md, modules=modules)

        mdpp.close()
        md.close()
Code Example #33
    def run(self):
        global timeout, w, tuples, regexes, json_pending, last_push, config
        fp = {}
        if osname == "linux":
            w = watcher.AutoWatcher()
            for path in config.get('Analyzer', 'paths').split(","):
                try:
                    print("Recursively monitoring " + path.strip() + "...")
                    w.add_all(path.strip(), inotify.IN_ALL_EVENTS)
                except OSError as err:
                    pass

            if not w.num_watches():
                print("No paths to analyze, nothing to do!")
                sys.exit(1)

            poll = select.poll()
            poll.register(w, select.POLLIN)

            timeout = None

            threshold = watcher.Threshold(w, 256)

            inodes = {}
            inodes_path = {}
            xes = connect_es(config)
            while True:
                events = poll.poll(timeout)
                nread = 0
                if threshold() or not events:
                    #print('reading,', threshold.readable(), 'bytes available')
                    for evt in w.read(0):
                        nread += 1

                        # The last thing to do to improve efficiency here would be
                        # to coalesce similar events before passing them up to a
                        # higher level.

                        # For example, it's overwhelmingly common to have a stream
                        # of inotify events contain a creation, followed by
                        # multiple modifications of the created file.

                        # Recognising this pattern (and others) and coalescing
                        # these events into a single creation event would reduce
                        # the number of trips into our app's presumably more
                        # computationally expensive upper layers.
                        masks = inotify.decode_mask(evt.mask)
                        #print(masks)
                        path = evt.fullpath
                        #print(repr(evt.fullpath), ' | '.join(masks))
                        try:
                            if not u'IN_ISDIR' in masks:

                                if (u'IN_MOVED_FROM'
                                        in masks) and (path in filehandles):
                                    print(
                                        "File moved, closing original handle")
                                    try:
                                        filehandles[path].close()
                                    except Exception as err:
                                        print(err)
                                    del filehandles[path]
                                    inode = inodes_path[path]
                                    del inodes[inode]

                                elif (not u'IN_DELETE' in masks) and (
                                        not path in filehandles) and (
                                            path.find(".gz") == -1):
                                    try:
                                        print("Opening " + path)
                                        idata = os.stat(path)
                                        inode = idata.st_ino
                                        if not inode in inodes:
                                            filehandles[path] = open(path, "r")
                                            print("Started watching " + path)
                                            filehandles[path].seek(0, 2)
                                            inodes[inode] = path
                                            inodes_path[path] = inode

                                    except Exception as err:
                                        print(err)
                                        try:
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]

                                # First time we've discovered this file?
                                if u'IN_CLOSE_NOWRITE' in masks and not path in filehandles:
                                    pass

                                # New file created in a folder we're watching??
                                elif u'IN_CREATE' in masks:
                                    pass

                                # File truncated?
                                elif u'IN_CLOSE_WRITE' in masks and path in filehandles:
                                    #    print(path + " truncated!")
                                    filehandles[path].seek(0, 2)

                                # File contents modified?
                                elif u'IN_MODIFY' in masks and path in filehandles:
                                    #      print(path + " was modified")
                                    rd = 0
                                    data = ""
                                    #print("Change in " + path)
                                    try:
                                        while True:
                                            line = filehandles[path].readline()
                                            if not line:
                                                #filehandles[path].seek(0,2)
                                                break
                                            else:
                                                rd += len(line)
                                                data += line
                                        #print("Read %u bytes from %s" % (rd, path))
                                        parseLine(path, data)
                                    except Exception as err:
                                        try:
                                            print("Could not utilize " + path +
                                                  ", closing.." + err)
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]

                                # File deleted? (close handle)
                                elif u'IN_DELETE' in masks:
                                    if path in filehandles:
                                        print("Closed " + path)
                                        try:
                                            filehandles[path].close()
                                        except Exception as err:
                                            print(err)
                                        del filehandles[path]
                                        inode = inodes_path[path]
                                        del inodes[inode]
                                        print("Stopped watching " + path)

                                else:
                                    pass

                        except Exception as err:
                            print(err)

                for x in json_pending:
                    if (time.time() >
                        (last_push[x] + 15)) or len(json_pending[x]) >= 500:
                        if not x in fp:
                            fp[x] = True
                            print("First push for " + x + "!")
                        t = NodeThread()
                        t.assign(json_pending[x], x, xes)
                        t.start()
                        json_pending[x] = []
                        last_push[x] = time.time()

                if nread:
                    #print('plugging back in')
                    timeout = None
                    poll.register(w, select.POLLIN)
                else:
                    #print('unplugging,', threshold.readable(), 'bytes available')
                    timeout = 1000
                    poll.unregister(w)

        if osname == "freebsd":
            xes = connect_es(config)
            observer = Observer()
            for path in paths:
                observer.schedule(BSDHandler(), path, recursive=True)
                syslog.syslog(syslog.LOG_INFO,
                              "Recursively monitoring " + path.strip() + "...")
            observer.start()
            try:
                while True:
                    for x in json_pending:
                        if not x in last_push:
                            last_push[x] = time.time()
                        if len(json_pending[x]) > 0 and (
                            (time.time() > (last_push[x] + 15))
                                or len(json_pending[x]) >= 500):
                            if not x in fp:
                                fp[x] = True
                                syslog.syslog(syslog.LOG_INFO,
                                              "First push for " + x + "!")
                            t = NodeThread()
                            t.assign(json_pending[x], x, xes)
                            t.start()
                            json_pending[x] = []
                            last_push[x] = time.time()
                    time.sleep(0.5)

            except KeyboardInterrupt:
                observer.stop()
            observer.join()
Code Example #34
class FalcoTest(Test):
    def setUp(self):
        """
        Load the kernel module if not already loaded.
        """
        build_dir = "/build"
        if 'BUILD_DIR' in os.environ:
            build_dir = os.environ['BUILD_DIR']

        self.falcodir = self.params.get('falcodir', '/', default=build_dir)

        self.psp_conv_path = os.path.join(build_dir, "falcoctl")
        self.psp_conv_url = "https://github.com/falcosecurity/falcoctl/releases/download/v0.0.4/falcoctl-0.0.4-linux-amd64"

        self.stdout_is = self.params.get('stdout_is', '*', default='')
        self.stderr_is = self.params.get('stderr_is', '*', default='')

        self.stdout_contains = self.params.get('stdout_contains',
                                               '*',
                                               default='')

        if not isinstance(self.stdout_contains, list):
            self.stdout_contains = [self.stdout_contains]

        self.stderr_contains = self.params.get('stderr_contains',
                                               '*',
                                               default='')

        if not isinstance(self.stderr_contains, list):
            self.stderr_contains = [self.stderr_contains]

        self.stdout_not_contains = self.params.get('stdout_not_contains',
                                                   '*',
                                                   default='')

        if not isinstance(self.stdout_not_contains, list):
            if self.stdout_not_contains == '':
                self.stdout_not_contains = []
            else:
                self.stdout_not_contains = [self.stdout_not_contains]

        self.stderr_not_contains = self.params.get('stderr_not_contains',
                                                   '*',
                                                   default='')

        if not isinstance(self.stderr_not_contains, list):
            if self.stderr_not_contains == '':
                self.stderr_not_contains = []
            else:
                self.stderr_not_contains = [self.stderr_not_contains]

        self.exit_status = self.params.get('exit_status', '*', default=0)
        self.should_detect = self.params.get('detect', '*', default=False)
        self.check_detection_counts = self.params.get('check_detection_counts',
                                                      '*',
                                                      default=True)
        self.trace_file = self.params.get('trace_file', '*', default='')

        if self.trace_file and not os.path.isabs(self.trace_file):
            self.trace_file = os.path.join(build_dir, "test", self.trace_file)

        self.json_output = self.params.get('json_output', '*', default=False)
        self.json_include_output_property = self.params.get(
            'json_include_output_property', '*', default=True)
        self.json_include_tags_property = self.params.get(
            'json_include_tags_property', '*', default=True)
        self.all_events = self.params.get('all_events', '*', default=False)
        self.priority = self.params.get('priority', '*', default='debug')
        self.addl_cmdline_opts = self.params.get('addl_cmdline_opts',
                                                 '*',
                                                 default='')
        self.rules_file = self.params.get('rules_file',
                                          '*',
                                          default=os.path.join(
                                              self.basedir,
                                              '../rules/falco_rules.yaml'))

        if not isinstance(self.rules_file, list):
            self.rules_file = [self.rules_file]

        self.validate_rules_file = self.params.get('validate_rules_file',
                                                   '*',
                                                   default=False)

        if self.validate_rules_file == False:
            self.validate_rules_file = []
        else:
            if not isinstance(self.validate_rules_file, list):
                self.validate_rules_file = [self.validate_rules_file]

        self.psp_rules_file = os.path.join(build_dir, "psp_rules.yaml")

        self.psp_file = self.params.get('psp_file', '*', default="")

        self.rules_args = ""

        if self.psp_file != "":
            self.rules_args = self.rules_args + "-r " + self.psp_rules_file + " "

        for file in self.validate_rules_file:
            if not os.path.isabs(file):
                file = os.path.join(self.basedir, file)
            self.rules_args = self.rules_args + "-V " + file + " "

        for file in self.rules_file:
            if not os.path.isabs(file):
                file = os.path.join(self.basedir, file)
            self.rules_args = self.rules_args + "-r " + file + " "

        self.conf_file = self.params.get('conf_file',
                                         '*',
                                         default=os.path.join(
                                             self.basedir, '../falco.yaml'))
        self.conf_file = self.conf_file.replace("BUILD_DIR", build_dir)
        if not os.path.isabs(self.conf_file):
            self.conf_file = os.path.join(self.basedir, self.conf_file)

        self.run_duration = self.params.get('run_duration', '*', default='')

        self.disabled_rules = self.params.get('disabled_rules',
                                              '*',
                                              default='')

        if self.disabled_rules == '':
            self.disabled_rules = []

        if not isinstance(self.disabled_rules, list):
            self.disabled_rules = [self.disabled_rules]

        self.disabled_args = ""

        for rule in self.disabled_rules:
            self.disabled_args = self.disabled_args + "-D " + rule + " "

        self.detect_counts = self.params.get('detect_counts',
                                             '*',
                                             default=False)
        if self.detect_counts == False:
            self.detect_counts = {}
        else:
            detect_counts = {}
            for item in self.detect_counts:
                for key, value in list(item.items()):
                    detect_counts[key] = value
            self.detect_counts = detect_counts

        self.rules_warning = self.params.get('rules_warning',
                                             '*',
                                             default=False)
        if self.rules_warning == False:
            self.rules_warning = set()
        else:
            self.rules_warning = set(self.rules_warning)

        # Maps from rule name to set of evttypes
        self.rules_events = self.params.get('rules_events', '*', default=False)
        if self.rules_events == False:
            self.rules_events = {}
        else:
            events = {}
            for item in self.rules_events:
                for item2 in item:
                    events[item2[0]] = set(item2[1])
            self.rules_events = events

        if self.should_detect:
            self.detect_level = self.params.get('detect_level', '*')

            if not isinstance(self.detect_level, list):
                self.detect_level = [self.detect_level]

        self.package = self.params.get('package', '*', default='None')

        self.addl_docker_run_args = self.params.get('addl_docker_run_args',
                                                    '*',
                                                    default='')

        self.copy_local_driver = self.params.get('copy_local_driver',
                                                 '*',
                                                 default=False)

        # Used by possibly_copy_local_driver as well as docker run
        self.module_dir = os.path.expanduser("~/.falco")

        self.outputs = self.params.get('outputs', '*', default='')

        if self.outputs == '':
            self.outputs = {}
        else:
            outputs = []
            for item in self.outputs:
                for key, value in list(item.items()):
                    output = {}
                    output['file'] = key
                    output['line'] = value
                    outputs.append(output)
                    filedir = os.path.dirname(output['file'])
                    # Create the parent directory for the trace file if it doesn't exist.
                    if not os.path.isdir(filedir):
                        os.makedirs(filedir)
            self.outputs = outputs

        self.output_strictly_contains = self.params.get(
            'output_strictly_contains', '*', default='')

        if self.output_strictly_contains == '':
            self.output_strictly_contains = {}
        else:
            output_strictly_contains = []
            for item in self.output_strictly_contains:
                for key, value in list(item.items()):
                    output = {}
                    output['actual'] = key
                    output['expected'] = value
                    output_strictly_contains.append(output)
                    if not output['actual'] == 'stdout':
                        # Clean up file from previous tests, if any
                        if os.path.exists(output['actual']):
                            os.remove(output['actual'])
                        # Create the parent directory for the file if it doesn't exist.
                        filedir = os.path.dirname(output['actual'])
                        if not os.path.isdir(filedir):
                            os.makedirs(filedir)
            self.output_strictly_contains = output_strictly_contains

        self.grpcurl_res = None
        self.grpc_observer = None
        self.grpc_address = self.params.get('address',
                                            'grpc/*',
                                            default='/var/run/falco.sock')
        if self.grpc_address.startswith("unix://"):
            self.is_grpc_using_unix_socket = True
            self.grpc_address = self.grpc_address[len("unix://"):]
        else:
            self.is_grpc_using_unix_socket = False
        self.grpc_proto = self.params.get('proto', 'grpc/*', default='')
        self.grpc_service = self.params.get('service', 'grpc/*', default='')
        self.grpc_method = self.params.get('method', 'grpc/*', default='')
        self.grpc_results = self.params.get('results', 'grpc/*', default='')
        if self.grpc_results == '':
            self.grpc_results = []
        else:
            if type(self.grpc_results) == str:
                self.grpc_results = [self.grpc_results]

        self.disable_tags = self.params.get('disable_tags', '*', default='')

        if self.disable_tags == '':
            self.disable_tags = []

        self.run_tags = self.params.get('run_tags', '*', default='')

        if self.run_tags == '':
            self.run_tags = []

        self.time_iso_8601 = self.params.get('time_iso_8601',
                                             '*',
                                             default=False)

    def tearDown(self):
        if self.package != 'None':
            self.uninstall_package()

    def check_rules_warnings(self, res):

        found_warning = set()

        for match in re.finditer(r'Rule ([^:]+): warning \(([^)]+)\):',
                                 res.stderr.decode("utf-8")):
            rule = match.group(1)
            warning = match.group(2)
            found_warning.add(rule)

        self.log.debug("Expected warning rules: {}".format(self.rules_warning))
        self.log.debug("Actual warning rules: {}".format(found_warning))

        if found_warning != self.rules_warning:
            self.fail(
                "Expected rules with warnings {} does not match actual rules with warnings {}"
                .format(self.rules_warning, found_warning))

    def check_rules_events(self, res):

        found_events = {}

        for match in re.finditer(r'Event types for rule ([^:]+): (\S+)',
                                 res.stderr.decode("utf-8")):
            rule = match.group(1)
            events = set(match.group(2).split(","))
            found_events[rule] = events

        self.log.debug("Expected events for rules: {}".format(
            self.rules_events))
        self.log.debug("Actual events for rules: {}".format(found_events))

        for rule in list(found_events.keys()):
            if found_events.get(rule) != self.rules_events.get(rule):
                self.fail(
                    "rule {}: expected events {} differs from actual events {}"
                    .format(rule, self.rules_events.get(rule),
                            found_events.get(rule)))

    def check_detections(self, res):
        # Get the number of events detected.
        match = re.search(r'Events detected: (\d+)', res.stdout.decode("utf-8"))
        if match is None:
            self.fail(
                "Could not find a line 'Events detected: <count>' in falco output"
            )

        events_detected = int(match.group(1))

        if not self.should_detect and events_detected > 0:
            self.fail(
                "Detected {} events when should have detected none".format(
                    events_detected))

        if self.should_detect:
            if events_detected == 0:
                self.fail(
                    "Detected {} events when should have detected > 0".format(
                        events_detected))

            for level in self.detect_level:
                level_line = r'(?i){}: (\d+)'.format(level)
                match = re.search(level_line, res.stdout.decode("utf-8"))

                if match is None:
                    self.fail(
                        "Could not find a line '{}: <count>' in falco output".
                        format(level))

                events_detected = int(match.group(1))

                if not events_detected > 0:
                    self.fail(
                        "Detected {} events at level {} when should have detected > 0"
                        .format(events_detected, level))

    def check_detections_by_rule(self, res):
        # Get the number of events detected for each rule. Must match the expected counts.
        match = re.search(r'Triggered rules by rule name:(.*)',
                          res.stdout.decode("utf-8"), re.DOTALL)
        if match is None:
            self.fail(
                "Could not find a block 'Triggered rules by rule name: ...' in falco output"
            )

        triggered_rules = match.group(1)

        for rule, count in list(self.detect_counts.items()):
            expected = r'\s{}: (\d+)'.format(
                re.sub(r'([$\.*+?()[\]{}|^])', r'\\\1', rule))
            match = re.search(expected, triggered_rules)

            if match is None:
                actual_count = 0
            else:
                actual_count = int(match.group(1))

            if actual_count != count:
                self.fail(
                    "Different counts for rule {}: expected={}, actual={}".
                    format(rule, count, actual_count))
            else:
                self.log.debug("Found expected count for rule {}: {}".format(
                    rule, count))

    def check_outputs(self):
        for output in self.outputs:
            # Open the provided file and match each of its lines against
            # the expected regex.
            found = False
            with open(output['file'], 'r') as outfile:
                for line in outfile:
                    if re.search(output['line'], line) is not None:
                        found = True
                        break

            if not found:
                self.fail("Could not find a line '{}' in file '{}'".format(
                    output['line'], output['file']))

        return True

    def check_json_output(self, res):
        if self.json_output:
            # Just verify that any lines starting with '{' are valid json objects.
            # Doesn't do any deep inspection of the contents.
            for line in res.stdout.decode("utf-8").splitlines():
                if line.startswith('{'):
                    obj = json.loads(line)
                    attrs = ['time', 'rule', 'priority']
                    if self.json_include_output_property:
                        attrs.append('output')
                    if self.json_include_tags_property:
                        attrs.append('tags')
                    for attr in attrs:
                        if attr not in obj:
                            self.fail(
                                "Falco JSON object {} does not contain property \"{}\""
                                .format(line, attr))

    def check_output_strictly_contains(self, res):
        for output in self.output_strictly_contains:
            # Read the expected output (from a file) and the actual output
            # (either from a file or stdout), then check that the actual
            # output strictly contains the expected one.
            with open(output['expected']) as expected_file:
                expected = expected_file.read()

            if output['actual'] == 'stdout':
                actual = res.stdout.decode("utf-8")
            else:
                with open(output['actual']) as actual_file:
                    actual = actual_file.read()

            actual_cursor = actual
            expected_lines = expected.splitlines()
            for line in expected_lines:
                pos = actual_cursor.find(line)
                if pos < 0:
                    self.fail(
                        "Output '{}' does not strictly contain the expected content '{}'"
                        .format(output['actual'], output['expected']))
                    return False
                actual_cursor = actual_cursor[pos + len(line):]

        return True

    def install_package(self):

        if self.package.startswith("docker:"):

            image = self.package.split(":", 1)[1]
            # Remove an existing falco-test container first. Note we don't check the output--docker rm
            # doesn't have an -i equivalent.
            res = process.run("docker rm falco-test", ignore_status=True)

            self.falco_binary_path = "docker run --rm --name falco-test --privileged " \
                                     "-v /var/run/docker.sock:/host/var/run/docker.sock " \
                                     "-v /dev:/host/dev -v /proc:/host/proc:ro -v /boot:/host/boot:ro " \
                                     "-v /lib/modules:/host/lib/modules:ro -v {}:/root/.falco:ro " \
                                     "-v /usr:/host/usr:ro {} {} falco".format(
                                         self.module_dir, self.addl_docker_run_args, image)

        elif self.package.endswith(".deb"):
            self.falco_binary_path = '/usr/bin/falco'

            package_glob = "{}/{}".format(self.falcodir, self.package)

            matches = glob.glob(package_glob)

            if len(matches) != 1:
                self.fail(
                    "Package path {} did not match exactly 1 file. Instead it matched: {}".format(
                        package_glob, ",".join(matches)))

            package_path = matches[0]

            cmdline = "dpkg -i {}".format(package_path)
            self.log.debug(
                "Installing debian package via \"{}\"".format(cmdline))
            res = process.run(cmdline, timeout=120, sudo=True)

        elif self.package.endswith(".rpm"):
            self.falco_binary_path = '/usr/bin/falco'

            package_glob = "{}/{}".format(self.falcodir, self.package)

            matches = glob.glob(package_glob)

            if len(matches) != 1:
                self.fail(
                    "Package path {} did not match exactly 1 file. Instead it matched: {}".format(
                        package_glob, ",".join(matches)))

            package_path = matches[0]

            cmdline = "rpm -i --nodeps --noscripts {}".format(package_path)
            self.log.debug(
                "Installing centos package via \"{}\"".format(cmdline))
            res = process.run(cmdline, timeout=120, sudo=True)

    def uninstall_package(self):

        if self.package.startswith("docker:"):
            self.log.debug("Nothing to do, docker run with --rm")

        elif self.package.endswith(".rpm"):
            cmdline = "rpm -e --noscripts --nodeps falco"
            self.log.debug(
                "Uninstalling centos package via \"{}\"".format(cmdline))
            res = process.run(cmdline, timeout=120, sudo=True)

        elif self.package.endswith(".deb"):
            cmdline = "dpkg --purge falco"
            self.log.debug(
                "Uninstalling debian package via \"{}\"".format(cmdline))
            res = process.run(cmdline, timeout=120, sudo=True)

    def possibly_copy_driver(self):
        # Remove the contents of ~/.falco regardless of copy_local_driver.
        self.log.debug("Checking for module dir {}".format(self.module_dir))
        if os.path.isdir(self.module_dir):
            self.log.info("Removing files below directory {}".format(
                self.module_dir))
            for rmfile in glob.glob(self.module_dir + "/*"):
                self.log.debug("Removing file {}".format(rmfile))
                os.remove(rmfile)

        if self.copy_local_driver:
            verlines = [
                line.strip() for line in subprocess.check_output(
                    [self.falco_binary_path, "--version"]).splitlines()
            ]
            verstr = verlines[0].decode("utf-8")
            self.log.info("verstr {}".format(verstr))
            falco_version = verstr.split(" ")[2]
            self.log.info("falco_version {}".format(falco_version))
            arch = subprocess.check_output(["uname", "-m"]).decode("utf-8").rstrip()
            self.log.info("arch {}".format(arch))
            kernel_release = subprocess.check_output(
                ["uname", "-r"]).decode("utf-8").rstrip()
            self.log.info("kernel release {}".format(kernel_release))

            # falco-driver-loader has a more comprehensive set of ways to
            # find the config hash. We only look at /boot/config-<kernel release>.
            md5_output = subprocess.check_output(
                ["md5sum",
                 "/boot/config-{}".format(kernel_release)]).decode("utf-8").rstrip()
            config_hash = md5_output.split(" ")[0]

            probe_filename = "falco-{}-{}-{}-{}.ko".format(
                falco_version, arch, kernel_release, config_hash)
            driver_path = os.path.join(self.falcodir, "driver", "falco.ko")
            module_path = os.path.join(self.module_dir, probe_filename)
            self.log.debug("Copying {} to {}".format(driver_path, module_path))
            shutil.copyfile(driver_path, module_path)

    def init_grpc_handler(self):
        self.grpcurl_res = None
        if len(self.grpc_results) > 0:
            if not self.is_grpc_using_unix_socket:
                self.fail(
                    "This test suite supports gRPC with unix socket only")

            cmdline = "grpcurl -format text -import-path ../userspace/falco " \
                "-proto {} -plaintext -unix {} " \
                "{}/{}".format(self.grpc_proto, self.grpc_address,
                               self.grpc_service, self.grpc_method)
            that = self

            class GRPCUnixSocketEventHandler(PatternMatchingEventHandler):
                def on_created(self, event):
                    # that.log.info("EVENT: {}", event)
                    that.grpcurl_res = process.run(cmdline)

            path = os.path.dirname(self.grpc_address)
            process.run("mkdir -p {}".format(path))
            event_handler = GRPCUnixSocketEventHandler(patterns=['*'],
                                                       ignore_directories=True)
            self.grpc_observer = Observer()
            self.grpc_observer.schedule(event_handler, path, recursive=False)
            self.grpc_observer.start()

    def check_grpc(self):
        if self.grpc_observer is not None:
            self.grpc_observer.stop()
            self.grpc_observer = None
            if self.grpcurl_res is None:
                self.fail("gRPC responses not found")

            for exp_result in self.grpc_results:
                found = False
                for line in self.grpcurl_res.stdout.decode(
                        "utf-8").splitlines():
                    if exp_result in line:
                        found = True
                        break

                if not found:
                    self.fail(
                        "Could not find a line with '{}' in gRPC responses (protobuf text)"
                        .format(exp_result))

    def test(self):
        self.log.info("Trace file %s", self.trace_file)

        self.falco_binary_path = '{}/userspace/falco/falco'.format(
            self.falcodir)

        self.possibly_copy_driver()

        self.init_grpc_handler()

        if self.package != 'None':
            # This sets falco_binary_path as a side-effect.
            self.install_package()

        trace_arg = self.trace_file

        if self.trace_file:
            trace_arg = "-e {}".format(self.trace_file)

        # Possibly run psp converter
        if self.psp_file != "":

            if not os.path.isfile(self.psp_conv_path):
                self.log.info("Downloading {} to {}".format(
                    self.psp_conv_url, self.psp_conv_path))

                urllib.request.urlretrieve(self.psp_conv_url,
                                           self.psp_conv_path)
                os.chmod(self.psp_conv_path, stat.S_IEXEC)

            conv_cmd = '{} convert psp --psp-path {} --rules-path {}'.format(
                self.psp_conv_path, os.path.join(self.basedir, self.psp_file),
                self.psp_rules_file)

            conv_proc = process.SubProcess(conv_cmd)

            conv_res = conv_proc.run(timeout=180, sig=9)

            if conv_res.exit_status != 0:
                self.error(
                    "psp_conv command \"{}\" exited with unexpected return value {}. Full stdout={} stderr={}"
                    .format(conv_cmd, conv_res.exit_status, conv_res.stdout,
                            conv_res.stderr))

            with open(self.psp_rules_file, 'r') as myfile:
                psp_rules = myfile.read()
                self.log.debug("Converted Rules: {}".format(psp_rules))

        # Run falco
        cmd = '{} {} {} -c {} {} -o json_output={} -o json_include_output_property={} -o json_include_tags_property={} -o priority={} -v {}'.format(
            self.falco_binary_path, self.rules_args, self.disabled_args,
            self.conf_file, trace_arg, self.json_output,
            self.json_include_output_property, self.json_include_tags_property,
            self.priority, self.addl_cmdline_opts)

        for tag in self.disable_tags:
            cmd += ' -T {}'.format(tag)

        for tag in self.run_tags:
            cmd += ' -t {}'.format(tag)

        if self.run_duration:
            cmd += ' -M {}'.format(self.run_duration)

        if self.all_events:
            cmd += ' -A'

        if self.time_iso_8601:
            cmd += ' -o time_format_iso_8601=true'

        self.falco_proc = process.SubProcess(cmd)

        res = self.falco_proc.run(timeout=180, sig=9)

        if self.stdout_is != '':
            if self.stdout_is != res.stdout.decode("utf-8"):
                self.fail("Stdout was not exactly {}".format(self.stdout_is))

        if self.stderr_is != '':
            if self.stderr_is != res.stderr.decode("utf-8"):
                self.fail("Stderr was not exactly {}".format(self.stderr_is))

        for pattern in self.stderr_contains:
            match = re.search(pattern, res.stderr.decode("utf-8"), re.DOTALL)
            if match is None:
                self.fail(
                    "Stderr of falco process did not contain content matching {}"
                    .format(pattern))

        for pattern in self.stdout_contains:
            match = re.search(pattern, res.stdout.decode("utf-8"), re.DOTALL)
            if match is None:
                self.fail(
                    "Stdout of falco process '{}' did not contain content matching {}"
                    .format(res.stdout.decode("utf-8"), pattern))

        for pattern in self.stderr_not_contains:
            match = re.search(pattern, res.stderr.decode("utf-8"))
            if match is not None:
                self.fail(
                    "Stderr of falco process contained content matching {} when it should have not"
                    .format(pattern))

        for pattern in self.stdout_not_contains:
            match = re.search(pattern, res.stdout.decode("utf-8"))
            if match is not None:
                self.fail(
                    "Stdout of falco process '{}' did contain content matching {} when it should have not"
                    .format(res.stdout.decode("utf-8"), pattern))

        if res.exit_status != self.exit_status:
            self.error(
                "Falco command \"{}\" exited with unexpected return value {} (!= {})"
                .format(cmd, res.exit_status, self.exit_status))

        # No need to check any outputs if the falco process exited abnormally.
        if res.exit_status != 0:
            return

        self.check_rules_warnings(res)
        if len(self.rules_events) > 0:
            self.check_rules_events(res)
        if len(self.validate_rules_file) == 0 and self.check_detection_counts:
            self.check_detections(res)
        if len(self.detect_counts) > 0:
            self.check_detections_by_rule(res)
        self.check_json_output(res)
        self.check_outputs()
        self.check_output_strictly_contains(res)
        self.check_grpc()
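The per-rule count check above hinges on escaping regex metacharacters in rule names before embedding them in a pattern. A minimal standalone sketch of that parsing step; the sample summary text is made up, and re.escape stands in for the manual character-class substitution used above:

import re

SAMPLE = """Events detected: 2
Triggered rules by rule name:
   Write below etc (CVE-2019-0000): 1
   Terminal shell in container: 1
"""

def rule_count(summary, rule):
    # Escape metacharacters so names like "Write below etc (CVE-2019-0000)"
    # are matched literally, then pull out the trailing count.
    match = re.search(r'\s{}: (\d+)'.format(re.escape(rule)), summary)
    return int(match.group(1)) if match else 0

assert rule_count(SAMPLE, "Write below etc (CVE-2019-0000)") == 1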
Code example #35
def start(services, auto_upgrade, anchore_module, skip_config_validate, skip_db_compat_check, all):
    """
    Startup and monitor service processes. Specify a list of service names or empty for all.
    """

    global config
    ecode = ExitCode.ok

    auto_upgrade = True

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK', str(skip_db_compat_check)).lower() in ['true', 't', 'y', 'yes']:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if services:
        input_services = list(services)
    else:
        input_services = os.getenv('ANCHORE_ENGINE_SERVICES', '').strip().split()

    if not input_services and not all:
        raise click.exceptions.BadArgumentUsage('No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option')

    try:
        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(',')
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception("cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info("Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name + ".db.entities.upgrade")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) + ") cannot be found/imported - exception: " + str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn('could not find any services to execute in the config file')
                sys.exit(1)

            input_services = [name for name, srv_conf in list(config_services.items()) if srv_conf.get('enabled')]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn('specified service {} not found in list of available services {} - removing from list of services to start'.format(service_conf_name, list(service_map.keys())))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error("No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting")
            sys.exit(1)

        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(localconfig)

            # override db_timeout since upgrade might require longer db session timeout setting
            try:
                if 'timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['timeout'] = 86400
                elif 'connect_timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['connect_timeout'] = 86400
            except Exception as err:
                pass

            anchore_manager.util.db.connect_database(db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.util.db.init_database(upgrade_module=module, localconfig=localconfig, do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if auto_upgrade and 'anchore-catalog' in services:
                            logger.info("Auto-upgrade is set - performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info("No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn(
                                "this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(
                                    str(code_versions['db_version']), str(db_versions['db_version']), str(max_timeout - int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn('no existing anchore DB data can be discovered, assuming bootstrap')
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception("this version of anchore-engine requires the anchore DB version (" + str(code_versions['db_version']) + ") but we discovered anchore DB version (" + str(
                    db_versions['db_version']) + ") in the running DB - please perform the DB upgrade process and retry")

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))

        for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
            try:
                if not os.path.exists(supportdir):
                    os.makedirs(supportdir, 0o755)
            except Exception as err:
                logger.error("cannot create log directory {} - exception: {}".format(supportdir, str(err)))
                raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/anchore/" + service + ".pid"
            try:
                terminate_service(service, flush_pidfile=True)

                service_thread = ServiceThread(startup_service, (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                alive = True
                while not os.path.exists(pidfile) and tries < max_tries:
                    logger.info("waiting for service pidfile {} to exist {}/{}".format(pidfile, tries, max_tries))

                    try:
                        alive = service_thread.thread.is_alive()
                    except Exception:
                        pass
                    if not alive:
                        logger.info("service thread has stopped {}".format(service))
                        break

                    time.sleep(1)
                    tries = tries + 1

                logger.info("auto_restart_services setting: {}".format(localconfig.get('auto_restart_services', False)))
                if not localconfig.get('auto_restart_services', False):
                    logger.info("checking for startup failure pidfile={}, is_alive={}".format(os.path.exists(pidfile), alive))
                    if not os.path.exists(pidfile) or not alive:
                        raise Exception("service thread for ({}) failed to start".format(service))

                time.sleep(1)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(str(err)))
                break

        if startFailed:
            logger.fatal("one or more services failed to start. cleanly terminating the others")
            for service in services:
                terminate_service(service, flush_pidfile=True)
            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if localconfig.get('auto_restart_services', False):
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info("restarting service: {}".format(service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error("failed to startup log watchers - exception: {}".format(str(err)))
                raise err

    except Exception as err:
        log_error('servicestart', err)
        ecode = ExitCode.failed

    doexit(ecode)
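The pre-flight block above is essentially a poll-until-in-sync loop with a one-hour deadline. The same pattern in isolation, a minimal sketch with a generic predicate standing in for the code/DB version comparison:

import time

def wait_until(predicate, max_timeout=3600, interval=5):
    # Poll predicate() until it returns True or the deadline passes,
    # mirroring the in_sync/timed_out bookkeeping above.
    deadline = time.time() + max_timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False

# e.g. wait_until(lambda: code_version == db_version, max_timeout=3600)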
Code example #36
class PortListerPlugin(octoprint.plugin.StartupPlugin,
                       octoprint.plugin.AssetPlugin,
                       octoprint.plugin.SettingsPlugin):
    def on_after_startup(self, *args, **kwargs):
        self._logger.info("Port Lister %s %s" % (repr(args), repr(kwargs)))
        event_handler = PortListEventHandler(self)
        self._observer = Observer()
        self._observer.schedule(event_handler, "/dev", recursive=False)
        self._observer.start()

    def on_port_created(self, port, *args, **kwargs):
        # if we're already connected ignore it
        if self._printer.is_closed_or_error():
            connection_options = get_connection_options()
            self._logger.info("on_port_created connection_options %s" %
                              (repr(connection_options)))

            # is the new device in the port list? yes, tell the view model
            self._logger.info("Checking if %s is in %s" %
                              (port, repr(connection_options["ports"])))
            if port in connection_options["ports"]:
                self._plugin_manager.send_plugin_message(
                    self._plugin_name, port)

                # if autoconnect and the new port matches, try to connect
                if self._settings.global_get_boolean(["serial",
                                                      "autoconnect"]):
                    self._logger.info(
                        "autoconnect_delay %d",
                        self._settings.get(["autoconnect_delay"]))
                    Timer(self._settings.get(["autoconnect_delay"]),
                          self.do_auto_connect, [port]).start()
                else:
                    self._logger.info(
                        "Not autoconnecting because autoconnect is turned off."
                    )
            else:
                self._logger.warning(
                    "Won't autoconnect because %s isn't in %s" %
                    (port, repr(connection_options["ports"])))
        else:
            self._logger.warning(
                "Not autoconnecting because the printer is neither closed nor in an error state."
            )

    def on_shutdown(self, *args, **kwargs):
        self._logger.info("Shutting down file system observer")
        self._observer.stop()
        self._observer.join()

    def do_auto_connect(self, port, *args, **kwargs):
        try:
            self._logger.info("do_auto_connect")
            (autoport, baudrate) = self._settings.global_get([
                "serial", "port"
            ]), self._settings.global_get_int(["serial", "baudrate"])
            if not autoport:
                autoport = "AUTO"
            if not port:
                port = "AUTO"
            if autoport == "AUTO" or os.path.realpath(
                    autoport) == os.path.realpath(port):
                self._logger.info("realpath match")
                printer_profile = self._printer_profile_manager.get_default()
                profile = printer_profile[
                    "id"] if "id" in printer_profile else "_default"
                if not self._printer.is_closed_or_error():
                    self._logger.info(
                        "Not autoconnecting; printer already connected")
                    return
                self._logger.info(
                    "Attempting to connect to %s at %d with profile %s" %
                    (autoport, baudrate, repr(profile)))
                self._printer.connect(port=autoport,
                                      baudrate=baudrate,
                                      profile=profile)
            else:
                self._logger.info("realpath no match")
                self._logger.info(
                    "Skipping auto connect on %s because it isn't %s" %
                    (os.path.realpath(port), os.path.realpath(autoport)))
        except Exception:
            self._logger.error("Exception in do_auto_connect %s",
                               get_exception_string())

    def get_settings_defaults(self, *args, **kwargs):
        return dict(autoconnect_delay=20)

    def get_assets(self, *args, **kwargs):
        return dict(js=["js/portlister.js"])

    def get_update_information(self, *args, **kwargs):
        return dict(portlister=dict(
            displayName="PortLister",
            displayVersion=self._plugin_version,

            # use github release method of version check
            type="github_release",
            user="******",
            repo="OctoPrint-PortLister",
            current=self._plugin_version,

            # update method: pip
            pip=
            "https://github.com/markwal/OctoPrint-PortLister/archive/{target_version}.zip"
        ))
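PortListEventHandler is referenced above but not shown in this snippet. A plausible minimal version, assuming it just forwards watchdog create events for serial-looking device nodes to the plugin (the patterns are an assumption):

from watchdog.events import PatternMatchingEventHandler

class PortListEventHandler(PatternMatchingEventHandler):
    def __init__(self, plugin):
        # Only watch device nodes that look like serial ports.
        super(PortListEventHandler, self).__init__(
            patterns=["*/ttyUSB*", "*/ttyACM*"], ignore_directories=True)
        self._plugin = plugin

    def on_created(self, event):
        # Hand the new port path to on_port_created() above.
        self._plugin.on_port_created(event.src_path)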
Code example #37
 def handle(self, *args, **options):
     event_handler = FileChecker()
     observer = Observer()
     observer.schedule(event_handler, path="static", recursive=True)
     observer.start()
     call("python manage.py runserver", shell=True)
Code example #38
 def run(self):
     event_handler = FileChange()
     observer = Observer()
     observer.schedule(event_handler, path=self.path, recursive=True)
     observer.start()
     print('start watching stylus files in %s directory' % self.path)
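Note that run() returns right after observer.start(), so the caller must keep the process alive (for example by blocking on observer.join()) or the watch ends immediately. The FileChange handler is also not shown; a plausible sketch that shells out to the stylus compiler on every change (the exact command line is an assumption):

import subprocess
from watchdog.events import PatternMatchingEventHandler

class FileChange(PatternMatchingEventHandler):
    patterns = ["*.styl"]

    def on_modified(self, event):
        # Recompile the touched stylus source; a real handler might
        # debounce or compile a whole bundle instead.
        subprocess.call(["stylus", event.src_path])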
Code example #39
class SqliteEventLogStorage(AssetAwareSqlEventLogStorage, ConfigurableClass):
    """SQLite-backed event log storage.

    Users should not directly instantiate this class; it is instantiated by internal machinery when
    ``dagit`` and ``dagster-graphql`` load, based on the values in the ``dagster.yaml`` file in
    ``$DAGSTER_HOME``. Configuration of this class should be done by setting values in that file.

    This is the default event log storage when none is specified in the ``dagster.yaml``.

    To explicitly specify SQLite for event log storage, you can add a block such as the following
    to your ``dagster.yaml``:

    .. code-block:: YAML

        event_log_storage:
          module: dagster.core.storage.event_log
          class: SqliteEventLogStorage
          config:
            base_dir: /path/to/dir

    The ``base_dir`` param tells the event log storage where on disk to store the databases. To
    improve concurrent performance, event logs are stored in a separate SQLite database for each
    run.
    """

    def __init__(self, base_dir, inst_data=None):
        """Note that idempotent initialization of the SQLite database is done on a per-run_id
        basis in the body of connect, since each run is stored in a separate database."""
        self._base_dir = os.path.abspath(check.str_param(base_dir, "base_dir"))
        mkdir_p(self._base_dir)

        self._watchers = defaultdict(dict)
        self._obs = Observer()
        self._obs.start()
        self._inst_data = check.opt_inst_param(inst_data, "inst_data", ConfigurableClassData)

        # Used to ensure that each run ID attempts to initialize its DB the first time it connects,
        # ensuring that the database will be created if it doesn't exist
        self._initialized_dbs = set()

        # Ensure that multiple threads (like the event log watcher) interact safely with each other
        self._db_lock = threading.Lock()

    def upgrade(self):
        all_run_ids = self.get_all_run_ids()
        print(  # pylint: disable=print-call
            f"Updating event log storage for {len(all_run_ids)} runs on disk..."
        )
        alembic_config = get_alembic_config(__file__)
        for run_id in tqdm(all_run_ids):
            with self.run_connection(run_id) as conn:
                run_alembic_upgrade(alembic_config, conn, run_id)

        print("Updating event log storage for index db on disk...")  # pylint: disable=print-call
        with self.index_connection() as conn:
            run_alembic_upgrade(alembic_config, conn, "index")

        self._initialized_dbs = set()

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return {"base_dir": StringSource}

    @staticmethod
    def from_config_value(inst_data, config_value):
        return SqliteEventLogStorage(inst_data=inst_data, **config_value)

    def get_all_run_ids(self):
        all_filenames = glob.glob(os.path.join(self._base_dir, "*.db"))
        return [
            os.path.splitext(os.path.basename(filename))[0]
            for filename in all_filenames
            if os.path.splitext(os.path.basename(filename))[0] != INDEX_SHARD_NAME
        ]

    def path_for_run_id(self, run_id):
        return os.path.join(self._base_dir, "{run_id}.db".format(run_id=run_id))

    def conn_string_for_shard(self, shard_name):
        check.str_param(shard_name, "shard_name")
        return create_db_conn_string(self._base_dir, shard_name)

    def _initdb(self, engine):
        alembic_config = get_alembic_config(__file__)

        retry_limit = 10

        while True:
            try:

                with engine.connect() as connection:
                    db_revision, head_revision = check_alembic_revision(alembic_config, connection)

                    if not (db_revision and head_revision):
                        SqlEventLogStorageMetadata.create_all(engine)
                        engine.execute("PRAGMA journal_mode=WAL;")
                        stamp_alembic_rev(alembic_config, connection)

                break
            except (db.exc.DatabaseError, sqlite3.DatabaseError, sqlite3.OperationalError) as exc:
                # This is SQLite-specific handling for concurrency issues that can arise when
                # multiple processes (e.g. the dagit process and user code process) contend with
                # each other to init the db. When we hit the following errors, we know that another
                # process is on the case and we should retry.
                err_msg = str(exc)

                if not (
                    "table asset_keys already exists" in err_msg
                    or "table secondary_indexes already exists" in err_msg
                    or "table event_logs already exists" in err_msg
                    or "database is locked" in err_msg
                    or "table alembic_version already exists" in err_msg
                    or "UNIQUE constraint failed: alembic_version.version_num" in err_msg
                ):
                    raise

                if retry_limit == 0:
                    raise
                else:
                    logging.info(
                        "SqliteEventLogStorage._initdb: Encountered apparent concurrent init, "
                        "retrying ({retry_limit} retries left). Exception: {str_exc}".format(
                            retry_limit=retry_limit, str_exc=err_msg
                        )
                    )
                    time.sleep(0.2)
                    retry_limit -= 1

    @contextmanager
    def _connect(self, shard):
        with self._db_lock:
            check.str_param(shard, "shard")

            conn_string = self.conn_string_for_shard(shard)
            engine = create_engine(conn_string, poolclass=NullPool)

            if shard not in self._initialized_dbs:
                self._initdb(engine)
                self._initialized_dbs.add(shard)

            conn = engine.connect()

            try:
                with handle_schema_errors(
                    conn,
                    get_alembic_config(__file__),
                    msg="SqliteEventLogStorage for shard {shard}".format(shard=shard),
                ):
                    yield conn
            finally:
                conn.close()
            engine.dispose()

    def run_connection(self, run_id=None):
        return self._connect(run_id)

    def index_connection(self):
        return self._connect(INDEX_SHARD_NAME)

    def store_event(self, event):
        """
        Overridden method to replicate asset events in a central assets.db sqlite shard, enabling
        cross-run asset queries.

        Args:
            event (EventRecord): The event to store.
        """
        check.inst_param(event, "event", EventRecord)
        insert_event_statement = self.prepare_insert_event(event)
        run_id = event.run_id

        with self.run_connection(run_id) as conn:
            conn.execute(insert_event_statement)

        if event.is_dagster_event and event.dagster_event.asset_key:
            # mirror the event in the cross-run index database
            with self.index_connection() as conn:
                conn.execute(insert_event_statement)

            self.store_asset_key(event)

    def delete_events(self, run_id):
        with self.run_connection(run_id) as conn:
            self.delete_events_for_run(conn, run_id)

        # delete the mirrored event in the cross-run index database
        with self.index_connection() as conn:
            self.delete_events_for_run(conn, run_id)

    def wipe(self):
        # should delete all the run-sharded dbs as well as the index db
        for filename in (
            glob.glob(os.path.join(self._base_dir, "*.db"))
            + glob.glob(os.path.join(self._base_dir, "*.db-wal"))
            + glob.glob(os.path.join(self._base_dir, "*.db-shm"))
        ):
            os.unlink(filename)

        self._initialized_dbs = set()

    def _delete_mirrored_events_for_asset_key(self, asset_key):
        with self.index_connection() as conn:
            conn.execute(
                SqlEventLogStorageTable.delete().where(  # pylint: disable=no-value-for-parameter
                    db.or_(
                        SqlEventLogStorageTable.c.asset_key == asset_key.to_string(),
                        SqlEventLogStorageTable.c.asset_key == asset_key.to_string(legacy=True),
                    )
                )
            )

    def wipe_asset(self, asset_key):
        # default implementation will update the event_logs in the sharded dbs, and the asset_key
        # table in the asset shard, but will not remove the mirrored event_log events in the asset
        # shard
        super(SqliteEventLogStorage, self).wipe_asset(asset_key)
        self._delete_mirrored_events_for_asset_key(asset_key)

    def watch(self, run_id, start_cursor, callback):
        watchdog = SqliteEventLogStorageWatchdog(self, run_id, callback, start_cursor)
        self._watchers[run_id][callback] = (
            watchdog,
            self._obs.schedule(watchdog, self._base_dir, True),
        )

    def end_watch(self, run_id, handler):
        if handler in self._watchers[run_id]:
            event_handler, watch = self._watchers[run_id][handler]
            self._obs.remove_handler_for_watch(event_handler, watch)
            del self._watchers[run_id][handler]
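A small usage sketch of the sharding scheme above: each run id maps to its own SQLite file under base_dir, with one extra index shard for cross-run queries. The directory here is hypothetical:

import tempfile

storage = SqliteEventLogStorage(base_dir=tempfile.mkdtemp())
print(storage.path_for_run_id("my-run"))  # <base_dir>/my-run.db
print(storage.get_all_run_ids())          # [] until some run stores events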
Code example #40
File: dashboard.py Project: jglaser/signac-dashboard
class Dashboard:
    """A dashboard application to display a :py:class:`signac.Project`.

    The Dashboard class is designed to be used as a base class for a child
    class such as :code:`MyDashboard` which can be customized and launched via
    its command line interface (CLI). The CLI is invoked by calling
    :py:meth:`.main` on an instance of this class.

    **Configuration options:** The :code:`config` dictionary recognizes the
    following options:

    - **HOST**: Sets binding address (default: localhost).
    - **PORT**: Sets port to listen on (default: 8888).
    - **DEBUG**: Enables debug mode if :code:`True` (default: :code:`False`).
    - **PROFILE**: Enables the profiler
      :py:class:`werkzeug.middleware.profiler.ProfilerMiddleware` if
      :code:`True` (default: :code:`False`).
    - **PER_PAGE**: Maximum number of jobs to show per page
      (default: 25).
    - **SECRET_KEY**: This must be specified to run via WSGI with multiple
      workers, so that sessions remain intact. See the
      `Flask docs <http://flask.pocoo.org/docs/1.0/config/#SECRET_KEY>`_
      for more information.
    - **ALLOW_WHERE**: If True, search queries can include :code:`$where`
      statements, which potentially allows arbitrary code execution from user
      input. *Caution:* This should only be enabled in trusted environments,
      never on a publicly-accessible server (default: False).

    :param config: Configuration dictionary (default: :code:`{}`).
    :type config: dict
    :param project: signac project (default: :code:`None`, autodetected).
    :type project: :py:class:`signac.Project`
    :param modules: List of :py:class:`~.Module` instances to display.
    :type modules: list
    """
    def __init__(self, config={}, project=None, modules=[]):
        if project is None:
            self.project = signac.get_project()
        else:
            self.project = project

        self.config = config
        self.modules = modules

        self.event_handler = _FileSystemEventHandler(self)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, self.project.workspace())

        self._prepare()

    def _create_app(self, config={}):
        """Creates a Flask application.

        :param config: Dictionary of configuration parameters.
        """
        app = Flask('signac-dashboard')
        app.config.update({
            'SECRET_KEY': os.urandom(24),
            'SEND_FILE_MAX_AGE_DEFAULT': 300,  # Cache control for static files
        })

        # Load the provided config
        app.config.update(config)

        # Enable profiling
        if app.config.get('PROFILE'):
            logger.warning("Application profiling is enabled.")
            from werkzeug.middleware.profiler import ProfilerMiddleware
            app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[10])

        # Set up default signac-dashboard static and template paths
        signac_dashboard_path = os.path.dirname(__file__)
        app.static_folder = signac_dashboard_path + '/static'
        app.template_folder = signac_dashboard_path + '/templates'

        # Set up custom template paths
        # The paths in DASHBOARD_PATHS give the preferred order of template
        # loading
        loader_list = []
        for dashpath in list(app.config.get('DASHBOARD_PATHS', [])):
            logger.warning("Adding '{}' to dashboard paths.".format(dashpath))
            loader_list.append(
                jinja2.FileSystemLoader(dashpath + '/templates'))

        # The default loader goes last and is overridden by any custom paths
        loader_list.append(app.jinja_loader)

        app.jinja_loader = jinja2.ChoiceLoader(loader_list)

        turbolinks(app)

        return app

    def _create_assets(self):
        """Add assets for inclusion in the dashboard HTML."""

        assets = Environment(self.app)
        # jQuery is served as a standalone file
        jquery = Bundle('js/jquery-*.min.js', output='gen/jquery.min.js')
        # JavaScript is combined into one file and minified
        js_all = Bundle('js/js_all/*.js',
                        filters='jsmin',
                        output='gen/app.min.js')
        # SCSS (Sassy CSS) is compiled to CSS
        scss_all = Bundle('scss/app.scss',
                          filters='libsass',
                          output='gen/app.css')
        assets.register('jquery', jquery)
        assets.register('js_all', js_all)
        assets.register('scss_all', scss_all)
        return assets

    def register_module_asset(self, asset):
        """Register an asset required by a dashboard module.

        Some modules require special scripts or stylesheets, like the
        :py:class:`signac_dashboard.modules.Notes` module. It is recommended to
        use a namespace for each module that matches the example below:

        .. code-block:: python

            dashboard.register_module_asset({
                'file': 'templates/my-module/js/my-script.js',
                'url': '/module/my-module/js/my-script.js'
            })

        :param asset: A dictionary with keys :code:`'file'` and :code:`'url'`.
        :type asset: dict
        """
        self._module_assets.append(asset)

    def _prepare(self):
        """Prepare this dashboard instance to run."""

        # Set configuration defaults and save to the project document
        self.config.setdefault('PAGINATION', True)
        self.config.setdefault('PER_PAGE', 25)

        # Create and configure the Flask application
        self.app = self._create_app(self.config)

        # Add assets and routes
        self.assets = self._create_assets()
        self._register_routes()

        # Add module assets and routes
        self._module_assets = []
        for module in self.modules:
            try:
                module.register(self)
            except Exception as e:
                logger.error('Error while registering {} module: {}'.format(
                    module.name, e))
                logger.error('Removing module {} from dashboard.'.format(
                    module.name))
                self.modules.remove(module)

        # Clear dashboard and project caches.
        self.update_cache()

    def run(self, *args, **kwargs):
        """Runs the dashboard webserver.

        Use :py:meth:`~.main` instead of this method for the command-line
        interface. Arguments to this function are passed directly to
        :py:meth:`flask.Flask.run`.
        """
        host = self.config.get('HOST', 'localhost')
        port = self.config.get('PORT', 8888)
        max_retries = 5

        for _ in range(max_retries):
            try:
                self.app.run(host, port, *args, **kwargs)
                break
            except OSError as e:
                logger.warning(e)
                if port:
                    port += 1

    @lru_cache()
    def _schema_variables(self):
        schema = self.project.detect_schema(exclude_const=True)
        return [key for key in schema]

    @lru_cache()
    def _project_min_len_unique_id(self):
        return self.project.min_len_unique_id()

    def job_title(self, job):
        """Override this method for custom job titles.

        This method generates job titles. By default, the title is a pretty
        (but verbose) form of the job state point, based on the project schema.

        :param job: The job being titled.
        :type job: :py:class:`signac.contrib.job.Job`
        :returns: Title to be displayed.
        :rtype: str
        """
        def _format_num(num):
            if isinstance(num, bool):
                return str(num)
            elif isinstance(num, Real):
                return str(round(num, 2))
            return str(num)

        try:
            s = []
            for keys in sorted(self._schema_variables()):
                v = job.statepoint()[keys[0]]
                try:
                    for key in keys[1:]:
                        v = v[key]
                except KeyError:  # Particular key is present in overall
                    continue      # schema, but not this state point.
                else:
                    s.append('{}={}'.format('.'.join(keys), _format_num(v)))
            return ' '.join(s)
        except Exception as error:
            logger.debug(
                "Error while generating job title: '{}'. "
                "Returning job-id as fallback.".format(error))
            return str(job)

    def job_subtitle(self, job):
        """Override this method for custom job subtitles.

        This method generates job subtitles. By default, the subtitle is a
        minimal unique substring of the job id.

        :param job: The job being subtitled.
        :type job: :py:class:`signac.contrib.job.Job`
        :returns: Subtitle to be displayed.
        :rtype: str
        """
        return str(job)[:max(8, self._project_min_len_unique_id())]

    def job_sorter(self, job):
        """Override this method for custom job sorting.

        This method returns a key that can be compared to sort jobs. By
        default, the sorting key is based on :py:func:`Dashboard.job_title`,
        with natural sorting of numbers. Good examples of such keys are
        strings or tuples of properties that should be used to sort.

        :param job: The job being sorted.
        :type job: :py:class:`signac.contrib.job.Job`
        :returns: Key for sorting.
        :rtype: any comparable type
        """
        key = natsort.natsort_keygen(key=self.job_title, alg=natsort.REAL)
        return key(job)

    @lru_cache()
    def _get_all_jobs(self):
        return sorted(self.project.find_jobs(), key=self.job_sorter)

    @lru_cache(maxsize=100)
    def _job_search(self, query):
        if '$where' in query and not self.config.get('ALLOW_WHERE', False):
            flash('Searches using $where allow arbitrary code execution and '
                  'are only allowed when the configuration option '
                  '\'ALLOW_WHERE\' is enabled. See also: <a href="https://docs.signac.io/projects/dashboard/en/latest/security.html">Security Guidelines</a>',  # noqa:E501
                  'warning')
            raise RuntimeError('ALLOW_WHERE must be enabled for this query.')

        querytype = 'statepoint'
        if query[:4] == 'doc:':
            query = query[4:]
            querytype = 'document'

        try:
            if query is None:
                f = None
            else:
                try:
                    f = json.loads(query)
                except json.JSONDecodeError:
                    query = shlex.split(query)
                    f = signac.contrib.filterparse.parse_filter_arg(query)
                    flash("Search string interpreted as '{}'.".format(
                        json.dumps(f)))
            if querytype == 'document':
                jobs = self.project.find_jobs(doc_filter=f)
            else:
                jobs = self.project.find_jobs(filter=f)
            return sorted(jobs, key=lambda job: self.job_sorter(job))
        except json.JSONDecodeError as error:
            flash('Failed to parse query argument. '
                  'Ensure that \'{}\' is valid JSON!'.format(query),
                  'warning')
            raise error

    @lru_cache(maxsize=65536)
    def _job_details(self, job):
        return {
            'job': job,
            'title': self.job_title(job),
            'subtitle': self.job_subtitle(job),
        }

    def _setup_pagination(self, jobs):
        total_count = len(jobs) if isinstance(jobs, list) else 0
        page = request.args.get('page', 1)
        try:
            page = int(page)
        except (ValueError, TypeError):
            page = 1
            flash('Pagination Error. Displaying page {}.'.format(page),
                  'danger')
        pagination = Pagination(page, self.config['PER_PAGE'], total_count)
        if pagination.page < 1 or pagination.page > pagination.pages:
            pagination.page = max(1, min(pagination.page, pagination.pages))
            if pagination.pages > 0:
                flash('Pagination Error. Displaying page {}.'.format(
                    pagination.page), 'danger')
        return pagination

    def _render_job_view(self, *args, **kwargs):
        g.active_page = 'jobs'
        view_mode = request.args.get('view', kwargs.get(
            'default_view', 'list'))
        if view_mode == 'grid':
            if 'enabled_modules' in session and \
                    len(session.get('enabled_modules', [])) == 0:
                flash('No modules are enabled.', 'info')
            return render_template('jobs_grid.html', *args, **kwargs)
        elif view_mode == 'list':
            return render_template('jobs_list.html', *args, **kwargs)
        else:
            return self._render_error(
                ValueError('Invalid view mode: {}'.format(view_mode)))

    def _render_error(self, error):
        if isinstance(error, Exception):
            error_string = "{}: {}".format(type(error).__name__, error)
        else:
            error_string = error
        logger.error(error_string)
        flash(error_string, 'danger')
        return render_template('error.html')

    def _get_job_details(self, jobs):
        return [self._job_details(job) for job in list(jobs)]

    def add_url(self, import_name, url_rules=[],
                import_file='signac_dashboard', **options):
        """Add a route to the dashboard.

        This method allows custom view functions to be triggered for specified
        routes. These view functions are imported lazily, when their route
        is triggered. For example, write a file :code:`my_views.py`:

        .. code-block:: python

            def my_custom_view(dashboard):
                return 'This is a custom message.'

        Then, in :code:`dashboard.py`:

        .. code-block:: python

            from signac_dashboard import Dashboard

            class MyDashboard(Dashboard):
                pass

            if __name__ == '__main__':
                dashboard = MyDashboard()
                dashboard.add_url('my_custom_view', url_rules=['/custom-url'],
                                  import_file='my_views')
                dashboard.main()

        Finally, launching the dashboard with :code:`python dashboard.py run`
        and navigating to :code:`/custom-url` will show the custom
        message. This can be used in conjunction with user-provided jinja
        templates and the method :py:func:`flask.render_template` for extending
        dashboard functionality.

        :param import_name: The view function name to be imported.
        :type import_name: str
        :param url_rules: A list of URL rules, see
            :py:meth:`flask.Flask.add_url_rule`.
        :type url_rules: list
        :param import_file: The module from which to import (default:
            :code:`'signac_dashboard'`).
        :type import_file: str
        :param \\**options: Additional options to pass to
            :py:meth:`flask.Flask.add_url_rule`.
        """
        if import_file is not None:
            import_name = import_file + '.' + import_name
        for url_rule in url_rules:
            self.app.add_url_rule(
                rule=url_rule,
                view_func=LazyView(dashboard=self, import_name=import_name),
                **options)

    def _register_routes(self):
        """Registers routes with the Flask application.

        This method configures context processors, templates, and sets up
        routes for a basic Dashboard instance. Additionally, routes declared by
        modules are registered by this method.
        """
        dashboard = self

        @dashboard.app.after_request
        def prevent_caching(response):
            if 'Cache-Control' not in response.headers:
                response.headers['Cache-Control'] = 'no-store'
            return response

        @dashboard.app.context_processor
        def injections():
            session.setdefault('enabled_modules',
                               [i for i in range(len(self.modules))
                                if self.modules[i].enabled])
            return {
                'APP_NAME': 'signac-dashboard',
                'APP_VERSION': __version__,
                'PROJECT_NAME': self.project.config['project'],
                'PROJECT_DIR': self.project.config['project_dir'],
                'modules': self.modules,
                'enabled_modules': session['enabled_modules'],
                'module_assets': self._module_assets
            }

        # Add pagination support from http://flask.pocoo.org/snippets/44/
        @dashboard.app.template_global()
        def url_for_other_page(page):
            args = request.args.copy()
            args['page'] = page
            return url_for(request.endpoint, **args)

        @dashboard.app.template_global()
        def modify_query(**new_values):
            args = request.args.copy()
            for key, value in new_values.items():
                args[key] = value
            return '{}?{}'.format(request.path, url_encode(args))

        @dashboard.app.errorhandler(404)
        def page_not_found(error):
            return self._render_error(str(error))

        self.add_url('views.home', ['/'])
        self.add_url('views.settings', ['/settings'])
        self.add_url('views.search', ['/search'])
        self.add_url('views.jobs_list', ['/jobs/'])
        self.add_url('views.show_job', ['/jobs/<jobid>'])
        self.add_url('views.get_file', ['/jobs/<jobid>/file/<path:filename>'])
        self.add_url('views.change_modules', ['/modules'], methods=['POST'])

    def update_cache(self):
        """Clear project and dashboard server caches.

        The dashboard relies on caching for performance. If the data space is
        altered, this method may need to be called before the dashboard
        reflects those changes.
        """
        # Try to update signac project cache. Requires signac 0.9.2 or later.
        with warnings.catch_warnings():
            warnings.simplefilter(action='ignore', category=FutureWarning)
            try:
                self.project.update_cache()
            except Exception:
                pass

        # Clear caches of all dashboard methods
        members = inspect.getmembers(self, predicate=inspect.ismethod)
        for func in filter(lambda f: hasattr(f, 'cache_clear'),
                           map(lambda x: x[1], members)):
            func.cache_clear()

    def __call__(self, environ, start_response):
        """Call the dashboard as a WSGI application."""
        return self.app(environ, start_response)

    def main(self):
        """Runs the command line interface.

        Call this function to use signac-dashboard from its command line
        interface. For example, save this script as :code:`dashboard.py`:

        .. code-block:: python

            from signac_dashboard import Dashboard

            class MyDashboard(Dashboard):
                pass

            if __name__ == '__main__':
                MyDashboard().main()

        Then the dashboard can be launched with:

        .. code-block:: bash

            python dashboard.py run
        """

        def _run(args):
            kwargs = vars(args)
            if kwargs.get('host', None) is not None:
                self.config['HOST'] = kwargs.pop('host')
            if kwargs.get('port', None) is not None:
                self.config['PORT'] = kwargs.pop('port')
            self.config['PROFILE'] = kwargs.pop('profile')
            self.config['DEBUG'] = kwargs.pop('debug')
            self.run()

        parser = argparse.ArgumentParser(
            description="signac-dashboard is a web-based data visualization "
                        "and analysis tool, part of the signac framework.")
        parser.add_argument(
            '--debug',
            action='store_true',
            help="Show traceback on error for debugging.")
        parser.add_argument(
            '--version',
            action='store_true',
            help="Display the version number and exit.")
        subparsers = parser.add_subparsers()

        parser_run = subparsers.add_parser('run')
        parser_run.add_argument(
            '-p', '--profile',
            action='store_true',
            help='Enable flask performance profiling.')
        parser_run.add_argument(
            '-d', '--debug',
            action='store_true',
            help='Enable flask debug mode.')
        parser_run.add_argument(
            '--host', type=str,
            help='Host (binding address). Default: localhost')
        parser_run.add_argument(
            '--port', type=int,
            help='Port to listen on. Default: 8888')
        parser_run.set_defaults(func=_run)

        # This is a hack: argparse itself does not allow
        # parsing only --version without any of the other
        # required arguments.
        if '--version' in sys.argv:
            print('signac-dashboard', __version__)
            sys.exit(0)

        args = parser.parse_args()

        if args.debug:
            logger.setLevel(logging.DEBUG)

        if not hasattr(args, 'func'):
            parser.print_usage()
            sys.exit(2)
        try:
            self.observer.start()
            args.func(args)
        except RuntimeWarning as warning:
            logger.warning("Warning: {}".format(warning))
            if args.debug:
                raise
            sys.exit(1)
        except Exception as error:
            logger.error('Error: {}'.format(error))
            if args.debug:
                raise
            sys.exit(1)
        finally:
            self.observer.stop()
            self.observer.join()
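The LazyView used by add_url above is imported from elsewhere in the package. A minimal sketch of such a wrapper, assuming Flask's documented lazy-view pattern (werkzeug's import_string, plus a __name__ attribute so Flask can derive the endpoint name), might look like:

from werkzeug.utils import import_string


class LazyView:
    """Hypothetical sketch: import the view function on first request."""

    def __init__(self, dashboard, import_name):
        self.dashboard = dashboard
        self.import_name = import_name
        # Flask derives the endpoint name from view_func.__name__
        self.__name__ = import_name.rsplit('.', 1)[-1]
        self._view = None

    def __call__(self, *args, **kwargs):
        if self._view is None:
            self._view = import_string(self.import_name)
        return self._view(self.dashboard, *args, **kwargs)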
コード例 #41
0
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

import os
import time


class MoveHandler(FileSystemEventHandler):
    """Move every file from the watched folder into the destination folder."""

    def on_modified(self, event):
        for filename in os.listdir(myfld):
            src = os.path.join(myfld, filename)
            new_destination = os.path.join(des, filename)
            os.rename(src, new_destination)


myfld = "main-directory"
des = "destination-directory"
event_handler = MoveHandler()
observer = Observer()  # keep lowercase: don't shadow the Observer class
observer.schedule(event_handler, myfld, recursive=True)

observer.start()
try:
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
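One caveat in this example: os.rename raises OSError when the destination directory is on a different filesystem. A sketch of a more robust handler (same hypothetical directory names, rest of the script unchanged) would use shutil.move, which falls back to copy-and-delete:

import os
import shutil

from watchdog.events import FileSystemEventHandler


class MoveHandler(FileSystemEventHandler):
    def on_modified(self, event):
        for filename in os.listdir(myfld):
            # shutil.move copies and deletes when a plain rename is impossible
            shutil.move(os.path.join(myfld, filename),
                        os.path.join(des, filename))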
コード例 #42
0
ファイル: config.py プロジェクト: nach00/WeedBot
class Config(dict):
    """
    :type filename: str
    :type path: str
    :type bot: cloudbot.bot.CloudBot
    :type observer: Observer
    :type event_handler: ConfigEventHandler
    """
    def __init__(self, bot, *args, **kwargs):
        """
        :type bot: cloudbot.bot.CloudBot
        :type args: list
        :type kwargs: dict
        """
        super().__init__(*args, **kwargs)
        self.filename = "config.json"
        self.path = os.path.abspath(self.filename)
        self.bot = bot
        self.update(*args, **kwargs)

        # populate self with config data
        self.load_config()
        self.reloading_enabled = self.get("reloading",
                                          {}).get("config_reloading", True)

        if self.reloading_enabled:
            # start watcher
            self.observer = Observer()

            pattern = "*{}".format(self.filename)

            self.event_handler = ConfigEventHandler(self.bot,
                                                    self,
                                                    patterns=[pattern])
            self.observer.schedule(self.event_handler,
                                   path='.',
                                   recursive=False)
            self.observer.start()

    def stop(self):
        """shuts down the config reloader"""
        if self.reloading_enabled:
            self.observer.stop()

    def load_config(self):
        """(re)loads the bot config from the config file"""
        if not os.path.exists(self.path):
            # if there is no config, show an error and die
            logger.critical("No config file found, bot shutting down!")
            print("No config file found! Bot shutting down in five seconds.")
            print("Copy 'config.default.json' to 'config.json' for defaults.")
            print(
                "For help, see http://git.io/cloudbotirc. Thank you for using CloudBot!"
            )
            time.sleep(5)
            sys.exit()

        with open(self.path) as f:
            self.update(json.load(f))
            logger.debug("Config loaded from file.")

        # reload permissions
        if self.bot.connections:
            for connection in self.bot.connections:
                connection.permissions.reload()

    def save_config(self):
        """saves the contents of the config dict to the config file"""
        with open(self.path, 'w') as config_file:
            json.dump(self, config_file, sort_keys=True, indent=4)
        logger.info("Config saved to file.")
コード例 #43
0
def main(argv):
    """Program entry point"""
    # Parse command line
    parser = argparse.ArgumentParser(
        description='Inspect your chrome web history')
    parser.add_argument('-l',
                        '--list-profiles',
                        action='store_true',
                        default=False,
                        help='List all chrome profiles for the current user')
    parser.add_argument('-p',
                        '--profile',
                        default='Default',
                        help='the Chrome profile name to inspect')
    parser.add_argument('-t',
                        '--time',
                        action='store_true',
                        default=False,
                        help='Print the time of the history entry')
    parser.add_argument('-u',
                        '--url',
                        action='store_true',
                        default=False,
                        help='Also print the url of the history entry')
    parser.add_argument('-m',
                        '--markdown',
                        action='store_true',
                        default=False,
                        help='Output in Markdown-friendly format')
    parser.add_argument('-f',
                        '--follow',
                        action='store_true',
                        default=False,
                        help='follow profile\'s History file for changes')
    count_group = parser.add_mutually_exclusive_group()
    count_group.add_argument('-n',
                             '--count',
                             type=int,
                             default=10,
                             help='number of entries to show')
    count_group.add_argument('-a',
                             '--all',
                             action='store_true',
                             default=False,
                             help='print all entries in History file')
    args = parser.parse_args(argv[1:])

    # --list-profiles will preempt other functionality
    if args.list_profiles is True:
        return list_chrome_profiles()

    # Read table from database.
    if args.follow is True:
        observer = Observer()
        history_filename = os.path.join(get_chrome_userdata_path(),
                                        args.profile, 'History')
        observer.schedule(
            FileChangedEventHandler(observer, history_filename, args),
            os.path.join(get_chrome_userdata_path(), args.profile))

        # Watch file
        observer.start()
        try:
            while True:
                sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()

    else:
        print_history(args)

    return 0
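The FileChangedEventHandler used above is defined elsewhere. A plausible sketch (assuming it simply re-prints the history whenever Chrome rewrites the History database) could be:

from watchdog.events import FileSystemEventHandler


class FileChangedEventHandler(FileSystemEventHandler):
    """Hypothetical sketch: re-print history when the History file changes."""

    def __init__(self, observer, history_filename, args):
        self.observer = observer
        self.history_filename = history_filename
        self.args = args

    def on_modified(self, event):
        if event.src_path == self.history_filename:
            print_history(self.args)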
コード例 #44
0
ファイル: main.py プロジェクト: minuJeong/gl_kata
    def init_observer(self):
        handler = FileSystemEventHandler()
        handler.on_modified = self.on_gl_modified
        observer = Observer()
        observer.schedule(handler, "./gl/", True)
        observer.start()
        # Keep a reference so the observer can be stopped and joined later
        self.observer = observer
コード例 #45
0
ファイル: eyewitness-server.py プロジェクト: zcyph/eyewitness
        conn.close()
        print(
            f'{current_date} {current_time}: eyewitness-server: transport complete!'
        )
    except Error as e:
        print(e)
        print(
            f'{current_date} {current_time}: eyewitness-server: subspace anomaly detected!'
        )


# Event handler wiring: react only when the watched file is modified
# my_event_handler.on_created = on_created
my_event_handler.on_modified = on_modified
# my_event_handler.on_deleted = on_deleted
# my_event_handler.on_moved = on_moved

# Create observer to watch for file changes
go_recursively = False
my_observer = Observer()
my_observer.schedule(my_event_handler, Places_File_Backup,
                     recursive=go_recursively)

# Start the observer
my_observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    my_observer.stop()
my_observer.join()
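The construction of my_event_handler falls outside this excerpt; in watchdog it would typically be a PatternMatchingEventHandler, for example (a sketch, with the patterns assumed):

from watchdog.events import PatternMatchingEventHandler

# Hypothetical construction; the real patterns are not shown in the excerpt
my_event_handler = PatternMatchingEventHandler(patterns=["*"],
                                               ignore_directories=True,
                                               case_sensitive=True)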
コード例 #46
0
ファイル: cli.py プロジェクト: Sickelmo83/quake-cli-tools
def main():
    # Fix for frozen packages
    def handleSIGINT(signum, frame):
        raise KeyboardInterrupt

    signal.signal(signal.SIGINT, handleSIGINT)

    parser = Parser(
        prog='qmount',
        description='Default action is to mount the given pak file as a logical volume.',
        epilog='example: qmount TEST.PAK => mounts TEST.PAK as a logical volume.')

    parser.add_argument('file',
                        metavar='file.pak',
                        action=ResolvePathAction,
                        help='pak file to mount')

    parser.add_argument('-f',
                        '--file-browser',
                        dest='open_file_browser',
                        action='store_true',
                        help='opens a file browser once mounted')

    parser.add_argument('--verbose',
                        dest='verbose',
                        action='store_true',
                        help='verbose mode')

    parser.add_argument(
        '-v',
        '--version',
        dest='version',
        action='version',
        help=argparse.SUPPRESS,
        version=f'{parser.prog} version {qcli.qmount.__version__}')

    args = parser.parse_args()

    dir = os.path.dirname(args.file) or '.'
    if not os.path.exists(dir):
        os.makedirs(dir)

    archive_name = os.path.basename(args.file)
    context = {'dirty': False}
    files = {}

    # If the pak file exists put the contents into the file dictionary
    if os.path.exists(args.file):
        with pak.PakFile(args.file) as pak_file:
            for info in pak_file.infolist():
                name = info.filename
                files[name] = pak_file.read(name)

    else:
        context['dirty'] = True

    temp_directory = platforms.temp_volume(archive_name)

    # Copy pak file contents into the temporary directory
    for filename in files:
        abs_path = os.path.join(temp_directory, filename)
        dir = os.path.dirname(abs_path)

        if not os.path.exists(dir):
            os.makedirs(dir)

        with open(abs_path, 'wb') as out_file:
            out_file.write(files[filename])

    # Open a native file browser
    if args.open_file_browser:
        platforms.open_file_browser(temp_directory)

    # Start file watching
    observer = Observer()
    handler = TempPakFileHandler(
        context,
        temp_directory,
        files,
        args.verbose,
        ignore_patterns=['*/.DS_Store', '*/Thumbs.db'],
        ignore_directories=True)
    observer.schedule(handler, path=temp_directory, recursive=True)

    print('Press Ctrl+C to save and quit')

    observer.start()

    # Wait for user to terminate
    try:
        while True:
            time.sleep(1)

            # Detect the deletion of the watched directory.
            if not os.path.exists(temp_directory):
                raise KeyboardInterrupt

    except KeyboardInterrupt:
        print()
        try:
            observer.stop()

        except Exception:
            # Temporary workaround: watchdog will raise an exception
            # if the watched media has been ejected.
            pass

    observer.join()

    # Write out updated files
    if context['dirty']:
        print(f'Updating changes to {archive_name}')

        with pak.PakFile(args.file, 'w') as pak_file:
            for filename in files:
                pak_file.writestr(filename, files[filename])

    else:
        print(f'No changes detected to {archive_name}')

    # Clean up temp directory
    platforms.unmount_temp_volume(temp_directory)

    sys.exit(0)
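TempPakFileHandler is defined elsewhere in the tool. Judging from the call site, its job is to mirror edits in the temp volume back into the files dict and flag the archive dirty; a hypothetical sketch (names assumed) might be:

import os

from watchdog.events import PatternMatchingEventHandler


class TempPakFileHandler(PatternMatchingEventHandler):
    """Hypothetical sketch: track edits so the pak file can be rewritten."""

    def __init__(self, context, temp_directory, files, verbose, **kwargs):
        super().__init__(**kwargs)
        self.context = context
        self.temp_directory = temp_directory
        self.files = files
        self.verbose = verbose

    def on_modified(self, event):
        rel_path = os.path.relpath(event.src_path, self.temp_directory)
        with open(event.src_path, 'rb') as fp:
            self.files[rel_path] = fp.read()
        self.context['dirty'] = True
        if self.verbose:
            print(f'Updated: {rel_path}')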
コード例 #47
0
class Reloader():
    """docstring for Reloader"""
    def __init__(self, skillName, authToken):
        self.skillName = skillName
        self.authToken = authToken

        self.timerStartAlice = None
        self.isInStartAlice = False
        #self.AliceConfigs = settings
        self.readConfig()
        self.webInterfaceActive = self.getConfig('webInterfaceActive')
        #self.webInterfaceActive = settings["webInterfaceActive"]
        # __pycache__
        self.patterns = "*"
        self.ignore_patterns = ""
        self.ignore_directories = False
        self.case_sensitive = True

        self.onStart()

        # create the event handler
        self.eventHandler = PatternMatchingEventHandler(
            self.patterns, self.ignore_patterns, self.ignore_directories,
            self.case_sensitive)

        # Handle all the events
        self.eventHandler.on_modified = self.on_modified

        # Observer
        os.chdir("/home/pi/ProjectAlice/skills")
        self.path = sys.argv[1] if len(sys.argv) > 1 else '.'

        self.recursive = True
        self.observer = Observer()
        self.observer.schedule(self.eventHandler,
                               self.path,
                               recursive=self.recursive)
        self.observer.start()

#-----------------------------------------------

    def readConfig(self):
        with open('config.json') as config_file:
            self._config = json.load(config_file)

    #-----------------------------------------------
    def getConfig(self, configName: str):
        return self._config[configName]

    #-----------------------------------------------
    def onStart(self):
        if self.webInterfaceActive:
            print(f"Alice Web Interface is Active")
            print(
                f"Reloader runs best when Alice Web Interface is not active.")
            print(
                f"If you do not absolutely need to use the interface then turn it off."
            )
            print(f"This means a speed increase of 1.5 seconds at restart.")

    #-----------------------------------------------
    def startAlice(self):
        if self.isInStartAlice:
            return
        #print("er  i startAlice")
        self.isInStartAlice = True
        os.system("/home/pi/bin/alice-start > /dev/null 2>&1 &")

        if self.timerStartAlice is not None:
            self.timerStartAlice.cancel()
        self.isInStartAlice = False

    #-----------------------------------------------
    def getProcs(self) -> dict:
        dct = dict()
        for p in psutil.process_iter(["pid", "name", "cmdline"]):
            dct.update({p.info["pid"]: {"cmdline": p.info["cmdline"]}})
        return dct

    #-----------------------------------------------
    def on_modified(self, event):
        procsDict = self.getProcs()
        for key in procsDict:
            if 'train' in procsDict[key]["cmdline"]:
                print(
                    'Alice is training, wait a while and then enter "ctrl-s" again.'
                )
                print()
                return

        # wwreloader.py	TextInputWidget
        if event.src_path.find("__pycache__") == -1:

            # # We do not use reload because it does not work on all skills.
            # if event.src_path.find("widgets") == 16:
            # 	print(f'\nReload, a file is modified, "{event.src_path}", ProjectAlice reloaded through API')
            # 	#"TextInputWidget/widgets/SimpleCommand.py"
            # 	self.reloadApiCall()
            # 	return

            print(
                f'\nReload, a file is modified, "{event.src_path}", ProjectAlice restarted'
            )
            os.system(
                "kill -2 `ps ax|grep 'venv/bin/python main.py'|grep -v 'grep'|  awk '{print $1}'` > /dev/null 2>&1 &"
            )

            # If the Alice web interface is active, use a longer restart
            # delay (2.5 s instead of 1.0 s).
            if self.webInterfaceActive:
                self.timerStartAlice = Timer(2.5, self.startAlice)
            else:
                self.timerStartAlice = Timer(1.0, self.startAlice)

            self.timerStartAlice.start()
            print()

    #-----------------------------------------------
    def reloadApiCall(self):
        #curl --location --request POST 'http://localhost:5000/api/v1.0.1/login/?=' --form 'username='******'pin='

        url = f"http://localhost:5000/api/v1.0.1/skills/{self.skillName}/reload/"

        payload = {'': ''}
        files = []
        headers = {'auth': self.authToken}

        response = requests.request("GET",
                                    url,
                                    headers=headers,
                                    data=payload,
                                    files=files)

        print(response.text.encode('utf8'))
コード例 #48
0
class CommandAuto(Command):
    """Automatic rebuilds for Nikola."""

    name = "auto"
    has_server = True
    doc_purpose = "builds and serves a site; automatically detects site changes, rebuilds, and optionally refreshes a browser"
    dns_sd = None
    delta_last_rebuild = datetime.timedelta(milliseconds=100)

    cmd_options = [
        {
            'name': 'port',
            'short': 'p',
            'long': 'port',
            'default': 8000,
            'type': int,
            'help': 'Port number (default: 8000)',
        },
        {
            'name': 'address',
            'short': 'a',
            'long': 'address',
            'type': str,
            'default': '127.0.0.1',
            'help': 'Address to bind (default: 127.0.0.1 -- localhost)',
        },
        {
            'name': 'browser',
            'short': 'b',
            'long': 'browser',
            'type': bool,
            'help': 'Start a web browser',
            'default': False,
        },
        {
            'name': 'ipv6',
            'short': '6',
            'long': 'ipv6',
            'default': False,
            'type': bool,
            'help': 'Use IPv6',
        },
        {
            'name': 'no-server',
            'long': 'no-server',
            'default': False,
            'type': bool,
            'help': 'Disable the server, automate rebuilds only'
        },
    ]

    def _execute(self, options, args):
        """Start the watcher."""
        self.sockets = []
        self.rebuild_queue = asyncio.Queue()
        self.last_rebuild = datetime.datetime.now()

        if aiohttp is None and Observer is None:
            req_missing(['aiohttp', 'watchdog'], 'use the "auto" command')
        elif aiohttp is None:
            req_missing(['aiohttp'], 'use the "auto" command')
        elif Observer is None:
            req_missing(['watchdog'], 'use the "auto" command')

        if sys.argv[0].endswith('__main__.py'):
            self.nikola_cmd = [sys.executable, '-m', 'nikola', 'build']
        else:
            self.nikola_cmd = [sys.argv[0], 'build']

        if self.site.configuration_filename != 'conf.py':
            self.nikola_cmd.append('--conf=' +
                                   self.site.configuration_filename)

        # Run an initial build so we are up-to-date (synchronously)
        self.logger.info("Rebuilding the site...")
        subprocess.call(self.nikola_cmd)

        port = options and options.get('port')
        self.snippet = '''<script>document.write('<script src="http://'
            + (location.host || 'localhost').split(':')[0]
            + ':{0}/livereload.js?snipver=1"></'
            + 'script>')</script>
        </head>'''.format(port)

        # Deduplicate entries by using a set -- otherwise, multiple rebuilds are triggered
        watched = set(['templates/'] +
                      [get_theme_path(name) for name in self.site.THEMES])
        for item in self.site.config['post_pages']:
            watched.add(os.path.dirname(item[0]))
        for item in self.site.config['FILES_FOLDERS']:
            watched.add(item)
        for item in self.site.config['GALLERY_FOLDERS']:
            watched.add(item)
        for item in self.site.config['LISTINGS_FOLDERS']:
            watched.add(item)
        for item in self.site.config['IMAGE_FOLDERS']:
            watched.add(item)
        for item in self.site._plugin_places:
            watched.add(item)
        # Nikola itself (useful for developers)
        watched.add(pkg_resources.resource_filename('nikola', ''))

        out_folder = self.site.config['OUTPUT_FOLDER']
        if options and options.get('browser'):
            browser = True
        else:
            browser = False

        if options['ipv6']:
            dhost = '::'
        else:
            dhost = '0.0.0.0'

        host = options['address'].strip('[').strip(']') or dhost

        # Set up asyncio server
        webapp = web.Application()
        webapp.router.add_get('/livereload.js', self.serve_livereload_js)
        webapp.router.add_get('/robots.txt', self.serve_robots_txt)
        webapp.router.add_route('*', '/livereload', self.websocket_handler)
        resource = IndexHtmlStaticResource(True, self.snippet, '', out_folder)
        webapp.router.register_resource(resource)

        # Prepare asyncio event loop
        # Required for subprocessing to work
        loop = asyncio.get_event_loop()

        # Set debug setting
        loop.set_debug(self.site.debug)

        # Server can be disabled (Issue #1883)
        self.has_server = not options['no-server']

        if self.has_server:
            handler = webapp.make_handler()
            srv = loop.run_until_complete(
                loop.create_server(handler, host, port))

        self.wd_observer = Observer()
        # Watch output folders and trigger reloads
        if self.has_server:
            self.wd_observer.schedule(NikolaEventHandler(
                self.reload_page, loop),
                                      'output/',
                                      recursive=True)

        # Watch input folders and trigger rebuilds
        for p in watched:
            if os.path.exists(p):
                self.wd_observer.schedule(NikolaEventHandler(
                    self.run_nikola_build, loop),
                                          p,
                                          recursive=True)

        # Watch config file (a bit of a hack, but we need a directory)
        _conf_fn = os.path.abspath(self.site.configuration_filename
                                   or 'conf.py')
        _conf_dn = os.path.dirname(_conf_fn)
        self.wd_observer.schedule(ConfigEventHandler(_conf_fn,
                                                     self.run_nikola_build,
                                                     loop),
                                  _conf_dn,
                                  recursive=False)
        self.wd_observer.start()

        if not self.has_server:
            self.logger.info("Watching for changes...")
            # Run the event loop forever (no server mode).
            try:
                # Run rebuild queue
                loop.run_until_complete(self.run_rebuild_queue())

                loop.run_forever()
            except KeyboardInterrupt:
                pass
            finally:
                self.wd_observer.stop()
                self.wd_observer.join()
            loop.close()
            return

        host, port = srv.sockets[0].getsockname()

        self.logger.info("Serving HTTP on {0} port {1}...".format(host, port))
        if browser:
            if options['ipv6'] or '::' in host:
                server_url = "http://[{0}]:{1}/".format(host, port)
            else:
                server_url = "http://{0}:{1}/".format(host, port)

            self.logger.info(
                "Opening {0} in the default web browser...".format(server_url))
            webbrowser.open('http://{0}:{1}'.format(host, port))

        # Run the event loop forever and handle shutdowns.
        try:
            # Run rebuild queue
            loop.run_until_complete(self.run_rebuild_queue())

            self.dns_sd = dns_sd(port, (options['ipv6'] or '::' in host))
            loop.run_forever()
        except KeyboardInterrupt:
            pass
        finally:
            self.logger.info("Server is shutting down.")
            if self.dns_sd:
                self.dns_sd.Reset()
            srv.close()
            # put() is a coroutine; run it so the shutdown sentinel is enqueued
            loop.run_until_complete(self.rebuild_queue.put((None, None)))
            loop.run_until_complete(srv.wait_closed())
            loop.run_until_complete(webapp.shutdown())
            loop.run_until_complete(handler.shutdown(5.0))
            loop.run_until_complete(webapp.cleanup())
            self.wd_observer.stop()
            self.wd_observer.join()
        loop.close()

    @asyncio.coroutine
    def run_nikola_build(self, event):
        """Rebuild the site."""
        # Move events have a dest_path, some editors like gedit use a
        # move on larger save operations for write protection
        event_path = event.dest_path if hasattr(
            event, 'dest_path') else event.src_path
        if sys.platform == 'win32':
            # Windows hidden files support
            is_hidden = os.stat(
                event_path).st_file_attributes & stat.FILE_ATTRIBUTE_HIDDEN
        else:
            is_hidden = False
        has_hidden_component = any(
            p.startswith('.') for p in event_path.split(os.sep))
        if (is_hidden or has_hidden_component or '__pycache__' in event_path
                or event_path.endswith(('.pyc', '.pyo', '.pyd', '_bak', '~'))
                or event.is_directory
            ):  # Skip on folders, these are usually duplicates
            return

        self.logger.debug('Queuing rebuild from {0}'.format(event_path))
        yield from self.rebuild_queue.put(
            (datetime.datetime.now(), event_path))

    @asyncio.coroutine
    def run_rebuild_queue(self):
        """Run rebuilds from a queue (Nikola can only build in a single instance)."""
        while True:
            date, event_path = yield from self.rebuild_queue.get()
            if date is None:
                # Shutdown queue
                return
            if date < (self.last_rebuild + self.delta_last_rebuild):
                self.logger.debug(
                    "Skipping rebuild from {0} (within delta)".format(
                        event_path))
                continue
            self.last_rebuild = datetime.datetime.now()
            self.logger.info('REBUILDING SITE (from {0})'.format(event_path))
            p = yield from asyncio.create_subprocess_exec(
                *self.nikola_cmd, stderr=subprocess.PIPE)
            exit_code = yield from p.wait()
            error = yield from p.stderr.read()
            errord = error.decode('utf-8')

            if exit_code != 0:
                self.logger.error(errord)
                yield from self.send_to_websockets({
                    'command': 'alert',
                    'message': errord
                })
            else:
                self.logger.info("Rebuild successful\n" + errord)

    @asyncio.coroutine
    def reload_page(self, event):
        """Reload the page."""
        # Move events have a dest_path, some editors like gedit use a
        # move on larger save operations for write protection
        event_path = event.dest_path if hasattr(
            event, 'dest_path') else event.src_path
        p = os.path.relpath(event_path,
                            os.path.abspath(
                                self.site.config['OUTPUT_FOLDER'])).replace(
                                    os.sep, '/')
        self.logger.info('REFRESHING: {0}'.format(p))
        yield from self.send_to_websockets({
            'command': 'reload',
            'path': p,
            'liveCSS': True
        })

    @asyncio.coroutine
    def serve_livereload_js(self, request):
        """Handle requests to /livereload.js and serve the JS file."""
        return FileResponse(LRJS_PATH)

    @asyncio.coroutine
    def serve_robots_txt(self, request):
        """Handle requests to /robots.txt."""
        return Response(body=b'User-Agent: *\nDisallow: /\n',
                        content_type='text/plain',
                        charset='utf-8')

    @asyncio.coroutine
    def websocket_handler(self, request):
        """Handle requests to /livereload and initiate WebSocket communication."""
        ws = web.WebSocketResponse()
        yield from ws.prepare(request)
        self.sockets.append(ws)

        while True:
            msg = yield from ws.receive()

            self.logger.debug("Received message: {0}".format(msg))
            if msg.type == aiohttp.WSMsgType.TEXT:
                message = msg.json()
                if message['command'] == 'hello':
                    response = {
                        'command':
                        'hello',
                        'protocols': [
                            'http://livereload.com/protocols/official-7',
                        ],
                        'serverName':
                        'Nikola Auto (livereload)',
                    }
                    yield from ws.send_json(response)
                elif message['command'] != 'info':
                    self.logger.warn(
                        "Unknown command in message: {0}".format(message))
            elif msg.type == aiohttp.WSMsgType.CLOSED:
                break
            elif msg.type == aiohttp.WSMsgType.CLOSE:
                self.logger.debug("Closing WebSocket")
                yield from ws.close()
                break
            elif msg.type == aiohttp.WSMsgType.ERROR:
                self.logger.error(
                    'WebSocket connection closed with exception {0}'.format(
                        ws.exception()))
                break
            else:
                self.logger.warn("Received unknown message: {0}".format(msg))

        self.sockets.remove(ws)
        self.logger.debug("WebSocket connection closed: {0}".format(ws))

        return ws

    @asyncio.coroutine
    def send_to_websockets(self, message):
        """Send a message to all open WebSockets."""
        to_delete = []
        for ws in self.sockets:
            if ws.closed:
                to_delete.append(ws)
                continue

            try:
                yield from ws.send_json(message)
            except RuntimeError as e:
                if 'closed' in e.args[0]:
                    self.logger.warn(
                        "WebSocket {0} closed uncleanly".format(ws))
                    to_delete.append(ws)
                else:
                    raise

        for ws in to_delete:
            self.sockets.remove(ws)
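NikolaEventHandler and ConfigEventHandler are defined elsewhere in the plugin. The key detail is that watchdog invokes handlers on its own observer thread, so events must be handed over to the asyncio loop; a sketch of that bridge (names assumed, not the plugin's actual code) could be:

import asyncio

from watchdog.events import FileSystemEventHandler


class NikolaEventHandler(FileSystemEventHandler):
    """Hypothetical sketch: forward watchdog events into the asyncio loop."""

    def __init__(self, function, loop):
        self.function = function  # a coroutine function taking the event
        self.loop = loop

    def on_any_event(self, event):
        # Called on the watchdog thread; schedule the coroutine thread-safely
        asyncio.run_coroutine_threadsafe(self.function(event), self.loop)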
コード例 #49
0
class LogicGUI:
    """
    A full completed logic handles all previous classes and interaction with the user
    """
    def __init__(self):

        self.lock = threading.Lock()  # for threading purposes

        #  makes a broadcasting socket for discovery and connection purposes
        self._broad = raw_socket.socket(raw_socket.AF_INET,
                                        raw_socket.SOCK_DGRAM)
        self._broad.bind(
            ("", 0))  # binds it with the current host and any available port

        self._socket = Socket("", 0)  # makes a socket for data transfer

        self.gui = tkinter.Tk()  # GUI window
        self.gui.title("NRVC")

        self.gui.resizable(False, False)

        self.text = tkinter.scrolledtext.ScrolledText()  # a scrollable text box
        self.text.pack()

        self.text.tag_configure('big', font=('Verdana', 12, 'bold'))
        self.text.tag_configure("small", font=('Tempus Sans ITC', 8, 'bold'))

        welcome = \
            """Network Repository Version Control\n"""
        self.text.insert(tkinter.END, welcome, "big")
        self.text.config(state=tkinter.DISABLED)  # disabled means READONLY

        self.b1 = tkinter.Button(text="Connect", command=self.connect)
        self.b2 = tkinter.Button(text="Accept Connection", command=self.accept)
        self.b1.pack()
        self.b2.pack()

        self.gui.withdraw()  # just hides the main window

        self.repo_path = filedialog.askdirectory(
            title="Folder To Watch")  # asks for directory to watch
        if not self.repo_path:  # exit if the dialog was closed
            exit()

        self.gui.deiconify()  # unhide the main window

        self.enter_text("Watching : {}".format(
            self.repo_path))  # tells you what directory you are watching

        self.observer = Observer()  # declares observer
        self.sender = SenderEventHandler(self._socket, self, self.lock,
                                         self.repo_path)  # declares sender
        self.receiver = Receiver(self._socket, self, self.repo_path,
                                 self.lock)  # declares receiver

    def mainloop(self):
        self.gui.mainloop()  # enter the window mainloop
        self.end()  # if the mainloop exited end the script

    def start(self):
        """
        Starting the observer, sender and receiver and such
        """
        self.enter_text("Connection Successful !")

        tkinter.Button(text="Sync",
                       command=self.sync_req).pack()  # makes a sync button

        self.enter_text("Observer Starting !")

        self.observer.schedule(self.sender, self.repo_path, recursive=True)
        self.observer.start()  # starts a new thread observing repo path

        self.enter_text("Receiver Starting !")

        self.receiver.main_loop()  # enters the receiver main loop

    def connect(self):
        """
        Handles connecting to another script.
        """
        # destroy the choosing buttons
        self.b1.destroy()
        self.b2.destroy()

        # gets port of the broadcast listening socket of the other script to send to
        port = ""
        while not port.isnumeric():
            port = simpledialog.askstring(
                "NRVC", "Enter the key from the other script ")
            if not port:
                exit()

        port = int(port)

        self._broad.setsockopt(raw_socket.SOL_SOCKET, raw_socket.SO_BROADCAST,
                               1)  # getting socket ready for sending

        # a hello message carrying the main socket's port number (hidden from the user)
        msg = "NRVC{}".format(self._socket.port).encode()

        # thread handling receiving connections
        accepting = threading.Thread(target=self._socket.accept)
        accepting.start()
        self.enter_text('Trying To Connect !')
        while accepting.is_alive():  # keep broadcasting until a connection arrives
            self._broad.sendto(msg, ('<broadcast>', port))
            time.sleep(0.2)  # in a specific interval
        cont = threading.Thread(target=self.start)
        cont.start()

    def accept(self):
        """
        Receives a broadcast message on a specific port
        and gets the information needed to connect to the other script.
        """
        # destroy choose buttons
        self.b1.destroy()
        self.b2.destroy()
        # show the broadcast socket's port as the key the other script must enter
        self.enter_text("Enter this key in the other script : {}".format(
            self._broad.getsockname()[1]))
        # receive on a new thread so the mainloop thread keeps serving the GUI
        cont = threading.Thread(target=self.receive)
        cont.start()

    def receive(self):
        """
            Receives broadcast messages on a specific port
        """
        while True:
            data, addr = self._broad.recvfrom(10)  # receive up to 10 bytes
            data = data.decode()
            if data[:4] == "NRVC":  # ignore unrelated broadcast messages
                port = int(data[4:])  # the other script's main socket port
                self._socket.connect(addr[0], port)  # connect to the other socket
                self.start()

    def enter_text(self, msg):
        """
        adds a new line of information to the visible text box for the user
        :param msg:  message to add
        """
        self.text.config(state=tkinter.NORMAL)
        self.text.insert(tkinter.END, "... {} \n".format(msg), "small")
        self.text.config(state=tkinter.DISABLED)

    def sync_req(self):
        """
        Called when you hit sync button 
        """
        path_to_sync = ""

        while not path_to_sync.startswith(
                self.repo_path):  # if the path to sync is not in the repo path
            self.enter_text(
                "Please choose the directory inside the repo to Sync")
            path_to_sync = filedialog.askdirectory(title="Path to sync",
                                                   initialdir=self.repo_path)
            if path_to_sync == "":
                return
        # another check step
        # askquestion() returns the string 'yes' or 'no', so compare explicitly
        if simpledialog.messagebox.askquestion(
                "NRVC", "Are you sure you want to sync this path ?\n"
                "Note that any conflicts will be overwritten.") == 'yes':
            self.lock.acquire()
            self._socket.send_msg("sync")
            self._socket.send_msg(path_to_sync[len(self.repo_path):])
            self.enter_text("Sync requested !")
            self.lock.release()

    def end(self):
        self.receiver.ender()
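SenderEventHandler and Receiver come from earlier parts of this script. As a rough, hypothetical sketch of the sender side (using the send_msg API seen in sync_req and the same lock discipline; the real wire protocol is not shown here):

from watchdog.events import FileSystemEventHandler


class SenderEventHandler(FileSystemEventHandler):
    """Hypothetical sketch: announce local changes to the connected peer."""

    def __init__(self, socket, logic, lock, repo_path):
        self.socket = socket
        self.logic = logic
        self.lock = lock
        self.repo_path = repo_path

    def on_modified(self, event):
        with self.lock:
            # Send the path relative to the repo root, mirroring sync_req()
            self.socket.send_msg("modified")
            self.socket.send_msg(event.src_path[len(self.repo_path):])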
コード例 #50
0
ファイル: watcher.py プロジェクト: Ajay191191/codeSync
class ChangeHandler(FileSystemEventHandler):
    def __init__(self, *args, **kwargs):
        super(ChangeHandler, self).__init__()
        self._load_config()
        self._watch_dirs()

    def _load_config(self):
        self.conf = configparser.ConfigParser()  # SafeConfigParser is a deprecated alias
        self.conf.read(['watcherConfig.txt'])

    def _watch_dirs(self):
        """sets up the watchdog observer and schedules sections to watch"""
        self.observer = Observer()
        self._schedule_sections()
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def _schedule_sections(self):
        """creates section data dir for later reference
           and schedules each conf section with the observer
        """
        # dict of dir --> remote_dir mapping
        self.section_data = {}
        # TODO: might be unnecessary to store conf in dict again
        for section in self.conf.sections():
            local_dir = self.conf.get(section, 'local_dir')
            self.section_data.setdefault(local_dir, ObjectList()).append({
                'remote_dir': self.conf.get(section, 'remote_dir'),
                'remote_addr': self.conf.get(section, 'remote_addr'),
            })
            self.observer.schedule(self, local_dir, recursive=True)
            # last_updated time will be used to prevent oversyncing
            self.section_data[local_dir].last_updated = 0

    def _should_sync_dir(self, event, key, local_dir):
        """returns True if dir syncing should happen
           also updates the last modified time of the folder in the process"""
        # some files get removed before sync (ie git locks)
        file_updated_time = os.stat(
            event.src_path if os.path.exists(event.src_path) else local_dir).st_mtime
        if file_updated_time > self.section_data[key].last_updated:
            self.section_data[key].last_updated = file_updated_time
            return True
        else:
            return False

    def _sync_dir(self, data, local_dir):
        for item in data:
            remote_dir = item['remote_dir']
            remote_addr = item['remote_addr']
            if remote_dir:
                remote_file_path = "{}:{}".format(remote_addr, remote_dir)
                exclude_string = "--include '.venv/src/' --exclude '.venv/*'"
                call_str = "rsync -azvp --delete {} {} {}".format(
                    exclude_string, local_dir, remote_file_path)
                print('Running command: {}'.format(call_str))
                self.make_subprocess_call(call_str)
            else:
                raise ValueError('Not sure where server is at :(')

    def make_subprocess_call(self, command):
        proc = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
        for line in iter(proc.stdout.readline, b''):
            print(line, end="")
        proc.communicate()

    def on_any_event(self, event):
        """React to any change from any of the dirs from the config"""
        remote_dir = None
        for key, data in self.section_data.items():
            # match the dir, primitive and probably could change
            if event.src_path.startswith(key):
                local_dir = key + '/'
                if self._should_sync_dir(event, key, local_dir):
                    self._sync_dir(data, local_dir)
コード例 #51
0
class Configuration(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Ref: https://github.com/swagger-api/swagger-codegen
    Do not edit the class manually.
    """
    def __init__(self):
        """
        Constructor
        """
        # Default Base url
        self.host = "https://api.mypurecloud.com"
        # Default api client
        self.api_client = None
        # Temp file folder for downloading files
        self.temp_folder_path = None

        # Authentication Settings
        # dict to store API key(s)
        self.api_key = {}
        # dict to store API prefix (e.g. Bearer)
        self.api_key_prefix = {}
        # Username for HTTP basic authentication
        self.username = ""
        # Password for HTTP basic authentication
        self.password = ""

        # access token for OAuth
        self.access_token = ""
        # used to determine if access token should be refresh transparently when using Code Authorization
        self.should_refresh_access_token = True
        # maximum amount of time other threads will wait for a thread to request a new access token when it expires
        self.refresh_token_wait_time = 10

        # SSL/TLS verification
        # Set this to false to skip verifying SSL certificate when calling API from https server.
        self.verify_ssl = True
        # Set this to customize the certificate file to verify the peer.
        self.ssl_ca_cert = None
        # client certificate file
        self.cert_file = None
        # client key file
        self.key_file = None

        # proxy
        self.proxy = None

        # proxy username
        self.proxy_username = None

        # proxy password
        self.proxy_password = None

        # Logging Settings
        self.logger = Logger()

        # Private config file variables
        # path to the config file
        # default to ~/.genesyscloudpython/config
        self.config_file_path = os.path.join(expanduser("~"),
                                             ".genesyscloudpython", "config")
        # private directory observer instance
        self._observer = None

        # flag to control running of _config_updater thread
        self.live_reload_config = True

        # update config from config file if possible
        self._update_config_from_file()

        # if live_reload_config set, start the config_updater thread
        if self.live_reload_config:
            run_observer = threading.Thread(target=self._run_observer)
            run_observer.daemon = True  # setDaemon() is deprecated
            run_observer.start()

            self._config_updater()

    @property
    def config_file_path(self):
        return self.__config_file_path

    @config_file_path.setter
    def config_file_path(self, value):
        self.__config_file_path = value
        self._update_config_from_file()
        if not hasattr(self, "_observer"):
            return
        if self.live_reload_config:
            self._config_updater()

    def get_api_key_with_prefix(self, identifier):
        """
        Gets API key (with prefix if set).

        :param identifier: The identifier of apiKey.
        :return: The token for api key authentication.
        """
        if self.api_key.get(identifier) and self.api_key_prefix.get(
                identifier):
            return self.api_key_prefix[identifier] + ' ' + self.api_key[
                identifier]
        elif self.api_key.get(identifier):
            return self.api_key[identifier]

    def get_basic_auth_token(self):
        """
        Gets HTTP basic authentication header (string).

        :return: The token for basic HTTP authentication.
        """
        return urllib3.util.make_headers(basic_auth=self.username + ':' + self.password)\
                           .get('authorization')

    def auth_settings(self, access_token=None):
        """
        Gets Auth Settings dict for api client.

        :return: The Auth Settings information dict.
        """

        return {
            'Guest Chat JWT': {
                'type': 'api_key',
                'in': 'header',
                'key': 'Authorization',
                'value': self.get_api_key_with_prefix('Authorization')
            },
            'PureCloud OAuth': {
                'type': 'oauth2',
                'in': 'header',
                'key': 'Authorization',
                'value': ('Bearer ' + self.access_token
                          if access_token is None or access_token == ""
                          else 'Bearer ' + access_token)
            },
        }

    def to_debug_report(self):
        """
        Gets the essential information for debugging.

        :return: The report for debugging.
        """
        return "Python SDK Debug Report:\n"\
               "OS: {env}\n"\
               "Python Version: {pyversion}\n"\
               "Version of the API: v2\n"\
               "SDK Package Version: 148.0.0".\
               format(env=sys.platform, pyversion=sys.version)

    def _update_config_from_file(self):
        try:
            config = configparser.ConfigParser()
            # try to parse as INI format
            try:
                # if it doesn't exist, this function will return []
                if config.read(self.config_file_path) == []:
                    return
            except configparser.MissingSectionHeaderError as e:
                # this exception means it's possibly JSON
                try:
                    with open(self.config_file_path, "r") as read_file:
                        config = json.load(read_file)
                except Exception:
                    return
            # logging
            log_level = _get_config_string(config, "logging", "log_level")
            if log_level is not None:
                self.logger.log_level = LogLevel.from_string(log_level)

            log_format = _get_config_string(config, "logging", "log_format")
            if log_format is not None:
                self.logger.log_format = LogFormat.from_string(log_format)

            log_to_console = _get_config_bool(config, "logging",
                                              "log_to_console")
            if log_to_console is not None:
                self.logger.log_to_console = log_to_console

            log_file_path = _get_config_string(config, "logging",
                                               "log_file_path")
            if log_file_path is not None:
                self.logger.log_file_path = log_file_path

            log_response_body = _get_config_bool(config, "logging",
                                                 "log_response_body")
            if log_response_body is not None:
                self.logger.log_response_body = log_response_body

            log_request_body = _get_config_bool(config, "logging",
                                                "log_request_body")
            if log_request_body is not None:
                self.logger.log_request_body = log_request_body

            # general
            host = _get_config_string(config, "general", "host")
            if host is not None:
                self.host = host

            live_reload_config = _get_config_bool(config, "general",
                                                  "live_reload_config")
            if live_reload_config is not None:
                self.live_reload_config = live_reload_config

            # reauthentication
            refresh_access_token = _get_config_bool(config, "reauthentication",
                                                    "refresh_access_token")
            if refresh_access_token is not None:
                self.should_refresh_access_token = refresh_access_token

            refresh_token_wait_max = _get_config_int(config,
                                                     "reauthentication",
                                                     "refresh_token_wait_max")
            if refresh_token_wait_max is not None:
                self.refresh_token_wait_time = refresh_token_wait_max
        except Exception:
            return

    def _run_observer(self):
        self._observer = Observer()
        self._observer.start()
        try:
            while True:
                time.sleep(1)
        finally:
            self._observer.stop()
            self._observer.join()

    def _config_updater(self):
        if self._observer is not None:
            self._observer.unschedule_all()
        event_handler = ConfigFileEventHandler(self)

        # watch the parent directory of the config file
        watched_directory = os.path.dirname(self.config_file_path)
        # go up the directory tree if the parent directory doesn't yet exist
        while not os.path.exists(watched_directory):
            watched_directory = os.path.dirname(watched_directory)
            if watched_directory == "":
                return

        while True:
            try:
                self._observer.schedule(event_handler,
                                        watched_directory,
                                        recursive=True)
                break
            except FileNotFoundError:
                watched_directory = os.path.dirname(watched_directory)
                if watched_directory == "":
                    return
            except Exception as e:
                return
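ConfigFileEventHandler is defined elsewhere in the SDK. Given how it is scheduled above, a sketch of its probable shape (an assumption, not the SDK's actual code) would be:

from watchdog.events import FileSystemEventHandler


class ConfigFileEventHandler(FileSystemEventHandler):
    """Hypothetical sketch: re-read the SDK config when its file changes."""

    def __init__(self, configuration):
        self.configuration = configuration

    def on_any_event(self, event):
        # Only react to changes of the config file itself
        if event.src_path == self.configuration.config_file_path:
            self.configuration._update_config_from_file()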
コード例 #52
0
def run(application_path: 'the path to the application to run',
        reloader: 'reload the application on changes' = False,
        workers: 'the number of asynchronous tasks to run' = 1,
        debug: 'enable debug mode' = False,
        **kwargs):
    """Import and run an application."""
    if kwargs['quiet']:
        # If quiet mode has been enabled, set the number of verbose
        # flags to -1 so that the level above warning will be used.
        verbosity = -1
    else:
        # argparse gives None not 0.
        verbosity = kwargs['verbose'] or 0

    # Set the log level based on the number of verbose flags. Do this
    # before the app is imported so any log calls made will respect the
    # specified level.
    log_level = logging.WARNING - (verbosity * 10)
    logging.basicConfig(level=log_level)

    import_path, app = _import_application(application_path)

    # Now that we have an application, set its log level, too.
    app.logger.setLevel(log_level)

    if reloader or debug:
        # If the reloader is requested (or debug is enabled), create
        # threads for running the application and watching the file
        # system for changes.
        app.logger.info('Running {!r} with reloader...'.format(app))

        # Find the root of the application and watch for changes
        watchdir = os.path.abspath(import_module(import_path).__file__)
        for _ in import_path.split('.'):
            watchdir = os.path.dirname(watchdir)

        # Create observer and runner threads
        observer = Observer()
        loop = _new_event_loop()
        runner = Thread(
            target=app.run_forever,
            kwargs={
                'num_workers': workers,
                'loop': loop,
                'debug': debug
            },
        )

        # This function is called by the watchdog event handler when
        # changes are detected by the observer
        def restart_process(event):
            """Restart the process in-place."""
            os.execv(sys.executable, [sys.executable] + sys.argv[:])

        # Create the handler and watch the files
        handler = PatternMatchingEventHandler(
            patterns=['*.py', '*.ini'],
            ignore_directories=True,
        )
        handler.on_any_event = restart_process
        observer.schedule(handler, watchdir, recursive=True)

        # Start running everything
        runner.start()
        observer.start()

    else:
        # If the reloader is not needed, avoid the overhead
        app.logger.info('Running {!r} forever...'.format(app))
        app.run_forever(num_workers=workers, debug=debug)
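For reference, the reloader above in isolation: on any matching change the process replaces itself with os.execv, which is the simplest way to pick up code changes without tracking stale threads. A minimal, self-contained sketch under the same assumptions (watch the current directory, match *.py files):

import os
import sys
import time

from watchdog.events import PatternMatchingEventHandler
from watchdog.observers import Observer

def restart_process(event):
    # Replace the current process image in place, preserving argv.
    os.execv(sys.executable, [sys.executable] + sys.argv)

if __name__ == '__main__':
    handler = PatternMatchingEventHandler(patterns=['*.py'],
                                          ignore_directories=True)
    handler.on_any_event = restart_process
    observer = Observer()
    observer.schedule(handler, '.', recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        observer.join()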
Code example #53
0
                    name = name.replace('-',
                                        ' ').replace('_',
                                                     ' ').replace('+', ' ')
                    item["name"] = " ".join(
                        [i for i in name.split() if not i.isdigit()])
                    item["mac"] = newName.replace("/mnt/jarvis", "/Volumes")
                    item["category"] = category
                    item["path"] = newName

        with open(
                '/Users/davidyang/Documents/PWP-Lib-Search/public/plants.json',
                'w') as publicJson:
            json.dump(plantData, publicJson)
        logging.info("renamed file successfully: " + newName)

    event_handler.on_created = on_created
    event_handler.on_deleted = on_deleted
    event_handler.on_moved = on_moved

    nWatch = Observer()
    targetPath = str(path)
    nWatch.schedule(event_handler, targetPath, recursive=False)

    nWatch.start()

    try:
        while True:
            time.sleep(1000)
    except KeyboardInterrupt:
        nWatch.stop()
    nWatch.join()
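The fragment above wires callbacks by assigning plain functions onto a handler instance (event_handler.on_created = on_created and so on), which works because watchdog's dispatch looks those attributes up on the instance. A minimal sketch of that wiring style with placeholder callbacks and the current directory as the watched path:

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

def on_created(event):
    print('created:', event.src_path)

def on_moved(event):
    print('moved:', event.src_path, '->', event.dest_path)

event_handler = FileSystemEventHandler()
event_handler.on_created = on_created
event_handler.on_moved = on_moved

observer = Observer()
observer.schedule(event_handler, '.', recursive=False)
observer.start()
try:
    while True:
        time.sleep(1)
except KeyboardInterrupt:
    observer.stop()
observer.join()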
Code example #54
0
class Observer(QtCore.QObject):
    """ Watches a directory for changes

    Works in one of two modes: time based or change based.

    Attributes:
        obs: the watchdog instance. Read the watchdog documentation for more info
        server: the server to report the changes to
        messages: for triggering the QtSignal and printing the messages to the GUI
        lib_path: default base path
        target_path: rest of the path (from lib_path) to the target directory,
                     kept separate for ease of naming the zip file.
        tot_path: lib_path + target_path
        handler: class that handles the directory changes accordingly
        mode: 0 for change based mode
              1 for time based mode
    """
    sig = QtCore.pyqtSignal()

    def __init__(self, server: Server, lib_path: str, target_path: str):
        super(Observer, self).__init__()

        self.obs = Obs()
        self.server = server
        self.messages = []
        self.log = _ObserverLog()

        self.lib_path = lib_path
        self.target_path = target_path
        self.tot_path = os.path.join(lib_path, target_path)
        self.mode = 0

    def dispatch(self, event):
        """ Dispatch filesystem events to the appropriate handling code

        MUST NOT be renamed: watchdog looks this method up by name for
        every event on the scheduled watch.

        :param event: the triggering event
        :return: returns er.NoSuchOp if the change is not recognized,
                 er.Other if zipping the target fails,
                 0 if no error
        # TODO(Jerry): July 22, 2019
        #  remove the print statements and somehow append to log box
        if event.src_path.endswith('.log'):
            self.messages.append(time_stamp(dates=False) + 'Logfile modified')
            self.sig.emit()

            filename = os.path.join('../archive', str(datetime.date.today()))

            self.messages.append(
                time_stamp(dates=False) + 'Output: ' + filename)
            self.messages.append(
                time_stamp(dates=False) + 'Target: ' + self.tot_path)
            self.sig.emit()

            if zip_folder(self.tot_path, filename):
                self.messages.append(
                    time_stamp(_type=2, dates=False) + 'Error: ZIP failed')
                return er.Other

            self.messages.append(time_stamp(dates=False) + 'Target Zipped')
            self.sig.emit()

            retval = self.server.broadcast_string('zip')
            print_error(retval, 'observer.Handler.dispatch:: Sending zip',
                        print_to)

            retval = self.server.broadcast_zip(filename + '.zip')
            print_error(retval, 'Zip Sent', print_to)

            self.messages.append(time_stamp(dates=False) + 'File Sent')
            self.sig.emit()
        else:
            return er.NoSuchOp
        return 0

    def set_server(self, server: Server) -> int:
        """ Sets the server of the observer

        :param server: the new server bound to the observer
        :return: returns 0 if no error
        """
        self.server = server
        return 0

    def get_target_path(self) -> str:
        """ Gets the target_path of the observer

        :return: returns the target path
        """
        return self.target_path

    def get_mode(self) -> int:
        """ Gets the mode the observer

        :return: returns the mode
        """
        return self.mode

    def get_messages(self):
        return self.messages

    def set_messages(self):
        """ Clears the message buffer """
        return self.messages.clear()

    def set_mode(self, mode: int):
        """ Sets the mode of the observer

        :param mode: 0 for change based
                     1 for time based
                     else false
        :return: returns 1 for unknown mode
                 returns 0 if no error
        """
        if mode != 0 and mode != 1:
            return 1
        self.mode = mode
        return 0

    def start_observe(self, recursive: bool = False) -> int:
        """ Initializes the observer and gets it to start observing

        :param recursive: if the observer observes the nested folders
        :return: returns 1 if observer fails to initialize
                 returns 0 if no error
        """
        try:
            self.obs.schedule(self, self.tot_path, recursive)
            self.obs.start()
        except RuntimeError:
            return 1
        return 0

    def resume(self) -> int:
        """ Resumes the observer

        :return: returns 0 if no error
        """
        self.handler.pause = False
        return 0

    def close(self) -> int:
        """ Closes the observer thread

        :return: returns 1 if observer fails to close
                 returns 0 if no error
        """
        try:
            self.obs.stop()
            self.obs.join()
        except RuntimeError:
            return 1
        return 0

    def get_details(self):
        """ Get the details of the handler

        :return: returns all the details in a list
        """
        return self.log.get_latest_log()
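A detail worth calling out in this example: start_observe schedules self rather than a separate handler object, which works because watchdog in practice only needs a dispatch(event) method on whatever it schedules (hence the docstring's warning not to rename it). A minimal sketch of the same trick, independent of Qt; LogWatcher is a hypothetical stand-in:

from watchdog.observers import Observer as WatchdogObserver

class LogWatcher:
    def dispatch(self, event):
        # watchdog calls this for every event on the scheduled watch.
        if event.src_path.endswith('.log'):
            print('logfile changed:', event.src_path)

obs = WatchdogObserver()  # aliased to avoid clashing with the Qt class above
obs.schedule(LogWatcher(), '.', recursive=False)
obs.start()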
Code example #55
0
class SqliteEventLogStorage(EventLogStorage, ConfigurableClass):
    def __init__(self, base_dir, inst_data=None):
        '''Note that idempotent initialization of the SQLite database is done on a per-run_id
        basis in the body of store_event, since each run is stored in a separate database.'''
        self._base_dir = check.str_param(base_dir, 'base_dir')
        mkdir_p(self._base_dir)

        self._known_run_ids = set([])
        self._watchers = {}
        self._obs = Observer()
        self._obs.start()
        self._inst_data = check.opt_inst_param(inst_data, 'inst_data',
                                               ConfigurableClassData)

    @property
    def inst_data(self):
        return self._inst_data

    @classmethod
    def config_type(cls):
        return SystemNamedDict('SqliteEventLogStorageConfig',
                               {'base_dir': Field(String)})

    @staticmethod
    def from_config_value(inst_data, config_value, **kwargs):
        return SqliteEventLogStorage(inst_data=inst_data,
                                     **dict(config_value, **kwargs))

    @contextmanager
    def _connect(self, run_id):
        # connect outside the try block so a failed connect cannot reach
        # the finally clause with `conn` unbound
        conn = sqlite3.connect(self.filepath_for_run_id(run_id))
        try:
            with conn:
                yield conn
        finally:
            conn.close()

    def filepath_for_run_id(self, run_id):
        check.str_param(run_id, 'run_id')
        return os.path.join(self._base_dir,
                            '{run_id}.db'.format(run_id=run_id))

    def store_event(self, event):
        check.inst_param(event, 'event', EventRecord)
        run_id = event.run_id
        if run_id not in self._known_run_ids:
            with self._connect(run_id) as conn:
                conn.cursor().execute(CREATE_EVENT_LOG_SQL)
                conn.cursor().execute('PRAGMA journal_mode=WAL;')
                self._known_run_ids.add(run_id)
        with self._connect(run_id) as conn:
            dagster_event_type = None
            if event.is_dagster_event:
                dagster_event_type = event.dagster_event.event_type_value

            conn.cursor().execute(
                INSERT_EVENT_SQL,
                (serialize_dagster_namedtuple(event), dagster_event_type,
                 event.timestamp),
            )

    def get_logs_for_run(self, run_id, cursor=-1):
        check.str_param(run_id, 'run_id')
        check.int_param(cursor, 'cursor')
        check.invariant(
            cursor >= -1,
            'Don\'t know what to do with negative cursor {cursor}'.format(
                cursor=cursor),
        )

        events = []
        if not os.path.exists(self.filepath_for_run_id(run_id)):
            return events

        cursor += 1  # adjust from 0-based offset to 1-based
        try:
            with self._connect(run_id) as conn:
                results = conn.cursor().execute(FETCH_EVENTS_SQL,
                                                (str(cursor), )).fetchall()
        except sqlite3.Error as err:
            six.raise_from(EventLogInvalidForRun(run_id=run_id), err)

        try:
            for (json_str, ) in results:
                events.append(
                    check.inst_param(
                        deserialize_json_to_dagster_namedtuple(json_str),
                        'event', EventRecord))
        except (seven.JSONDecodeError, check.CheckError) as err:
            six.raise_from(EventLogInvalidForRun(run_id=run_id), err)

        return events

    def get_stats_for_run(self, run_id):
        if not os.path.exists(self.filepath_for_run_id(run_id)):
            return None

        try:
            with self._connect(run_id) as conn:
                results = conn.cursor().execute(FETCH_STATS_SQL).fetchall()
        except sqlite3.Error as err:
            six.raise_from(EventLogInvalidForRun(run_id=run_id), err)

        try:
            counts = {}
            times = {}
            for result in results:
                if result[0]:
                    counts[result[0]] = result[1]
                    times[result[0]] = result[2]

            return PipelineRunStatsSnapshot(
                run_id=run_id,
                steps_succeeded=counts.get(DagsterEventType.STEP_SUCCESS.value,
                                           0),
                steps_failed=counts.get(DagsterEventType.STEP_FAILURE.value,
                                        0),
                materializations=counts.get(
                    DagsterEventType.STEP_MATERIALIZATION.value, 0),
                expectations=counts.get(
                    DagsterEventType.STEP_EXPECTATION_RESULT.value, 0),
                start_time=float(
                    times.get(DagsterEventType.PIPELINE_START.value, 0.0)),
                end_time=float(
                    times.get(
                        DagsterEventType.PIPELINE_SUCCESS.value,
                        times.get(DagsterEventType.PIPELINE_FAILURE.value,
                                  0.0),
                    )),
            )
        except (seven.JSONDecodeError, check.CheckError) as err:
            six.raise_from(EventLogInvalidForRun(run_id=run_id), err)

    def wipe(self):
        for filename in glob.glob(os.path.join(self._base_dir, '*.db')):
            os.unlink(filename)

    def delete_events(self, run_id):
        path = self.filepath_for_run_id(run_id)
        if os.path.exists(path):
            os.unlink(path)

    @property
    def is_persistent(self):
        return True

    def watch(self, run_id, start_cursor, callback):
        watchdog = EventLogStorageWatchdog(self, run_id, callback,
                                           start_cursor)
        self._watchers[run_id] = self._obs.schedule(watchdog, self._base_dir,
                                                    True)

    def end_watch(self, run_id, handler):
        self._obs.remove_handler_for_watch(handler, self._watchers[run_id])
        del self._watchers[run_id]
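Note that schedule() returns an ObservedWatch handle, which is what watch() stores per run_id so that end_watch() can detach just that run's handler with remove_handler_for_watch while other watchers keep using the shared observer. A minimal sketch of that add/remove pattern; the handler and the /tmp path are placeholders:

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

observer = Observer()
observer.start()

handler = FileSystemEventHandler()
watch = observer.schedule(handler, '/tmp', recursive=True)

# ... later: stop delivering events to this handler only; any other
# handlers sharing the observer keep running.
observer.remove_handler_for_watch(handler, watch)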
Code example #56
0
            #upload_file([filename], [bucket], [key], callback=[...])
            response = s3_client.upload_file(file_name, bucket, object_name)
        except ClientError as e:
            logging.error(e)
            return False
        return True

    def on_created(self, event):
        text = 'New file created at local path ' + event.src_path
        self.send_message_to_slack(text)
        #self.upload_to_s3_bucket(file_name=event.src_path, bucket='bucket_name')
        self.process(event)

    def on_modified(self, event):
        text = 'File modified at local path ' + event.src_path
        # self.send_message_to_slack(text)
        self.process(event)


if __name__ == "__main__":
    path = sys.argv[1] if len(sys.argv) > 1 else '.'
    event_handler = MyHandler()
    observer = Observer()
    observer.schedule(event_handler, path, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
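The upload helper at the top of this example follows the standard boto3 pattern: client('s3').upload_file manages multipart transfers for large files and raises ClientError on failure. A minimal self-contained sketch, assuming boto3 is installed; the upload_to_s3 helper and the bucket name are placeholders:

import logging

import boto3
from botocore.exceptions import ClientError

def upload_to_s3(file_name, bucket, object_name=None):
    # Default the S3 object key to the local file name.
    s3_client = boto3.client('s3')
    try:
        s3_client.upload_file(file_name, bucket, object_name or file_name)
    except ClientError as exc:
        logging.error(exc)
        return False
    return True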
Code example #57
0
File: gitchat.py Project: impulselabsinc/students
class App(object):
    def __init__(self):

        # Set up the file watcher and path;
        # you can also pass it a path from
        # the command line
        self.path = sys.argv[1] if len(sys.argv) > 1 else "../gitchat-messages"
        handler = CustomHandler(self)
        self.observer = Observer()
        self.observer.schedule(handler, self.path, recursive=False)

        # Initialize line numbers so we know where to apply color tags
        self.lineNumber = 0

        # Limited de-duplication by keeping track of last change loaded
        self.previousFile = []

        # Initialize the queue that goes between the filewatcher and the GUI
        self.queue = Queue()

        # Initialize GUI root object and set the window size (W x H)
        self.root = tk.Tk()
        self.root.geometry('600x400')

        # Build the GUI
        self.root.title("Gitchat")
        self.menu = menu.Menu(self.root)
        self.notebook = ttk.Notebook(self.root)
        self.notebook.pack()
        self.chtframe = chatframe.ChatFrame(self.notebook)
        self.initChatUi()
        self.mcframe = minecraftframe.MinecraftFrame(self.notebook)

        # Start watching files
        self.observer.start()

    def initChatUi(self):

        # Welcome message for Gitchat
        welcomeMsg = " " * 50 + "Welcome to Gitchat" + " " * 50
        self.lineNumber = self.lineNumber + 1

        self.chtframe.text.insert(tk.INSERT, welcomeMsg + "\n")
        self.chtframe.text.tag_add(
            "red",
            str(self.lineNumber) + ".0",
            str(self.lineNumber) + "." + str(len(welcomeMsg)))

        # Watched directory is displayed in GUI
        self.lineNumber = self.lineNumber + 1
        self.chtframe.text.insert("end", "Watching %s...\n" % self.path)

        # Text widget is disabled so users cannot type into it and mess up the numbering for tags
        self.chtframe.text.config(state=tk.DISABLED)

        # Have the GUI listen for shutdown events
        self.root.bind("<Destroy>", self.shutdown)

        # Subscribe the GUI to file watcher events
        self.root.bind("<<WatchdogEvent>>", self.handle_watchdog_event)

    def handle_watchdog_event(self, event):

        # Pick up watchdog events from the queue
        watchdog_event = self.queue.get()

        # Open the file that generated the event
        with open(watchdog_event.src_path) as f:

            # Read the entire file into memory
            lines = f.readlines()

            # No explicit close needed: the with block closes the file

            # Simple deduping test to account for
            # new files generating both a create
            # event and an update event
            if self.previousFile == lines:
                return None
            else:
                # Store the current file for the next compare
                self.previousFile = lines

                # Process each line
                # And keep track of the running total of lines for accurate tagging
                for line in lines:
                    self.lineNumber = self.chtframe.chat_insert(
                        line, self.lineNumber)

    def shutdown(self, event):
        # Shuts down filewatcher
        print('shutting down observer...')
        self.observer.stop()

    def mainloop(self):
        # Start the GUI loop
        self.root.mainloop()

    def notify(self, event):
        # Used by the file watcher to put events on the queue and to notify the GUI
        self.queue.put(event)
        self.root.event_generate("<<WatchdogEvent>>", when="tail")
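The Queue plus event_generate pair in notify() is the load-bearing pattern here: watchdog delivers events on its own thread, while Tk widgets may only be touched from the main loop, so the watcher thread queues the event and posts a virtual <<WatchdogEvent>> that the bound handler drains on the GUI thread. A minimal sketch of just that hand-off, using Python 3 names and printing in place of widget updates:

import queue
import tkinter as tk

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

root = tk.Tk()
events = queue.Queue()

class QueueingHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        # Runs on the watchdog thread: queue the event and notify Tk,
        # nothing more.
        events.put(event)
        root.event_generate('<<WatchdogEvent>>', when='tail')

def drain(_tk_event):
    # Runs on the Tk main thread, so widget updates are safe here.
    watchdog_event = events.get()
    print('changed:', watchdog_event.src_path)

root.bind('<<WatchdogEvent>>', drain)

observer = Observer()
observer.schedule(QueueingHandler(), '.', recursive=False)
observer.start()
root.mainloop()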
Code example #58
0
class Wukong(object):

    _profiling = False
    _dev = False

    def init(self):
        global conversation
        self.detector = None
        self._thinking = False
        self._interrupted = False
        print('''
********************************************************
*          wukong-robot - 中文语音对话机器人           *
*          (c) 2019 潘伟洲 <*****@*****.**>              *
*     https://github.com/wzpan/wukong-robot.git        *
********************************************************

            后台管理端:http://{}:{}
            如需退出,可以按 Ctrl-4 组合键

'''.format(config.get('/server/host', '0.0.0.0'),
           config.get('/server/port', '5000')))
        config.init()
        self._conversation = Conversation(self._profiling)
        self._conversation.say(
            '{} 你好!试试对我喊唤醒词叫醒我吧'.format(config.get('first_name', '主人')), True)
        self._observer = Observer()
        event_handler = ConfigMonitor(self._conversation)
        self._observer.schedule(event_handler, constants.CONFIG_PATH, False)
        self._observer.schedule(event_handler, constants.DATA_PATH, False)
        self._observer.start()
        if config.get('/LED/enable',
                      False) and config.get('/LED/type') == 'aiy':
            thread.start_new_thread(self._init_aiy_button_event, ())
        if config.get('/muse/enable', False):
            self._wakeup = multiprocessing.Event()
            self.bci = BCI.MuseBCI(self._wakeup)
            self.bci.start()
            thread.start_new_thread(self._loop_event, ())

    def _loop_event(self):
        while True:
            self._wakeup.wait()
            self._conversation.interrupt()
            query = self._conversation.activeListen()
            self._conversation.doResponse(query)
            self._wakeup.clear()

    def _signal_handler(self, signal, frame):
        self._interrupted = True
        utils.clean()
        self._observer.stop()

    def _detected_callback(self):
        def start_record():
            logger.info('开始录音')
            self._conversation.isRecording = True
            utils.setRecordable(True)

        if not utils.is_proper_time():
            logger.warning('勿扰模式开启中')
            return
        if self._conversation.isRecording:
            logger.warning('正在录音中,跳过')
            return
        self._conversation.interrupt()
        if config.get('/LED/enable', False):
            LED.wakeup()
        utils.setRecordable(False)
        Player.play(constants.getData('beep_hi.wav'),
                    onCompleted=start_record,
                    wait=True)

    def _do_not_bother_on_callback(self):
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = True
            Player.play(constants.getData('off.wav'))
            logger.info('勿扰模式打开')

    def _do_not_bother_off_callback(self):
        if config.get('/do_not_bother/hotword_switch', False):
            utils.do_not_bother = False
            Player.play(constants.getData('on.wav'))
            logger.info('勿扰模式关闭')

    def _interrupt_callback(self):
        return self._interrupted

    def _init_aiy_button_event(self):
        from aiy.board import Board
        with Board() as board:
            while True:
                board.button.wait_for_press()
                self._conversation.interrupt()
                query = self._conversation.activeListen()
                self._conversation.doResponse(query)

    def run(self):
        self.init()

        # capture SIGINT signal, e.g., Ctrl+C
        signal.signal(signal.SIGINT, self._signal_handler)

        # site
        server.run(self._conversation, self)

        statistic.report(0)

        try:
            self.initDetector()
        except AttributeError:
            logger.error('初始化离线唤醒功能失败')

    def initDetector(self):
        if self.detector is not None:
            self.detector.terminate()
        if config.get('/do_not_bother/hotword_switch', False):
            models = [
                constants.getHotwordModel(config.get('hotword',
                                                     'wukong.pmdl')),
                constants.getHotwordModel(
                    utils.get_do_not_bother_on_hotword()),
                constants.getHotwordModel(
                    utils.get_do_not_bother_off_hotword())
            ]
        else:
            models = constants.getHotwordModel(
                config.get('hotword', 'wukong.pmdl'))
        self.detector = snowboydecoder.HotwordDetector(models,
                                                       sensitivity=config.get(
                                                           'sensitivity', 0.5))
        # main loop
        try:
            if config.get('/do_not_bother/hotword_switch', False):
                callbacks = [
                    self._detected_callback, self._do_not_bother_on_callback,
                    self._do_not_bother_off_callback
                ]
            else:
                callbacks = self._detected_callback
            self.detector.start(
                detected_callback=callbacks,
                audio_recorder_callback=self._conversation.converse,
                interrupt_check=self._interrupt_callback,
                silent_count_threshold=config.get('silent_threshold', 15),
                recording_timeout=config.get('recording_timeout', 5) * 4,
                sleep_time=0.03)
            self.detector.terminate()
        except Exception as e:
            logger.critical('离线唤醒机制初始化失败:{}'.format(e))

    def help(self):
        print(
            """=====================================================================================
    python3 wukong.py [命令]
    可选命令:
      md5                      - 用于计算字符串的 md5 值,常用于密码设置
      update                   - 手动更新 wukong-robot
      upload [thredNum]        - 手动上传 QA 集语料,重建 solr 索引。
                                 threadNum 表示上传时开启的线程数(可选。默认值为 10)
      profiling                - 运行过程中打印耗时数据
      train <w1> <w2> <w3> <m> - 传入三个wav文件,生成snowboy的.pmdl模型
                                 w1, w2, w3 表示三个1~3秒的唤醒词录音
                                 m 表示snowboy的.pmdl模型
    如需更多帮助,请访问:https://wukong.hahack.com/#/run
====================================================================================="""
        )

    def md5(self, password):
        """
        Compute the md5 hash of a string
        """
        return hashlib.md5(str(password).encode('utf-8')).hexdigest()

    def update(self):
        """
        Update wukong-robot
        """
        updater = Updater()
        return updater.update()

    def fetch(self):
        """
        Check for wukong-robot updates
        """
        updater = Updater()
        updater.fetch()

    def upload(self, threadNum=10):
        """
        Manually upload the QA corpus and rebuild the solr index
        """
        try:
            qaJson = os.path.join(constants.TEMP_PATH, 'qa_json')
            make_json.run(constants.getQAPath(), qaJson)
            solr_tools.clear_documents(config.get('/anyq/host', '0.0.0.0'),
                                       'collection1',
                                       config.get('/anyq/solr_port', '8900'))
            solr_tools.upload_documents(config.get('/anyq/host',
                                                   '0.0.0.0'), 'collection1',
                                        config.get('/anyq/solr_port', '8900'),
                                        qaJson, threadNum)
        except Exception as e:
            logger.error("上传失败:{}".format(e))

    def restart(self):
        """
        Restart wukong-robot
        """
        logger.critical('程序重启...')
        try:
            self.detector.terminate()
        except AttributeError:
            pass
        python = sys.executable
        os.execl(python, python, *sys.argv)

    def profiling(self):
        """
        Print timing data while running
        """
        logger.info('性能调优')
        self._profiling = True
        self.run()

    def dev(self):
        logger.info('使用测试环境')
        self._dev = True
        self.run()

    def train(self, w1, w2, w3, m):
        '''
        Take three wav files and generate a snowboy .pmdl model
        '''
        def get_wave(fname):
            with open(fname, 'rb') as infile:
                return base64.b64encode(infile.read()).decode('utf-8')

        url = 'https://snowboy.kitt.ai/api/v1/train/'
        data = {
            "name":
            "wukong-robot",
            "language":
            "zh",
            "token":
            config.get('snowboy_token', '', True),
            "voice_samples": [{
                "wave": get_wave(w1)
            }, {
                "wave": get_wave(w2)
            }, {
                "wave": get_wave(w3)
            }]
        }
        response = requests.post(url, json=data)
        if response.ok:
            with open(m, "wb") as outfile:
                outfile.write(response.content)
            return 'Snowboy模型已保存至{}'.format(m)
        else:
            return "Snowboy模型生成失败,原因:{}".format(response.text)
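One detail from init() above generalizes well: a single handler instance can be scheduled on several directories, which is how one ConfigMonitor covers both CONFIG_PATH and DATA_PATH. A minimal sketch of that pattern; the two paths are placeholders:

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

handler = FileSystemEventHandler()
observer = Observer()
observer.schedule(handler, '/etc/myapp', recursive=False)
observer.schedule(handler, '/var/lib/myapp', recursive=False)
observer.start()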
Code example #59
0
def main():
    
        parser = argparse.ArgumentParser('RGB Matrix App')
        parser.add_argument('-a', action="store", dest="ncps", type=int, help="Matrix side length")
        parser.add_argument('-b', action="store", dest="np", type=int, help="Num. pixels per cell")
        parser.add_argument('files', metavar='FILE', nargs='*', help='files to read, if empty, stdin is used. Each line is [[r,g,b],...]')
            
        global n_cells_per_side
        global n_pixeis
        global filess
        global patterns
        
        if os.path.isfile('RGBMatrixConf.txt'):
            with open('RGBMatrixConf.txt', 'r') as my_file:
                first_line = my_file.readline().rstrip()
            arglist = first_line.split()
            arglist.extend(sys.argv)
            res = parser.parse_args(arglist)
            n_cells_per_side = res.ncps
            n_pixeis = res.np
            filess = res.files
            
            #print (sys.argv[1], " ", len(sys.argv[1]))
            if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]):
                patterns.append(sys.argv[1])
                observer = Observer()
                observer.schedule(MyHandler(), path='.')
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                except KeyboardInterrupt:
                    observer.stop()
                observer.join()
            else:
                global counter
                global canvas
                # read from stdin
                # print("read from stdin")
                
                win = GraphicsWindow()
                win.setTitle("RGB Matrix")
                canvas = win.canvas()
                canvas.setWidth(n_cells_per_side*n_pixeis)
                canvas.setHeight(n_cells_per_side*n_pixeis)
                counter = False
                for i in range(n_cells_per_side*n_cells_per_side):
                    x = i // n_cells_per_side
                    y = i % n_cells_per_side
                    canvas.setColor(0,0,0)
                    canvas.drawRectangle(x*n_pixeis,y*n_pixeis,n_pixeis,n_pixeis)
                
                for line in sys.stdin:
                    cells = arg_as_list(line)
                    if (len(cells) == (n_cells_per_side*n_cells_per_side)):
                        
                        canvas.clear()
                        
                        for i in range(n_cells_per_side*n_cells_per_side):
                            x = i // n_cells_per_side
                            y = i % n_cells_per_side
                            canvas.setColor(cells[i][0],cells[i][1],cells[i][2])
                            canvas.drawRectangle(x*n_pixeis,y*n_pixeis,n_pixeis,n_pixeis)
                    #time.sleep(0.5)
        else:
            raise IOError("File RGBMatrixConf.txt doesn't appear to exist.")
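Note that arglist.extend(sys.argv) above also appends sys.argv[0], the script path itself, which is why the positional files argument can end up holding the program name. A sketch of the merge that avoids this, keeping file-based defaults first so flags given on the real command line win; argument names follow the parser above:

import argparse
import sys

parser = argparse.ArgumentParser('RGB Matrix App')
parser.add_argument('-a', dest='ncps', type=int, help='Matrix side length')
parser.add_argument('-b', dest='np', type=int, help='Num. pixels per cell')
parser.add_argument('files', metavar='FILE', nargs='*')

with open('RGBMatrixConf.txt') as conf:
    defaults = conf.readline().split()

# Defaults first, real argv (minus the script name) last, so later
# occurrences of a flag override the file-based ones.
res = parser.parse_args(defaults + sys.argv[1:])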
Code example #60
-1
File: monitor.py Project: Kalinon/ComicStreamer
    def mainLoop(self):

        logging.debug("Monitor: started main loop.")
        self.session = self.dm.Session()
        self.library = Library(self.dm.Session)
        
        observer = Observer()
        self.eventHandler = MonitorEventHandler(self)
        for path in self.paths:
            if os.path.exists(path):
                observer.schedule(self.eventHandler, path, recursive=True)
        observer.start()
        
        while True:
            try:
                (msg, args) = self.queue.get(block=True, timeout=1)
            except Exception:
                # Empty on timeout; a bare except here would also swallow
                # KeyboardInterrupt
                msg = None
                
            # dispatch messages
            if msg == "scan":
                self.dofullScan(self.paths)

            if msg == "events":
                self.doEventProcessing(args)
            
            #time.sleep(1)
            if self.quit:
                break
            
        self.session.close()
        self.session = None
        observer.stop()
        logging.debug("Monitor: stopped main loop.")
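The mainLoop above multiplexes two event sources through one queue (watchdog events and scan requests), polling with a one-second timeout so the quit flag is checked regularly. A minimal sketch of that queue-driven shape, with placeholder message handling and the current directory watched:

import queue

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

messages = queue.Queue()

class QueueHandler(FileSystemEventHandler):
    def on_any_event(self, event):
        # Producer side: runs on the watchdog thread.
        messages.put(('events', event))

observer = Observer()
observer.schedule(QueueHandler(), '.', recursive=True)
observer.start()

try:
    while True:
        try:
            msg, args = messages.get(block=True, timeout=1)
        except queue.Empty:
            continue  # timeout: a real loop would check its quit flag here
        if msg == 'events':
            print('fs event:', args)
except KeyboardInterrupt:
    pass

observer.stop()
observer.join()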