Example #1
def watch_assets(options):
    """
    Watch for changes to asset files, and regenerate js/css
    """
    # Don't watch assets when performing a dry run
    if tasks.environment.dry_run:
        return

    observer = Observer()

    CoffeeScriptWatcher().register(observer)
    SassWatcher().register(observer)
    XModuleSassWatcher().register(observer)
    XModuleAssetsWatcher().register(observer)

    print("Starting asset watcher...")
    observer.start()
    if not getattr(options, 'background', False):
        # when not running in the background, the main thread must loop
        # so the watcher can be shut down with Ctrl-C
        try:
            while True:
                observer.join(2)
        except KeyboardInterrupt:
            observer.stop()
        print("\nStopped asset watcher.")
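
The watcher classes above (CoffeeScriptWatcher and friends) are project-specific, but the loop itself is plain watchdog. A minimal, self-contained sketch of the same pattern, with an illustrative handler and path:

import time
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer

class PrintingHandler(FileSystemEventHandler):
    # illustrative handler: report every filesystem event
    def on_any_event(self, event):
        print(event.event_type, event.src_path)

observer = Observer()
observer.schedule(PrintingHandler(), path=".", recursive=True)
observer.start()
try:
    # join() with a timeout keeps the main thread responsive to Ctrl-C
    while observer.is_alive():
        observer.join(2)
except KeyboardInterrupt:
    observer.stop()
observer.join()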
Example #2
def code():
    # file monitor server
    observer = Observer()

    # py yml file monitor
    patterns = ["*.py", "*demo.yml"]  # the '*' wildcard is required and must come first.
    restart_processor = ServerStarter(
        [
            {"cmd": "rm -rf %s/*.log" % os.path.join(workspace, "log"), "is_daemon": False},
            {"cmd": "./run.py run", "network_port": (config["simple_server"]["port"],)},
        ]
    )
    monitor = SourceCodeMonitor(restart_processor, patterns)
    observer.schedule(monitor, program_dir, recursive=True)
    observer.schedule(monitor, http_api.__path__[0], recursive=True)

    # # rebuild css and js min files when a source file changes
    # patterns = ['*.css', '*.js', '*static.yml']     # the '*' wildcard is required and must come first.
    # monitor = SourceCodeMonitor(BuildCssJsProcessor(program_dir, static), patterns, None, 500)
    # observer.schedule(monitor, program_dir, recursive=True)

    # start monitoring
    observer.start()
    try:
        time.sleep(31536000)  # one year
    except KeyboardInterrupt:
        observer.stop()
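
SourceCodeMonitor does its own glob filtering, but watchdog also ships PatternMatchingEventHandler, which filters by patterns natively. A minimal sketch of the same py/yml filter:

from watchdog.events import PatternMatchingEventHandler

class PyFileHandler(PatternMatchingEventHandler):
    def __init__(self):
        super().__init__(patterns=["*.py", "*demo.yml"], ignore_directories=True)

    def on_modified(self, event):
        # called only for paths matching the patterns above
        print('changed:', event.src_path)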
Example #3
def main():
    if not config['play']['scan']:
        raise Exception('''
            Nothing to scan. Add a path in the config file.

            Example:

                play:
                    scan:
                        -
                            type: shows
                            path: /a/path/to/the/shows
            ''')
    obs = Observer()
    for s in config['play']['scan']:
        event_handler = Handler(
            scan_path=s['path'],
            type_=s['type'],
        )
        obs.schedule(
            event_handler,
            s['path'],
            recursive=True,
        )
    obs.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Example #4
class Modsync:
    _target = []
    _source = ''
    _observer = None

    def __init__(self):
        pass

    def setSource(self, source):
        self._source = source

    def setTarget(self, target_dir):
        self._target.append(target_dir)

    def getObserver(self):
        return self._observer

    def run(self):

        if not self._source:
            return 0

        self._observer = Observer()
        event_handler = ModsyncEventHandler(self._observer, self._source, self._target)
        self._observer.schedule(event_handler, self._source, recursive=True)
        self._observer.start()
        try:
            time.sleep(2)
        except KeyboardInterrupt:
            self._observer.stop()
        self._observer.join()
        return 0
Example #5
def send_watchdog(self):

    """
    Method of ``Mail``.
    Send mail when a new file is created.
    Alter this method if a different sending condition is needed.
    """

    #r_pipe,w_pipe = Pipe()
    queue = Queue.Queue()
    event = Event()
    #event_handler = GetNameEventHandler(w_pipe,event)
    #send_mail_thread = SendMailThread(r_pipe,event,self)
    event_handler = GetNameEventHandler(queue,event)
    send_mail_thread = SendMailThread(queue,event,self)
    send_mail_thread.start()

    observer = Observer()
    path = self.config['BOOK_PATH']
    observer.schedule(event_handler, path, recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        event.set()
        observer.stop()
        # NOTE: kills threads by force; needs to change
        sys.exit()
Example #6
class Scheduler:

  def __init__(self, config):

    fs = config.get('scheduler', 'fs', 0)
    dest = config.get('store', 'path', 0)
    self.ioqueue = Queue()
    self.iothread = Thread(target=self.ioprocess)
    self.iothread.daemon = True
    self.observer = Observer()
    self.event_handler = IoTask(self.ioqueue, fs, dest)
    self.observer.schedule(self.event_handler, fs, recursive=True)

  def ioprocess(self):
    while True:
      t = self.ioqueue.get()
      try:
        t.process()
      finally:
        self.ioqueue.task_done()

  def start(self):
    self.observer.start()
    self.iothread.start()

  def stop(self):
    self.observer.stop()
    # Thread has no stop(); the I/O worker is a daemon and exits with the process

  def join(self):
    self.observer.join()
    self.ioqueue.join()  # wait for queued I/O tasks rather than the daemon thread
Example #7
    def __init__(self, input_dir, templates_dir):

        paths = [input_dir, templates_dir]

        try:
            observer = Observer()
            event_handler = WatchEventHandler()

            for i in paths:
                targetPath = str(i)
                observer.schedule(event_handler, targetPath, recursive=True)

            observer.start()

            signal_watch_init = signal('watch_init')
            signal_watch_init.send(self)

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                wrangler._reporter.log("Stopping with grace and poise", "green")
                observer.stop()
            
            observer.join()
        except Exception:
            return None
Example #8
    def compile(self):
        """
        Generate autodoc rst files from code docstrings and compile sphinx output
        """
        if self.options.get('watch', False):
            # If the watch folder is set, we will set up
            # an observer using watchdog and re-compile on code change

            from watchdog.observers import Observer

            observer = Observer()
            handler = CompileEventHandler(self)
            observer.schedule(handler, path=self.doc_path, recursive=True)
            observer.start()

            print("Watching folder @ {} for changes.".format(self.doc_path))

            try:
                while True:
                    time.sleep(1)
            except KeyboardInterrupt:
                print("Stopping watch folder and exiting.")
                observer.stop()
            observer.join()

            print("Goodbye")

        else:

            print("Compiling autodoc rst and sphinx files")

            self._compile_all()

            print("Files compiled. Goodbye")
Example #9
def main():
    arguments = docopt(__doc__, version='Storyline HTTP v0.1')

    if arguments.get('--debug'):
        logger = logging.getLogger()
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stderr)
        logger.addHandler(handler)

    app.config.from_object(__name__)
    app.debug = arguments.get('--debug')

    story_path = arguments.get('STORY_PATH', '.')

    global plot
    plot = storyfile.load_plot_from_path(story_path)

    observer = Observer()
    observer.schedule(LoggingEventHandler(), path=story_path, recursive=True)
    observer.schedule(Reloader(story_path), path=story_path, recursive=True)

    observer.start()
    try:
        app.run()
    finally:
        observer.stop()
        observer.join()
Example #10
def serve(site, director):
    """Run a simple web server that serves the output directory and watches for
    changes to the site. When something changes, the site is regenerated.
    """
    # Override the log level to display some interactive messages with the
    # user. With the dev server running, there's no sense in being silent.
    logger.setLevel(logging.INFO)

    # Start the watchdog.
    event_handler = SiteHandler(director)
    observer = Observer()
    observer.schedule(event_handler, site.path, recursive=True)
    observer.start()

    # The simple HTTP server is pretty dumb and does not even take a path to
    # serve. The only way to serve the right path is to change the directory.
    outdir = director.outdir
    os.chdir(outdir)

    socketserver.TCPServer.allow_reuse_address = True
    httpd = socketserver.TCPServer(('', PORT), SimpleHTTPRequestHandler)

    logger.info(
        _('Serving {outdir} at http://localhost:{port}/.'
          '\nPress Ctrl-C to quit.').format(outdir=outdir, port=PORT))
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        logger.info(_('\nBye.'))
        observer.stop()

    observer.join()
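
A side note on the chdir workaround above: since Python 3.7, SimpleHTTPRequestHandler accepts a directory argument, so the server can point at outdir without changing the working directory. A sketch reusing the names from this example:

import functools
import socketserver
from http.server import SimpleHTTPRequestHandler

# Python 3.7+: bind the handler to a directory instead of calling os.chdir()
handler = functools.partial(SimpleHTTPRequestHandler, directory=str(outdir))
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(('', PORT), handler)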
Example #11
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--after', help="a command to run after")
    args = parser.parse_args()

    py_event_handler = TouchFileEventHandler(
        patterns=['*.py'],
        touch_file='index.rst'
    )
    rst_event_handler = MakeEventHandler(
        patterns=['*.rst'],
        make_target='html',
        after=args.after)

    observer = Observer()
    observer.schedule(py_event_handler, path='..', recursive=True)
    observer.schedule(rst_event_handler, path='.', recursive=True)

    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #12
def filemonitor(topdir, mode, jfs):
    errors = {}
    def saferun(cmd, *args):
        log.debug('running %s with args %s', cmd, args)
        try:
            return cmd(*args)
        except Exception as e:
            puts(colored.red('Ouch. Something\'s wrong with "%s":' % args[0]))
            log.exception('SAFERUN: Got exception when processing %s', args)
            errors.update( {args[0]:e} )
            return False

    if mode == 'archive':
        event_handler = ArchiveEventHandler(jfs, topdir)
    elif mode == 'sync':
        event_handler = SyncEventHandler(jfs, topdir)
        #event_handler = LoggingEventHandler()
    elif mode == 'share':
        event_handler = ShareEventHandler(jfs, topdir)
    else:
        raise ValueError('unknown mode: %s' % mode)
    observer = Observer()
    observer.schedule(event_handler, topdir, recursive=True)
    observer.start()
    try:
        puts(colored.green('Starting JottaCloud monitor'))
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        puts(colored.red('JottaCloud monitor stopped'))
    observer.join()
Example #13
def watch_directory(watch_dir, target_dir, condition=None):
    if condition:
        condition.acquire()

    watch_path = abspath(watch_dir)
    logger.info('Watch path: %s' % watch_path)

    target_path = abspath(target_dir)
    logger.info('Target path: %s' % target_path)

    handler = ModifiedHandler(watch_path, target_path)
    obs = Observer()

    obs.schedule(handler, watch_path, recursive=True)
    obs.start()

    if condition:
        condition.notify()
        condition.release()
    try:
        while True:
            sleep(1)
    except KeyboardInterrupt:
        obs.stop()
    obs.join()
Example #14
def go_watch():
    try:
        print('Start watching %s' % PATH_TO_WATCH)
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
        event_handler = LoggingEventHandler()
        observer = Observer()
        observer.schedule(event_handler, PATH_TO_WATCH, recursive=True)
        observer.start()
        event_handler.on_modified = sync_upload
        event_handler.on_deleted = sync_upload_delete
        event_handler.on_created = sync_upload_create
        event_handler.on_moved = sync_upload_move
        time_loop = 1
        try:
            while True:
                time.sleep(1)
                time_loop += 1
                if not time_loop % AUTO_SYNC_TIME:
                    print('Auto sync every %s seconds' % AUTO_SYNC_TIME)
                    if not observer.event_queue.unfinished_tasks:
                        sync_download()
                        check_dir_deleted()
                    print('Auto-checking downloaded files and folders')
                    check_dir_deleted()
        except KeyboardInterrupt:
            print('End watching.')
            observer.stop()
        observer.join()
    except Exception as e:
        print('*' * 10)
        print(e)
        print('*' * 10)
        return
Example #15
    def watch(self, source, write=True, package=None, run=False, force=False):
        """Watches a source and recompiles on change."""
        from watchdog.events import FileSystemEventHandler
        from watchdog.observers import Observer

        def recompile(path):
            if os.path.isfile(path) and os.path.splitext(path)[1] in code_exts:
                self.compile_path(path, write, package, run, force)

        class watcher(FileSystemEventHandler):
            def on_modified(_, event):
                recompile(event.src_path)
            def on_created(_, event):
                recompile(event.src_path)

        source = fixpath(source)

        self.console.show("Watching        "+showpath(source)+" ...")
        self.console.print("(press Ctrl-C to end)")

        observer = Observer()
        observer.schedule(watcher(), source, recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(.1)
        except KeyboardInterrupt:
            pass
        finally:
            observer.stop()
            observer.join()
Example #16
def watch_project(markdown_fn, output_fn, template_fn, render_first=True):
    class Handler(FileSystemEventHandler):
        def on_any_event(self, event):
            if event.src_path == os.path.abspath(output_fn):
                return
            print('Rendering slides...')
            process_slides(markdown_fn, output_fn, template_fn)

    if render_first:
        process_slides(markdown_fn, output_fn, template_fn)
        
    observer = Observer()
    event_handler = Handler()

    dirname = os.path.dirname(os.path.abspath(markdown_fn))
    
    observer.schedule(event_handler, path=dirname, recursive=True)
    print("Watching for events on {:s}...".format(dirname))
    observer.start()

    try:
        while True:
            time.sleep(0.5)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #17
def main():
    if (len(sys.argv) > 1 and os.path.exists(sys.argv[1]) and os.path.isfile(sys.argv[1])):
        filename = sys.argv[1]
    else:
        filename = gui.get_path("*.csv",defaultFile="data.csv")
    
    commonPath = os.path.abspath(os.path.split(filename)[0])
    outputFile = os.path.join(commonPath, "odmanalysis.csv")
    
    print("Now watching %s for changes" % filename)
    handler = OMDCsvChunkHandler(filename,outputFile)
    observer = Observer()
    observer.schedule(handler, path=commonPath, recursive=False)
    handler.startPCChain()
    observer.start()

    try:
        while True:
            time.sleep(1)
                
    except (KeyboardInterrupt, SystemExit):
        print("Stopping...")
        observer.stop()
        time.sleep(1)
    observer.join()
Example #18
def watch(directory=None, auto_clear=False, beep_on_failure=True,
          onpass=None, onfail=None, extensions=[]):
    """
    Watches the given directory and reruns the tests
    whenever a watched file changes.
    """
    if directory and not os.path.isdir(directory):
        raise ValueError('Directory not found: ' + directory)
    directory = os.path.abspath(directory or '')

    # Initial run
    event_handler = ChangeHandler(directory, auto_clear, beep_on_failure,
                                  onpass, onfail, extensions)
    event_handler.run()

    # Setup watchdog
    observer = Observer()
    observer.schedule(event_handler, path=directory, recursive=True)
    observer.start()

    # Watch and run tests until interrupted by user
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #19
def main(): 
    global badExtensionCounter, failedFlag, pool, failedProcessCounter#, db
    
    sql_setup() # Set-up SQL Database/check to see if exists
    
    # Initiate File Path Handler
    observer = Observer()
    observer.schedule(MyHandler(), path=file_path, recursive=True)
    observer.start()
    
    cpuCount = multiprocessing.cpu_count() # Count all available CPUs
    print("\nTotal CPU Count: %d" % cpuCount)
    pool = multiprocessing.Pool(4, worker, (processQueue,)) # Create 4 child processes to handle all queued elements
    active = multiprocessing.active_children() # All active child processes
    print("Total number of active child processes: %s\n" % str(active))
    
    try:
        while True:
            time.sleep(0.2)
    except KeyboardInterrupt:
        pool.terminate() # Stop all child processes
        pool.join() # Join the processes with parent and terminate
        active = multiprocessing.active_children() # All active child processes, list should be empty at this point.
        print("\nTotal number of active child processes: %s\n" % str(active))
        shutdown() # Run shutdown sequence        
        observer.stop()
        observer.join()
        sys.exit(1)
Example #20
def generate_and_observe(args, event):
    while event.is_set():
        # Generate the presentation
        monitor_list = generate(args)
        print("Presentation generated.")

        # Make a list of involved directories
        directories = defaultdict(list)
        for file in monitor_list:
            directory, filename = os.path.split(file)
            directories[directory].append(filename)

        observer = Observer()
        handler = HovercraftEventHandler(monitor_list)
        for directory, files in directories.items():
            observer.schedule(handler, directory, recursive=False)

        observer.start()
        while event.wait(1):
            time.sleep(0.05)
            if handler.quit:
                break

        observer.stop()
        observer.join()
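
The event argument is a threading.Event that the caller keeps set while the watcher should run; clearing it makes the inner wait(1) loop fall through and the outer loop exit. A hypothetical driver (args stands in for the parsed command-line options):

import threading
import time

event = threading.Event()
event.set()
worker = threading.Thread(target=generate_and_observe, args=(args, event))
worker.start()
time.sleep(60)  # hypothetical: watch for a minute
event.clear()   # ask the watcher to wind down
worker.join()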
Example #21
class WatchFile(object):
    def __init__(self, send_msg_func, *args, **kargs):
        self.path = kargs.get('path', '.')
        self.suffix = kargs.get('suffix', '*')  # '*' matches any file
        self.observer = Observer()
        self.event_handler = MyFileMonitor(self.suffix, callback=self.get_data)
        self.send_msg_func = send_msg_func
        self.filename = self.zip_filename = ''

    def run(self):
        self.observer.schedule(self.event_handler, self.path, recursive=True)
        self.observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            self.observer.stop()
        self.observer.join()

    def get_data(self, filename):
        data = self._unpack(filename)
        data = str(data)
        print(data, type(data))
        self.send_msg_func(data)

    def _unpack(self, filename):
        # first rename the suffix so the file reads as a zip archive
        # may not work on Linux
        if system == 'Windows':
            filename = filename[2:] if filename.startswith('.\\') else filename
            filename = filename.lstrip()
            new_name = filename.split('.')[0] + '.zip'
            new_name = new_name[1:] if new_name.startswith('\\') else new_name
        elif system == 'Linux':
            new_name = filename

        print('Old name:', filename, ' New name:', new_name)

        self.filename = filename
        self.zip_filename = new_name
        # wait for the OS to finish creating the file
        time.sleep(3)
        os.rename(filename, new_name)
        zip_file = zipfile.ZipFile(new_name, 'r')
        json_data = ""
        for name in zip_file.namelist():
            if name == "project.json":
                with zip_file.open(name, 'r') as f:
                    json_data = f.read().decode('utf-8')
        # change filename back to .sb2
        if new_name.endswith('.zip'):
            os.rename(new_name, filename)

        return self.get_cmd(json_data)

    def get_cmd(self, json_data):
        jsonfy_data = json.loads(json_data)
        child = jsonfy_data['children'][0]
        scripts = child['scripts']
        return scripts
Example #22
File: server.py Project: befks/odoo
class FSWatcher(object):
    def __init__(self):
        self.observer = Observer()
        for path in odoo.modules.module.ad_paths:
            _logger.info('Watching addons folder %s', path)
            self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        if isinstance(event, (FileCreatedEvent, FileModifiedEvent, FileMovedEvent)):
            if not event.is_directory:
                path = getattr(event, 'dest_path', event.src_path)
                if path.endswith('.py'):
                    try:
                        source = open(path, 'rb').read() + b'\n'
                        compile(source, path, 'exec')
                    except SyntaxError:
                        _logger.error('autoreload: python code change detected, SyntaxError in %s', path)
                    else:
                        _logger.info('autoreload: python code updated, autoreload activated')
                        restart()

    def start(self):
        self.observer.start()
        _logger.info('AutoReload watcher running')

    def stop(self):
        self.observer.stop()
        self.observer.join()
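
Note that FSWatcher schedules itself as the handler: watchdog delivers events by calling dispatch(event) on whatever object was passed to schedule(), so any object with a dispatch() method will do. The trick in isolation (path is illustrative):

from watchdog.observers import Observer

class SelfDispatchingWatcher:
    def __init__(self, path):
        self.observer = Observer()
        # the watcher object doubles as its own event handler
        self.observer.schedule(self, path, recursive=True)

    def dispatch(self, event):
        print('got event:', event)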
Example #23
class PropMTimeWatcher:
    def __init__(self, app_data_folder):
        self._app_data_folder = app_data_folder
        self._observer = Observer()
        self.schedule()

    def schedule(self):
        pref = PropMTimePreferences(self._app_data_folder)
        self._observer.unschedule_all()
        for path, watcher in pref.get_all_paths().items():
            if watcher:
                if os.path.exists(path):
                    event_handler = ModHandler(path, self._app_data_folder)
                    log.info('scheduling watcher : %s' % path)
                    self._observer.schedule(event_handler, path=path, recursive=True)
                else:
                    log.error('Error: "%s" does not exist.\n\nPlease edit the path.\n\nTo do this, click on the %s icon and select "Paths".' %
                              (path, __application_name__))
        self._observer.start()

    def request_exit(self):
        self._observer.unschedule_all()
        self._observer.stop()
        self._observer.join(TIMEOUT)
        if self._observer.is_alive():
            log.error('observer still alive')
Example #24
def main():
    # Sync all changes that occurred while track-changes.py wasn't running.
    if os.path.isdir("out"):
        shutil.rmtree("out", True)

    if not os.path.isdir("out"):
        os.mkdir("out")
 
    startup_changes.sync_offline_changes("posts", "out")

    print("Watching posts directory for changes... CTRL+C to quit.")
    watch_directory = "posts"

    event_handler = MyHandler()

    # Run the watchdog.
    observer = Observer()
    observer.schedule(event_handler, watch_directory, True)
    observer.start()

    # Keep the script running, or else Python exits without stopping the
    # observer thread, which causes an error.
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()

    observer.join()
Example #25
    def __init__(self, config):
        """
        Initialize the watcher, use the config passed from main
        """
        self.config = config


        # List of pending files
        self.pending_files = set()

        self.sync_timer = None

        # Setup our watchdog observer
        observer = Observer()
        observer.schedule(ChangeHandler(self.on_file_changed), path=config.directory, recursive=True)
        observer.start()

        logging.info("Starting change tracker, cmd: {}, dir: {}, delay: {}".format(config.sync_cmd,
                                                                                   config.directory,
                                                                                   config.delay))
        try:
            while True:
                time.sleep(0.5)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
Example #26
def stream_video(video_path):

    global VIDEO_BITRATE
    global AUDIO_BITRATE

    create_working_directory()

    head, tail = os.path.split(video_path)
    name = tail.split('.')[0]

    nonce = '-' + ''.join(random.choice(string.ascii_uppercase + string.digits) for x in range(5))

    if '.avi' in video_path:
        args = "-i %s  -vcodec h264 -b %s -acodec libfaac -ab %s -f hls ./.kickflip/%s.m3u8"
        args = args % (video_path, VIDEO_BITRATE, AUDIO_BITRATE, name+nonce)
    else:
        args = "-i %s -f hls -codec copy ./.kickflip/%s.m3u8"
        args = args % (video_path, name+nonce)

    observer = Observer()
    observer.schedule(SegmentHandler(), path='./.kickflip')

    observer.start()
    time.sleep(3) # This is a f*****g hack.
    process = envoy.run('ffmpeg ' + args)
    observer.stop()
    observer.join()

    upload_file(video_path)
    return ''
Example #27
class ActivityCheck(Thread):

    def __init__(self, period, path, mailhost, fromaddr, toaddrs):
        Thread.__init__(self)
        self.period = int(period)
        self.path = path
        self.activity = False
        self.last_time = datetime.datetime.now()
        self.message_sent = False
        self.subject = 'WARNING : ' + HOSTNAME + ' : ' + 'telecaster monitor activity'
        self.logger = EmailLogger(mailhost, fromaddr, toaddrs, self.subject)
        self.event_handler = ActivityEventHandler(ignore_patterns=IGNORE_PATTERNS)
        self.observer = Observer()
        self.observer.schedule(self.event_handler, path, recursive=True)
        self.observer.start()

    def run(self):        
        while True:
            if not self.event_handler.activity:
                now = datetime.datetime.now()
                delta = now - self.last_time
                if delta.total_seconds() > LOG_MAX_PERIOD or not self.message_sent:
                    self.logger.logger.error('The monitor is NOT recording anymore in ' + self.path + ' ! ')
                    self.last_time = now
                    self.message_sent = True
            else:
                self.event_handler.activity = False
            time.sleep(self.period)

    def stop(self):
        self.observer.stop()
Example #28
def watch(path, handler=None, debug=True):
	import time
	from watchdog.observers import Observer
	from watchdog.events import FileSystemEventHandler

	class Handler(FileSystemEventHandler):
		def on_any_event(self, event):
			if debug:
				print("File {0}: {1}".format(event.event_type, event.src_path))

			if not handler:
				print("No handler specified")
				return

			handler(event.src_path, event.event_type)

	event_handler = Handler()
	observer = Observer()
	observer.schedule(event_handler, path, recursive=True)
	observer.start()
	try:
		while True:
			time.sleep(1)
	except KeyboardInterrupt:
		observer.stop()
	observer.join()
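
Because both the handler and the debug flag are parameters, usage is a one-liner; the callback below is illustrative:

# log every change under the current directory
watch('.', handler=lambda path, event_type: print(event_type, path))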
Example #29
def main():

    pandoc_path = which("pandoc")
    if not pandoc_path:
        print("pandoc executable must be in the path to be used by pandoc-watch!")
        exit()

    config = Configuration.Instance()

    parseOptions()

    config.setDirContentAndTime(getDirectoryWatchedElements())

    print("Starting pandoc watcher ...")

    while True:
        event_handler = ChangeHandler()
        observer = Observer()
        observer.schedule(event_handler, os.getcwd(), recursive=True)
        observer.start()
        try:
            while True:
                time.sleep(1)
        except KeyboardInterrupt as err:
            print(str(err))
            observer.stop()

        print("Stopping pandoc watcher ...")

        exit()
Example #30
    else:
        print("Directory exists, nothing to do here.")


def main():
    config = configparser.ConfigParser()

    config.read("config.ini")

    #check_for_log_dir(config)
    w = Watcher(config)
    w.run()


if __name__ == '__main__':
    config = configparser.ConfigParser()

    config.read("config.ini")
    o = Observer()
    o.schedule(Handler(), path=config['System']['location_to_watch'])
    o.start()

    import time

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        o.stop()

    o.join()
Example #31
def start(services, auto_upgrade, anchore_module, skip_config_validate, skip_db_compat_check, all):
    """
    Startup and monitor service processes. Specify a list of service names or empty for all.
    """

    global config
    ecode = 0

    auto_upgrade = True

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK', str(skip_db_compat_check)).lower() in ['true', 't', 'y', 'yes']:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if services:
        input_services = list(services)
    else:
        input_services = os.getenv('ANCHORE_ENGINE_SERVICES', '').strip().split()

    if not input_services and not all:
        raise click.exceptions.BadArgumentUsage('No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option')

    try:
        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(',')
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration        
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception("cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info("Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name + ".db.entities.upgrade")
#        except TableNotFoundError as ex:
#            logger.info("Initialized DB not found.")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) + ") cannot be found/imported - exception: " + str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn('could not find any services to execute in the config file')
                sys.exit(1)

            input_services = [ name for name, srv_conf in list(config_services.items()) if srv_conf.get('enabled')]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn('specified service {} not found in list of available services {} - removing from list of services to start'.format(service_conf_name, list(service_map.keys())))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error("No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting")
            sys.exit(1)


        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(localconfig)
            
            #override db_timeout since upgrade might require longer db session timeout setting
            try:
                if 'timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['timeout'] = 86400
                elif 'connect_timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['connect_timeout'] = 86400
            except Exception as err:
                pass
            
            anchore_manager.cli.utils.connect_database(config, db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.cli.utils.init_database(upgrade_module=module, localconfig=localconfig, do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if auto_upgrade and 'anchore-catalog' in services:
                            logger.info("Auto-upgrade is set - performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info("No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn("this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(str(code_versions['db_version']), str(db_versions['db_version']), str(max_timeout - int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn('no existing anchore DB data can be discovered, assuming bootstrap')
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception("this version of anchore-engine requires the anchore DB version ("+str(code_versions['db_version'])+") but we discovered anchore DB version ("+str(db_versions['db_version'])+") in the running DB - please perform the DB upgrade process and retry")

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))

        for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
            try:
                if not os.path.exists(supportdir):
                    os.makedirs(supportdir, 0o755)
            except Exception as err:
                logger.error("cannot create log directory {} - exception: {}".format(supportdir, str(err)))
                raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/anchore/" + service + ".pid"
            try:
                terminate_service(service, flush_pidfile=True)

                service_thread = ServiceThread(startup_service, (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                alive = True
                while not os.path.exists(pidfile) and tries < max_tries:
                    logger.info("waiting for service pidfile {} to exist {}/{}".format(pidfile, tries, max_tries))

                    try:
                        alive = service_thread.thread.is_alive()
                    except:
                        pass
                    if not alive:
                        logger.info("service thread has stopped {}".format(service))
                        break

                    time.sleep(1)
                    tries = tries + 1

                logger.info("auto_restart_services setting: {}".format(localconfig.get('auto_restart_services', False)))
                if not localconfig.get('auto_restart_services', False):
                    logger.info("checking for startup failure pidfile={}, is_alive={}".format(os.path.exists(pidfile), alive))
                    if not os.path.exists(pidfile) or not alive:
                        raise Exception("service thread for ({}) failed to start".format(service))

                time.sleep(1)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(str(err)))
                break

        if startFailed:
            logger.fatal("one or more services failed to start. cleanly terminating the others")
            for service in services:
                terminate_service(service, flush_pidfile=True)
            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if localconfig.get('auto_restart_services', False):
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info("restarting service: {}".format(service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error("failed to startup log watchers - exception: {}".format(str(err)))
                raise err

    except Exception as err:
        logger.error(anchore_manager.cli.utils.format_error_output(config, 'servicestart', {}, err))
        if not ecode:
            ecode = 2
            
    anchore_manager.cli.utils.doexit(ecode)
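
AnchoreLogWatcher is project code, but pointing an Observer at a log directory boils down to a small handler that re-reads files as they grow. A minimal tail-like sketch (the offset bookkeeping is illustrative):

from watchdog.events import FileSystemEventHandler

class TailHandler(FileSystemEventHandler):
    def __init__(self):
        self.offsets = {}

    def on_modified(self, event):
        # print whatever was appended to the file since the last event
        if event.is_directory:
            return
        with open(event.src_path, 'rb') as f:
            f.seek(self.offsets.get(event.src_path, 0))
            chunk = f.read()
            self.offsets[event.src_path] = f.tell()
        if chunk:
            print(chunk.decode(errors='replace'), end='')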
Example #32
def start(auto_upgrade, anchore_module, skip_config_validate,
          skip_db_compat_check):
    global config
    ecode = 0

    auto_upgrade = True

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK',
                      str(skip_db_compat_check)).lower() in [
                          'true', 't', 'y', 'yes'
                      ]:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    try:
        service_map = {
            'analyzer': 'anchore-worker',
            'simplequeue': 'anchore-simplequeue',
            'apiext': 'anchore-api',
            'catalog': 'anchore-catalog',
            'kubernetes_webhook': 'anchore-kubernetes-webhook',
            'policy_engine': 'anchore-policy-engine',
            'feeds': 'anchore-feeds'
        }

        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(',')
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(
                    configdir=configdir,
                    configfile=configfile,
                    validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception(
                "cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info(
                "Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name +
                                             ".db.entities.upgrade")
        except TableNotFoundError as ex:
            logger.info("Initialized DB not found.")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) +
                            ") cannot be found/imported - exception: " +
                            str(err))

        # get the list of local services to start
        startFailed = False
        if 'ANCHORE_ENGINE_SERVICES' in os.environ:
            input_services = os.environ['ANCHORE_ENGINE_SERVICES'].split()
        else:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn(
                    'could not find any services to execute in the config file'
                )
                sys.exit(1)

            input_services = [
                name for name, srv_conf in config_services.items()
                if srv_conf.get('enabled')
            ]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in service_map.values():
                service = service_conf_name
            else:
                service = service_map.get(service_conf_name)

            if service:
                services.append(service)
            else:
                logger.warn(
                    'specified service {} not found in list of available services {} - removing from list of services to start'
                    .format(service_conf_name, service_map.keys()))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error(
                "No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting"
            )
            sys.exit(1)

        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(
                localconfig)
            db_params = anchore_manager.cli.utils.connect_database(
                config, db_params, db_retries=300)

            code_versions, db_versions = anchore_manager.cli.utils.init_database(
                upgrade_module=module,
                localconfig=localconfig,
                do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if auto_upgrade and 'anchore-catalog' in services:
                            logger.info(
                                "Auto-upgrade is set - performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info(
                                        "No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn(
                                "this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds."
                                .format(
                                    str(code_versions['db_version']),
                                    str(db_versions['db_version']),
                                    str(max_timeout -
                                        int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn(
                        'no existing anchore DB data can be discovered, assuming bootstrap'
                    )
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception(
                    "this version of anchore-engine requires the anchore DB version ("
                    + str(code_versions['db_version']) +
                    ") but we discovered anchore DB version (" +
                    str(db_versions['db_version']) +
                    ") in the running DB - please perform the DB upgrade process and retry"
                )

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))
        try:
            if not os.path.exists("/var/log/anchore"):
                os.makedirs("/var/log/anchore/", 0o755)
        except Exception as err:
            logger.error(
                "cannot create log directory /var/log/anchore - exception: {}".
                format(str(err)))
            raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/" + service + ".pid"
            try:
                service_thread = ServiceThread(startup_service,
                                               (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                while not os.path.exists(pidfile) and tries < max_tries:
                    time.sleep(1)
                    tries = tries + 1

                time.sleep(2)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(
                    str(err)))

        if startFailed:
            logger.error(
                "one or more services failed to start. cleanly terminating the others"
            )
            for service in services:
                terminate_service(service, flush_pidfile=True)

            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(),
                                  path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if 'auto_restart_services' in localconfig and localconfig[
                                'auto_restart_services']:
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info(
                                        "restarting service: {}".format(
                                            service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error(
                    "failed to startup log watchers - exception: {}".format(
                        str(err)))
                raise err

    except Exception as err:
        logger.error(
            anchore_manager.cli.utils.format_error_output(
                config, 'servicestart', {}, err))
        if not ecode:
            ecode = 2

    anchore_manager.cli.utils.doexit(ecode)
Example #33
else:
    print('Loading and merging local config data: ' + local_config_file_path)
    with open(local_config_file_path) as local_config_file:
        local_config = json.load(local_config_file)
    config = merge_two_dicts(config, local_config)

contentSourceDir = config['contentSourceDirectory']
contentDistDir = config['contentDestinationDirectory']
imgSourceDir = config['imagesSourceDirectory']
imgDistDir = config['imagesDestinationDirectory']

print('content source directory: ' + contentSourceDir)
print('content destination directory: ' + contentDistDir)
print('image source directory: ' + imgSourceDir)
print('image destination directory: ' + imgDistDir)

if __name__ == "__main__":
    md_to_json_handler = MdToJsonHandler.MdToJsonHandler(
        contentSourceDir, contentDistDir)
    image_copier_handler = ImageCopierHandler.ImageCopierHandler(
        imgSourceDir, imgDistDir)
    observer = Observer()
    observer.schedule(md_to_json_handler, contentSourceDir, recursive=True)
    observer.schedule(image_copier_handler, imgSourceDir, recursive=True)
    observer.start()
    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
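
As this example shows, one Observer can multiplex several watches. Each schedule() call also returns an ObservedWatch handle, so individual watches can be removed later; a sketch:

from watchdog.events import LoggingEventHandler
from watchdog.observers import Observer

observer = Observer()
# schedule() returns a handle for this particular (handler, path) pair
watch = observer.schedule(LoggingEventHandler(), '.', recursive=True)
observer.start()
observer.unschedule(watch)   # drop just this watch; the observer keeps running
observer.stop()
observer.join()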
Example #34
    def mainLoop(self):

        logging.debug("Monitor: started main loop.")
        self.session = self.dm.Session()
        self.library = Library(self.dm.Session)
        
        observer = Observer()
        self.eventHandler = MonitorEventHandler(self)
        for path in self.paths:
            if os.path.exists(path):
                observer.schedule(self.eventHandler, path, recursive=True)
        observer.start()
        
        while True:
            try:
                (msg, args) = self.queue.get(block=True, timeout=1)
            except Exception:
                msg = None
                
            #dispatch messages
            if msg == "scan":
                self.dofullScan(self.paths)

            if msg == "events":
                self.doEventProcessing(args)
            
            #time.sleep(1)
            if self.quit:
                break
            
        self.session.close()
        self.session = None
        observer.stop()
        logging.debug("Monitor: stopped main loop.")