Example #1
def background_thread():
    global observer
    event_handler = CsvWatcher()
    observer = Observer()
    observer.schedule(event_handler, './', recursive=True)
    observer.start()
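
CsvWatcher is defined elsewhere; a minimal sketch of what such a handler might look like (the patterns and printed message are assumptions, not part of the original):

from watchdog.events import PatternMatchingEventHandler

class CsvWatcher(PatternMatchingEventHandler):
    def __init__(self):
        # React only to CSV files and ignore directory events
        super().__init__(patterns=["*.csv"], ignore_directories=True)

    def on_modified(self, event):
        print("CSV changed:", event.src_path)

Since the observer lives in a global, a matching shutdown path elsewhere would call observer.stop() followed by observer.join().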
Example #2
 def __init__(self, watchDirectory: str): 
     self.watchDirectory = watchDirectory
     self.observer = Observer() 
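
Only the constructor is shown; a hedged sketch of the run method such a class typically pairs with it (the event_handler argument and one-second sleep are assumptions, and `time` must be imported):

 def run(self, event_handler):
     # Watch the configured directory until interrupted
     self.observer.schedule(event_handler, self.watchDirectory, recursive=True)
     self.observer.start()
     try:
         while True:
             time.sleep(1)
     except KeyboardInterrupt:
         self.observer.stop()
     self.observer.join()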
Example #3
def monitor(configuration):
    """Monitors the filesystem for crontab changes"""

    pid = multiprocessing.current_process().pid

    print('Starting global crontab monitor process')
    logger.info('Starting global crontab monitor process')

    # Set base_dir and base_dir_len

    shared_state['base_dir'] = os.path.join(configuration.user_settings)
    shared_state['base_dir_len'] = len(shared_state['base_dir'])

    # Allow e.g. logrotate to force log re-open after rotates
    register_hangup_handler(configuration)

    # Monitor crontab configurations

    crontab_monitor_home = shared_state['base_dir']
    recursive_crontab_monitor = True

    crontab_monitor = Observer()
    crontab_pattern = os.path.join(crontab_monitor_home, '*', crontab_name)
    atjobs_pattern = os.path.join(crontab_monitor_home, '*', atjobs_name)
    shared_state['crontab_handler'] = MiGCrontabEventHandler(
        patterns=[crontab_pattern, atjobs_pattern], ignore_directories=False,
        case_sensitive=True)

    crontab_monitor.schedule(shared_state['crontab_handler'],
                             configuration.user_settings,
                             recursive=recursive_crontab_monitor)
    crontab_monitor.start()

    if len(crontab_monitor._emitters) != 1:
        logger.error('(%s) Number of crontab_monitor._emitters != 1' % pid)
        return 1
    crontab_monitor_emitter = min(crontab_monitor._emitters)
    if not hasattr(crontab_monitor_emitter, '_inotify'):
        logger.error('(%s) crontab_monitor_emitter require inotify' % pid)
        return 1
    shared_state['crontab_inotify'] = crontab_monitor_emitter._inotify._inotify

    logger.info('(%s) trigger crontab and atjobs refresh' % (pid, ))

    # Fake touch event on all crontab files to load initial crontabs

    # logger.info('(%s) trigger load on all files (greedy) matching %s or %s' \
    #            % (pid, crontab_pattern, atjobs_pattern))

    # We manually walk and test to get the greedy "*" directory match behaviour
    # of the PatternMatchingEventHandler

    all_crontab_files, all_atjobs_files = [], []

    for (root, _, files) in walk(crontab_monitor_home):
        if crontab_name in files:
            crontab_path = os.path.join(root, crontab_name)
            all_crontab_files.append(crontab_path)
        if atjobs_name in files:
            atjobs_path = os.path.join(root, atjobs_name)
            all_atjobs_files.append(atjobs_path)

    for target_path in all_crontab_files + all_atjobs_files:

        logger.debug('(%s) trigger load on cron/at file in %s' %
                     (pid, target_path))

        shared_state['crontab_handler'].dispatch(
            FileModifiedEvent(target_path))

    # logger.debug('(%s) loaded initial crontabs:\n%s' % (pid,
    # all_crontab_files))

    while not stop_running.is_set():
        try:
            loop_start = datetime.datetime.now()
            loop_minute = loop_start.replace(second=0, microsecond=0)
            logger.debug('main loop started with %d crontabs and %d atjobs' %
                         (len(all_crontabs), len(all_atjobs)))
            for crontab_path, user_crontab in all_crontabs.items():
                client_dir = os.path.basename(os.path.dirname(crontab_path))
                client_id = client_dir_id(client_dir)
                for entry in user_crontab:
                    logger.debug('inspect cron entry for %s: %s' %
                                 (client_id, entry))
                    if cron_match(configuration, loop_minute, entry):
                        logger.info('run matching cron entry: %s' % entry)
                        run_handler(configuration, client_id, loop_minute,
                                    entry)
            # Iterate over a copy: entries may be deleted from all_atjobs below
            for atjobs_path, user_atjobs in list(all_atjobs.items()):
                client_dir = os.path.basename(os.path.dirname(atjobs_path))
                client_id = client_dir_id(client_dir)
                remaining = []
                for entry in user_atjobs:
                    logger.debug('inspect atjobs entry for %s: %s' %
                                 (client_id, entry))
                    remain_mins = at_remain(configuration, loop_minute, entry)
                    if remain_mins == 0:
                        logger.info('run matching at entry: %s' % entry)
                        run_handler(configuration, client_id, loop_minute,
                                    entry)
                    elif remain_mins > 0:
                        remaining.append(entry)
                    else:
                        logger.info('removing expired at job: %s' % entry)
                # Update remaining jobs to clean up expired
                if remaining:
                    all_atjobs[atjobs_path] = remaining
                else:
                    del all_atjobs[atjobs_path]
        except KeyboardInterrupt:
            print('(%s) caught interrupt' % pid)
            stop_running.set()
        except Exception as exc:
            logger.error('unexpected exception in monitor: %s' % exc)
            import traceback
            print(traceback.format_exc())

        # Throttle down until next minute

        loop_time = (datetime.datetime.now() - loop_start).seconds
        if loop_time > 60:
            logger.warning('(%s) loop did not finish before next tick: %s' %
                           (os.getpid(), loop_time))
            loop_time = 59
        # Target sleep until start of next minute
        sleep_time = max(60 - (loop_time + loop_start.second), 1)
        # TODO: this debug log never shows up - conflict with user info log?
        #       at least it does if changed to info.
        logger.debug('main loop sleeping %ds' % sleep_time)
        # print('main loop sleeping %ds' % sleep_time)
        time.sleep(sleep_time)

    print('(%s) Exiting crontab monitor' % pid)
    logger.info('(%s) Exiting crontab monitor' % pid)
    return 0
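
cron_match and at_remain are project helpers not shown here. For illustration only, a toy minute-granularity matcher in the same spirit (the field names are assumptions, not the real entry format):

def toy_cron_match(timestamp, entry):
    """Return True if a crontab-style entry fires at the given minute."""
    checks = [
        (entry['minute'], timestamp.minute),
        (entry['hour'], timestamp.hour),
        (entry['monthday'], timestamp.day),
        (entry['month'], timestamp.month),
        (entry['weekday'], timestamp.isoweekday() % 7),
    ]
    # Each field is either a wildcard or a single numeric value here;
    # real crontabs also allow lists, ranges and step values.
    return all(field == '*' or int(field) == value
               for field, value in checks)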
Example #4
        print("Specify path to workspace!")
        exit(1)

    mesh_keys_path = os.path.expanduser(sys.argv[1])
    network_file = 'mesh_network_data.txt'
    app_file = 'mesh_app_data.txt'
    key = winreg.OpenKey(
        winreg.HKEY_LOCAL_MACHINE,
        r'SOFTWARE\WOW6432Node\Frontline Test Equipment\User Data')
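    # QueryValueEx returns a (value, type) tuple; [0] keeps just the value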
    my_decoders_path = winreg.QueryValueEx(key, "My Decoders")[0]
    mesh_options_filename = 'MeshOptions.ini'

    file_modified()

    mesh_keys_file_event_handler = MeshKeysFileEventHandler(patterns=[
        os.path.join(mesh_keys_path, network_file),
        os.path.join(mesh_keys_path, app_file)
    ])
    observer = Observer()
    observer.schedule(mesh_keys_file_event_handler,
                      path=mesh_keys_path,
                      recursive=False)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
    observer.join()
Example #5
def start():
    global observer
    observer = Observer()
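    # Note: on Windows, r"C:" (no trailing backslash) refers to the current
    # directory on drive C, not the drive root; use "C:\\" to watch the root.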
    observer.schedule(Handler(), path=r"C:", recursive=True)
    observer.start()
Example #6
 def __init__(self):
     self.observer = Observer()
     for path in odoo.modules.module.ad_paths:
         _logger.info('Watching addons folder %s', path)
         self.observer.schedule(self, path, recursive=True)
Example #7
    def run_watcher(self):
        """
        Watcher thread's function.

        :return:
            None.
        """
        # Create observer
        observer = Observer()

        # Start observer
        observer.start()

        # Dict that maps file path to `watch object`
        watch_obj_map = {}

        # Run change check in a loop
        while not self._watcher_to_stop:
            # Get current watch paths
            old_watch_path_s = set(watch_obj_map)

            # Get new watch paths
            new_watch_path_s = self._find_watch_paths()

            # For each new watch path
            for new_watch_path in new_watch_path_s:
                # Remove from the old watch paths if exists
                old_watch_path_s.discard(new_watch_path)

                # If the new watch path was not watched
                if new_watch_path not in watch_obj_map:
                    try:
                        # Schedule a watch
                        watch_obj = observer.schedule(
                            # 2KGRW
                            # `FileSystemEventHandler` instance
                            self,
                            # File path to watch
                            new_watch_path,
                            # Whether recursive
                            recursive=True,
                        )

                        # Store the watch obj
                        watch_obj_map[new_watch_path] = watch_obj

                    # If an error occurred
                    except OSError:
                        # Mark the path as attempted but unwatched
                        watch_obj_map[new_watch_path] = None

            # For each old watch path that is not in the new watch paths
            for old_watch_path in old_watch_path_s:
                # Get the watch object, if any
                watch_obj = watch_obj_map.pop(old_watch_path, None)

                # If there is a watch object
                if watch_obj is not None:
                    # Unschedule the watch
                    observer.unschedule(watch_obj)

            # Store new watch paths
            self._watch_paths = new_watch_path_s

            # Sleep before next check
            time.sleep(self._interval)
Example #8
    def __init__(self):
        _import_watchdog()
        from watchdog.observers import Observer

        self._observer = Observer()
        self._started = False
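
_import_watchdog is not shown; helpers like this typically just fail fast with an instructive message when the optional dependency is missing. A minimal sketch, assuming that behavior (the error wording is invented):

def _import_watchdog():
    # Fail fast with an instructive error if watchdog is not installed
    try:
        import watchdog  # noqa: F401
    except ImportError as exc:
        raise ImportError(
            "watchdog is required for file watching; "
            "install it with 'pip install watchdog'") from exc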
Example #9
def serve(args):
    """Start a server which will watch .md and .rst files for changes.
    If a md file changes, the Home Documentation is rebuilt. If a .rst
    file changes, the updated sphinx project is rebuilt

    Args:
        args (ArgumentParser): flags from the CLI
    """
    # Server's parameters
    port = args.serve_port or PORT
    host = "0.0.0.0"

    # Current working directory
    dir_path = Path().absolute()
    yaml = YAML()
    with open(dir_path / 'mkdocs.yml') as f:
        mkdocs_yml = yaml.load(f)
    site_dir = mkdocs_yml.get('site_dir', 'site')
    web_dir = dir_path / site_dir

    # Update routes
    utils.set_routes()

    # Offline mode
    if args.offline:
        os.environ["METADOCS_OFFLINE"] = "true"
        _ = subprocess.check_output("mkdocs build > /dev/null", shell=True)
        utils.make_offline()

    class MetadocsHTTPHandler(SimpleHTTPRequestHandler):
        """Class routing urls (paths) to projects (resources)
        """

        def translate_path(self, path):
            # default root -> cwd
            location = str(web_dir)
            route = location

            if len(path) != 0 and path != "/":
                for key, loc in utils.get_routes():
                    if path.startswith(key):
                        location = loc
                        path = path[len(key) :]
                        break

            if location[-1] == "/" or not path or path[0] == "/":
                route = location + path
            else:
                route = location + "/" + path

            return route.split("?")[0]

    # Serve as daemon thread
    success = False
    count = 0
    print("Waiting for server port...")
    try:
        # Must be set on the class before the server is created to take effect
        socketserver.TCPServer.allow_reuse_address = True
        while not success:
            try:
                httpd = socketserver.TCPServer((host, port), MetadocsHTTPHandler)
                success = True
            except OSError:
                count += 1
            finally:
                if not success and count > 20:
                    s = "port {} seems occupied. Try with {} ? (y/n)"
                    if "y" in input(s.format(port, port + 1)):
                        port += 1
                        count = 0
                    else:
                        print("You can specify a custom port with metadocs serve -s")
                        return
                time.sleep(0.5)
    except KeyboardInterrupt:
        print("Aborting.")
        return

    print("\nServing at http://{}:{}\n".format(host, port))
    thread = threading.Thread(target=httpd.serve_forever)
    thread.daemon = True
    thread.start()

    # Watch for changes
    event_handler = utils.MetadocsFileHandler(
        patterns=["*.rst", "*.md", "*.yml", "*.yaml"]
    )
    observer = Observer()
    observer.schedule(event_handler, path=str(dir_path), recursive=True)
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        observer.stop()
        httpd.server_close()
    observer.join()
Example #10
def watch_command(context, backend, config, poll):
    """
    Watch for change on your Sass project sources then compile them to CSS.

    Watched events are:

    \b
    * Create: when a new source file is created;
    * Change: when a source is changed;
    * Delete: when a source is deleted;
    * Move: when a source file is moved within watched dirs. Also occurs with
      editor transition files;

    Almost all errors occurring during compilation won't break the watcher, so
    you can resolve them and the watcher will try to compile again once a new
    event occurs.

    You can stop the watcher using the key combo "CTRL+C" (or CMD+C on macOS).
    """
    logger = logging.getLogger("boussole")
    logger.info("Watching project")

    # Discover settings file
    try:
        discovering = Discover(
            backends=[SettingsBackendJson, SettingsBackendYaml])
        config_filepath, config_engine = discovering.search(
            filepath=config, basedir=os.getcwd(), kind=backend)

        project = ProjectBase(backend_name=config_engine._kind_name)
        settings = project.backend_engine.load(filepath=config_filepath)
    except BoussoleBaseException as e:
        logger.critical(six.text_type(e))
        raise click.Abort()

    logger.debug(u"Settings file: {} ({})".format(config_filepath,
                                                  config_engine._kind_name))
    logger.debug(u"Project sources directory: {}".format(
        settings.SOURCES_PATH))
    logger.debug(u"Project destination directory: {}".format(
        settings.TARGET_PATH))
    logger.debug(u"Exclude patterns: {}".format(settings.EXCLUDES))

    # Watcher settings
    watcher_templates_patterns = {
        "patterns": ["*.scss", "*.sass"],
        "ignore_patterns": ["*.part"],
        "ignore_directories": False,
        "case_sensitive": True,
    }

    # Init inspector instance shared through all handlers
    inspector = ScssInspector()

    if not poll:
        logger.debug(u"Using Watchdog native platform observer")
        observer = Observer()
    else:
        logger.debug(u"Using Watchdog polling observer")
        observer = PollingObserver()

    # Init event handlers
    project_handler = WatchdogProjectEventHandler(settings, inspector,
                                                  **watcher_templates_patterns)

    lib_handler = WatchdogLibraryEventHandler(settings, inspector,
                                              **watcher_templates_patterns)

    # Observe source directory
    observer.schedule(project_handler, settings.SOURCES_PATH, recursive=True)

    # Also observe libraries directories
    for libpath in settings.LIBRARY_PATHS:
        observer.schedule(lib_handler, libpath, recursive=True)

    # Start watching
    logger.warning(u"Launching the watcher, use CTRL+C to stop it")
    observer.start()

    try:
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        logger.warning(u"CTRL+C used, stopping..")
        observer.stop()

    observer.join()
Example #11
class Server():
	def __init__(self, configfile=None, basedir=None, host="0.0.0.0", port=5000, debug=False, allowRoot=False, logConf=None):
		self._configfile = configfile
		self._basedir = basedir
		self._host = host
		self._port = port
		self._debug = debug
		self._allowRoot = allowRoot
		self._logConf = logConf
		self._ioLoop = None

	def stop(self):
		if self._ioLoop:
			self._ioLoop.stop()
			self._ioLoop = None

	def run(self):
		if not self._allowRoot:
			self._checkForRoot()

		global userManager
		global eventManager
		global loginManager
		global debug
		global softwareManager
		global discoveryManager
		global VERSION

		from tornado.wsgi import WSGIContainer
		from tornado.httpserver import HTTPServer
		from tornado.ioloop import IOLoop
		from tornado.web import Application, FallbackHandler

		from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler

		debug = self._debug

		# first initialize the settings singleton and make sure it uses given configfile and basedir if available
		self._initSettings(self._configfile, self._basedir)
		s = settings()

		# then initialize logging
		self._initLogging(self._debug, self._logConf)
		logger = logging.getLogger(__name__)

		if s.getBoolean(["accessControl", "enabled"]):
			userManagerName = settings().get(["accessControl", "userManager"])
			try:
				clazz = util.getClass(userManagerName)
				userManager = clazz()
			except AttributeError:
				logger.exception("Could not instantiate user manager %s, will run with accessControl disabled!" % userManagerName)

		softwareManager = swManager()
		VERSION = softwareManager.versionString

		logger.info("Starting AstroBox (%s) - Commit (%s)" % (VERSION, softwareManager.commit))

		from astroprint.migration import migrateSettings
		migrateSettings()

		eventManager = events.eventManager()
		printer = printerManager(printerProfileManager().data['driver'])

		#Start some of the managers here to make sure there are no thread collisions
		from astroprint.network.manager import networkManager
		from astroprint.boxrouter import boxrouterManager

		networkManager()
		boxrouterManager()

		# configure timelapse
		#octoprint.timelapse.configureTimelapse()

		app.wsgi_app = ReverseProxied(app.wsgi_app)

		app.secret_key = boxrouterManager().boxId
		loginManager = LoginManager()
		loginManager.session_protection = "strong"
		loginManager.user_callback = load_user
		if userManager is None:
			loginManager.anonymous_user = users.DummyUser
			principals.identity_loaders.appendleft(users.dummy_identity_loader)
		loginManager.init_app(app)

		# setup command triggers
		events.CommandTrigger(printer)
		if self._debug:
			events.DebugEventListener()

		if networkManager().isOnline():
			softwareManager.checkForcedUpdate()

		if self._host is None:
			self._host = s.get(["server", "host"])
		if self._port is None:
			self._port = s.getInt(["server", "port"])

		app.debug = self._debug

		from octoprint.server.api import api

		app.register_blueprint(api, url_prefix="/api")

		boxrouterManager() # Makes sure the singleton is created here. It doesn't need to be stored
		self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

		discoveryManager = DiscoveryManager()

		def access_validation_factory(validator):
			"""
			Creates an access validation wrapper using the supplied validator.

			:param validator: the access validator to use inside the validation wrapper
			:return: an access validation wrapper taking a request as parameter and performing the request validation
			"""
			def f(request):
				"""
				Creates a custom wsgi and Flask request context in order to be able to process user information
				stored in the current session.

				:param request: The Tornado request for which to create the environment and context
				"""
				wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
				with app.request_context(wsgi_environ):
					app.session_interface.open_session(app, flask.request)
					loginManager.reload_user()
					validator(flask.request)
			return f

		self._tornado_app = Application(self._router.urls + [
			#(r"/downloads/timelapse/([^/]*\.mpg)", LargeResponseHandler, {"path": s.getBaseFolder("timelapse"), "as_attachment": True}),
			(r"/downloads/files/local/([^/]*\.(gco|gcode))", LargeResponseHandler, {"path": s.getBaseFolder("uploads"), "as_attachment": True}),
			(r"/downloads/logs/([^/]*)", LargeResponseHandler, {"path": s.getBaseFolder("logs"), "as_attachment": True, "access_validation": access_validation_factory(admin_validator)}),
			#(r"/downloads/camera/current", UrlForwardHandler, {"url": s.get(["webcam", "snapshot"]), "as_attachment": True, "access_validation": access_validation_factory(user_validator)}),
			(r".*", FallbackHandler, {"fallback": WSGIContainer(app.wsgi_app)})
		])
		self._server = HTTPServer(self._tornado_app, max_buffer_size=167772160) #Allows for uploads up to 160MB
		self._server.listen(self._port, address=self._host)

		logger.info("Listening on http://%s:%d" % (self._host, self._port))

		eventManager.fire(events.Events.STARTUP)
		if s.getBoolean(["serial", "autoconnect"]):
			(port, baudrate) = s.get(["serial", "port"]), s.getInt(["serial", "baudrate"])
			connectionOptions = printer.getConnectionOptions()
			if port in connectionOptions["ports"]:
				printer.connect(port, baudrate)

		# start up watchdogs
		observer = Observer()
		observer.schedule(UploadCleanupWatchdogHandler(), s.getBaseFolder("uploads"))
		observer.start()

		try:
			self._ioLoop = IOLoop.instance()
			self._ioLoop.start()

		except SystemExit:
			pass

		except:
			logger.fatal("Please report this including the stacktrace below in AstroPrint's bugtracker. Thanks!")
			logger.exception("Stacktrace follows:")

		finally:
			observer.stop()
			self.cleanup()

		observer.join()
		logger.info('Good Bye!')
Example #12
 def __init__(self, src_path):
     # initialize variables that will be used globally (also outside the class!)
     self.__src_path = src_path
     self.__event_handler = plcEventHandler()
     self.__event_observer = Observer()
Example #13
def start(services, auto_upgrade, anchore_module, skip_config_validate, skip_db_compat_check, all):
    """
    Startup and monitor service processes. Specify a list of service names or empty for all.
    """

    global config
    ecode = 0

    auto_upgrade = True

    if not anchore_module:
        module_name = "anchore_engine"
    else:
        module_name = str(anchore_module)

    if os.environ.get('ANCHORE_ENGINE_SKIP_DB_COMPAT_CHECK', str(skip_db_compat_check)).lower() in ['true', 't', 'y', 'yes']:
        skip_db_compat_check = True
    else:
        skip_db_compat_check = False

    if services:
        input_services = list(services)
    else:
        input_services = os.getenv('ANCHORE_ENGINE_SERVICES', '').strip().split()

    if not input_services and not all:
        raise click.exceptions.BadArgumentUsage('No services defined to start. Must either provide service arguments, ANCHORE_ENGINE_SERVICES env var, or --all option')

    try:
        validate_params = {
            'services': True,
            'webhooks': True,
            'credentials': True
        }
        if skip_config_validate:
            try:
                items = skip_config_validate.split(',')
                for item in items:
                    validate_params[item] = False
            except Exception as err:
                raise Exception(err)

        # find/set up configuration        
        configdir = config['configdir']
        configfile = os.path.join(configdir, "config.yaml")

        localconfig = None
        if os.path.exists(configfile):
            try:
                localconfig = anchore_engine.configuration.localconfig.load_config(configdir=configdir, configfile=configfile, validate_params=validate_params)
            except Exception as err:
                raise Exception("cannot load local configuration: " + str(err))
        else:
            raise Exception("cannot locate configuration file ({})".format(configfile))

        # load the appropriate DB module
        try:
            logger.info("Loading DB routines from module ({})".format(module_name))
            module = importlib.import_module(module_name + ".db.entities.upgrade")
#        except TableNotFoundError as ex:
#            logger.info("Initialized DB not found.")
        except Exception as err:
            raise Exception("Input anchore-module (" + str(module_name) + ") cannot be found/imported - exception: " + str(err))

        # get the list of local services to start
        startFailed = False
        if not input_services:
            config_services = localconfig.get('services', {})
            if not config_services:
                logger.warn('could not find any services to execute in the config file')
                sys.exit(1)

            input_services = [ name for name, srv_conf in list(config_services.items()) if srv_conf.get('enabled')]

        services = []
        for service_conf_name in input_services:
            if service_conf_name in list(service_map.values()):
                svc = service_conf_name
            else:
                svc = service_map.get(service_conf_name)

            if svc:
                services.append(svc)
            else:
                logger.warn('specified service {} not found in list of available services {} - removing from list of services to start'.format(service_conf_name, list(service_map.keys())))

        if 'anchore-catalog' in services:
            services.remove('anchore-catalog')
            services.insert(0, 'anchore-catalog')

        if not services:
            logger.error("No services found in ANCHORE_ENGINE_SERVICES or as enabled in config.yaml to start - exiting")
            sys.exit(1)


        # preflight - db checks
        try:
            db_params = anchore_engine.db.entities.common.get_params(localconfig)
            
            #override db_timeout since upgrade might require longer db session timeout setting
            try:
                if 'timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['timeout'] = 86400
                elif 'connect_timeout' in db_params.get('db_connect_args', {}):
                    db_params['db_connect_args']['connect_timeout'] = 86400
            except Exception as err:
                pass
            
            anchore_manager.cli.utils.connect_database(config, db_params, db_retries=300)
            code_versions, db_versions = anchore_manager.cli.utils.init_database(upgrade_module=module, localconfig=localconfig, do_db_compatibility_check=(not skip_db_compat_check))

            in_sync = False
            timed_out = False
            max_timeout = 3600

            timer = time.time()
            while not in_sync and not timed_out:
                code_versions, db_versions = module.get_versions()

                if code_versions and db_versions:
                    if code_versions['db_version'] != db_versions['db_version']:
                        if auto_upgrade and 'anchore-catalog' in services:
                            logger.info("Auto-upgrade is set - performing upgrade.")
                            try:
                                # perform the upgrade logic here
                                rc = module.run_upgrade()
                                if rc:
                                    logger.info("Upgrade completed")
                                else:
                                    logger.info("No upgrade necessary. Completed.")
                            except Exception as err:
                                raise err

                            in_sync = True
                        else:
                            logger.warn("this version of anchore-engine requires the anchore DB version ({}) but we discovered anchore DB version ({}) in the running DB - it is safe to run the upgrade while seeing this message - will retry for {} more seconds.".format(str(code_versions['db_version']), str(db_versions['db_version']), str(max_timeout - int(time.time() - timer))))
                            time.sleep(5)
                    else:
                        logger.info("DB version and code version in sync.")
                        in_sync = True
                else:
                    logger.warn('no existing anchore DB data can be discovered, assuming bootstrap')
                    in_sync = True

                if (max_timeout - int(time.time() - timer)) < 0:
                    timed_out = True

            if not in_sync:
                raise Exception("this version of anchore-engine requires the anchore DB version ("+str(code_versions['db_version'])+") but we discovered anchore DB version ("+str(db_versions['db_version'])+") in the running DB - please perform the DB upgrade process and retry")

        except Exception as err:
            raise err

        finally:
            rc = anchore_engine.db.entities.common.do_disconnect()

        # start up services
        logger.info('Starting services: {}'.format(services))

        for supportdir in ["/var/log/anchore", "/var/run/anchore"]:
            try:
                if not os.path.exists(supportdir):
                    os.makedirs(supportdir, 0o755)
            except Exception as err:
                logger.error("cannot create log directory {} - exception: {}".format(supportdir, str(err)))
                raise err

        pids = []
        keepalive_threads = []
        for service in services:
            pidfile = "/var/run/anchore/" + service + ".pid"
            try:
                terminate_service(service, flush_pidfile=True)

                service_thread = ServiceThread(startup_service, (service, configdir))
                keepalive_threads.append(service_thread)
                max_tries = 30
                tries = 0
                alive = True
                while not os.path.exists(pidfile) and tries < max_tries:
                    logger.info("waiting for service pidfile {} to exist {}/{}".format(pidfile, tries, max_tries))

                    try:
                        alive = service_thread.thread.is_alive()
                    except:
                        pass
                    if not alive:
                        logger.info("service thread has stopped {}".format(service))
                        break

                    time.sleep(1)
                    tries = tries + 1

                logger.info("auto_restart_services setting: {}".format(localconfig.get('auto_restart_services', False)))
                if not localconfig.get('auto_restart_services', False):
                    logger.info("checking for startup failure pidfile={}, is_alive={}".format(os.path.exists(pidfile), alive))
                    if not os.path.exists(pidfile) or not alive:
                        raise Exception("service thread for ({}) failed to start".format(service))

                time.sleep(1)
            except Exception as err:
                startFailed = True
                logger.warn("service start failed - exception: {}".format(str(err)))
                break

        if startFailed:
            logger.fatal("one or more services failed to start. cleanly terminating the others")
            for service in services:
                terminate_service(service, flush_pidfile=True)
            sys.exit(1)
        else:
            # start up the log watchers
            try:
                observer = Observer()
                observer.schedule(AnchoreLogWatcher(), path="/var/log/anchore/")
                observer.start()

                try:
                    while True:
                        time.sleep(1)
                        if localconfig.get('auto_restart_services', False): #'auto_restart_services' in localconfig and localconfig['auto_restart_services']:
                            for service_thread in keepalive_threads:
                                if not service_thread.thread.is_alive():
                                    logger.info("restarting service: {}".format(service_thread.thread.name))
                                    service_thread.start()

                except KeyboardInterrupt:
                    observer.stop()
                observer.join()

            except Exception as err:
                logger.error("failed to startup log watchers - exception: {}".format(str(err)))
                raise err

    except Exception as err:
        logger.error(anchore_manager.cli.utils.format_error_output(config, 'servicestart', {}, err))
        if not ecode:
            ecode = 2
            
    anchore_manager.cli.utils.doexit(ecode)
Example #14
 def start_observer(self):
     self.observer = Observer()
     self.observer.schedule(self.watcher, os.path.dirname(self.file_observing), recursive=False)
     self.observer.start()
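
The matching teardown is not part of this example; a sketch of what it could look like:

 def stop_observer(self):
     # Stop watching and wait for the observer thread to exit
     if self.observer is not None:
         self.observer.stop()
         self.observer.join()
         self.observer = None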
Example #15
    def start(self):
        """
        Start threads and directory observer.
        """
        #start threads

        for threadid in range(1, self.options.threads):
            print "start proc [", threadid, "]"

            worker = Process(target=funcworker, args=(self, threadid))
            worker.daemon = True
            self.pool.append(worker)
            worker.start()
            #self.processimage(picture,options)
        self.starttime = time.time()
        if self.options.watch:
            eventhandler = addtoqueue(self.picturequeue)
            self.observer = Observer()
            self.observer.schedule(eventhandler, self.args[0], recursive=True)
            self.observer.start()
        # We let the master process do some work because it's useful for matplotlib.
        if not self.options.nowalk:
            self.fillqueuewithexistingfiles()
        if self.options.servermode:

            from Leash import addauthentication
        try:
            while (self.options.servermode or (not self.picturequeue.empty())
                   or (self.dirwalker and self.dirwalker.is_alive())
                   or self.options.watch):
                try:
                    picture = self.picturequeue.get(timeout=1)
                except Empty:
                    continue

                # in case something goes wrong
                try:
                    lastfile, data = self.procimage(picture, 0)
                except:
                    continue

                if self.options.servermode:
                    request = {
                        "command": "putplotdata",
                        "argument": {
                            "data": {
                                "result": "plot",
                                "data": {
                                    "filename": lastfile,
                                    "graphs": data,
                                    "stat": {}
                                }
                            }
                        }
                    }

                    self.plotdataqueue.put(request)
                if np.mod(self.allp.value, 500) == 0:
                    self.timreport()
        except KeyboardInterrupt:
            pass

        self.stop()
        self.timreport()
        return self.allp.value, time.time() - self.starttime
Example #16
 def __init__(self, dir_queue=None):
     logger.info('Starting to watch directory...')
     self.observer = Observer()
     self.dir_queue = dir_queue
Example #17
 def __init__(self, parent=None) -> None:
     super().__init__(parent=parent)
     self._observer = Observer()
     self._handler = Handler()
     self._observer.schedule(self._handler, path=unsafe_path, recursive=True)
Example #18
class Server():
    def __init__(self,
                 configfile=None,
                 basedir=None,
                 host="127.0.0.1",
                 port=5000,
                 debug=False,
                 allowRoot=False,
                 logConf=None):
        self._configfile = configfile
        self._basedir = basedir
        self._host = host
        self._port = port
        self._debug = debug
        self._allowRoot = allowRoot
        self._logConf = logConf
        self._ioLoop = None

    def stop(self):
        if self._ioLoop:
            self._ioLoop.stop()
            self._ioLoop = None

    def run(self):
        if not self._allowRoot:
            self._checkForRoot()

        global userManager
        global eventManager
        global loginManager
        global debug
        global softwareManager
        global discoveryManager
        global VERSION
        global UI_API_KEY

        from tornado.wsgi import WSGIContainer
        from tornado.httpserver import HTTPServer
        from tornado.ioloop import IOLoop
        from tornado.web import Application, FallbackHandler

        from astroprint.printfiles.watchdogs import UploadCleanupWatchdogHandler
        from astroprint.printfiles.watchdogs import EtherBoxHandler

        debug = self._debug

        # first initialize the settings singleton and make sure it
        # uses given configfile and basedir if available
        self._initSettings(self._configfile, self._basedir)
        s = settings()

        if not s.getBoolean(['api', 'regenerate']) and s.getString(
            ['api', 'key']):
            UI_API_KEY = s.getString(['api', 'key'])
        else:
            UI_API_KEY = ''.join('%02X' % z for z in uuid.uuid4().bytes)

        # then initialize logging
        self._initLogging(self._debug, self._logConf)
        logger = logging.getLogger(__name__)

        if s.getBoolean(["accessControl", "enabled"]):
            userManagerName = s.get(["accessControl", "userManager"])
            try:
                clazz = util.getClass(userManagerName)
                userManager = clazz()
            except AttributeError:
                logger.exception(
                    "Could not instantiate user manager %s, will run "
                    "with accessControl disabled!" % userManagerName)

        softwareManager = swManager()
        VERSION = softwareManager.versionString

        logger.info("Starting AstroBox (%s) - Commit (%s)" %
                    (VERSION, softwareManager.commit))

        from astroprint.migration import migrateSettings
        migrateSettings()

        eventManager = events.eventManager()
        printer = printerManager(printerProfileManager().data['driver'])

        # Start some of the managers here to make sure there are
        # no thread collisions
        from astroprint.network.manager import networkManager
        from astroprint.boxrouter import boxrouterManager

        networkManager()
        machine_id = s.get(["setup", "machineId"])
        access_key = s.get(["setup", "accessCode"])
        _set_boxid = False
        if machine_id and access_key:
            api = s.get(["cloudSlicer", "apiHost"])
            _data = {
                'boxid': machine_id,
                'access_key': access_key,
            }
            try:
                logger.info("Trying to set printer id")
                logger.info(_data)
                headers = {'Content-type': 'application/json'}
                res = requests.post(
                    "%s/api/printerauth/" % api,
                    json=_data,
                    headers=headers,
                    timeout=10,
                    allow_redirects=False,
                )
                if res.status_code == 200:
                    logger.info("Printer's id is: %s", machine_id)
                    boxrouterManager()
                    boxrouterManager().boxId = machine_id

                    _set_boxid = True
                    logger.info("Done setting printer id")
                else:
                    logger.info("Could not authenticate the printer")
                    logger.info("Response status code: %s", res.status_code)
            except Exception as e:
                logger.error(
                    "Error setting up boxrouter, boxid could not be set")
                logger.error(e)

        app.wsgi_app = ReverseProxied(app.wsgi_app)

        app.secret_key = os.urandom(4096)
        loginManager = LoginManager()
        loginManager.session_protection = "strong"
        loginManager.user_callback = load_user
        if userManager is None:
            loginManager.anonymous_user = users.DummyUser
            principals.identity_loaders.appendleft(users.dummy_identity_loader)
        loginManager.init_app(app)

        # setup command triggers
        events.CommandTrigger(printer)
        if self._debug:
            events.DebugEventListener()

        if networkManager().isOnline() and _set_boxid:
            softwareManager.checkForcedUpdate()

        if self._host is None:
            self._host = s.get(["server", "host"])
        if self._port is None:
            self._port = s.getInt(["server", "port"])

        app.debug = self._debug

        from octoprint.server.api import api

        app.register_blueprint(api, url_prefix="/api")

        # Makes sure the singleton is created here. It doesn't need to be stored
        boxrouterManager(connect=False)
        self._router = SockJSRouter(self._createSocketConnection, "/sockjs")

        discoveryManager = DiscoveryManager()

        def access_validation_factory(validator):
            """
            Creates an access validation wrapper using the supplied validator.

            :param validator: the access validator to use inside the validation
            wrapper
            :return: an access validation wrapper taking a request as parameter
            and performing the request validation
            """
            def f(request):
                """
                Creates a custom wsgi and Flask request context in order to be
                able to process user information stored in the current session.

                :param request: The Tornado request for which to create
                the environment and context
                """
                wsgi_environ = tornado.wsgi.WSGIContainer.environ(request)
                with app.request_context(wsgi_environ):
                    app.session_interface.open_session(app, request)
                    loginManager.reload_user()
                    validator(request)

            return f

        self._tornado_app = Application(self._router.urls + [
            #(r"/downloads/timelapse/([^/]*\.mpg)",
            #   LargeResponseHandler,
            #   {"path": s.getBaseFolder("timelapse"),
            #   "as_attachment": True}),
            (r"/downloads/files/local/([^/]*\.(gco|gcode))",
             LargeResponseHandler, {
                 "path": s.getBaseFolder("uploads"),
                 "as_attachment": True
             }),
            (r"/downloads/logs/([^/]*)", LargeResponseHandler, {
                "path": s.getBaseFolder("logs"),
                "as_attachment": True,
                "access_validation": access_validation_factory(admin_validator)
            }),
            #(r"/downloads/camera/current",
            #   UrlForwardHandler,
            #   {"url": s.get(["webcam", "snapshot"]),
            #   "as_attachment": True,
            #   "access_validation": access_validation_factory(user_validator)}),
            (r".*", FallbackHandler, {
                "fallback": WSGIContainer(app.wsgi_app)
            })
        ])
        self._server = HTTPServer(self._tornado_app,
                                  max_buffer_size=1048576 *
                                  s.getInt(['server', 'maxUploadSize']))
        self._server.listen(self._port, address=self._host)

        logger.info("Listening on http://%s:%d" % (self._host, self._port))

        eventManager.fire(events.Events.STARTUP)
        if s.getBoolean(["serial", "autoconnect"]):
            (port, baudrate) = s.get(["serial", "port"
                                      ]), s.getInt(["serial", "baudrate"])
            connectionOptions = printer.getConnectionOptions()
            if port in connectionOptions["ports"]:
                printer.connect(port, baudrate)

        usbdetector = EtherBoxHandler()
        # start up watchdogs
        observer = Observer()
        observer.schedule(UploadCleanupWatchdogHandler(),
                          s.getBaseFolder("uploads"))
        #observer.schedule(
        #    usbdetector, s.get(['usb', 'folder']), recursive=True)
        observer.start()

        try:
            self._ioLoop = IOLoop.instance()
            self._ioLoop.start()

        except SystemExit:
            pass

        except:
            logger.fatal("Please report this including the stacktrace below "
                         "in AstroPrint's bugtracker. Thanks!")
            logger.exception("Stacktrace follows:")

        finally:
            observer.stop()
            self.cleanup()

        observer.join()
        logger.info('Good Bye!')
Example #19
 def _start_observe_file_changes(self):
     observer = Observer()
     event_handler = RoutePriorities.ObserverEventHandler(
         self, self._config_file)
     observer.schedule(event_handler, path=self._config_file)
     observer.start()
Example #20
from watcher.Handler import Handler
from watchdog.observers import Observer
from indexer.indexer import Indexer
from watchdog.events import FileSystemEventHandler
import os

if __name__ == '__main__':
    web_dir_observer = Observer()
    event_handler = Handler()
    web_dir_observer.schedule(event_handler, path='docs_web/')
    web_dir_observer.start()
    nombresDocumentos = os.listdir("docs_web/")
    indexer = Indexer(list(map(lambda x: "docs_web/" + x, nombresDocumentos)))

    try:
        while True:
            word = input("----ingresa termino a buscar: ")
            indexer.searchIndex(word)

            # check whether new files were created and get the names collected by the handler
            is_change = event_handler.get_val()
            if is_change:
                indexer.update_indexer(is_change)
    except KeyboardInterrupt:
        web_dir_observer.stop()
    # sleep until keyboard interrupt, then stop + rejoin the observer
    web_dir_observer.join()
Example #21
    def watchdog(self, path):
        # Custom handler class
        class MyHandler(FileSystemEventHandler):
            global label_title  # declare globals
            global label_qty
            global qty
            global status

            def on_created(self, event):
                global label_title  # declare globals
                global label_qty
                global qty

                try:  # wrap in try/except so a failure cannot interrupt the program
                    config_list = []  # list that holds the config values
                    with open('./config.csv', 'r') as f:  # open the config file
                        reader = csv.reader(f)  # read the file
                        for i in reader:  # append each row's value to the list
                            config_list.append(i[1])

                    site = config_list[0]  # site
                    line = config_list[1]  # line
                    station = config_list[2]  # station
                    file_type = config_list[3]  # file type
                except:
                    site = "???"  # site
                    line = "???"  # line
                    station = "???"  # station
                    file_type = "???"  # file type

                my_name = socket.gethostname()  # local host name
                my_ip = socket.gethostbyname(my_name)  # local IP address

                now_time_for_rename = str(datetime.datetime.now().strftime(
                    '%Y%m%d%H%M%S'))  # current time, formatted for renaming
                path_name = os.path.basename(event.src_path)  # name of the created file
                created_file_path = event.src_path  # path of the created file
                created_file_type = os.path.splitext(created_file_path)[-1][
                    1:]  # extension of the created file
                if created_file_type in (file_type.upper(), file_type.lower()):
                    print(
                        "\n###############################################################"
                    )
                    print("Data file [%s] created at %s" %
                          (event.src_path,
                           datetime.datetime.now()))  # events to trigger are defined here

                    result = upload_file(
                        site + "_" + line + "_" + station + "_" + my_name +
                        '_' + my_ip + '_' + now_time_for_rename + '_' +
                        path_name, event.src_path)

                    if result == 1:
                        print("File upload succeeded\n")
                        label_title.setText('<b>OK<b>')  # set the UI status text
                        label_title.setStyleSheet(
                            'color: rgb(0, 255, 0)')  # set the status text color
                        qty = qty + 1
                        label_qty.setText(str(qty))  # update the UI counter
                        label_qty.setStyleSheet(
                            'color: rgb(0, 255, 0)')  # set the counter color
                    if result == 0:
                        print("File upload failed\n")
                        label_title.setText('<b>NG<b>')
                        label_title.setStyleSheet('color: rgb(255, 0, 0)')
                        label_qty.setText(str(qty))
                        label_qty.setStyleSheet('color: rgb(255, 0, 0)')

        event_handler = MyHandler()  # instantiate MyHandler
        observer = Observer()  # create the observer
        observer.schedule(event_handler, path, recursive=True)  # set the path to monitor
        observer.start()  # start monitoring
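        # Note: the observer is local to this method, so it cannot be stopped
        # later; to support a clean shutdown, store it (e.g. on self) and call
        # observer.stop() followed by observer.join() on exit.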
Example #22
 def __init__(self, directory):
     self.dir_to_watch = directory
     print "monitoring " + self.dir_to_watch
     self.observer = Observer()
Example #23
 def __init__(self, directory_to_watch, videoAnalysis, authChatIds, bot):
     self.observer = Observer()
     self.directory_to_watch = directory_to_watch
     self.videoAnalysis = videoAnalysis
     self.authChatIds = authChatIds
     self.bot = bot
Example #24
 def __init__(self):
     self.observer = Observer()
Example #25
 def __init__(self, src_path):
     self.__src_path = src_path
     self.__event_handler = ImagesEventHandler()
     self.__event_observer = Observer()
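
Only the constructor appears here; a hedged sketch of a run method that commonly completes this pattern (assumes `time` is imported and that watching should be recursive):

 def run(self):
     # Schedule the handler on the source path and watch until interrupted
     self.__event_observer.schedule(
         self.__event_handler, self.__src_path, recursive=True)
     self.__event_observer.start()
     try:
         while True:
             time.sleep(1)
     except KeyboardInterrupt:
         self.__event_observer.stop()
     self.__event_observer.join()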
Example #26
if __name__ == "__main__":

    if config.LIGHT_DEBUG:
        from light_controls import debug
        debug()

    from schedule_handler import Schedule_Runner

    schedule_runner = Schedule_Runner()
    loop = schedule_runner.controller.loop

    file_change_handler = FileChangeHandler(config.SCHEDULE_FILE,
                                            schedule_runner.run_schedule,
                                            loop=loop)

    obs = Observer()
    obs.schedule(file_change_handler,
                 path.Path(config.SCHEDULE_FILE).abspath().dirname()
                 )  #Define what file to watch and how
    obs.start()  #start watching file
    file_change_handler.process()  #start schedule running
    try:
        while True:
            #This does nothing except step through the loops (why is this necessary?)
            file_change_handler.loop.run_until_complete(
                asyncio.ensure_future(asyncio.sleep(
                    0.1, loop=file_change_handler.loop),
                                      loop=file_change_handler.loop)
            )  #arbitrary sleep time here I think. Could it be forever?
    except KeyboardInterrupt:
        obs.stop()
Example #27
class FileWatcher(object):
    """ A class with only static methods that wraps object watchdogs, to trigger callbacks when a file changes.
    """
    #: A :class:`~watchdog.observers.Observer` to watch when the file changes
    observer = Observer()

    #: A :class:`~watchdog.events.FileSystemEventHandler` to get notified when the file changes
    monitor = FileSystemEventHandler()

    # `int` that is a GLib timeout id to delay the callback
    timeout = 0

    @classmethod
    def watch_file(cls, path, callback, *args, **kwargs):
        """ Watches a new file with a new callback. Removes any precedent watched files.

        Args:
            path (`str`): full path to the file to watch
            callback (`function`): callback to call with all the further arguments when the file changes
        """
        cls.start_daemon()
        cls.stop_watching()

        directory = os.path.dirname(path)
        cls.monitor.on_modified = lambda evt: cls.enqueue(callback, *args, **kwargs) if evt.src_path == path else None
        try:
            cls.observer.schedule(cls.monitor, directory)
        except OSError:
            logger.error('Could not open dir at {}'.format(directory), exc_info=True)

    @classmethod
    def enqueue(cls, callback, *args, **kwargs):
        """ Do not call callback directly, instead delay as to avoid repeated calls in short periods of time.

        Args:
            callback (`function`): callback to call with all the further arguments
        """
        if cls.timeout:
            GLib.Source.remove(cls.timeout)
        cls.timeout = GLib.timeout_add(200, cls.call, callback, *args, **kwargs)


    @classmethod
    def call(cls, callback, *args, **kwargs):
        """ Call the callback

        Args:
            callback (`function`): callback to call with all the further arguments
        """
        if cls.timeout:
            cls.timeout = 0
        callback(*args, **kwargs)


    @classmethod
    def stop_watching(cls):
        """ Remove all files that are being watched
        """
        cls.observer.unschedule_all()


    @classmethod
    def start_daemon(cls):
        """ Start the watchdog observer thread
        """
        if not cls.observer.is_alive():
            cls.observer.start()


    @classmethod
    def stop_daemon(cls, wait = False):
        """ Stop the watchdog observer thread.

        Args:
            wait (`bool`): whether to wait for the thread to have joined before returning
        """
        cls.observer.unschedule_all()
        if cls.observer.is_alive():
            cls.observer.stop()

        while wait and cls.observer.is_alive():
            cls.observer.join()
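
A usage sketch (the path and callback are hypothetical; a GLib main loop must be running, since callbacks are dispatched through GLib.timeout_add):

def reload_document(path):
    print('{} changed, reloading'.format(path))

FileWatcher.watch_file('/tmp/slides.pdf', reload_document, '/tmp/slides.pdf')
# ... later, on shutdown:
FileWatcher.stop_daemon(wait=True)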