def run(self):
    while True:
        try:
            with closing(Session()) as session:
                self.logger.debug("Expiring edges....")
                self.expire_edges(session)

                self.logger.debug("Expiring nonauditor approvers in audited groups...")
                self.expire_nonauditors(session)

                self.logger.debug("Sending emails...")
                process_async_emails(self.settings, session, datetime.utcnow())

                self.logger.debug("Pruning old traces....")
                prune_old_traces(session)

                session.commit()

            stats.set_gauge("successful-background-update", 1)
            stats.set_gauge("failed-background-update", 0)
        except OperationalError:
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.set_gauge("successful-background-update", 0)
            stats.set_gauge("failed-background-update", 1)
            self.capture_exception()
        except:
            stats.set_gauge("successful-background-update", 0)
            stats.set_gauge("failed-background-update", 1)
            self.capture_exception()
            raise

        sleep(60)
def session(request, tmpdir):
    # type: (FixtureRequest, LocalPath) -> Session
    settings = Settings()
    set_global_settings(settings)

    # Reinitialize plugins in case a previous test configured some.
    set_global_plugin_proxy(PluginProxy([]))

    db_engine = get_db_engine(db_url(tmpdir))

    # Clean up from previous tests if using a persistent database.
    if "MEROU_TEST_DATABASE" in os.environ:
        Model.metadata.drop_all(db_engine)

    # Create the database schema and the corresponding session.
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def fin():
        # type: () -> None
        """Explicitly close the session to avoid any dangling transactions."""
        session.close()

    request.addfinalizer(fin)
    return session
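# A minimal pytest usage sketch for the session fixture above. The test name and
# the User query are hypothetical; the fixture wiring and the User model are
# assumed to come from this test suite's conftest and models.
def test_session_fixture_yields_empty_schema(session):
    # The fixture hands back a Session bound to a freshly created schema, so a
    # simple query should succeed and find no rows yet.
    assert session.query(User).count() == 0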
def get_current_user(self):
    username = self.request.headers.get(settings.user_auth_header)
    if not username:
        return

    # Users must be fully qualified
    if not re.match("^{}$".format(USERNAME_VALIDATION), username):
        raise InvalidUser()

    try:
        user, created = User.get_or_create(self.session, username=username)
        if created:
            logging.info("Created new user %s", username)
            self.session.commit()
            # Because the graph doesn't initialize until the updates table
            # is populated, we need to refresh the graph here in case this
            # is the first update.
            self.graph.update_from_db(self.session)
    except sqlalchemy.exc.OperationalError:
        # Failed to connect to database or create user, try to reconfigure the db. This invokes
        # the fetcher to try to see if our URL string has changed.
        Session.configure(bind=get_db_engine(get_database_url(settings)))
        raise DatabaseFailure()

    return user
def session(self):
    # type: () -> Session
    if not self._session:
        db_engine = get_db_engine(get_database_url(self.settings))
        Session.configure(bind=db_engine)
        self._session = Session()
    return self._session
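# Usage sketch for the lazy session property above, assuming it lives on some
# owning class (the "Processor" name is hypothetical) constructed with settings.
# The point of the pattern is that repeated accesses reuse one cached Session
# instead of configuring a new engine binding each time.
processor = Processor(settings)
assert processor.session is processor.session  # same Session object on every access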
def start_server(args, sentry_client):
    # type: (Namespace, SentryProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1), \
        "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_fe")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    usecase_factory = create_graph_usecase_factory(settings, Session())
    application = create_fe_application(settings, usecase_factory, args.deployment_name)

    address = args.address or settings.address
    port = args.port or settings.port

    ssl_context = get_plugin_proxy().get_ssl_context()

    logging.info("Starting application server with %d processes on port %d",
                 settings.num_processes, port)
    server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_context)
    server.bind(port, address=address)
    # When using multiple processes, the forking happens here
    server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the config / graph update threads post fork to ensure each
    # process gets updated.
    settings.start_config_thread(args.config, "fe")
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print("Bye")
def start_server(args, settings, plugins):
    # type: (Namespace, FrontendSettings, PluginProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1), \
        "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        logging.info(
            "Starting application server with %d processes on stdin", settings.num_processes
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        s = socket.socket(fileno=sys.stdin.fileno())
        s.setblocking(False)
        s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here
        server.start(settings.num_processes)

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, plugins, graph, settings.refresh_interval)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def start_server(args, settings, plugins):
    # type: (Namespace, ApiSettings, PluginProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, plugins, graph, settings.refresh_interval)
    refresher.daemon = True
    refresher.start()

    usecase_factory = create_graph_usecase_factory(settings, plugins, graph=graph)
    application = create_api_application(graph, settings, plugins, usecase_factory)

    if args.listen_stdin:
        logging.info("Starting application server on stdin")
        server = HTTPServer(application)
        if PY2:
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info("Starting application server on %s:%d", address, port)
        server = HTTPServer(application)
        server.bind(port, address=address)
        server.start(settings.num_processes)

    stats.set_defaults()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def create_session(self, tmpdir):
    # type: (LocalPath) -> Session
    db_engine = get_db_engine(db_url(tmpdir))

    # If using a persistent database, clear the database first.
    if "MEROU_TEST_DATABASE" in os.environ:
        Model.metadata.drop_all(db_engine)

    # Create the database schema and the corresponding session.
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    return Session()
def session(request, tmpdir):
    db_engine = get_db_engine(db_url(tmpdir))
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def fin():
        session.close()
        # Useful if testing against MySQL
        # Model.metadata.drop_all(db_engine)

    request.addfinalizer(fin)
    return session
def session(request, tmpdir):
    db_path = tmpdir.join("grouper.sqlite")
    db_engine = get_db_engine("sqlite:///%s" % db_path)
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def fin():
        session.close()
        # Useful if testing against MySQL
        # model_soup.Model.metadata.drop_all(db_engine)

    request.addfinalizer(fin)
    return session
def start_server(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1), \
        "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_api")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    settings.start_config_thread(args.config, "api")

    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    application = get_application(graph, settings, sentry_client)

    address = args.address or settings.address
    port = args.port or settings.port

    logging.info("Starting application server on port %d", port)
    server = tornado.httpserver.HTTPServer(application)
    server.bind(port, address=address)
    server.start(settings.num_processes)

    stats.set_defaults()

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print("Bye")
def session(request, tmpdir):
    db_engine = get_db_engine(db_url(tmpdir))

    # Create the database schema and the corresponding session.
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def fin():
        # type: () -> None
        """Explicitly close the session and clean up if using a persistent database."""
        session.close()
        if "MEROU_TEST_DATABASE" in os.environ:
            Model.metadata.drop_all(db_engine)

    request.addfinalizer(fin)
    return session
def start_processor(args, settings):
    # type: (Namespace, BackgroundSettings) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    background = BackgroundProcessor(settings, plugins)
    background.run()
def start_processor(args, settings, sentry_client):
    # type: (Namespace, BackgroundSettings, SentryProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    background = BackgroundProcessor(settings, sentry_client)
    background.run()
def run(self):
    while True:
        sleep(self.refresh_interval)
        logging.debug("Updating Graph from Database.")
        try:
            session = Session()
            self.graph.update_from_db(session)
            session.close()
            stats.set_gauge("successful-db-update", 1)
        except OperationalError:
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            logging.critical("Failed to connect to database.")
            stats.set_gauge("successful-db-update", 0)
            self.capture_exception()
        except:
            stats.set_gauge("successful-db-update", 0)
            self.capture_exception()
            raise
def start_processor(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths,
                           "grouper-background")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(get_database_url(settings)))

    settings.start_config_thread(args.config, "background")

    background = BackgroundProcessor(settings, sentry_client)
    background.run()
def run(self):
    while True:
        self.logger.debug("Updating Graph from Database.")
        try:
            with closing(Session()) as session:
                self.graph.update_from_db(session)
            stats.set_gauge("successful-db-update", 1)
            stats.set_gauge("failed-db-update", 0)
        except OperationalError:
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.set_gauge("successful-db-update", 0)
            stats.set_gauge("failed-db-update", 1)
            self.capture_exception()
        except:
            stats.set_gauge("successful-db-update", 0)
            stats.set_gauge("failed-db-update", 1)
            self.capture_exception()
            raise

        sleep(self.refresh_interval)
def get_or_create_user(self, username):
    # type: (str) -> Optional[User]
    """Retrieve or create the User object for the authenticated user.

    This is done in a separate method called by prepare instead of in the magic Tornado
    get_current_user method because exceptions thrown by the latter are caught by Tornado
    and not propagated to the caller, and we want to use exceptions to handle invalid
    users and then return an error page in prepare.
    """
    if not username:
        return None

    # Users must be fully qualified
    if not re.match("^{}$".format(USERNAME_VALIDATION), username):
        raise InvalidUser("{} does not match {}".format(username, USERNAME_VALIDATION))

    # User must exist in the database and be active
    try:
        user, created = User.get_or_create(self.session, username=username)
        if created:
            logging.info("Created new user %s", username)
            self.session.commit()
            # Because the graph doesn't initialize until the updates table
            # is populated, we need to refresh the graph here in case this
            # is the first update.
            self.graph.update_from_db(self.session)
    except sqlalchemy.exc.OperationalError:
        # Failed to connect to database or create user, try to reconfigure the db. This
        # invokes the fetcher to try to see if our URL string has changed.
        Session.configure(bind=get_db_engine(settings().database))
        raise DatabaseFailure()

    # service accounts are, by definition, not interactive users
    if user.is_service_account:
        raise InvalidUser("{} is a service account".format(username))

    return user
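# Sketch of how get_or_create_user might be invoked from Tornado's prepare hook
# (hypothetical handler excerpt; the header name and the render_error helper are
# assumptions for illustration, not taken from this codebase).
def prepare(self):
    # type: () -> None
    username = self.request.headers.get("X-Grouper-User")  # hypothetical auth header
    try:
        self.current_user = self.get_or_create_user(username)
    except InvalidUser as e:
        self.render_error(str(e))  # hypothetical error-page helper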
def run(self):
    # type: () -> None
    while True:
        try:
            with closing(Session()) as session:
                self.logger.info("Expiring edges....")
                self.expire_edges(session)

                self.logger.info("Expiring nonauditor approvers in audited groups...")
                self.expire_nonauditors(session)

                self.logger.info("Sending emails...")
                process_async_emails(self.settings, session, datetime.utcnow())

                self.logger.info("Pruning old traces....")
                prune_old_traces(session)

                session.commit()

            stats.log_gauge("successful-background-update", 1)
            stats.log_gauge("failed-background-update", 0)
        except OperationalError:
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.log_gauge("successful-background-update", 0)
            stats.log_gauge("failed-background-update", 1)
            self._capture_exception()
        except:
            stats.log_gauge("successful-background-update", 0)
            stats.log_gauge("failed-background-update", 1)
            self._capture_exception()
            self.logger.exception("Unexpected exception occurred in background thread.")
            raise

        self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
        sleep(self.settings.sleep_interval)
def start_server(args, settings, sentry_client):
    # type: (Namespace, FrontendSettings, SentryProxy) -> None
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-fe")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        logging.info(
            "Starting application server with %d processes on stdin", settings.num_processes
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        if PY2:
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here
        server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def create_session(self):
    # type: () -> Session
    db_engine = get_db_engine(self.settings.database)
    Session.configure(bind=db_engine)
    return Session()
def make_session():
    db_engine = get_db_engine(get_database_url(settings))
    Session.configure(bind=db_engine)
    return Session()
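# Usage sketch for make_session(): the caller owns the returned Session and
# should close it when finished. Assumes contextlib.closing and the User model
# are imported, as in the entry points above; the query itself is hypothetical.
with closing(make_session()) as session:
    print(session.query(User).count())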