def run(self):
    # type: () -> None
    """Main loop of the background processor.

    Forever: run the periodic maintenance tasks (edge expiration, nonauditor
    expiration, queued email delivery, trace pruning) inside one session,
    report success/failure gauges, then sleep for a minute.  On database
    connectivity failure the Session factory is rebound so the next iteration
    picks up any changed database URL.
    """
    while True:
        try:
            with closing(Session()) as session:
                self.logger.debug("Expiring edges....")
                self.expire_edges(session)
                self.logger.debug("Expiring nonauditor approvers in audited groups...")
                self.expire_nonauditors(session)
                self.logger.debug("Sending emails...")
                process_async_emails(self.settings, session, datetime.utcnow())
                self.logger.debug("Pruning old traces....")
                prune_old_traces(session)
                session.commit()
            stats.set_gauge("successful-background-update", 1)
            stats.set_gauge("failed-background-update", 0)
        except OperationalError:
            # Rebind the engine; the URL fetcher may return a new database.
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.set_gauge("successful-background-update", 0)
            stats.set_gauge("failed-background-update", 1)
            self.capture_exception()
        except:
            # Record the failure and re-raise so the thread dies visibly.
            stats.set_gauge("successful-background-update", 0)
            stats.set_gauge("failed-background-update", 1)
            self.capture_exception()
            raise
        sleep(60)
def session(request, tmpdir):
    # type: (FixtureRequest, LocalPath) -> None
    """Pytest fixture: fresh global settings, empty plugin proxy, and a
    Session bound to a newly created (or wiped) test database schema."""
    settings = Settings()
    set_global_settings(settings)

    # Reinitialize plugins in case a previous test configured some.
    set_global_plugin_proxy(PluginProxy([]))

    db_engine = get_db_engine(db_url(tmpdir))

    # Clean up from previous tests if using a persistent database.
    if "MEROU_TEST_DATABASE" in os.environ:
        Model.metadata.drop_all(db_engine)

    # Create the database schema and the corresponding session.
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def close_session():
        # type: () -> None
        """Explicitly close the session to avoid any dangling transactions."""
        session.close()

    request.addfinalizer(close_session)
    return session
def dump_sql_command(args):
    # type: (argparse.Namespace) -> None
    """Print the CREATE TABLE / CREATE INDEX SQL for the Grouper schema.

    Fix: the original used Python-2-only ``print X`` statements, which are a
    SyntaxError under Python 3.  A single-argument ``print(...)`` call behaves
    identically on both Python 2 and Python 3.
    """
    db_engine = get_db_engine(get_database_url(settings))
    for table in Model.metadata.sorted_tables:
        print(CreateTable(table).compile(db_engine))
        for index in table.indexes:
            print(CreateIndex(index).compile(db_engine))
def get_current_user(self):
    """Resolve the authenticated user from the auth header.

    Returns None when no username header is present; raises InvalidUser for a
    malformed username and DatabaseFailure when the database is unreachable.
    """
    username = self.request.headers.get(settings.user_auth_header)
    if not username:
        return

    # Users must be fully qualified
    if not re.match("^{}$".format(USERNAME_VALIDATION), username):
        raise InvalidUser()

    try:
        user, created = User.get_or_create(self.session, username=username)
        if created:
            logging.info("Created new user %s", username)
            self.session.commit()
            # Because the graph doesn't initialize until the updates table
            # is populated, we need to refresh the graph here in case this
            # is the first update.
            self.graph.update_from_db(self.session)
    except sqlalchemy.exc.OperationalError:
        # Failed to connect to database or create user, try to reconfigure the db. This invokes
        # the fetcher to try to see if our URL string has changed.
        Session.configure(bind=get_db_engine(get_database_url(settings)))
        raise DatabaseFailure()

    return user
def dump_schema(self):
    # type: () -> str
    """Return the full schema-creation SQL (all tables and their indexes)."""
    db_engine = get_db_engine(self.settings.database)
    buffer = StringIO()
    for table in Model.metadata.sorted_tables:
        buffer.write(str(CreateTable(table).compile(db_engine)))
        for index in table.indexes:
            buffer.write(str(CreateIndex(index).compile(db_engine)))
    return buffer.getvalue()
def session(request, tmpdir):
    """Pytest fixture yielding a Session bound to a fresh test database."""
    db_engine = get_db_engine(db_url(tmpdir))
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def close_session():
        session.close()
        # Useful if testing against MySQL
        # Model.metadata.drop_all(db_engine)

    request.addfinalizer(close_session)
    return session
def session(request, tmpdir):
    """Pytest fixture yielding a Session bound to a per-test SQLite file."""
    db_path = tmpdir.join("grouper.sqlite")
    db_engine = get_db_engine("sqlite:///%s" % db_path)
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def close_session():
        session.close()
        # Useful if testing against MySQL
        # model_soup.Model.metadata.drop_all(db_engine)

    request.addfinalizer(close_session)
    return session
def start_server(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    """Configure and run the Grouper API server.

    Fix: the original ended with the Python-2-only statement ``print "Bye"``,
    which is a SyntaxError under Python 3.  A single-argument ``print(...)``
    call is equivalent on both Python 2 and Python 3.  The backslash-continued
    assert was also rewrapped with parentheses.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1), (
        "debug mode does not support multiple processes"
    )

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_api")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    settings.start_config_thread(args.config, "api")

    # Prime the graph once before serving; the refresher keeps it current.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    application = get_application(graph, settings, sentry_client)

    address = args.address or settings.address
    port = args.port or settings.port

    logging.info("Starting application server on port %d", port)
    server = tornado.httpserver.HTTPServer(application)
    server.bind(port, address=address)
    server.start(settings.num_processes)

    stats.set_defaults()

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print("Bye")
def sync_db_command(args):
    """Create the database schema and bootstrap required rows.

    Ensures every system permission exists and that the bootstrap
    grouper-administrators group exists with the admin permissions granted.
    """
    # Models not implicitly or explictly imported above are explicitly imported
    # here:
    from grouper.models.perf_profile import PerfProfile  # noqa

    db_engine = get_db_engine(get_database_url(settings))
    Model.metadata.create_all(db_engine)

    # Add some basic database structures we know we will need if they don't exist.
    session = make_session()

    for name, description in SYSTEM_PERMISSIONS:
        if Permission.get(session, name):
            continue
        permission = Permission(name=name, description=description)
        try:
            permission.add(session)
            session.flush()
        except IntegrityError:
            session.rollback()
            raise Exception('Failed to create permission: %s' % (name, ))
    session.commit()

    # This group is needed to bootstrap a Grouper installation.
    admin_group = Group.get(session, name="grouper-administrators")
    if not admin_group:
        admin_group = Group(
            groupname="grouper-administrators",
            description="Administrators of the Grouper system.",
            canjoin="nobody",
        )
        try:
            admin_group.add(session)
            session.flush()
        except IntegrityError:
            session.rollback()
            raise Exception('Failed to create group: grouper-administrators')

        for permission_name in (GROUP_ADMIN, PERMISSION_ADMIN, USER_ADMIN):
            permission = Permission.get(session, permission_name)
            assert permission, "Permission should have been created earlier!"
            grant_permission(session, admin_group.id, permission.id)

        session.commit()
def session(request, tmpdir):
    """Pytest fixture: build the schema and yield a Session, dropping the
    schema at teardown when a persistent database is in use."""
    db_engine = get_db_engine(db_url(tmpdir))

    # Create the database schema and the corresponding session.
    Model.metadata.create_all(db_engine)
    Session.configure(bind=db_engine)
    session = Session()

    def close_session():
        # type: () -> None
        """Explicitly close the session and clean up if using a persistent database."""
        session.close()
        if "MEROU_TEST_DATABASE" in os.environ:
            Model.metadata.drop_all(db_engine)

    request.addfinalizer(close_session)
    return session
def start_processor(args, settings, sentry_client):
    # type: (Namespace, BackgroundSettings, SentryProxy) -> None
    """Load plugins, bind the database session, and run the background loop."""
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    background = BackgroundProcessor(settings, sentry_client)
    background.run()
def start_processor(args, settings):
    # type: (Namespace, BackgroundSettings) -> None
    """Load plugins, bind the database session, and run the background loop."""
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    background = BackgroundProcessor(settings, plugins)
    background.run()
def run(self):
    # type: () -> None
    """Periodically refresh the in-memory graph from the database.

    Fix: the original only closed the Session on the success path; if
    ``update_from_db`` raised, the session (and its pooled connection) leaked
    on every iteration.  The try/finally guarantees the session is closed on
    all paths without changing any other behavior.
    """
    while True:
        sleep(self.refresh_interval)
        logging.debug("Updating Graph from Database.")
        try:
            session = Session()
            try:
                self.graph.update_from_db(session)
            finally:
                # Always release the session, even when the update fails.
                session.close()
            stats.set_gauge("successful-db-update", 1)
        except OperationalError:
            # Rebind the engine; the URL fetcher may return a new database.
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            logging.critical("Failed to connect to database.")
            stats.set_gauge("successful-db-update", 0)
            self.capture_exception()
        except:
            stats.set_gauge("successful-db-update", 0)
            self.capture_exception()
            raise
def start_processor(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    """Initialize plugins, bind the database, and run the background processor."""
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper-background")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(get_database_url(settings)))

    settings.start_config_thread(args.config, "background")

    background = BackgroundProcessor(settings, sentry_client)
    background.run()
def run(self):
    """Refresh the in-memory graph from the database on a fixed interval.

    Reports success/failure gauges each cycle; rebinds the Session factory on
    connectivity errors and re-raises anything unexpected so the thread dies
    visibly.
    """
    while True:
        self.logger.debug("Updating Graph from Database.")
        try:
            with closing(Session()) as session:
                self.graph.update_from_db(session)
            stats.set_gauge("successful-db-update", 1)
            stats.set_gauge("failed-db-update", 0)
        except OperationalError:
            # Rebind the engine; the URL fetcher may return a new database.
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.set_gauge("successful-db-update", 0)
            stats.set_gauge("failed-db-update", 1)
            self.capture_exception()
        except:
            stats.set_gauge("successful-db-update", 0)
            stats.set_gauge("failed-db-update", 1)
            self.capture_exception()
            raise
        sleep(self.refresh_interval)
def get_or_create_user(self, username):
    # type: (str) -> Optional[User]
    """Retrieve or create the User object for the authenticated user.

    This is done in a separate method called by prepare instead of in the
    magic Tornado get_current_user method because exceptions thrown by the
    latter are caught by Tornado and not propagated to the caller, and we
    want to use exceptions to handle invalid users and then return an error
    page in prepare.
    """
    if not username:
        return None

    # Users must be fully qualified
    if not re.match("^{}$".format(USERNAME_VALIDATION), username):
        raise InvalidUser("{} does not match {}".format(
            username, USERNAME_VALIDATION))

    # User must exist in the database and be active
    try:
        user, created = User.get_or_create(self.session, username=username)
        if created:
            logging.info("Created new user %s", username)
            self.session.commit()
            # Because the graph doesn't initialize until the updates table
            # is populated, we need to refresh the graph here in case this
            # is the first update.
            self.graph.update_from_db(self.session)
    except sqlalchemy.exc.OperationalError:
        # Failed to connect to database or create user, try to reconfigure the db. This invokes
        # the fetcher to try to see if our URL string has changed.
        Session.configure(bind=get_db_engine(settings().database))
        raise DatabaseFailure()

    # service accounts are, by definition, not interactive users
    if user.is_service_account:
        raise InvalidUser("{} is a service account".format(username))

    return user
def get_or_create_user(self, username):
    # type: (str) -> Optional[User]
    """Retrieve or create the User object for the authenticated user.

    This is done in a separate method called by prepare instead of in the
    magic Tornado get_current_user method because exceptions thrown by the
    latter are caught by Tornado and not propagated to the caller, and we
    want to use exceptions to handle invalid users and then return an error
    page in prepare.
    """
    if not username:
        return None

    # Users must be fully qualified
    if not re.match("^{}$".format(USERNAME_VALIDATION), username):
        raise InvalidUser("{} does not match {}".format(username, USERNAME_VALIDATION))

    # User must exist in the database and be active
    try:
        user, created = User.get_or_create(self.session, username=username)
        if created:
            logging.info("Created new user %s", username)
            self.session.commit()
            # Because the graph doesn't initialize until the updates table
            # is populated, we need to refresh the graph here in case this
            # is the first update.
            self.graph.update_from_db(self.session)
    except sqlalchemy.exc.OperationalError:
        # Failed to connect to database or create user, try to reconfigure the db. This invokes
        # the fetcher to try to see if our URL string has changed.
        Session.configure(bind=get_db_engine(settings().database))
        raise DatabaseFailure()

    # service accounts are, by definition, not interactive users
    if user.is_service_account:
        raise InvalidUser("{} is a service account".format(username))

    return user
def run(self):
    # type: () -> None
    """Main loop of the background processor.

    Each cycle runs the maintenance tasks in one session, logs gauges, and
    sleeps for the configured interval.  Connectivity failures rebind the
    Session factory; any other exception is logged and re-raised.
    """
    while True:
        try:
            with closing(Session()) as session:
                self.logger.info("Expiring edges....")
                self.expire_edges(session)
                self.logger.info("Expiring nonauditor approvers in audited groups...")
                self.expire_nonauditors(session)
                self.logger.info("Sending emails...")
                process_async_emails(self.settings, session, datetime.utcnow())
                self.logger.info("Pruning old traces....")
                prune_old_traces(session)
                session.commit()
            stats.log_gauge("successful-background-update", 1)
            stats.log_gauge("failed-background-update", 0)
        except OperationalError:
            # Rebind the engine; the URL fetcher may return a new database.
            Session.configure(bind=get_db_engine(get_database_url(self.settings)))
            self.logger.critical("Failed to connect to database.")
            stats.log_gauge("successful-background-update", 0)
            stats.log_gauge("failed-background-update", 1)
            self._capture_exception()
        except:
            stats.log_gauge("successful-background-update", 0)
            stats.log_gauge("failed-background-update", 1)
            self._capture_exception()
            self.logger.exception("Unexpected exception occurred in background thread.")
            raise
        self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
        sleep(self.settings.sleep_interval)
def initialize_schema(self):
    # type: () -> None
    """Create all tables for the Grouper schema in the configured database."""
    Model.metadata.create_all(get_db_engine(self.settings.database))
def start_server(args, settings, sentry_client):
    # type: (Namespace, FrontendSettings, SentryProxy) -> None
    """Configure and run the Grouper frontend server.

    Loads plugins, binds the database, starts the HTTP server (on stdin's fd
    or a bound address), forks workers, then primes the graph and starts the
    refresh thread in each worker before entering the IOLoop.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-fe")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        logging.info(
            "Starting application server with %d processes on stdin", settings.num_processes
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        if PY2:
            listener = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            listener.setblocking(False)
            listener.listen(5)
        else:
            listener = socket.socket(fileno=sys.stdin.fileno())
            listener.setblocking(False)
            listener.listen()
        server.add_sockets([listener])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here
        server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def create_session(self):
    # type: () -> Session
    """Bind the Session factory to the configured database and return a session."""
    Session.configure(bind=get_db_engine(self.settings.database))
    return Session()
def make_session():
    """Bind the Session factory to the configured database URL and return a session."""
    Session.configure(bind=get_db_engine(get_database_url(settings)))
    return Session()
def start_server(args, settings, sentry_client):
    # type: (Namespace, FrontendSettings, SentryProxy) -> None
    """Configure and run the Grouper frontend server.

    Loads plugins, binds the database, starts the HTTP server (listening on
    stdin's fd or a bound address), forks workers, then primes the graph and
    starts the refresh thread post-fork before entering the IOLoop.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-fe")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        logging.info(
            "Starting application server with %d processes on stdin", settings.num_processes)
        server = HTTPServer(application, ssl_options=ssl_context)
        if PY2:
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here
        server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def drop_schema(self):
    # type: () -> None
    """Not exposed via a service, used primarily for tests."""
    Model.metadata.drop_all(get_db_engine(self.settings.database))
def sync_db_command(args):
    """Create the database schema and bootstrap required rows.

    Ensures every system permission exists, then creates the bootstrap
    grouper-administrators group (with admin permissions granted) and the
    auditors group (with the auditor permission granted) if they are missing.
    """
    # Models not implicitly or explictly imported above are explicitly imported here
    from grouper.models.perf_profile import PerfProfile  # noqa: F401
    from grouper.models.user_token import UserToken  # noqa: F401

    db_engine = get_db_engine(get_database_url(settings))
    Model.metadata.create_all(db_engine)

    # Add some basic database structures we know we will need if they don't exist.
    session = make_session()

    for name, description in SYSTEM_PERMISSIONS:
        if get_permission(session, name):
            continue
        try:
            create_permission(session, name, description)
            session.flush()
        except IntegrityError:
            session.rollback()
            raise Exception("Failed to create permission: %s" % (name, ))
    session.commit()

    # This group is needed to bootstrap a Grouper installation.
    admin_group = Group.get(session, name="grouper-administrators")
    if not admin_group:
        admin_group = Group(
            groupname="grouper-administrators",
            description="Administrators of the Grouper system.",
            canjoin="nobody",
        )
        try:
            admin_group.add(session)
            session.flush()
        except IntegrityError:
            session.rollback()
            raise Exception("Failed to create group: grouper-administrators")

        for permission_name in (GROUP_ADMIN, PERMISSION_ADMIN, USER_ADMIN):
            permission = get_permission(session, permission_name)
            assert permission, "Permission should have been created earlier!"
            grant_permission(session, admin_group.id, permission.id)

        session.commit()

    auditors_group_name = get_auditors_group_name(settings)
    auditors_group = Group.get(session, name=auditors_group_name)
    if not auditors_group:
        auditors_group = Group(
            groupname=auditors_group_name,
            description="Group for auditors, who can be owners of audited groups.",
            canjoin="canjoin",
        )
        try:
            auditors_group.add(session)
            session.flush()
        except IntegrityError:
            session.rollback()
            raise Exception("Failed to create group: {}".format(auditors_group_name))

        permission = get_permission(session, PERMISSION_AUDITOR)
        assert permission, "Permission should have been created earlier!"
        grant_permission(session, auditors_group.id, permission.id)

        session.commit()