def get_audited_groups(session):
    # type: (Session) -> List[Group]
    """Return every enabled group that is flagged as audited.

    Not cached: each call reads the full group list from the database and
    cross-references the in-memory graph for the audit flag.

    Args:
        session (Session): Session to load data on.

    Returns:
        A list of all enabled and audited Group objects in the database.
    """
    graph = Graph()
    result = []
    for candidate in get_all_groups(session):
        try:
            details = graph.get_group_details(candidate.name)
        except NoSuchGroup:
            # Very new group with no metadata yet, or it has been disabled and
            # excluded from the in-memory cache.
            continue
        if details.get('audited', False):
            result.append(candidate)
    return result
def start_server(args, sentry_client):
    # type: (Namespace, SentryProxy) -> None
    """Run the Grouper frontend Tornado server until interrupted.

    Initializes plugins, configures the database session, binds the listen
    socket, forks worker processes, and then — post-fork — starts the config
    and graph refresh threads and the IOLoop in each process.

    Args:
        args: Parsed command-line arguments.
        sentry_client: Sentry proxy handed to the graph refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    # Tornado's multi-process fork model is incompatible with debug mode.
    assert not (settings.debug and settings.num_processes > 1
                ), "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_fe")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))
    usecase_factory = create_graph_usecase_factory(settings, Session())
    application = create_fe_application(settings, usecase_factory, args.deployment_name)
    address = args.address or settings.address
    port = args.port or settings.port
    ssl_context = get_plugin_proxy().get_ssl_context()
    logging.info("Starting application server with %d processes on port %d",
                 settings.num_processes, port)
    server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_context)
    server.bind(port, address=address)
    # When using multiple processes, the forking happens here
    server.start(settings.num_processes)
    stats.set_defaults()
    # Create the Graph and start the config / graph update threads post fork to ensure each
    # process gets updated.
    settings.start_config_thread(args.config, "fe")
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)
        refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
        refresher.daemon = True
        refresher.start()
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print("Bye")
def user_is_auditor(username: str) -> bool:
    """Check if a user is an auditor, defined as having the audit permission."""
    permissions = Graph().get_user_details(username)["permissions"]
    return any(perm["permission"] == PERMISSION_AUDITOR for perm in permissions)
def start_server(args, settings, plugins):
    # type: (Namespace, FrontendSettings, PluginProxy) -> None
    """Run the Grouper frontend Tornado server until interrupted.

    Configures the database, builds the web application, binds the listen
    socket (or adopts the socket passed on stdin), forks worker processes,
    and then — post-fork — starts the per-process graph refresh thread and
    the IOLoop.

    Args:
        args: Parsed command-line arguments.
        settings: Frontend settings object.
        plugins: Plugin proxy used for the SSL context and refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    # Tornado's multi-process fork model is incompatible with debug mode.
    assert not (settings.debug and settings.num_processes > 1
                ), "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        # Command-line database URL overrides the settings file.
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        # Adopt the already-open listen socket passed on stdin (e.g. handed
        # over by a process supervisor) instead of binding a new one.
        logging.info("Starting application server with %d processes on stdin",
                     settings.num_processes)
        server = HTTPServer(application, ssl_options=ssl_context)
        s = socket.socket(fileno=sys.stdin.fileno())
        s.setblocking(False)
        s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here
        server.start(settings.num_processes)

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)
        refresher = DbRefreshThread(settings, plugins, graph, settings.refresh_interval)
        refresher.daemon = True
        refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def start_server(args, settings, plugins):
    # type: (Namespace, ApiSettings, PluginProxy) -> None
    """Run the Grouper API Tornado server until interrupted.

    Configures the database, loads the graph and starts its refresh thread,
    builds the API application, binds the listen socket (or adopts the socket
    passed on stdin), forks worker processes, and runs the IOLoop.

    Args:
        args: Parsed command-line arguments.
        settings: API settings object.
        plugins: Plugin proxy handed to the graph refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    # Tornado's multi-process fork model is incompatible with debug mode.
    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        # Command-line database URL overrides the settings file.
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    # NOTE(review): the refresh thread is started here, before server.start()
    # may fork worker processes below — confirm whether multi-process workers
    # are expected to inherit a live refresher.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)
        refresher = DbRefreshThread(settings, plugins, graph, settings.refresh_interval)
        refresher.daemon = True
        refresher.start()

    usecase_factory = create_graph_usecase_factory(settings, plugins, graph=graph)
    application = create_api_application(graph, settings, plugins, usecase_factory)

    if args.listen_stdin:
        # Adopt the already-open listen socket passed on stdin (e.g. handed
        # over by a process supervisor) instead of binding a new one.
        logging.info("Starting application server on stdin")
        server = HTTPServer(application)
        if PY2:
            # Python 2's socket() cannot adopt a file descriptor directly;
            # fromfd() duplicates it instead.
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info("Starting application server on %s:%d", address, port)
        server = HTTPServer(application)
        server.bind(port, address=address)
        # When using multiple processes, the forking happens here.
        server.start(settings.num_processes)

    stats.set_defaults()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
def get_all_groups_by_user(session, user):
    # type: (Session, User) -> List[Tuple[Group, int]]
    """Return groups a given user is a member of along with the user's role.

    This includes groups inherited from other groups, unlike
    get_groups_by_user.
    """
    from grouper.graph import Graph

    membership = Graph().get_user_details(username=user.name)["groups"]
    rows = session.query(Group).filter(Group.name.in_(membership.keys())).all()
    return [(g, membership[g.name]["role"]) for g in rows]
def promote_nonauditors(self, session):
    # type: (Session) -> None
    """Checks all enabled audited groups and ensures that all approvers for that group have
    the PERMISSION_AUDITOR permission. All non-auditor approvers of audited groups will be
    promoted to be auditors, i.e., added to the auditors group.

    Args:
        session (Session): database session
    """
    graph = Graph()
    # Hack to ensure the graph is loaded before we access it
    graph.update_from_db(session)
    # map from username to names of audited groups in which
    # that user is a nonauditor approver
    nonauditor_approver_to_groups = defaultdict(set)  # type: Dict[str, Set[str]]
    # memoized per-user auditor status, keyed by username
    user_is_auditor = {}  # type: Dict[str, bool]
    for group_tuple in graph.get_groups(audited=True, directly_audited=False):
        group_md = graph.get_group_details(group_tuple.groupname, expose_aliases=False)
        for username, user_md in iteritems(group_md["users"]):
            if username not in user_is_auditor:
                # First time we see this user: look up their permissions once.
                user_perms = graph.get_user_details(username)["permissions"]
                user_is_auditor[username] = any([
                    p["permission"] == PERMISSION_AUDITOR for p in user_perms
                ])
            if user_is_auditor[username]:
                # user is already auditor so can skip
                continue
            if user_md["role"] in APPROVER_ROLE_INDICES:
                # non-auditor approver. BAD!
                nonauditor_approver_to_groups[username].add(group_tuple.groupname)
    if nonauditor_approver_to_groups:
        auditors_group = get_auditors_group(self.settings, session)
        for username, group_names in iteritems(nonauditor_approver_to_groups):
            reason = "auto-added due to having approver role(s) in group(s): {}".format(
                ", ".join(group_names))
            user = User.get(session, name=username)
            assert user
            # The user both requests and actions their own membership so no
            # pending approval is left behind.
            auditors_group.add_member(user, user, reason, status="actioned")
            notify_nonauditor_promoted(self.settings, session, user, auditors_group,
                                       group_names)
    session.commit()
def initialize(self):
    """Set up per-request state: DB session, graph, optional profiler, stats."""
    self.session = self.application.my_settings.get("db_session")()
    self.graph = Graph()

    if self.get_argument("_profile", False):
        # Profiling was requested via the _profile query argument.
        self.perf_collector, self.perf_trace_uuid = Collector(), str(uuid4())
        self.perf_collector.start()
    else:
        self.perf_collector, self.perf_trace_uuid = None, None

    self._request_start_time = datetime.utcnow()
    for metric in ("requests", "requests_{}".format(self.__class__.__name__)):
        stats.incr(metric)
def start_server(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    """Run the Grouper API Tornado server until interrupted (legacy entry point).

    Initializes plugins, configures the database session, loads the graph and
    starts its refresh thread, then binds the listen socket, forks worker
    processes, and runs the IOLoop.

    Args:
        args: Parsed command-line arguments.
        sentry_client: Sentry proxy handed to the graph refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    # Tornado's multi-process fork model is incompatible with debug mode.
    assert not (settings.debug and settings.num_processes > 1), \
        "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_api")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    settings.start_config_thread(args.config, "api")

    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)
        refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
        refresher.daemon = True
        refresher.start()

    application = get_application(graph, settings, sentry_client)
    address = args.address or settings.address
    port = args.port or settings.port
    logging.info("Starting application server on port %d", port)
    server = tornado.httpserver.HTTPServer(application)
    server.bind(port, address=address)
    server.start(settings.num_processes)
    stats.set_defaults()
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        # Fix: was the Python 2-only statement form `print "Bye"`; the call
        # form works on both 2 and 3 and matches the other entry points.
        print("Bye")
def assert_can_join(group, user_or_group, role="member"):
    # type: (Group, Union[Group, User], str) -> bool
    """Enforce audit rules on joining a group.

    Applies the auditing policy to decide whether the given user or group may
    join the given group with the given role.

    Args:
        group (models.Group): The group to test against.
        user_or_group: The user or group attempting to join.
        role (str): The role being tested.

    Raises:
        UserNotAuditor: If a user is found that violates the audit training
            policy, then this exception is raised.

    Returns:
        bool: True if the join should be allowed per policy; otherwise raises
            as above.
    """
    joiner_is_user = user_or_group.type == "User"

    # By definition, any user may join any group as a plain member.
    if joiner_is_user and role == "member":
        return True

    # If the target group is not audited, anybody can join in any role.
    if not Graph().get_group_details(group.name)["audited"]:
        return True

    # Audited group and a user joiner: the user must be an auditor.
    if joiner_is_user:
        if user_is_auditor(user_or_group.name):
            return True
        raise UserNotAuditor(
            "User {} lacks the auditing permission ('{}') so may only have the "
            "'member' role in this audited group.".format(user_or_group.name, PERMISSION_AUDITOR)
        )

    # Group-joining-group case: every owner/np-owner/manager in the joining
    # group's entire subtree must be an auditor. Each group's details are
    # fetched individually so that roles are accurate per group.
    return assert_controllers_are_auditors(user_or_group)
def user_is_auditor(username):
    """Check if a user is an auditor.

    This is defined as the user having the audit permission.

    Args:
        username (str): The account name to check.

    Returns:
        bool: True/False.
    """
    details = Graph().get_user_details(username)
    return any(p["permission"] == PERMISSION_AUDITOR for p in details["permissions"])
def assert_controllers_are_auditors(group):
    # type: (Group) -> bool
    """Return whether all owners/np-owners/managers in a group (and below) are auditors.

    This is used to ensure that all of the people who can control a group (owners,
    np-owners, managers) and all subgroups (all the way down the tree) have audit
    permissions.

    Raises:
        UserNotAuditor: If a user is found that violates the audit training policy,
            then this exception is raised.

    Returns:
        bool: True if the tree is completely controlled by auditors, else it will
            raise as above.
    """
    graph = Graph()
    checked = set()  # type: Set[str]
    queue = [group.name]
    while queue:
        cur_group = queue.pop()
        if cur_group in checked:
            continue
        # Bug fix: mark this group as visited. Previously only usernames were
        # added to `checked`, so a membership cycle between groups (A contains
        # B, B contains A) re-enqueued groups forever and never terminated.
        checked.add(cur_group)
        details = graph.get_group_details(cur_group)
        for chk_user, info in iteritems(details["users"]):
            if chk_user in checked:
                continue
            # Only examine direct members of this group, because then the role is accurate.
            if info["distance"] == 1:
                if info["rolename"] == "member":
                    continue
                if user_is_auditor(chk_user):
                    checked.add(chk_user)
                else:
                    raise UserNotAuditor(
                        "User {} has role '{}' in the group {} but lacks the auditing "
                        "permission ('{}').".format(
                            chk_user, info["rolename"], cur_group, PERMISSION_AUDITOR
                        )
                    )
        # Now put subgroups into the queue to examine.
        for chk_group, info in iteritems(details["subgroups"]):
            if info["distance"] == 1:
                queue.append(chk_group)
    # If we didn't raise, we're valid.
    return True
def initialize(self, *args: Any, **kwargs: Any) -> None:
    """Prepare per-request state: graph, session, templates, plugins, profiler."""
    self.graph = Graph()
    self.session = self.settings["session"]()  # type: Session
    self.template_engine = self.settings["template_engine"]  # type: FrontendTemplateEngine
    self.plugins = get_plugin_proxy()
    self.usecase_factory = create_graph_usecase_factory(
        settings(), self.plugins, SingletonSessionFactory(self.session)
    )

    if self.get_argument("_profile", False):
        # Profiling was requested via the _profile query argument.
        self.perf_collector = Collector()
        self.perf_trace_uuid = str(uuid4())  # type: Optional[str]
        self.perf_collector.start()
    else:
        self.perf_collector = None
        self.perf_trace_uuid = None

    self._request_start_time = datetime.utcnow()
def expire_nonauditors(self, session):
    # type: (Session) -> None
    """Checks all enabled audited groups and ensures that all approvers for that group have
    the PERMISSION_AUDITOR permission. All approvers of audited groups that aren't auditors
    have their membership in the audited group set to expire
    settings.nonauditor_expiration_days days in the future.

    Args:
        session (Session): database session
    """
    now = datetime.utcnow()
    graph = Graph()
    exp_days = timedelta(days=settings.nonauditor_expiration_days)
    # Hack to ensure the graph is loaded before we access it
    graph.update_from_db(session)
    # TODO(tyleromeara): replace with graph call
    for group in get_audited_groups(session):
        members = group.my_members()
        # Go through every member of the group and set them to expire if they are an approver
        # but not an auditor
        for (type_, member), edge in members.iteritems():
            # Auditing is already inherited, so we don't need to handle that here
            if type_ == "Group":
                continue
            member = User.get(session, name=member)
            member_is_approver = user_role_index(member, members) in APPROVER_ROLE_INDICIES
            member_is_auditor = user_has_permission(session, member, PERMISSION_AUDITOR)
            if not member_is_approver or member_is_auditor:
                continue
            edge = GroupEdge.get(session, id=edge.edge_id)
            if edge.expiration and edge.expiration < now + exp_days:
                # An existing expiration sooner than ours takes precedence.
                continue
            exp = now + exp_days
            exp = exp.date()
            # apply_changes_dict expects the expiration as a M/D/Y string.
            edge.apply_changes_dict({
                "expiration": "{}/{}/{}".format(exp.month, exp.day, exp.year)
            })
            edge.add(session)
            notify_nonauditor_flagged(settings, session, edge)
    session.commit()
def initialize(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Prepare per-request state and record request-rate stats."""
    self.graph = Graph()
    self.session = kwargs["session"]()  # type: Session
    self.template_env = kwargs["template_env"]  # type: Environment
    self.usecase_factory = kwargs["usecase_factory"]  # type: UseCaseFactory

    if self.get_argument("_profile", False):
        # Profiling was requested via the _profile query argument.
        self.perf_collector = Collector()
        self.perf_trace_uuid = str(uuid4())  # type: Optional[str]
        self.perf_collector.start()
    else:
        self.perf_collector = None
        self.perf_trace_uuid = None

    self._request_start_time = datetime.utcnow()
    for metric in ("requests", "requests_{}".format(self.__class__.__name__)):
        stats.log_rate(metric, 1)
def assert_controllers_are_auditors(group: Group) -> None:
    """Verify that everyone who controls a group (and its subtree) is an auditor.

    Walks the group and all of its subgroups, checking that each direct
    owner/np-owner/manager holds the auditing permission. Returns nothing on
    success.

    Raises:
        UserNotAuditor: If a user is found that violates the audit training policy
    """
    graph = Graph()
    seen: Set[str] = set()
    pending = [group.name]
    while pending:
        name = pending.pop()
        if name in seen:
            continue
        seen.add(name)
        details = graph.get_group_details(name)
        for member, info in details["users"].items():
            # Only examine direct members of this group, because then the role
            # is accurate; plain members are exempt from the auditor rule, and
            # already-verified users are skipped.
            if member in seen or info["distance"] != 1 or info["rolename"] == "member":
                continue
            if not user_is_auditor(member):
                raise UserNotAuditor(
                    "User {} has role '{}' in the group {} but lacks the auditing "
                    "permission ('{}').".format(
                        member, info["rolename"], name, PERMISSION_AUDITOR
                    )
                )
            seen.add(member)
        # Queue direct subgroups for examination.
        for subgroup, info in details["subgroups"].items():
            if info["distance"] == 1:
                pending.append(subgroup)
def initialize(self, *args, **kwargs):
    # type: (*Any, **Any) -> None
    """Prepare per-request state: graph, session, templates, plugins, profiler.

    Also records request-rate stats for this handler class.
    """
    self.graph = Graph()
    self.session = self.settings["session"]()  # type: Session
    self.template_engine = self.settings[
        "template_engine"]  # type: FrontendTemplateEngine
    self.plugins = get_plugin_proxy()
    session_factory = SingletonSessionFactory(self.session)
    self.usecase_factory = create_graph_usecase_factory(
        settings(), self.plugins, session_factory)
    if self.get_argument("_profile", False):
        # Profiling was requested via the _profile query argument.
        self.perf_collector = Collector()
        self.perf_trace_uuid = str(uuid4())  # type: Optional[str]
        self.perf_collector.start()
    else:
        self.perf_collector = None
        self.perf_trace_uuid = None
    self._request_start_time = datetime.utcnow()
    stats.log_rate("requests", 1)
    stats.log_rate("requests_{}".format(self.__class__.__name__), 1)
    # Fix: this routine per-request trace was logged with logging.error(),
    # which pollutes error-level monitoring on every request; downgraded to
    # debug severity.
    logging.debug("initialized")
def graph(self):
    # type: () -> GroupGraph
    """Return the cached group graph, creating it lazily on first access."""
    cached = self._graph
    if not cached:
        cached = Graph()
        self._graph = cached
    return cached
def graph(session):
    """Build a group graph freshly loaded from the given database session."""
    loaded = Graph()
    loaded.update_from_db(session)
    return loaded