Example #1
0
    def run(self):
        """Background maintenance loop: run one pass per minute, forever.

        Each pass expires edges, expires non-auditor approvers of audited
        groups, sends queued async email, and prunes old traces, then
        reports success/failure through stats gauges.
        """
        while True:
            try:
                # Fresh session per pass; closing() releases it even if a
                # maintenance step raises.
                with closing(Session()) as session:
                    self.logger.debug("Expiring edges....")
                    self.expire_edges(session)
                    self.logger.debug(
                        "Expiring nonauditor approvers in audited groups...")
                    self.expire_nonauditors(session)
                    self.logger.debug("Sending emails...")
                    process_async_emails(self.settings, session,
                                         datetime.utcnow())
                    self.logger.debug("Pruning old traces....")
                    prune_old_traces(session)
                    # Commit all of the above work as one transaction.
                    session.commit()

                stats.set_gauge("successful-background-update", 1)
                stats.set_gauge("failed-background-update", 0)
            except OperationalError:
                # Connection trouble: rebind the session factory in case the
                # database URL changed, record the failure, and keep looping.
                Session.configure(
                    bind=get_db_engine(get_database_url(self.settings)))
                self.logger.critical("Failed to connect to database.")
                stats.set_gauge("successful-background-update", 0)
                stats.set_gauge("failed-background-update", 1)
                self.capture_exception()
            except:
                # Any other error is recorded and re-raised, which ends the
                # thread rather than continuing in an unknown state.
                stats.set_gauge("successful-background-update", 0)
                stats.set_gauge("failed-background-update", 1)
                self.capture_exception()
                raise

            sleep(60)
Example #2
0
    def run(self):
        """Background maintenance loop: run one pass per minute, forever.

        Each pass expires edges, expires non-auditor approvers of audited
        groups, sends queued async email, and prunes old traces, then
        reports success/failure through stats gauges.
        """
        while True:
            try:
                # Fresh session per pass; closing() releases it even if a
                # maintenance step raises.
                with closing(Session()) as session:
                    self.logger.debug("Expiring edges....")
                    self.expire_edges(session)
                    self.logger.debug("Expiring nonauditor approvers in audited groups...")
                    self.expire_nonauditors(session)
                    self.logger.debug("Sending emails...")
                    process_async_emails(self.settings, session, datetime.utcnow())
                    self.logger.debug("Pruning old traces....")
                    prune_old_traces(session)
                    # Commit all of the above work as one transaction.
                    session.commit()

                stats.set_gauge("successful-background-update", 1)
                stats.set_gauge("failed-background-update", 0)
            except OperationalError:
                # Connection trouble: rebind the session factory in case the
                # database URL changed, record the failure, and keep looping.
                Session.configure(bind=get_db_engine(get_database_url(self.settings)))
                self.logger.critical("Failed to connect to database.")
                stats.set_gauge("successful-background-update", 0)
                stats.set_gauge("failed-background-update", 1)
                self.capture_exception()
            except:
                # Any other error is recorded and re-raised, which ends the
                # thread rather than continuing in an unknown state.
                stats.set_gauge("successful-background-update", 0)
                stats.set_gauge("failed-background-update", 1)
                self.capture_exception()
                raise

            sleep(60)
Example #3
0
    def get_current_user(self):
        """Resolve the authenticated user from the configured request header.

        Returns:
            The User for the header's username, creating it on first sight,
            or None when the header is absent.

        Raises:
            InvalidUser: The supplied username is not fully qualified.
            DatabaseFailure: The database could not be reached.
        """
        username = self.request.headers.get(settings.user_auth_header)
        if not username:
            return

        # Users must be fully qualified
        if not re.match("^{}$".format(USERNAME_VALIDATION), username):
            raise InvalidUser()

        try:
            user, created = User.get_or_create(self.session, username=username)
            if created:
                logging.info("Created new user %s", username)
                self.session.commit()
                # Because the graph doesn't initialize until the updates table
                # is populated, we need to refresh the graph here in case this
                # is the first update.
                self.graph.update_from_db(self.session)
        except sqlalchemy.exc.OperationalError:
            # Failed to connect to database or create user, try to reconfigure the db. This invokes
            # the fetcher to try to see if our URL string has changed.
            Session.configure(bind=get_db_engine(get_database_url(settings)))
            raise DatabaseFailure()

        return user
Example #4
0
 def session(self):
     # type: () -> Session
     """Return the cached database session, creating it on first access."""
     if self._session:
         return self._session
     # First use: bind the session factory to an engine built from settings.
     engine = get_db_engine(get_database_url(self.settings))
     Session.configure(bind=engine)
     self._session = Session()
     return self._session
Example #5
0
def send_async_email(
    session: Session,
    recipients: Iterable[str],
    subject: str,
    template: str,
    settings: Settings,
    context: Context,
    send_after: datetime,
    async_key: Optional[str] = None,
) -> None:
    """Render a templated email and queue one notification per recipient.

    This is the main entry point for templated mail: the message is built
    once and a pending AsyncNotification row is inserted for each recipient
    so the background thread can deliver it later.

    Args:
        session: Database session used to store the notifications.
        recipients: Email addresses that will receive this mail.
        subject: Subject of the email.
        template: Name of the template to use.
        settings: Grouper settings.
        context: Context for the template library.
        send_after: Schedule the email to go out after this point in time.
        async_key: Optional key stored with each row so the email can be
            located (e.g. cancelled) later.
    """
    message = get_email_from_template(recipients, subject, template, settings, context)

    for recipient in recipients:
        notification = AsyncNotification(
            key=async_key,
            email=recipient,
            subject=subject,
            body=message.as_string(),
            send_after=send_after,
        )
        notification.add(session)
    session.commit()
Example #6
0
File: util.py Project: rra/grouper
    def get_current_user(self):
        """Resolve the authenticated user from the configured request header.

        Returns:
            The User for the header's username, creating it on first sight,
            or None when the header is absent.

        Raises:
            InvalidUser: The supplied username is not fully qualified.
            DatabaseFailure: The database could not be reached.
        """
        username = self.request.headers.get(settings.user_auth_header)
        if not username:
            return

        # Users must be fully qualified
        if not re.match("^{}$".format(USERNAME_VALIDATION), username):
            raise InvalidUser()

        try:
            user, created = User.get_or_create(self.session, username=username)
            if created:
                logging.info("Created new user %s", username)
                self.session.commit()
                # Because the graph doesn't initialize until the updates table
                # is populated, we need to refresh the graph here in case this
                # is the first update.
                self.graph.update_from_db(self.session)
        except sqlalchemy.exc.OperationalError:
            # Failed to connect to database or create user, try to reconfigure the db. This invokes
            # the fetcher to try to see if our URL string has changed.
            Session.configure(bind=get_db_engine(get_database_url(settings)))
            raise DatabaseFailure()

        return user
Example #7
0
def grant_permission_to_service_account(session: Session,
                                        account: ServiceAccount,
                                        permission: Permission,
                                        argument: str = "") -> None:
    """Grant a permission to this service account.

    This will fail if the (permission, argument) has already been granted to this group.

    Args:
        session: Database session
        account: A ServiceAccount object being granted a permission
        permission: A Permission object being granted
        argument: Must match constants.ARGUMENT_VALIDATION

    Throws:
        AssertError if argument does not match ARGUMENT_VALIDATION regex
    """
    assert re.match(ARGUMENT_VALIDATION + r"$", argument), "Invalid permission argument"

    # Record the grant, bump the global update counter, and persist both.
    ServiceAccountPermissionMap(
        permission_id=permission.id,
        service_account_id=account.id,
        argument=argument,
    ).add(session)
    Counter.incr(session, "updates")
    session.commit()
Example #8
0
def grant_permission(session: Session,
                     group_id: int,
                     permission_id: int,
                     argument: str = "") -> None:
    """Grant a permission to this group.

    This will fail if the (permission, argument) has already been granted to this group.

    Args:
        session: Database session
        group_id: ID of group to which to grant the permission
        permission_id: ID of permission to grant
        argument: Must match constants.ARGUMENT_VALIDATION

    Throws:
        AssertError if argument does not match ARGUMENT_VALIDATION regex
    """
    assert re.match(ARGUMENT_VALIDATION + r"$", argument), "Invalid permission argument"

    # Record the grant, bump the global update counter, and persist both.
    PermissionMap(
        permission_id=permission_id,
        group_id=group_id,
        argument=argument,
    ).add(session)
    Counter.incr(session, "updates")
    session.commit()
Example #9
0
def disable_permission_auditing(session: Session, permission_name: str,
                                actor_user_id: int) -> None:
    """Turn off auditing for a named permission.

    Args:
        session: Database session
        permission_name: Name of permission in question
        actor_user_id: ID of user who is disabling auditing

    Raises:
        NoSuchPermission: No permission with that name exists.
    """
    permission = get_permission(session, permission_name)
    if permission is None:
        raise NoSuchPermission(name=permission_name)

    permission.audited = False

    # Leave an audit trail for the change before committing it.
    AuditLog.log(
        session,
        actor_user_id,
        "disable_auditing",
        "Disabled auditing.",
        on_permission_id=permission.id,
    )
    Counter.incr(session, "updates")
    session.commit()
Example #10
0
def session(request, tmpdir):
    # type: (FixtureRequest, LocalPath) -> None
    """Pytest fixture: a session over a freshly created database schema."""
    set_global_settings(Settings())

    # Reinitialize plugins in case a previous test configured some.
    set_global_plugin_proxy(PluginProxy([]))

    engine = get_db_engine(db_url(tmpdir))

    # A persistent database may hold state from earlier runs; wipe it.
    if "MEROU_TEST_DATABASE" in os.environ:
        Model.metadata.drop_all(engine)

    # Build the schema and bind the session factory to this engine.
    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        # type: () -> None
        """Explicitly close the session to avoid any dangling transactions."""
        db_session.close()

    request.addfinalizer(_teardown)
    return db_session
Example #11
0
def session(request, tmpdir):
    # type: (FixtureRequest, LocalPath) -> None
    """Pytest fixture: a session over a freshly created database schema."""
    set_global_settings(Settings())

    # Reinitialize plugins in case a previous test configured some.
    set_global_plugin_proxy(PluginProxy([]))

    engine = get_db_engine(db_url(tmpdir))

    # A persistent database may hold state from earlier runs; wipe it.
    if "MEROU_TEST_DATABASE" in os.environ:
        Model.metadata.drop_all(engine)

    # Build the schema and bind the session factory to this engine.
    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        # type: () -> None
        """Explicitly close the session to avoid any dangling transactions."""
        db_session.close()

    request.addfinalizer(_teardown)
    return db_session
Example #12
0
def start_server(args, sentry_client):
    # type: (Namespace, SentryProxy) -> None
    """Configure and run the frontend web server until interrupted.

    Loads plugins, binds the database session factory, starts the Tornado
    HTTP server (forking into settings.num_processes workers), primes the
    graph from the database, and launches the background refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1
                ), "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths,
                           "grouper_fe")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    usecase_factory = create_graph_usecase_factory(settings, Session())
    application = create_fe_application(settings, usecase_factory,
                                        args.deployment_name)

    # Command-line arguments override settings for the listen address/port.
    address = args.address or settings.address
    port = args.port or settings.port

    ssl_context = get_plugin_proxy().get_ssl_context()

    logging.info("Starting application server with %d processes on port %d",
                 settings.num_processes, port)
    server = tornado.httpserver.HTTPServer(application,
                                           ssl_options=ssl_context)
    server.bind(port, address=address)
    # When using multiple processes, the forking happens here
    server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the config / graph update threads post fork to ensure each
    # process gets updated.

    settings.start_config_thread(args.config, "fe")

    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    # Daemon thread so it cannot keep the process alive after the IO loop
    # exits.
    refresher = DbRefreshThread(settings, graph, settings.refresh_interval,
                                sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print("Bye")
Example #13
0
 def get(session: Session,
         pk: Optional[int] = None,
         name: Optional[str] = None) -> Optional[Group]:
     """Look up a Group by primary key or name; None when neither is given.

     The primary key takes precedence when both are supplied.
     """
     if pk is None and name is None:
         return None
     criteria = {"id": pk} if pk is not None else {"groupname": name}
     return session.query(Group).filter_by(**criteria).scalar()
Example #14
0
def start_server(args, settings, plugins):
    # type: (Namespace, FrontendSettings, PluginProxy) -> None
    """Configure and run the frontend web server until interrupted.

    Binds the database, builds the Tornado application, listens either on
    an inherited stdin socket or on a configured address/port, forks into
    settings.num_processes workers, primes the graph, and launches the
    background refresh thread.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1
                ), "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        # Socket activation: adopt the listening socket passed in on stdin.
        logging.info("Starting application server with %d processes on stdin",
                     settings.num_processes)
        server = HTTPServer(application, ssl_options=ssl_context)
        s = socket.socket(fileno=sys.stdin.fileno())
        s.setblocking(False)
        s.listen()
        server.add_sockets([s])
    else:
        # Command-line arguments override settings for the address/port.
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)

    # When using multiple processes, the forking happens here
    server.start(settings.num_processes)

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    # Daemon thread so it cannot keep the process alive after the IO loop
    # exits.
    refresher = DbRefreshThread(settings, plugins, graph,
                                settings.refresh_interval)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
Example #15
0
def start_server(args, settings, plugins):
    # type: (Namespace, ApiSettings, PluginProxy) -> None
    """Configure and run the API web server until interrupted.

    Binds the database, primes the graph, launches the background refresh
    thread, then serves either on an inherited stdin socket or on a
    configured address/port with settings.num_processes workers.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    # Daemon thread so it cannot keep the process alive after the IO loop
    # exits.
    refresher = DbRefreshThread(settings, plugins, graph, settings.refresh_interval)
    refresher.daemon = True
    refresher.start()

    usecase_factory = create_graph_usecase_factory(settings, plugins, graph=graph)
    application = create_api_application(graph, settings, plugins, usecase_factory)

    if args.listen_stdin:
        # Socket activation: adopt the listening socket passed in on stdin.
        logging.info("Starting application server on stdin")
        server = HTTPServer(application)
        if PY2:
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        # Command-line arguments override settings for the address/port.
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info("Starting application server on %s:%d", address, port)
        server = HTTPServer(application)
        server.bind(port, address=address)

    server.start(settings.num_processes)

    stats.set_defaults()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
Example #16
0
    def create_session(self, tmpdir):
        # type: (LocalPath) -> Session
        """Build a fresh database schema under tmpdir and return a session."""
        engine = get_db_engine(db_url(tmpdir))

        # A persistent database may hold state from earlier runs; wipe it.
        if "MEROU_TEST_DATABASE" in os.environ:
            Model.metadata.drop_all(engine)

        # Build the schema and bind the session factory to this engine.
        Model.metadata.create_all(engine)
        Session.configure(bind=engine)
        return Session()
Example #17
0
def cancel_async_emails(session: Session, async_key: str) -> None:
    """Cancel pending async emails by key

    If you scheduled an asynchronous email with an async_key previously, this method can be
    used to cancel any unsent emails.

    Args:
        async_key: The async_key previously provided for your emails.
    """
    # Mark every unsent notification under this key as sent so the
    # background sender skips it.  "== False" (not "is False") is required:
    # this is a SQLAlchemy column expression, not a Python comparison.
    pending = session.query(AsyncNotification).filter(
        AsyncNotification.key == async_key,
        AsyncNotification.sent == False,  # noqa: E712
    )
    pending.update({"sent": True})
Example #18
0
def mutate_group_command(session: Session, group: Group,
                         args: Namespace) -> None:
    """Apply a grouper-ctl add_member/remove_member subcommand to a group.

    Args:
        session: Database session
        group: Group being mutated
        args: Parsed CLI namespace; reads subcommand, username (a list),
            groupname, and, for add_member, one of the role flags
            (member / owner / np_owner / manager).
    """
    for username in args.username:
        user = User.get(session, name=username)
        if not user:
            logging.error("no such user '{}'".format(username))
            return

        if args.subcommand == "add_member":
            # Initialize to None so that a missing role flag fails the
            # assert below; previously `role` was left unbound in that case
            # and `assert role` raised a confusing NameError instead.
            role = None
            if args.member:
                role = "member"
            elif args.owner:
                role = "owner"
            elif args.np_owner:
                role = "np-owner"
            elif args.manager:
                role = "manager"

            assert role

            logging.info("Adding {} as {} to group {}".format(
                username, role, args.groupname))
            group.add_member(user,
                             user,
                             "grouper-ctl join",
                             status="actioned",
                             role=role)
            AuditLog.log(
                session,
                user.id,
                "join_group",
                "{} manually joined via grouper-ctl".format(username),
                on_group_id=group.id,
            )
            session.commit()

        elif args.subcommand == "remove_member":
            logging.info("Removing {} from group {}".format(
                username, args.groupname))

            try:
                group.revoke_member(user, user, "grouper-ctl remove")
                AuditLog.log(
                    session,
                    user.id,
                    "leave_group",
                    "{} manually left via grouper-ctl".format(username),
                    on_group_id=group.id,
                )
                session.commit()
            except PluginRejectedGroupMembershipUpdate as e:
                logging.error("%s", e)
Example #19
0
def session(request, tmpdir):
    """Pytest fixture: session over a schema created in the test's tmpdir."""
    engine = get_db_engine(db_url(tmpdir))

    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        db_session.close()
        # Useful if testing against MySQL
        # Model.metadata.drop_all(engine)

    request.addfinalizer(_teardown)
    return db_session
Example #20
0
def get_changes_by_request_id(
        session: Session, request_id: int
) -> List[Tuple[PermissionRequestStatusChange, Comment]]:
    """Return each status change of a request paired with its comment."""
    status_changes = (
        session.query(PermissionRequestStatusChange)
        .filter(PermissionRequestStatusChange.request_id == request_id)
        .all()
    )

    # Fetch all related comments in one query, then index them by the
    # status change they belong to.
    change_type = OBJ_TYPES_IDX.index("PermissionRequestStatusChange")
    change_ids = [change.id for change in status_changes]
    comments = (
        session.query(Comment)
        .filter(Comment.obj_type == change_type, Comment.obj_pk.in_(change_ids))
        .all()
    )
    comment_for = {comment.obj_pk: comment for comment in comments}

    # Every status change is expected to have a matching comment.
    return [(change, comment_for[change.id]) for change in status_changes]
Example #21
0
def session(request, tmpdir):
    """Pytest fixture: session over a sqlite database in the test's tmpdir."""
    engine = get_db_engine("sqlite:///%s" % tmpdir.join("grouper.sqlite"))

    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        db_session.close()
        # Useful if testing against MySQL
        # model_soup.Model.metadata.drop_all(engine)

    request.addfinalizer(_teardown)
    return db_session
Example #22
0
def session(request, tmpdir):
    """Pytest fixture: session over a schema created in the test's tmpdir."""
    engine = get_db_engine(db_url(tmpdir))

    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        db_session.close()
        # Useful if testing against MySQL
        # Model.metadata.drop_all(engine)

    request.addfinalizer(_teardown)
    return db_session
Example #23
0
def start_server(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    """Configure and run the API web server until interrupted.

    NOTE(review): this variant is Python 2 code (`print "Bye"` below);
    keep the statement form unless the surrounding file is ported.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    assert not (settings.debug and settings.num_processes > 1), \
        "debug mode does not support multiple processes"

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper_api")
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    database_url = args.database_url or get_database_url(settings)
    Session.configure(bind=get_db_engine(database_url))

    settings.start_config_thread(args.config, "api")

    # Prime the graph from the database before serving requests.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    # Daemon thread so it cannot keep the process alive after the IO loop
    # exits.
    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    application = get_application(graph, settings, sentry_client)

    # Command-line arguments override settings for the address/port.
    address = args.address or settings.address
    port = args.port or settings.port

    logging.info("Starting application server on port %d", port)
    server = tornado.httpserver.HTTPServer(application)
    server.bind(port, address=address)
    # When using multiple processes, the forking happens here.
    server.start(settings.num_processes)

    stats.set_defaults()

    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        tornado.ioloop.IOLoop.instance().stop()
    finally:
        print "Bye"
Example #24
0
    def run(self):
        # type: () -> None
        """Background maintenance loop: run one pass per sleep interval.

        Each pass expires edges, promotes non-auditor approvers, sends
        queued email, and prunes traces; results are reported via gauges.
        The thread crashes itself if the database URL changes or any pass
        raises, relying on process supervision for restart.
        """
        initial_url = self.settings.database
        while True:
            try:
                # A changed database URL invalidates the bound engine;
                # crash so the process restarts with the new URL.
                if self.settings.database != initial_url:
                    self.crash()
                with closing(Session()) as session:
                    self.logger.info("Expiring edges....")
                    self.expire_edges(session)

                    self.logger.info("Promoting nonauditor approvers in audited groups...")
                    self.promote_nonauditors(session)

                    self.logger.info("Sending emails...")
                    process_async_emails(self.settings, session, datetime.utcnow())

                    self.logger.info("Pruning old traces....")
                    prune_old_traces(session)

                    # Commit all of the above work as one transaction.
                    session.commit()

                stats.log_gauge("successful-background-update", 1)
                stats.log_gauge("failed-background-update", 0)
            except Exception:
                # Record the failure, notify plugins, and crash rather than
                # continuing in an unknown state.
                stats.log_gauge("successful-background-update", 0)
                stats.log_gauge("failed-background-update", 1)
                self.plugins.log_exception(None, None, *sys.exc_info())
                self.logger.exception("Unexpected exception occurred in background thread")
                self.crash()

            self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
            sleep(self.settings.sleep_interval)
Example #25
0
def session(request, tmpdir):
    """Pytest fixture: session over a sqlite database in the test's tmpdir."""
    engine = get_db_engine("sqlite:///%s" % tmpdir.join("grouper.sqlite"))

    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        db_session.close()
        # Useful if testing against MySQL
        # model_soup.Model.metadata.drop_all(engine)

    request.addfinalizer(_teardown)
    return db_session
Example #26
0
def get_pending_request_by_group(session: Session, group: Group) -> List[PermissionRequest]:
    """Load pending request for a particular group."""
    query = session.query(PermissionRequest).filter(
        PermissionRequest.status == "pending",
        PermissionRequest.group_id == group.id,
    )
    return query.all()
Example #27
0
def logdump_group_command(session: Session, group: Group,
                          args: Namespace) -> None:
    """Dump a group's audit-log entries as CSV to a file or stdout.

    Entries are limited to args.start_date (exclusive) through the optional
    args.end_date (inclusive).
    """
    entries = session.query(AuditLog).filter(
        AuditLog.on_group_id == group.id, AuditLog.log_time > args.start_date)
    if args.end_date:
        entries = entries.filter(AuditLog.log_time <= args.end_date)

    with open_file_or_stdout_for_write(args.outfile) as fh:
        writer = csv.writer(fh)
        for entry in entries:
            # The optional last column identifies the user or group the
            # entry acted on, when present.
            if entry.on_user:
                extra = "user: {}".format(entry.on_user.username)
            elif entry.on_group:
                extra = "group: {}".format(entry.on_group.groupname)
            else:
                extra = ""
            writer.writerow(
                [entry.log_time, entry.actor, entry.description, entry.action, extra]
            )
Example #28
0
    def post(self, *args, **kwargs):
        # type: (*Any, **Any) -> None
        """Validate a supplied user token and report whether it is usable."""
        supplied_token = self.get_body_argument("token")
        match = TokenValidate.validator.match(supplied_token)
        if not match:
            return self.error(((1, "Token format not recognized"), ))

        token_name = match.group("token_name")
        token_secret = match.group("token_secret")
        username = match.group("name")

        with closing(Session()) as session:
            token = UserToken.get_by_value(session, username, token_name)
            # Reject in order: unknown token, disabled token, bad secret.
            if token is None:
                return self.error(((2, "Token specified does not exist"), ))
            if not token.enabled:
                return self.error(((3, "Token is disabled"), ))
            if not token.check_secret(token_secret):
                return self.error(((4, "Token secret mismatch"), ))
            result = {
                "owner": username,
                "identity": str(token),
                "act_as_owner": True,
                "valid": True,
            }
            return self.success(result)
Example #29
0
    def post(self):
        """Validate a supplied user token and report whether it is usable.

        Responds with an error tuple for an unrecognized format, unknown
        token, disabled token, or secret mismatch; otherwise a success
        payload describing the token's identity.
        """
        supplied_token = self.get_body_argument("token")
        match = TokenValidate.validator.match(supplied_token)
        if not match:
            return self.error(((1, "Token format not recognized"), ))

        sess = Session()
        try:
            token_name = match.group("token_name")
            token_secret = match.group("token_secret")
            owner = User.get(sess, name=match.group("name"))

            token = UserToken.get(sess, owner, token_name)
            if token is None:
                return self.error(((2, "Token specified does not exist"), ))
            if not token.enabled:
                return self.error(((3, "Token is disabled"), ))
            if not token.check_secret(token_secret):
                return self.error(((4, "Token secret mismatch"), ))

            return self.success({
                "owner": owner.username,
                "identity": str(token),
                "act_as_owner": True,
                "valid": True,
            })
        finally:
            # The original leaked the session on every exit path; always
            # release it back to the pool.
            sess.close()
Example #30
0
    def get(self):
        """Export all user public keys as a CSV document."""
        fh = StringIO()
        w_csv = csv.writer(fh, lineterminator="\n")

        # header
        w_csv.writerow([
            'username',
            'created_at',
            'type',
            'size',
            'fingerprint',
            'comment',
        ])

        session = Session()
        try:
            # One row per (public key, owning user) pair.
            user_key_list = session.query(
                PublicKey, User).filter(User.id == PublicKey.user_id)
            for key, user in user_key_list:
                w_csv.writerow([
                    user.name,
                    key.created_on.isoformat(),
                    key.key_type,
                    key.key_size,
                    key.fingerprint,
                    sshpubkey.PublicKey.from_str(key.public_key).comment,
                ])
        finally:
            # The original never closed the session; always release it back
            # to the pool.
            session.close()

        self.set_header("Content-Type", "text/csv")
        self.write(fh.getvalue())
Example #31
0
    def get(self):
        """Export all user public keys as a CSV document."""
        buf = StringIO()
        writer = csv.writer(buf, lineterminator="\n")

        # header
        writer.writerow(
            [
                "username",
                "created_at",
                "type",
                "size",
                "fingerprint",
                "fingerprint_sha256",
                "comment",
            ]
        )

        with closing(Session()) as session:
            # One row per (public key, owning user) pair.
            pairs = session.query(PublicKey, User).filter(User.id == PublicKey.user_id)
            for key, user in pairs:
                row = [
                    user.name,
                    key.created_on.isoformat(),
                    key.key_type,
                    key.key_size,
                    key.fingerprint,
                    key.fingerprint_sha256,
                    key.comment,
                ]
                writer.writerow(row)

        self.set_header("Content-Type", "text/csv")
        self.write(buf.getvalue())
Example #32
0
def process_async_emails(
    settings: Settings, session: Session, now_ts: datetime, dry_run: bool = False
) -> int:
    """Send emails due before now

    This method finds and immediately sends any emails that have been scheduled to be sent before
    the now_ts.  Meant to be called from the background processing thread.

    Args:
        settings: The current Settings object for this application.
        session: Object for db session.
        now_ts: The time to use as the cutoff (send emails before this point).
        dry_run: If True, do not actually send any email, just generate and return how many emails
            would have been sent.

    Returns:
        Number of emails that were sent.
    """
    # NOTE: "== False" (not "is False") is required below — these are
    # SQLAlchemy column expressions, not Python boolean comparisons.
    emails = (
        session.query(AsyncNotification)
        .filter(AsyncNotification.sent == False, AsyncNotification.send_after < now_ts)
        .all()
    )
    sent_ct = 0
    for email in emails:
        # For atomicity, attempt to set the sent flag on this email to true if
        # and only if it's still false.
        update_ct = (
            session.query(AsyncNotification)
            .filter(AsyncNotification.id == email.id, AsyncNotification.sent == False)
            .update({"sent": True})
        )

        # If it's 0, someone else won the race. Bail.
        if update_ct == 0:
            continue

        try:
            if not dry_run:
                send_email_raw(settings, email.email, email.body)
            email.sent = True
            sent_ct += 1
        except smtplib.SMTPException:
            # Any sort of error with sending the email and we want to move on to
            # the next email. This email will be retried later.
            pass
    return sent_ct
Example #33
0
def session(request, tmpdir):
    """Pytest fixture: a DB-backed session, with schema setup and teardown."""
    engine = get_db_engine(db_url(tmpdir))

    # Build the schema and bind the global session factory to this engine.
    Model.metadata.create_all(engine)
    Session.configure(bind=engine)
    db_session = Session()

    def _teardown():
        # type: () -> None
        """Close the session; drop all tables if using a persistent database."""
        db_session.close()
        if "MEROU_TEST_DATABASE" in os.environ:
            Model.metadata.drop_all(engine)

    request.addfinalizer(_teardown)
    return db_session
Example #34
0
 def get(self, trace_uuid):
     """Serve the flamegraph SVG for a trace; empty response if the UUID is invalid."""
     try:
         svg = perf_profile.get_flamegraph_svg(Session(), trace_uuid)
     except perf_profile.InvalidUUID:
         # Unknown or malformed UUID: return an empty body, same as before.
         return
     self.set_header("Content-Type", "image/svg+xml")
     self.write(svg)
Example #35
0
def start_processor(args, settings, sentry_client):
    # type: (Namespace, BackgroundSettings, SentryProxy) -> None
    """Load plugins, bind the database, and run the background processor forever."""
    level_name = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(level_name))

    try:
        proxy = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(proxy)
    except PluginsDirectoryDoesNotExist as e:
        # Misconfiguration is fatal; nothing useful can run without plugins.
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # Bind the global session factory to the configured database.
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    BackgroundProcessor(settings, sentry_client).run()
Example #36
0
def start_processor(args, settings):
    # type: (Namespace, BackgroundSettings) -> None
    """Initialize plugins and the database, then run the background processor."""
    logging.info("begin. log_level={}".format(logging.getLevelName(logging.getLogger().level)))

    try:
        plugin_proxy = PluginProxy.load_plugins(settings, "grouper-background")
        set_global_plugin_proxy(plugin_proxy)
    except PluginsDirectoryDoesNotExist as e:
        # Cannot continue without the plugin directory.
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # Bind the session factory to the configured database engine.
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(settings.database))

    processor = BackgroundProcessor(settings, plugin_proxy)
    processor.run()
Example #37
0
    def run(self):
        # type: () -> None
        """Periodically rebuild the in-memory graph from the database.

        Sleeps for refresh_interval seconds between attempts.  On database
        connectivity errors the engine is rebound (re-reading the database
        URL) so a subsequent iteration can recover.
        """
        while True:
            sleep(self.refresh_interval)

            logging.debug("Updating Graph from Database.")
            try:
                session = Session()
                try:
                    self.graph.update_from_db(session)
                finally:
                    # Always release the connection, even when the update
                    # raises; previously a failed update leaked the session.
                    session.close()
                stats.set_gauge("successful-db-update", 1)
            except OperationalError:
                # Likely a stale connection or a changed DB URL: rebind the
                # engine so the next iteration can reconnect.
                Session.configure(bind=get_db_engine(get_database_url(self.settings)))
                logging.critical("Failed to connect to database.")
                stats.set_gauge("successful-db-update", 0)
                self.capture_exception()
            except:
                # Record the failure, then re-raise so the thread dies loudly.
                stats.set_gauge("successful-db-update", 0)
                self.capture_exception()
                raise
Example #38
0
def start_processor(args, sentry_client):
    # type: (argparse.Namespace, SentryProxy) -> None
    """Configure plugins, database, and live config, then run the processor."""

    logging.info("begin. log_level={}".format(logging.getLevelName(logging.getLogger().level)))

    try:
        initialize_plugins(settings.plugin_dirs, settings.plugin_module_paths, "grouper-background")
    except PluginsDirectoryDoesNotExist as e:
        # Fatal misconfiguration: bail out immediately.
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # Bind the global session factory to the configured database.
    logging.debug("configure database session")
    Session.configure(bind=get_db_engine(get_database_url(settings)))

    # Keep settings refreshed from the config file while the daemon runs.
    settings.start_config_thread(args.config, "background")

    BackgroundProcessor(settings, sentry_client).run()
Example #39
0
    def get_or_create_user(self, username):
        # type: (str) -> Optional[User]
        """Retrieve or create the User object for the authenticated user.

        Kept separate from Tornado's magic get_current_user because exceptions
        raised there are swallowed by Tornado rather than propagated; prepare
        calls this directly so InvalidUser/DatabaseFailure can be turned into
        an error page.
        """
        if not username:
            return None

        # Only fully qualified usernames are accepted.
        if re.match("^{}$".format(USERNAME_VALIDATION), username) is None:
            raise InvalidUser("{} does not match {}".format(username, USERNAME_VALIDATION))

        try:
            user, created = User.get_or_create(self.session, username=username)
            if created:
                logging.info("Created new user %s", username)
                self.session.commit()
                # The graph does not initialize until the updates table has
                # rows, so refresh it here in case this was the first update.
                self.graph.update_from_db(self.session)
        except sqlalchemy.exc.OperationalError:
            # Database trouble: rebind the engine (this re-invokes the URL
            # fetcher in case the connection string changed) and signal the
            # failure to the caller.
            Session.configure(bind=get_db_engine(settings().database))
            raise DatabaseFailure()

        # Service accounts are, by definition, not interactive users.
        if user.is_service_account:
            raise InvalidUser("{} is a service account".format(username))

        return user
    def run(self):
        # type: () -> None
        """Loop forever: run one background maintenance pass, then sleep."""

        def _record(success):
            # type: (int) -> None
            # The two gauges are mirrored so dashboards can alert on either.
            stats.log_gauge("successful-background-update", success)
            stats.log_gauge("failed-background-update", 1 - success)

        while True:
            try:
                with closing(Session()) as session:
                    self.logger.info("Expiring edges....")
                    self.expire_edges(session)

                    self.logger.info("Expiring nonauditor approvers in audited groups...")
                    self.expire_nonauditors(session)

                    self.logger.info("Sending emails...")
                    process_async_emails(self.settings, session, datetime.utcnow())

                    self.logger.info("Pruning old traces....")
                    prune_old_traces(session)

                    session.commit()

                _record(1)
            except OperationalError:
                # Connection trouble: rebind the session factory so the next
                # pass can reconnect, then report the failure.
                Session.configure(bind=get_db_engine(get_database_url(self.settings)))
                self.logger.critical("Failed to connect to database.")
                _record(0)
                self._capture_exception()
            except:
                # Unexpected error: record it, then re-raise to kill the thread.
                _record(0)
                self._capture_exception()
                self.logger.exception("Unexpected exception occurred in background thread.")
                raise

            self.logger.debug("Sleeping for {} seconds...".format(self.settings.sleep_interval))
            sleep(self.settings.sleep_interval)
Example #41
0
def make_session():
    """Bind the global Session factory to the configured database and return a session."""
    Session.configure(bind=get_db_engine(get_database_url(settings)))
    return Session()
Example #42
0
def start_server(args, settings, sentry_client):
    # type: (Namespace, FrontendSettings, SentryProxy) -> None
    """Configure and run the frontend HTTP server.

    Loads plugins, binds the database, sets up the listening socket (either
    bound to an address or adopted from stdin), forks worker processes, and
    then starts the graph refresh thread and the Tornado IOLoop.
    """
    log_level = logging.getLevelName(logging.getLogger().level)
    logging.info("begin. log_level={}".format(log_level))

    # Debug mode (with autoreload) cannot coexist with forked worker processes.
    assert not (
        settings.debug and settings.num_processes > 1
    ), "debug mode does not support multiple processes"

    try:
        plugins = PluginProxy.load_plugins(settings, "grouper-fe")
        set_global_plugin_proxy(plugins)
    except PluginsDirectoryDoesNotExist as e:
        logging.fatal("Plugin directory does not exist: {}".format(e))
        sys.exit(1)

    # setup database
    logging.debug("configure database session")
    if args.database_url:
        # A database URL given on the command line overrides settings.
        settings.database = args.database_url
    Session.configure(bind=get_db_engine(settings.database))

    application = create_fe_application(settings, args.deployment_name)
    ssl_context = plugins.get_ssl_context()

    if args.listen_stdin:
        # Adopt an already-listening socket passed in on stdin (e.g. from a
        # process supervisor) instead of binding our own.
        logging.info(
            "Starting application server with %d processes on stdin", settings.num_processes
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        if PY2:
            s = socket.fromfd(sys.stdin.fileno(), socket.AF_INET, socket.SOCK_STREAM)
            s.setblocking(False)
            s.listen(5)
        else:
            s = socket.socket(fileno=sys.stdin.fileno())
            s.setblocking(False)
            s.listen()
        server.add_sockets([s])
    else:
        # Bind a fresh socket; CLI flags take precedence over settings.
        address = args.address or settings.address
        port = args.port or settings.port
        logging.info(
            "Starting application server with %d processes on %s:%d",
            settings.num_processes,
            address,
            port,
        )
        server = HTTPServer(application, ssl_options=ssl_context)
        server.bind(port, address=address)

    # When using multiple processes, the forking happens here
    server.start(settings.num_processes)

    stats.set_defaults()

    # Create the Graph and start the graph update thread post fork to ensure each process gets
    # updated.
    with closing(Session()) as session:
        graph = Graph()
        graph.update_from_db(session)

    refresher = DbRefreshThread(settings, graph, settings.refresh_interval, sentry_client)
    refresher.daemon = True
    refresher.start()

    try:
        IOLoop.current().start()
    except KeyboardInterrupt:
        IOLoop.current().stop()
    finally:
        print("Bye")
Example #43
0
 def create_session(self):
     # type: () -> Session
     """Return a new session bound to the settings-configured database engine."""
     engine = get_db_engine(self.settings.database)
     Session.configure(bind=engine)
     return Session()