async def test_expiration(setup: SetupTest) -> None:
    """The cache is valid until half the lifetime of the child token."""
    token_data = await setup.create_session_token(scopes=["read:all"])
    lifetime = setup.config.token_lifetime
    now = current_datetime()
    storage = RedisStorage(TokenData, setup.config.session_secret, setup.redis)
    token_store = TokenRedisStore(storage, setup.logger)
    token_cache = setup.factory.create_token_cache()

    # Store a token whose expiration is five seconds more than half the
    # typical token lifetime in the future and cache that token as an
    # internal token for our session token.
    created = now - timedelta(seconds=lifetime.total_seconds() // 2)
    expires = created + setup.config.token_lifetime + timedelta(seconds=5)
    internal_token_data = TokenData(
        token=Token(),
        username=token_data.username,
        token_type=TokenType.internal,
        scopes=["read:all"],
        created=created,
        expires=expires,
    )
    await token_store.store_data(internal_token_data)
    token_cache.store_internal_token(
        internal_token_data.token, token_data, "some-service", ["read:all"]
    )

    # The cache should return this token.
    assert internal_token_data.token == await token_cache.get_internal_token(
        token_data, "some-service", ["read:all"]
    )

    # Now change the expiration to be twenty seconds earlier, which should
    # make the remaining lifetime less than half the total lifetime, and
    # replace the stored token with that new version.
    internal_token_data.expires = expires - timedelta(seconds=20)
    await token_store.store_data(internal_token_data)

    # The cache should now decline to return the token.
    assert not await token_cache.get_internal_token(
        token_data, "some-service", ["read:all"]
    )

    # Do the same test with a notebook token.
    notebook_token_data = TokenData(
        token=Token(),
        username=token_data.username,
        token_type=TokenType.notebook,
        scopes=["read:all"],
        created=created,
        expires=expires,
    )
    await token_store.store_data(notebook_token_data)
    token_cache.store_notebook_token(notebook_token_data.token, token_data)
    token = notebook_token_data.token
    assert token == await token_cache.get_notebook_token(token_data)
    notebook_token_data.expires = expires - timedelta(seconds=20)
    await token_store.store_data(notebook_token_data)
    assert not await token_cache.get_notebook_token(token_data)
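# Both copies of this test (this one and the fixture-based version below)
# exercise the same reuse rule, which fits in a few lines.  The helper below
# is only an illustrative sketch of that rule, not the cache implementation:
# a cached child token is reused only while more than half of the configured
# token lifetime remains.
from datetime import datetime, timedelta


def cached_token_is_reusable(
    expires: datetime, token_lifetime: timedelta, now: datetime
) -> bool:
    """Sketch: reuse a cached child token only while more than half of the
    configured token lifetime remains before it expires."""
    return expires - now > token_lifetime / 2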
async def test_invalid(config: Config, factory: ComponentFactory) -> None: redis = await redis_dependency() token_service = factory.create_token_service() expires = int(timedelta(days=1).total_seconds()) # No such key. token = Token() assert await token_service.get_data(token) is None # Invalid encrypted blob. await redis.set(f"token:{token.key}", "foo", ex=expires) assert await token_service.get_data(token) is None # Malformed session. fernet = Fernet(config.session_secret.encode()) raw_data = fernet.encrypt(b"malformed json") await redis.set(f"token:{token.key}", raw_data, ex=expires) assert await token_service.get_data(token) is None # Mismatched token. data = TokenData( token=Token(), username="******", token_type=TokenType.session, scopes=[], created=int(current_datetime().timestamp()), name="Some User", uid=12345, ) session = fernet.encrypt(data.json().encode()) await redis.set(f"token:{token.key}", session, ex=expires) assert await token_service.get_data(token) is None # Missing required fields. json_data = { "token": { "key": token.key, "secret": token.secret, }, "token_type": "session", "scopes": [], "created": int(current_datetime().timestamp()), "name": "Some User", } raw_data = fernet.encrypt(json.dumps(json_data).encode()) await redis.set(f"token:{token.key}", raw_data, ex=expires) assert await token_service.get_data(token) is None # Fix the session store and confirm we can retrieve the manually-stored # session. json_data["username"] = "******" raw_data = fernet.encrypt(json.dumps(json_data).encode()) await redis.set(f"token:{token.key}", raw_data, ex=expires) new_data = await token_service.get_data(token) assert new_data == TokenData.parse_obj(json_data)
async def test_invalid(factory: ComponentFactory) -> None: """Invalid tokens should not be returned even if cached.""" token_data = await create_session_token(factory, scopes=["read:all"]) token_cache = factory.create_token_cache_service() internal_token = Token() notebook_token = Token() token_cache.store_internal_token(internal_token, token_data, "some-service", ["read:all"]) token_cache.store_notebook_token(notebook_token, token_data) assert internal_token != await token_cache.get_internal_token( token_data, "some-service", ["read:all"], "127.0.0.1") assert notebook_token != await token_cache.get_notebook_token( token_data, "127.0.0.1")
async def test_invalid(setup: SetupTest) -> None: """Invalid tokens should not be returned even if cached.""" token_data = await setup.create_session_token(scopes=["read:all"]) token_cache = setup.factory.create_token_cache() internal_token = Token() notebook_token = Token() token_cache.store_internal_token( internal_token, token_data, "some-service", ["read:all"] ) token_cache.store_notebook_token(notebook_token, token_data) assert not await token_cache.get_internal_token( token_data, "some-service", ["read:all"] ) assert not await token_cache.get_notebook_token(token_data)
async def create_session_token(self, user_info: TokenUserInfo, *, scopes: List[str], ip_address: str) -> Token: """Create a new session token. Parameters ---------- user_info : `gafaelfawr.models.token.TokenUserInfo` The user information to associate with the token. scopes : List[`str`] The scopes of the token. ip_address : `str` The IP address from which the request came. Returns ------- token : `gafaelfawr.models.token.Token` The newly-created token. Raises ------ gafaelfawr.exceptions.PermissionDeniedError If the provided username is invalid. """ self._validate_username(user_info.username) scopes = sorted(scopes) token = Token() created = current_datetime() expires = created + self._config.token_lifetime data = TokenData( token=token, token_type=TokenType.session, scopes=scopes, created=created, expires=expires, **user_info.dict(), ) history_entry = TokenChangeHistoryEntry( token=token.key, username=data.username, token_type=TokenType.session, scopes=scopes, expires=expires, actor=data.username, action=TokenChange.create, ip_address=ip_address, event_time=created, ) await self._token_redis_store.store_data(data) with self._transaction_manager.transaction(): self._token_db_store.add(data) self._token_change_store.add(history_entry) return token
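# A minimal usage sketch for the method above, not taken from the test
# suite.  The username and scopes are hypothetical, and the ComponentFactory
# import location is an assumption; the call mirrors how the tests in this
# section construct a token service.
from gafaelfawr.factory import ComponentFactory  # assumed import location
from gafaelfawr.models.token import Token, TokenGroup, TokenUserInfo


async def example_create_session_token(factory: ComponentFactory) -> Token:
    # Build the user information that the new session token should carry.
    user_info = TokenUserInfo(
        username="example-user",  # hypothetical username
        name="Example Person",
        uid=4137,
        groups=[TokenGroup(name="foo", id=1000)],
    )
    token_service = factory.create_token_service()
    # Scopes are sorted and stored with the token; the IP address is only
    # recorded in the token change history.
    return await token_service.create_session_token(
        user_info,
        scopes=["read:all", "user:token"],
        ip_address="127.0.0.1",
    )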
async def add_expired_session_token( user_info: TokenUserInfo, *, scopes: List[str], ip_address: str, session: AsyncSession, ) -> None: """Add an expired session token to the database. This requires going beneath the service layer, since the service layer rejects creation of expired tokens (since apart from testing this isn't a sensible thing to want to do). This does not add the token to Redis, since Redis will refuse to add it with a negative expiration time, so can only be used for tests that exclusively use the database. Parameters ---------- user_info : `gafaelfawr.models.token.TokenUserInfo` The user information to associate with the token. scopes : List[`str`] The scopes of the token. ip_address : `str` The IP address from which the request came. session : `sqlalchemy.ext.asyncio.AsyncSession` The database session. """ token_db_store = TokenDatabaseStore(session) token_change_store = TokenChangeHistoryStore(session) token = Token() created = current_datetime() expires = created - timedelta(minutes=10) data = TokenData( token=token, token_type=TokenType.session, scopes=scopes, created=created, expires=expires, **user_info.dict(), ) history_entry = TokenChangeHistoryEntry( token=token.key, username=data.username, token_type=TokenType.session, scopes=scopes, expires=expires, actor=data.username, action=TokenChange.create, ip_address=ip_address, event_time=created, ) await token_db_store.add(data) await token_change_store.add(history_entry)
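# A short sketch of how a test might call this helper, assuming an
# AsyncSession fixture.  The username is hypothetical.
from sqlalchemy.ext.asyncio import AsyncSession

from gafaelfawr.models.token import TokenUserInfo


async def example_add_expired_token(session: AsyncSession) -> None:
    user_info = TokenUserInfo(
        username="example-user",  # hypothetical username
        name="Example Person",
        uid=4137,
    )
    # The expired token is written only to the database, so only
    # database-backed views of the token system will see it.
    await add_expired_session_token(
        user_info,
        scopes=["read:all"],
        ip_address="127.0.0.1",
        session=session,
    )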
async def test_invalid_auth(setup: SetupTest) -> None: r = await setup.client.get( "/auth", params={"scope": "exec:admin"}, headers={"Authorization": "Bearer"}, ) assert r.status_code == 400 authenticate = parse_www_authenticate(r.headers["WWW-Authenticate"]) assert isinstance(authenticate, AuthErrorChallenge) assert authenticate.auth_type == AuthType.Bearer assert authenticate.realm == setup.config.realm assert authenticate.error == AuthError.invalid_request r = await setup.client.get( "/auth", params={"scope": "exec:admin"}, headers={"Authorization": "token foo"}, ) assert r.status_code == 400 authenticate = parse_www_authenticate(r.headers["WWW-Authenticate"]) assert isinstance(authenticate, AuthErrorChallenge) assert authenticate.auth_type == AuthType.Bearer assert authenticate.realm == setup.config.realm assert authenticate.error == AuthError.invalid_request r = await setup.client.get( "/auth", params={"scope": "exec:admin"}, headers={"Authorization": "Bearer token"}, ) assert r.status_code == 401 assert r.headers["Cache-Control"] == "no-cache, must-revalidate" authenticate = parse_www_authenticate(r.headers["WWW-Authenticate"]) assert isinstance(authenticate, AuthErrorChallenge) assert authenticate.auth_type == AuthType.Bearer assert authenticate.realm == setup.config.realm assert authenticate.error == AuthError.invalid_token # Create a nonexistent token. token = Token() r = await setup.client.get( "/auth", params={"scope": "exec:admin"}, headers={"Authorization": f"Bearer {token}"}, ) assert r.status_code == 401 assert r.headers["Cache-Control"] == "no-cache, must-revalidate" authenticate = parse_www_authenticate(r.headers["WWW-Authenticate"]) assert isinstance(authenticate, AuthErrorChallenge) assert authenticate.auth_type == AuthType.Bearer assert authenticate.realm == setup.config.realm assert authenticate.error == AuthError.invalid_token
def parse_settings(path: Path, fix_token: bool = False) -> None: """Parse the settings file and see if any exceptions are thrown. Parameters ---------- path : `pathlib.Path` The path to the settings file to test. fix_token : `bool`, optional Whether to fix an invalid ``bootstrap_token`` before checking the settings file. Some examples have intentionally invalid tokens. """ with path.open("r") as f: settings = yaml.safe_load(f) # Avoid errors from an invalid bootstrap token in one of the examples. if fix_token and "bootstrap_token" in settings: settings["bootstrap_token"] = str(Token()) Settings.parse_obj(settings)
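# A hypothetical invocation of the helper above; the directory containing
# the example settings files is an assumption.
from pathlib import Path


def check_example_settings() -> None:
    examples_path = Path("examples")  # assumed location of example files
    for settings_path in sorted(examples_path.glob("*.yaml")):
        # Fix intentionally invalid bootstrap tokens before validating.
        parse_settings(settings_path, fix_token=True)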
async def test_login_no_auth(
    client: AsyncClient, config: Config, factory: ComponentFactory
) -> None:
    r = await client.get("/auth/api/v1/login")
    assert_unauthorized_is_correct(r, config)

    # An Authorization header with a valid token is not sufficient; the
    # request is still rejected.
    token_data = await create_session_token(factory)
    r = await client.get(
        "/auth/api/v1/login",
        headers={"Authorization": f"bearer {token_data.token}"},
    )
    assert_unauthorized_is_correct(r, config)

    # A token with no underlying Redis representation is ignored.
    state = State(token=Token())
    r = await client.get(
        "/auth/api/v1/login",
        cookies={COOKIE_NAME: await state.as_cookie()},
    )
    assert_unauthorized_is_correct(r, config)

    # Likewise with a cookie containing a malformed token.  This requires a
    # bit more work to assemble.
    key = config.session_secret.encode()
    fernet = Fernet(key)
    data = {"token": "bad-token"}
    bad_cookie = fernet.encrypt(json.dumps(data).encode()).decode()
    r = await client.get(
        "/auth/api/v1/login",
        cookies={COOKIE_NAME: bad_cookie},
    )
    assert_unauthorized_is_correct(r, config)

    # And finally check with a mangled state that won't decrypt.
    bad_cookie = "XXX" + await state.as_cookie()
    r = await client.get(
        "/auth/api/v1/login",
        cookies={COOKIE_NAME: bad_cookie},
    )
    assert_unauthorized_is_correct(r, config)
async def test_ignore(setup: SetupTest, mock_kubernetes: MockCoreV1Api) -> None: assert setup.config.kubernetes kubernetes_service = setup.factory.create_kubernetes_service() # Create a secret without the expected label. secret_one = V1Secret( api_version="v1", data={"foo": "bar"}, metadata=V1ObjectMeta(name="secret-one", namespace="mobu"), type="Opaque", ) mock_kubernetes.create_namespaced_secret("mobu", secret_one) # Create a secret with the expected label but a different value. secret_two = V1Secret( api_version="v1", data={"token": token_as_base64(Token())}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "other"}, name="secret-two", namespace="elsewhere", ), type="Opaque", ) mock_kubernetes.create_namespaced_secret("elsewhere", secret_two) # Update the secrets. Both of our secrets should survive unmolested. await kubernetes_service.update_service_secrets() objects = mock_kubernetes.get_all_objects_for_test() assert secret_one in objects assert secret_two in objects # Delete our secrets and then check that the created secrets are right. mock_kubernetes.delete_namespaced_secret("secret-one", "mobu") mock_kubernetes.delete_namespaced_secret("secret-two", "elsewhere") await assert_kubernetes_secrets_match_config(setup, mock_kubernetes)
async def test_create_not_ours(
    setup: SetupTest,
    mock_kubernetes: MockCoreV1Api,
    caplog: LogCaptureFixture,
) -> None:
    assert setup.config.kubernetes
    assert len(setup.config.kubernetes.service_secrets) >= 1
    service_secret = setup.config.kubernetes.service_secrets[-1]
    kubernetes_service = setup.factory.create_kubernetes_service()

    # Create a secret that should exist but doesn't have our annotation.
    secret = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(
            name=service_secret.secret_name,
            namespace=service_secret.secret_namespace,
        ),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret(
        service_secret.secret_namespace, secret
    )

    # Now run the synchronization.  The secret should be left unchanged, and
    # we should log an error about failing to create it.
    await kubernetes_service.update_service_secrets()
    objects = mock_kubernetes.get_all_objects_for_test()
    assert secret in objects
    assert json.loads(caplog.record_tuples[-1][2]) == {
        "event": (
            f"Creating {service_secret.secret_namespace}"
            f"/{service_secret.secret_name} failed"
        ),
        "error": (
            f"Kubernetes API error: (500)\n"
            f"Reason: {service_secret.secret_namespace}"
            f"/{service_secret.secret_name} exists\n"
        ),
        "level": "error",
        "logger": "gafaelfawr",
    }
def test_token() -> None: token = Token() assert str(token).startswith("gt-")
async def test_expiration(config: Config, factory: ComponentFactory) -> None:
    """The cache is valid until half the lifetime of the child token."""
    token_data = await create_session_token(factory, scopes=["read:all"])
    lifetime = config.token_lifetime
    now = current_datetime()
    redis = await redis_dependency()
    logger = structlog.get_logger(config.safir.logger_name)
    storage = RedisStorage(TokenData, config.session_secret, redis)
    token_store = TokenRedisStore(storage, logger)
    token_cache = factory.create_token_cache_service()

    # Store a token whose expiration is five seconds more than half the
    # typical token lifetime in the future and cache that token as an
    # internal token for our session token.
    created = now - timedelta(seconds=lifetime.total_seconds() // 2)
    expires = created + lifetime + timedelta(seconds=5)
    internal_token_data = TokenData(
        token=Token(),
        username=token_data.username,
        token_type=TokenType.internal,
        scopes=["read:all"],
        created=created,
        expires=expires,
    )
    await token_store.store_data(internal_token_data)
    token_cache.store_internal_token(
        internal_token_data.token, token_data, "some-service", ["read:all"]
    )

    # The cache should return this token.
    assert internal_token_data.token == await token_cache.get_internal_token(
        token_data, "some-service", ["read:all"], "127.0.0.1"
    )

    # Now change the expiration to be twenty seconds earlier, which should
    # make the remaining lifetime less than half the total lifetime, and
    # replace the stored token with that new version.
    internal_token_data.expires = expires - timedelta(seconds=20)
    await token_store.store_data(internal_token_data)

    # The cache should now decline to return the token and generate a new
    # one.
    old_token = internal_token_data.token
    async with factory.session.begin():
        assert old_token != await token_cache.get_internal_token(
            token_data, "some-service", ["read:all"], "127.0.0.1"
        )

    # Do the same test with a notebook token.
    notebook_token_data = TokenData(
        token=Token(),
        username=token_data.username,
        token_type=TokenType.notebook,
        scopes=["read:all"],
        created=created,
        expires=expires,
    )
    await token_store.store_data(notebook_token_data)
    token_cache.store_notebook_token(notebook_token_data.token, token_data)
    assert notebook_token_data.token == await token_cache.get_notebook_token(
        token_data, "127.0.0.1"
    )
    notebook_token_data.expires = expires - timedelta(seconds=20)
    await token_store.store_data(notebook_token_data)
    old_token = notebook_token_data.token
    async with factory.session.begin():
        assert old_token != await token_cache.get_notebook_token(
            token_data, "127.0.0.1"
        )
async def test_errors_replace_read(
    factory: ComponentFactory, mock_kubernetes: MockKubernetesApi
) -> None:
    await create_test_service_tokens(mock_kubernetes)
    kubernetes_service = factory.create_kubernetes_service(MagicMock())
    token_service = factory.create_token_service()

    # Create a secret that should exist but has an invalid token.
    secret = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"),
        type="Opaque",
    )
    await mock_kubernetes.create_namespaced_secret("mobu", secret)

    # Simulate some errors.  The callback function takes the operation and
    # the secret name.
    def error_callback_replace(method: str, *args: Any) -> None:
        if method == "replace_namespaced_secret":
            raise ApiException(status=500, reason="Some error")

    mock_kubernetes.error_callback = error_callback_replace

    # Now run the synchronization.  The secret should be left unchanged, but
    # we should still create the missing nublado2 secret.
    await kubernetes_service.update_service_tokens()
    objects = mock_kubernetes.get_all_objects_for_test("Secret")
    assert secret in objects
    good_secret = await mock_kubernetes.read_namespaced_secret(
        "gafaelfawr", "nublado2"
    )
    assert await token_data_from_secret(token_service, good_secret)

    # We should have also updated the status of the parent custom object.
    service_token = await mock_kubernetes.get_namespaced_custom_object(
        "gafaelfawr.lsst.io",
        "v1alpha1",
        "mobu",
        "gafaelfawrservicetokens",
        "gafaelfawr-secret",
    )
    assert service_token["status"]["conditions"] == [
        {
            "lastTransitionTime": ANY,
            "message": "Kubernetes API error: (500)\nReason: Some error\n",
            "observedGeneration": 1,
            "reason": StatusReason.Failed.value,
            "status": "False",
            "type": "SecretCreated",
        }
    ]

    # Try again, but simulating an error in retrieving a secret.
    def error_callback_read(method: str, *args: Any) -> None:
        if method == "read_namespaced_secret":
            raise ApiException(status=500, reason="Some error")

    mock_kubernetes.error_callback = error_callback_read

    # Now run the synchronization.  As before, the secret should be left
    # unchanged, and the good secret should also be left unchanged.
    await kubernetes_service.update_service_tokens()
    objects = mock_kubernetes.get_all_objects_for_test("Secret")
    assert secret in objects
async def test_analyze_token(setup: SetupTest) -> None: token = Token() # Handle with no session. r = await setup.client.post("/auth/analyze", data={"token": str(token)}) assert r.status_code == 200 assert r.json() == { "handle": token.dict(), "token": {"errors": ["Invalid token"], "valid": False}, } # Valid token. token_data = await setup.create_session_token( group_names=["foo", "bar"], scopes=["admin:token", "read:all"] ) assert token_data.expires assert token_data.groups token = token_data.token r = await setup.client.post("/auth/analyze", data={"token": str(token)}) # Check that the results from /analyze include the token components and # the token information. assert r.status_code == 200 assert r.json() == { "handle": token.dict(), "token": { "data": { "exp": int(token_data.expires.timestamp()), "iat": int(token_data.created.timestamp()), "isMemberOf": [g.dict() for g in token_data.groups], "name": token_data.name, "scope": "admin:token read:all", "sub": token_data.username, "uid": token_data.username, "uidNumber": str(token_data.uid), }, "valid": True, }, } # Create a session token with minimum data. token_data.name = None token_data.uid = None token_data.groups = None token_service = setup.factory.create_token_service() user_token = await token_service.create_user_token( token_data, token_data.username, token_name="foo", scopes=[], expires=None, ip_address="127.0.0.1", ) user_token_data = await token_service.get_data(user_token) assert user_token_data # Check that the correct fields are omitted and nothing odd happens. r = await setup.client.post( "/auth/analyze", data={"token": str(user_token)} ) assert r.status_code == 200 assert r.json() == { "handle": user_token.dict(), "token": { "data": { "iat": int(user_token_data.created.timestamp()), "scope": "", "sub": user_token_data.username, "uid": user_token_data.username, }, "valid": True, }, }
async def get_notebook_token(self, token_data: TokenData, ip_address: str) -> Token: """Get or create a new notebook token. The new token will have the same expiration time as the existing token on which it's based unless that expiration time is longer than the expiration time of normal interactive tokens, in which case it will be capped at the interactive token expiration time. Parameters ---------- token_data : `gafaelfawr.models.token.TokenData` The authentication data on which to base the new token. ip_address : `str` The IP address from which the request came. Returns ------- token : `gafaelfawr.models.token.Token` The newly-created token. Raises ------ gafaelfawr.exceptions.PermissionDeniedError If the username is invalid. """ self._validate_username(token_data.username) # See if there is a cached token. token = await self._token_cache.get_notebook_token(token_data) if token: return token # See if there's already a matching notebook token. key = self._token_db_store.get_notebook_token_key( token_data, self._minimum_expiration(token_data)) if key: data = await self._token_redis_store.get_data_by_key(key) if data: self._token_cache.store_notebook_token(data.token, token_data) return data.token # There is not, so we need to create a new one. token = Token() created = current_datetime() expires = created + self._config.token_lifetime if token_data.expires and token_data.expires < expires: expires = token_data.expires data = TokenData( token=token, username=token_data.username, token_type=TokenType.notebook, scopes=token_data.scopes, created=created, expires=expires, name=token_data.name, email=token_data.email, uid=token_data.uid, groups=token_data.groups, ) history_entry = TokenChangeHistoryEntry( token=token.key, username=data.username, token_type=TokenType.notebook, parent=token_data.token.key, scopes=data.scopes, expires=expires, actor=token_data.username, action=TokenChange.create, ip_address=ip_address, event_time=created, ) await self._token_redis_store.store_data(data) with self._transaction_manager.transaction(): self._token_db_store.add(data, parent=token_data.token.key) self._token_change_store.add(history_entry) # Cache the token and return it. self._logger.info("Created new notebook token", key=token.key) self._token_cache.store_notebook_token(token, token_data) return token
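# The expiration handling described in the docstring reduces to a small
# calculation.  The helper below is only a sketch of that rule, not the
# service code: start from the configured interactive token lifetime and
# never let the notebook token outlive its parent.
from datetime import datetime, timedelta
from typing import Optional


def capped_notebook_expiration(
    created: datetime,
    token_lifetime: timedelta,
    parent_expires: Optional[datetime],
) -> datetime:
    """Sketch of the expiration cap applied to new notebook tokens."""
    expires = created + token_lifetime
    if parent_expires and parent_expires < expires:
        expires = parent_expires
    return expires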
async def create_token_from_admin_request( self, request: AdminTokenRequest, auth_data: TokenData, *, ip_address: Optional[str], ) -> Token: """Create a new service or user token from an admin request. Parameters ---------- request : `gafaelfawr.models.token.AdminTokenRequest` The incoming request. auth_data : `gafaelfawr.models.token.TokenData` The data for the authenticated user making the request. ip_address : `str` or `None` The IP address from which the request came, or `None` for internal requests by Gafaelfawr. Returns ------- token : `gafaelfawr.models.token.Token` The newly-created token. Raises ------ gafaelfawr.exceptions.PermissionDeniedError If the provided username is invalid. """ self._check_authorization(request.username, auth_data, require_admin=True) self._validate_username(request.username) self._validate_scopes(request.scopes) self._validate_expires(request.expires) token = Token() created = current_datetime() data = TokenData( token=token, username=request.username, token_type=request.token_type, scopes=request.scopes, created=created, expires=request.expires, name=request.name, email=request.email, uid=request.uid, groups=request.groups, ) history_entry = TokenChangeHistoryEntry( token=token.key, username=data.username, token_type=data.token_type, token_name=request.token_name, scopes=data.scopes, expires=request.expires, actor=auth_data.username, action=TokenChange.create, ip_address=ip_address, event_time=created, ) await self._token_redis_store.store_data(data) with self._transaction_manager.transaction(): self._token_db_store.add(data, token_name=request.token_name) self._token_change_store.add(history_entry) if data.token_type == TokenType.user: self._logger.info( "Created new user token", key=token.key, token_name=request.token_name, token_scope=",".join(data.scopes), token_username=data.username, ) else: self._logger.info( "Created new service token", key=token.key, token_scope=",".join(data.scopes), token_username=data.username, ) return token
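# A hypothetical call mirroring the Kubernetes tests in this section:
# creating a service token on Gafaelfawr's own behalf, with ip_address=None
# marking an internal request.  The service username and the TokenService
# import location are assumptions.
from gafaelfawr.models.token import (
    AdminTokenRequest,
    Token,
    TokenData,
    TokenType,
)
from gafaelfawr.services.token import TokenService  # assumed import location


async def example_admin_service_token(token_service: TokenService) -> Token:
    request = AdminTokenRequest(
        username="bot-example",  # hypothetical service username
        token_type=TokenType.service,
        scopes=["read:all"],
    )
    # TokenData.internal_token() supplies the acting identity for requests
    # made by Gafaelfawr itself, as in the Kubernetes secret tests.
    return await token_service.create_token_from_admin_request(
        request, TokenData.internal_token(), ip_address=None
    )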
async def create_user_token( self, auth_data: TokenData, username: str, *, token_name: str, scopes: List[str], expires: Optional[datetime] = None, ip_address: str, ) -> Token: """Add a new user token. Parameters ---------- auth_data : `gafaelfawr.models.token.TokenData` The token data for the authentication token of the user creating a user token. username : `str` The username for which to create a token. token_name : `str` The name of the token. scopes : List[`str`] The scopes of the token. expires : `datetime` or `None` When the token should expire. If not given, defaults to the expiration of the authentication token taken from ``data``. ip_address : `str` The IP address from which the request came. Returns ------- token : `gafaelfawr.models.token.Token` The newly-created token. Raises ------ gafaelfawr.exceptions.DuplicateTokenNameError A token with this name for this user already exists. gafaelfawr.exceptions.InvalidExpiresError The provided expiration time was invalid. gafaelfawr.exceptions.PermissionDeniedError If the given username didn't match the user information in the authentication token, or if the specified username is invalid. Notes ----- This can only be used by the user themselves, not by a token administrator, because this API does not provide a way to set the additional user information for the token. Once the user information no longer needs to be tracked by the token system, it can be unified with ``create_token_from_admin_request``. """ self._check_authorization(username, auth_data, require_same_user=True) self._validate_username(username) self._validate_expires(expires) self._validate_scopes(scopes, auth_data) scopes = sorted(scopes) token = Token() created = current_datetime() data = TokenData( token=token, username=username, token_type=TokenType.user, scopes=scopes, created=created, expires=expires, name=auth_data.name, email=auth_data.email, uid=auth_data.uid, groups=auth_data.groups, ) history_entry = TokenChangeHistoryEntry( token=token.key, username=data.username, token_type=TokenType.user, token_name=token_name, scopes=scopes, expires=expires, actor=auth_data.username, action=TokenChange.create, ip_address=ip_address, event_time=created, ) await self._token_redis_store.store_data(data) with self._transaction_manager.transaction(): self._token_db_store.add(data, token_name=token_name) self._token_change_store.add(history_entry) self._logger.info( "Created new user token", key=token.key, token_name=token_name, token_scope=",".join(data.scopes), ) return token
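# A matching sketch for user tokens, mirroring the calls in the analyze and
# internal-token tests: the authenticated user creates a named token for
# themselves.  The token name and scopes are illustrative, and the
# TokenService import location is an assumption.
from gafaelfawr.models.token import Token, TokenData
from gafaelfawr.services.token import TokenService  # assumed import location


async def example_create_user_token(
    token_service: TokenService, auth_data: TokenData
) -> Token:
    # Only the user themselves may call this, so the username comes from
    # their own authentication data.
    return await token_service.create_user_token(
        auth_data,
        auth_data.username,
        token_name="example token",  # hypothetical token name
        scopes=["read:all"],
        expires=None,
        ip_address="127.0.0.1",
    )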
async def test_modify(setup: SetupTest, mock_kubernetes: MockCoreV1Api, caplog: LogCaptureFixture) -> None: assert setup.config.kubernetes assert len(setup.config.kubernetes.service_secrets) >= 2 service_secret_one = setup.config.kubernetes.service_secrets[0] service_secret_two = setup.config.kubernetes.service_secrets[1] kubernetes_service = setup.factory.create_kubernetes_service() token_service = setup.factory.create_token_service() # Secret that shouldn't exist. secret = V1Secret( api_version="v1", data={"token": "bogus"}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name="foo", namespace="bar", ), type="Opaque", ) mock_kubernetes.create_namespaced_secret("bar", secret) # Valid secret but with a bogus token. secret = V1Secret( api_version="v1", data={"token": "bogus"}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name=service_secret_one.secret_name, namespace=service_secret_one.secret_namespace, ), type="Opaque", ) mock_kubernetes.create_namespaced_secret( service_secret_one.secret_namespace, secret) # Valid secret but with a nonexistent token. secret = V1Secret( api_version="v1", data={"token": token_as_base64(Token())}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name=service_secret_two.secret_name, namespace=service_secret_two.secret_namespace, ), type="Opaque", ) mock_kubernetes.create_namespaced_secret( service_secret_two.secret_namespace, secret) # Update the secrets. This should delete the secret that shouldn't exist # and update the two that should with fresh secrets. await kubernetes_service.update_service_secrets() await assert_kubernetes_secrets_match_config(setup, mock_kubernetes) # Check the logging. expected = [ { "event": "Deleted bar/foo secret", "level": "info", "logger": "gafaelfawr", }, { "event": "Created new service token", "key": ANY, "level": "info", "logger": "gafaelfawr", "token_scope": ",".join(service_secret_one.scopes), "token_username": service_secret_one.service, }, { "event": (f"Updated {service_secret_one.secret_namespace}" f"/{service_secret_one.secret_name} secret"), "level": "info", "logger": "gafaelfawr", "scopes": service_secret_one.scopes, "service": service_secret_one.service, }, { "event": "Created new service token", "key": ANY, "level": "info", "logger": "gafaelfawr", "token_scope": ",".join(service_secret_two.scopes), "token_username": service_secret_two.service, }, { "event": (f"Updated {service_secret_two.secret_namespace}" f"/{service_secret_two.secret_name} secret"), "level": "info", "logger": "gafaelfawr", "scopes": service_secret_two.scopes, "service": service_secret_two.service, }, ] assert [json.loads(r[2]) for r in caplog.record_tuples] == expected # Replace one secret with a valid token for the wrong service. token = await token_service.create_token_from_admin_request( AdminTokenRequest( username="******", token_type=TokenType.service, scopes=service_secret_one.scopes, ), TokenData.internal_token(), ip_address=None, ) secret = V1Secret( api_version="v1", data={"token": token_as_base64(token)}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name=service_secret_one.secret_name, namespace=service_secret_one.secret_namespace, ), type="Opaque", ) mock_kubernetes.delete_namespaced_secret( service_secret_one.secret_name, service_secret_one.secret_namespace) mock_kubernetes.create_namespaced_secret( service_secret_one.secret_namespace, secret) # Replace the other token with a valid token with the wrong scopes. 
token = await token_service.create_token_from_admin_request( AdminTokenRequest( username=service_secret_two.service, token_type=TokenType.service, scopes=["read:all"], ), TokenData.internal_token(), ip_address=None, ) secret = V1Secret( api_version="v1", data={"token": token_as_base64(token)}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name=service_secret_two.secret_name, namespace=service_secret_two.secret_namespace, ), type="Opaque", ) mock_kubernetes.delete_namespaced_secret( service_secret_two.secret_name, service_secret_two.secret_namespace) mock_kubernetes.create_namespaced_secret( service_secret_two.secret_namespace, secret) # Update the secrets. This should create new tokens for both. await kubernetes_service.update_service_secrets() await assert_kubernetes_secrets_match_config(setup, mock_kubernetes) # Finally, replace a secret with one with no token. secret = V1Secret( api_version="v1", data={}, metadata=V1ObjectMeta( labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"}, name=service_secret_one.secret_name, namespace=service_secret_one.secret_namespace, ), type="Opaque", ) mock_kubernetes.delete_namespaced_secret( service_secret_one.secret_name, service_secret_one.secret_namespace) mock_kubernetes.create_namespaced_secret( service_secret_one.secret_namespace, secret) # Update the secrets. This should create a new token for the first secret # but not for the second. await kubernetes_service.update_service_secrets() await assert_kubernetes_secrets_match_config(setup, mock_kubernetes, is_fresh=False)
async def test_modify( factory: ComponentFactory, mock_kubernetes: MockKubernetesApi, caplog: LogCaptureFixture, ) -> None: await create_test_service_tokens(mock_kubernetes) kubernetes_service = factory.create_kubernetes_service(MagicMock()) token_service = factory.create_token_service() # Valid secret but with a bogus token. secret = V1Secret( api_version="v1", kind="Secret", data={"token": "bogus"}, metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"), type="Opaque", ) await mock_kubernetes.create_namespaced_secret("mobu", secret) # Valid secret but with a nonexistent token. secret = V1Secret( api_version="v1", kind="Secret", data={"token": token_as_base64(Token())}, metadata=V1ObjectMeta( name="gafaelfawr", namespace="nublado2", labels={ "foo": "bar", "other": "blah", }, annotations={ "argocd.argoproj.io/compare-options": "IgnoreExtraneous", "argocd.argoproj.io/sync-options": "Prune=false", }, ), type="Opaque", ) await mock_kubernetes.create_namespaced_secret("nublado2", secret) # Update the secrets. This should replace both with fresh secrets. await kubernetes_service.update_service_tokens() await assert_kubernetes_secrets_are_correct(factory, mock_kubernetes) # Check the logging. assert parse_log(caplog) == [ { "event": "Created new service token", "key": ANY, "severity": "info", "token_scope": "admin:token", "token_username": "******", }, { "event": "Updated mobu/gafaelfawr-secret secret", "scopes": ["admin:token"], "severity": "info", "service": "mobu", }, { "event": "Created new service token", "key": ANY, "severity": "info", "token_scope": "", "token_username": "******", }, { "event": "Updated nublado2/gafaelfawr secret", "scopes": [], "severity": "info", "service": "nublado-hub", }, ] # Replace one secret with a valid token for the wrong service. async with factory.session.begin(): token = await token_service.create_token_from_admin_request( AdminTokenRequest( username="******", token_type=TokenType.service, scopes=["admin:token"], ), TokenData.internal_token(), ip_address=None, ) secret = V1Secret( api_version="v1", kind="Secret", data={"token": token_as_base64(token)}, metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"), type="Opaque", ) await mock_kubernetes.replace_namespaced_secret( "gafaelfawr-secret", "mobu", secret ) # Replace the other token with a valid token with the wrong scopes. async with factory.session.begin(): token = await token_service.create_token_from_admin_request( AdminTokenRequest( username="******", token_type=TokenType.service, scopes=["read:all"], ), TokenData.internal_token(), ip_address=None, ) secret = V1Secret( api_version="v1", kind="Secret", data={"token": token_as_base64(token)}, metadata=V1ObjectMeta(name="gafaelfawr", namespace="nublado2"), type="Opaque", ) await mock_kubernetes.replace_namespaced_secret( "gafaelfawr", "nublado2", secret ) # Update the secrets. This should create new tokens for both. await kubernetes_service.update_service_tokens() await assert_kubernetes_secrets_are_correct(factory, mock_kubernetes) nublado_secret = await mock_kubernetes.read_namespaced_secret( "gafaelfawr", "nublado2" ) # Finally, replace a secret with one with no token. secret = V1Secret( api_version="v1", data={}, metadata=V1ObjectMeta(name="gafaelfawr-secret", namespace="mobu"), type="Opaque", ) await mock_kubernetes.replace_namespaced_secret( "gafaelfawr-secret", "mobu", secret ) # Update the secrets. This should create a new token for the first secret # but not for the second. 
await kubernetes_service.update_service_tokens() await assert_kubernetes_secrets_are_correct( factory, mock_kubernetes, is_fresh=False ) assert nublado_secret == await mock_kubernetes.read_namespaced_secret( "gafaelfawr", "nublado2" )
async def test_errors_delete_patch(
    setup: SetupTest, mock_kubernetes: MockCoreV1Api
) -> None:
    assert setup.config.kubernetes
    assert len(setup.config.kubernetes.service_secrets) >= 2
    service_secret = setup.config.kubernetes.service_secrets[0]
    kubernetes_service = setup.factory.create_kubernetes_service()
    token_service = setup.factory.create_token_service()

    # Create a secret that should not exist.
    secret_one = V1Secret(
        api_version="v1",
        data={"token": "bar"},
        metadata=V1ObjectMeta(
            labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"},
            name="secret",
            namespace="elsewhere",
        ),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret("elsewhere", secret_one)

    # Create a secret that should exist but has an invalid token.
    secret_two = V1Secret(
        api_version="v1",
        data={"token": token_as_base64(Token())},
        metadata=V1ObjectMeta(
            labels={KUBERNETES_TOKEN_TYPE_LABEL: "service"},
            name=service_secret.secret_name,
            namespace=service_secret.secret_namespace,
        ),
        type="Opaque",
    )
    mock_kubernetes.create_namespaced_secret(
        service_secret.secret_namespace, secret_two
    )

    # Simulate some errors.  The callback function takes the operation and
    # the secret name.
    def error_callback(method: str, *args: Any) -> None:
        if method in ("delete_namespaced_secret", "patch_namespaced_secret"):
            raise ApiException(status=500, reason="Some error")

    MockCoreV1Api.error_callback = error_callback

    # Now run the synchronization.  secret_one and secret_two should be left
    # unchanged, but we should still create the second missing service
    # secret.
    await kubernetes_service.update_service_secrets()
    objects = mock_kubernetes.get_all_objects_for_test()
    assert secret_one in objects
    assert secret_two in objects
    service_secret = setup.config.kubernetes.service_secrets[1]
    secret = mock_kubernetes.read_namespaced_secret(
        service_secret.secret_name, service_secret.secret_namespace
    )
    assert secret.metadata.name == service_secret.secret_name
    assert secret.metadata.namespace == service_secret.secret_namespace
    assert await token_data_from_secret(token_service, secret)

    # Try again, but simulating an error in retrieving a secret.
    def error_callback_read(method: str, *args: Any) -> None:
        if method == "read_namespaced_secret":
            if args[1] != "elsewhere":
                raise ApiException(status=500, reason="Some error")

    MockCoreV1Api.error_callback = error_callback_read

    # Now run the synchronization.  secret_one should be deleted and
    # secret_two should be left unchanged.
    await kubernetes_service.update_service_secrets()
    objects = mock_kubernetes.get_all_objects_for_test()
    assert secret_one not in objects
    assert secret_two in objects
async def test_internal_token(config: Config, factory: ComponentFactory) -> None: user_info = TokenUserInfo( username="******", name="Example Person", uid=4137, groups=[TokenGroup(name="foo", id=1000)], ) token_service = factory.create_token_service() async with factory.session.begin(): session_token = await token_service.create_session_token( user_info, scopes=["read:all", "exec:admin", "user:token"], ip_address="127.0.0.1", ) data = await token_service.get_data(session_token) assert data async with factory.session.begin(): internal_token = await token_service.get_internal_token( data, service="some-service", scopes=["read:all"], ip_address="2001:db8::45", ) assert await token_service.get_user_info(internal_token) == user_info info = await token_service.get_token_info_unchecked(internal_token.key) assert info and info == TokenInfo( token=internal_token.key, username=user_info.username, token_type=TokenType.internal, service="some-service", scopes=["read:all"], created=info.created, last_used=None, expires=data.expires, parent=session_token.key, ) assert_is_now(info.created) # Cannot request a scope that the parent token doesn't have. with pytest.raises(InvalidScopesError): await token_service.get_internal_token( data, service="some-service", scopes=["read:some"], ip_address="127.0.0.1", ) # Creating another internal token from the same parent token with the same # parameters just returns the same internal token as before. new_internal_token = await token_service.get_internal_token( data, service="some-service", scopes=["read:all"], ip_address="127.0.0.1", ) assert internal_token == new_internal_token # Try again with the cache cleared to force a database lookup. await token_service._token_cache.clear() async with factory.session.begin(): new_internal_token = await token_service.get_internal_token( data, service="some-service", scopes=["read:all"], ip_address="127.0.0.1", ) assert internal_token == new_internal_token async with factory.session.begin(): history = await token_service.get_change_history( data, token=internal_token.key, username=data.username) assert history.entries == [ TokenChangeHistoryEntry( token=internal_token.key, username=data.username, token_type=TokenType.internal, parent=data.token.key, service="some-service", scopes=["read:all"], expires=data.expires, actor=data.username, action=TokenChange.create, ip_address="2001:db8::45", event_time=info.created, ) ] # It's possible we'll have a race condition where two workers both create # an internal token at the same time with the same parameters. Gafaelfawr # 3.0.2 had a regression where, once that had happened, it could not # retrieve the internal token because it didn't expect multiple results # from the query. Simulate this and make sure it's handled properly. The # easiest way to do this is to use the internals of the token service. 
second_internal_token = Token() created = current_datetime() expires = created + config.token_lifetime internal_token_data = TokenData( token=second_internal_token, username=data.username, token_type=TokenType.internal, scopes=["read:all"], created=created, expires=expires, name=data.name, email=data.email, uid=data.uid, groups=data.groups, ) await token_service._token_redis_store.store_data(internal_token_data) async with factory.session.begin(): await token_service._token_db_store.add(internal_token_data, service="some-service", parent=data.token.key) await token_service._token_cache.clear() async with factory.session.begin(): dup_internal_token = await token_service.get_internal_token( data, service="some-service", scopes=["read:all"], ip_address="127.0.0.1", ) assert dup_internal_token in (internal_token, second_internal_token) # A different scope or a different service results in a new token. async with factory.session.begin(): new_internal_token = await token_service.get_internal_token( data, service="some-service", scopes=["exec:admin"], ip_address="127.0.0.1", ) assert internal_token != new_internal_token async with factory.session.begin(): new_internal_token = await token_service.get_internal_token( data, service="another-service", scopes=["read:all"], ip_address="127.0.0.1", ) assert internal_token != new_internal_token # Check that the expiration time is capped by creating a user token that # doesn't expire and then creating a notebook token from it. Use this to # test a token with empty scopes. async with factory.session.begin(): user_token = await token_service.create_user_token( data, data.username, token_name="some token", scopes=["exec:admin"], expires=None, ip_address="127.0.0.1", ) data = await token_service.get_data(user_token) assert data async with factory.session.begin(): new_internal_token = await token_service.get_internal_token( data, service="some-service", scopes=[], ip_address="127.0.0.1") assert new_internal_token != internal_token info = await token_service.get_token_info_unchecked( new_internal_token.key) assert info and info.scopes == [] expires = info.created + timedelta(minutes=config.issuer.exp_minutes) assert info.expires == expires
def generate_token() -> None: """Generate an encoded token (such as the bootstrap token).""" print(str(Token()))