def test_expired_session_lifetime(app):
    """A session past its max lifetime must not expose the stored username."""
    # make the start time be max lifetime ago (so it's expired)
    lifetime = config.get("SESSION_LIFETIME")
    now = int(time.time())
    one_lifetime_ago = now - lifetime
    username = "******"
    test_session_jwt = create_session_token(
        app.keypairs[0],
        config.get("SESSION_TIMEOUT"),
        context=dict(session_started=one_lifetime_ago, username=username),
    )

    with app.test_client() as client:
        # manually set cookie for initial session
        client.set_cookie(
            "localhost",
            config["SESSION_COOKIE_NAME"],
            test_session_jwt,
            httponly=True,
            samesite="Lax",
        )
        with client.session_transaction() as session:
            # make sure we don't have the username when opening
            # the session, since it has expired
            assert session.get("username") != username
def test_map_iss_sub_pair_to_user_with_no_prior_DRS_access(db_session): """ Test RASOauth2Client.map_iss_sub_pair_to_user when the username passed in (e.g. eRA username) does not already exist in the Fence database and that user's <iss, sub> combination has not already been mapped through a prior DRS access request. """ # reset users table db_session.query(User).delete() db_session.commit() iss = "https://domain.tld" sub = "123_abc" username = "******" email = "*****@*****.**" oidc = config.get("OPENID_CONNECT", {}) ras_client = RASClient( oidc["ras"], HTTP_PROXY=config.get("HTTP_PROXY"), logger=logger, ) assert not query_for_user(db_session, username) iss_sub_pair_to_user_records = db_session.query(IssSubPairToUser).all() assert len(iss_sub_pair_to_user_records) == 0 username_to_log_in = ras_client.map_iss_sub_pair_to_user( iss, sub, username, email, db_session=db_session) assert username_to_log_in == username iss_sub_pair_to_user = db_session.query(IssSubPairToUser).get((iss, sub)) assert iss_sub_pair_to_user.user.username == username assert iss_sub_pair_to_user.user.email == email iss_sub_pair_to_user_records = db_session.query(IssSubPairToUser).all() assert len(iss_sub_pair_to_user_records) == 1
def get(self):
    """
    Start login for this IdP: validate the optional ``redirect`` arg, stash
    it in the session, and either perform a mocked login (dev/test configs)
    or redirect the browser to the IdP's authorization URL.
    """
    redirect_url = flask.request.args.get("redirect")
    validate_redirect(redirect_url)
    # FIX: the original assigned `flask.redirect_url = redirect_url` and then
    # read it back — mutating the shared `flask` module object is not
    # request-safe; a plain local variable has the same effect here.
    if redirect_url:
        flask.session["redirect"] = redirect_url
    mock_login = (
        config["OPENID_CONNECT"].get(self.idp_name.lower(), {}).get("mock", False)
    )
    # to support older cfgs, new cfgs should use the `mock` field in OPENID_CONNECT
    legacy_mock_login = config.get(
        "MOCK_{}_AUTH".format(self.idp_name.upper()), False
    )
    mock_default_user = (
        config["OPENID_CONNECT"]
        .get(self.idp_name.lower(), {})
        .get("mock_default_user", "*****@*****.**")
    )
    if mock_login or legacy_mock_login:
        # prefer dev cookie for mocked username, fallback on configuration
        username = flask.request.cookies.get(
            config.get("DEV_LOGIN_COOKIE_NAME"), mock_default_user
        )
        resp = _login(username, self.idp_name)
        prepare_login_log(self.idp_name)
        return resp
    return flask.redirect(self.client.get_auth_url())
def test_map_iss_sub_pair_to_user_with_prior_login_and_prior_DRS_access(
    db_session,
):
    """
    Test RASOauth2Client.map_iss_sub_pair_to_user when the username passed in
    (e.g. eRA username) already exists in the Fence database and that user's
    <iss, sub> combination has already been mapped to a separate user created
    during a prior DRS access request. In this case, map_iss_sub_pair_to_user
    returns the user created from prior DRS/data access, rendering the other
    user (e.g. the eRA one) inaccessible.
    """
    iss = "https://domain.tld"
    sub = "123_abc"
    username = "******"
    email = "*****@*****.**"
    oidc = config.get("OPENID_CONNECT", {})
    ras_client = RASClient(
        oidc["ras"],
        HTTP_PROXY=config.get("HTTP_PROXY"),
        logger=logger,
    )

    # reset users table
    db_session.query(User).delete()
    db_session.commit()

    # pre-existing login user with the eRA username
    user = User(username=username, email=email)
    db_session.add(user)
    db_session.commit()
    # prior DRS access created a separate user mapped to <iss, sub>
    get_or_create_gen3_user_from_iss_sub(iss, sub, db_session=db_session)

    username_to_log_in = ras_client.map_iss_sub_pair_to_user(
        iss, sub, username, email, db_session=db_session)

    # the DRS-created user ("<sub><iss-domain>") wins over the eRA user
    assert username_to_log_in == "123_abcdomain.tld"
    iss_sub_pair_to_user = db_session.query(IssSubPairToUser).get((iss, sub))
    assert iss_sub_pair_to_user.user.username == "123_abcdomain.tld"
def _check_s3_buckets(app):
    """
    Ensure that every bucket in S3_BUCKETS has a valid credential.

    Additionally, if a bucket has no region, log a warning and then try to
    fetch and cache the region.

    Raises:
        ValueError: when a bucket is missing ``cred`` or references a
            credential that is not defined in AWS_CREDENTIALS
    """
    buckets = config.get("S3_BUCKETS", {})
    aws_creds = config.get("AWS_CREDENTIALS", {})

    for bucket_name, bucket_details in buckets.items():
        cred = bucket_details.get("cred")
        region = bucket_details.get("region")
        if not cred:
            raise ValueError(
                "No cred for S3_BUCKET: {}. cred is required.".format(
                    bucket_name))
        # "*" marks a public bucket and needs no AWS_CREDENTIALS entry
        if cred not in aws_creds and cred != "*":
            raise ValueError(
                "Credential {} for S3_BUCKET {} is not defined in AWS_CREDENTIALS"
                .format(cred, bucket_name))
        if not region:
            # FIX: message typo "make lack" -> "may lack"
            logger.warning(
                "WARNING: no region for S3_BUCKET: {}. Providing the region will reduce"
                " response time and avoid a call to GetBucketLocation which you may lack the AWS ACLs for."
                .format(bucket_name))
            # FIX: guard before touching app.boto — previously an unset boto
            # client raised AttributeError instead of being handled
            if not getattr(app, "boto", None):
                logger.warning(
                    "WARNING: boto not setup for app; cannot fetch region "
                    "for S3_BUCKET: {}.".format(bucket_name))
                continue
            credential = S3IndexedFileLocation.get_credential_to_access_bucket(
                bucket_name,
                aws_creds,
                config.get("MAX_PRESIGNED_URL_TTL", 3600),
                app.boto,
            )
            region = app.boto.get_bucket_region(bucket_name, credential)
            config["S3_BUCKETS"][bucket_name]["region"] = region
def __init__(
    self,
    chunk_size=None,
    concurrency=None,
    thread_pool_size=None,
    buffer_size=None,
    logger=logger,
):
    """
    Configure the visa-update job sizing and set up the RAS OIDC client.

    Args:
        chunk_size: how many users to take from the queue per iteration
        concurrency: number of users concurrently in the visa update flow
        thread_pool_size: number of Docker container CPUs used for JWT
            verification
        buffer_size: maximum size of the work queue
        logger: logger instance to use
    """
    # fall back on defaults for any unset (falsy) sizing parameter
    self.chunk_size = chunk_size if chunk_size else 10
    self.concurrency = concurrency if concurrency else 5
    self.thread_pool_size = thread_pool_size if thread_pool_size else 3
    self.buffer_size = buffer_size if buffer_size else 10
    self.n_workers = self.concurrency + self.thread_pool_size
    self.logger = logger
    self.visa_types = config.get("USERSYNC", {}).get("visa_types", {})

    # Initialize visa clients:
    oidc = config.get("OPENID_CONNECT", {})
    if "ras" in oidc:
        self.ras_client = RASClient(
            oidc["ras"],
            HTTP_PROXY=config.get("HTTP_PROXY"),
            logger=logger,
        )
    else:
        self.logger.error("RAS client not configured")
        self.ras_client = None
def _create_access_token_cookie(app, session, response, user):
    """
    Mint a fresh signed access token for ``user`` and attach it to
    ``response`` as an httponly cookie; returns the mutated response.
    """
    keypair = app.keypairs[0]
    scopes = config["SESSION_ALLOWED_SCOPES"]

    now = int(time.time())
    expiration = now + config.get("ACCESS_TOKEN_EXPIRES_IN")

    # try to get from current session, if it's not there, we have to hit db
    linked_google_email = session.get("linked_google_email")
    if not linked_google_email:
        linked_google_email = get_linked_google_account_email(user.id)

    access_token = generate_signed_access_token(
        keypair.kid,
        keypair.private_key,
        user,
        config.get("ACCESS_TOKEN_EXPIRES_IN"),
        scopes,
        # align the JWT's own expiry with the cookie's expiry computed above
        forced_exp_time=expiration,
        linked_google_email=linked_google_email,
    ).token

    domain = app.session_interface.get_cookie_domain(app)
    response.set_cookie(
        config["ACCESS_TOKEN_COOKIE_NAME"],
        access_token,
        expires=expiration,
        httponly=True,
        domain=domain,
    )

    return response
def get_error_response(error):
    """Render the HTML error page for ``error``; return (body, status_code)."""
    details, status_code = get_error_details_and_status(error)
    support_email = config.get("SUPPORT_EMAIL_FOR_ERRORS")
    app_name = config.get("APP_NAME", "Gen3 Data Commons")
    message = details.get("message")

    error_id = _get_error_identifier()

    logger.error("{} HTTP error occured. ID: {}\nDetails: {}".format(
        status_code, error_id, str(details)))

    # don't include internal details in the public error message
    if status_code == 500:
        message = None

    status_code_message = http_responses.get(status_code,
                                             "Unknown error code.")

    body = render_template(
        "error.html",
        app_name=app_name,
        status_code=status_code,
        status_code_message=status_code_message,
        support_email=support_email,
        error_id=error_id,
        message=message,
    )
    return body, status_code
def wrapper(*args, **kwargs):
    """
    Authenticate the request via (in order): an existing session username,
    an OAuth Authorization header, or a shibboleth header; raise
    Unauthorized otherwise.
    """
    # already logged in via session -> re-login and call through
    if flask.session.get("username"):
        login_user(flask.session["username"], flask.session["provider"])
        return f(*args, **kwargs)

    eppn = None
    if config["LOGIN_OPTIONS"]:
        enable_shib = "shibboleth" in [
            option["idp"] for option in config["LOGIN_OPTIONS"]
        ]
    else:
        # fall back on "providers"
        enable_shib = "shibboleth" in config.get(
            "ENABLED_IDENTITY_PROVIDERS", {}).get("providers", {})

    if enable_shib and "SHIBBOLETH_HEADER" in config:
        eppn = flask.request.headers.get(config["SHIBBOLETH_HEADER"])

    # mocked auth short-circuits the shibboleth header lookup
    if config.get("MOCK_AUTH") is True:
        eppn = "test"
    # if there is authorization header for oauth
    if "Authorization" in flask.request.headers:
        has_oauth(scope=scope)
        return f(*args, **kwargs)
    # if there is shibboleth session, then create user session and
    # log user in
    elif eppn:
        # username is the last "!"-separated component of the eppn
        username = eppn.split("!")[-1]
        flask.session["username"] = username
        flask.session["provider"] = IdentityProvider.itrust
        login_user(username, flask.session["provider"])
        return f(*args, **kwargs)
    else:
        raise Unauthorized("Please login")
def test_store_refresh_token(db_session):
    """
    Test to check if store_refresh_token replaces the existing token
    with a new one in the db
    """
    user = add_test_ras_user(db_session)
    add_refresh_token(db_session, user)

    # a refresh token must already exist before the replacement
    existing = db_session.query(UpstreamRefreshToken).first()
    assert existing.refresh_token

    replacement_token = "newtoken1234567"
    replacement_expiry = 50000

    oidc_conf = config.get("OPENID_CONNECT", {})
    client = RASClient(
        oidc_conf["ras"],
        HTTP_PROXY=config.get("HTTP_PROXY"),
        logger=logger,
    )
    client.store_refresh_token(user,
                               replacement_token,
                               replacement_expiry,
                               db_session=db_session)

    # the stored row should now carry the new token and expiry
    stored = db_session.query(UpstreamRefreshToken).first()
    assert stored.refresh_token == replacement_token
    assert stored.expires == replacement_expiry
def test_map_iss_sub_pair_to_user_with_prior_DRS_access_and_arborist_error(
        db_session, mock_arborist_requests):
    """
    Test that RASOauth2Client.map_iss_sub_pair_to_user raises an internal
    error when Arborist fails to return a successful response.
    """
    # force the Arborist username-update call to fail with a 500
    mock_arborist_requests(
        {"arborist/user/123_abcdomain.tld": {
            "PATCH": (None, 500)
        }})

    # reset users table
    db_session.query(User).delete()
    db_session.commit()

    iss = "https://domain.tld"
    sub = "123_abc"
    username = "******"
    email = "*****@*****.**"
    oidc = config.get("OPENID_CONNECT", {})
    ras_client = RASClient(
        oidc["ras"],
        HTTP_PROXY=config.get("HTTP_PROXY"),
        logger=logger,
    )
    # simulate the user created by a prior DRS access request
    get_or_create_gen3_user_from_iss_sub(iss, sub, db_session=db_session)

    with pytest.raises(InternalError):
        ras_client.map_iss_sub_pair_to_user(iss, sub, username, email,
                                            db_session=db_session)
def test_update_visa_empty_passport_returned(
    mock_discovery,
    mock_get_token,
    mock_userinfo,
    config,
    db_session,
    rsa_private_key,
    rsa_public_key,
    kid,
):
    """
    Test to handle empty passport sent from RAS
    """
    mock_discovery.return_value = "https://ras/token_endpoint"
    new_token = "refresh12345abcdefg"
    token_response = {
        "access_token": "abcdef12345",
        "id_token": "id12345abcdef",
        "refresh_token": new_token,
    }
    mock_get_token.return_value = token_response
    # userinfo carries an empty passport JWT
    userinfo_response = {
        "sub": "abcd-asdj-sajpiasj12iojd-asnoin",
        "name": "",
        "preferred_username": "******",
        "UID": "",
        "UserID": "admin_user",
        "email": "",
        "passport_jwt_v11": "",
    }
    mock_userinfo.return_value = userinfo_response

    test_user = add_test_user(db_session)
    add_visa_manually(db_session, test_user, rsa_private_key, kid)
    add_refresh_token(db_session, test_user)

    # a visa must exist before the update
    visa_query = db_session.query(GA4GHVisaV1).filter_by(
        user=test_user).first()
    initial_visa = visa_query.ga4gh_visa
    assert initial_visa

    oidc = config.get("OPENID_CONNECT", {})
    ras_client = RASClient(
        oidc["ras"],
        HTTP_PROXY=config.get("HTTP_PROXY"),
        logger=logger,
    )
    pkey_cache = {
        "https://stsstg.nih.gov": {
            kid: rsa_public_key,
        }
    }
    ras_client.update_user_visas(test_user, pkey_cache=pkey_cache)

    # the stale visa should be gone; FIX: identity comparison with None
    # (`is None`) instead of `== None` (PEP 8 / E711)
    query_visa = db_session.query(GA4GHVisaV1).first()
    assert query_visa is None
def test_valid_session_valid_access_token_diff_user(app, test_user_a,
                                                    test_user_b, db_session,
                                                    monkeypatch):
    """
    Test the case where a valid access token is in a cookie, but it's for a
    different user than the one logged in. Make sure that a new access token
    is created for the logged in user and the response doesn't contain info
    for the non-logged in user.
    """
    monkeypatch.setitem(config, "MOCK_AUTH", False)
    user = db_session.query(User).filter_by(id=test_user_a["user_id"]).first()

    # session belongs to user A
    keypair = app.keypairs[0]
    test_session_jwt = create_session_token(
        keypair,
        config.get("SESSION_TIMEOUT"),
        context={
            "username": user.username,
            "provider": "google"
        },
    )
    # different user's access token
    other_user = db_session.query(User).filter_by(
        id=test_user_b["user_id"]).first()
    test_access_jwt = generate_signed_access_token(
        kid=keypair.kid,
        private_key=keypair.private_key,
        user=other_user,
        expires_in=config["ACCESS_TOKEN_EXPIRES_IN"],
        scopes=["openid", "user"],
        iss=config.get("BASE_URL"),
    ).token

    with app.test_client() as client:
        # manually set cookie for initial session
        client.set_cookie("localhost", config["SESSION_COOKIE_NAME"],
                          test_session_jwt)
        client.set_cookie("localhost", config["ACCESS_TOKEN_COOKIE_NAME"],
                          test_access_jwt)

        response = client.get("/user")
        cookies = _get_cookies_from_response(response)

        # either there's a new access_token in the response headers or the
        # previously set access token been changed
        access_token = (cookies.get("access_token", {}).get("access_token")
                        or test_access_jwt)
        valid_access_token = validate_jwt(access_token, purpose="access")

        assert response.status_code == 200
        # the response and the (possibly re-issued) token must both identify
        # user A, not user B
        response_user_id = response.json.get("user_id") or response.json.get(
            "sub")
        assert response_user_id == test_user_a["user_id"]
        user_id = valid_access_token.get("user_id") or valid_access_token.get(
            "sub")
        assert test_user_a["user_id"] == int(user_id)
def logout_endpoint():
    """Log the user out and redirect to ``next`` (defaults to BASE_URL)."""
    root = config.get("BASE_URL", "")
    request_next = flask.request.args.get("next", root)
    # absolute URLs are used as-is; relative paths are joined onto ROOT_URL
    if request_next.startswith("https") or request_next.startswith("http"):
        next_url = request_next
    else:
        next_url = build_redirect_url(config.get("ROOT_URL", ""), request_next)
    # NOTE(review): `next` is caller-controlled and is not validated against
    # an allow-list before redirecting — possible open redirect; confirm
    # whether domain validation should be applied here.
    return logout(next_url=next_url)
def _check_aws_creds_and_region(app):
    """
    Ensure that every bucket in S3_BUCKETS has a valid credential, and that
    the audit-log SQS credential (if configured) is defined in
    AWS_CREDENTIALS.

    Additionally, if a bucket has no region (and no custom endpoint URL),
    log a warning and then try to fetch and cache the region.

    Raises:
        ValueError: on a missing ``cred`` or a credential that is not
            defined in AWS_CREDENTIALS
    """
    buckets = config.get("S3_BUCKETS") or {}
    aws_creds = config.get("AWS_CREDENTIALS") or {}

    for bucket_name, bucket_details in buckets.items():
        cred = bucket_details.get("cred")
        region = bucket_details.get("region")
        if not cred:
            raise ValueError(
                "No cred for S3_BUCKET: {}. cred is required.".format(
                    bucket_name))

        # if this is a public bucket, Fence will not try to sign the URL
        # so it won't need to know the region.
        if cred == "*":
            continue

        if cred not in aws_creds:
            raise ValueError(
                "Credential {} for S3_BUCKET {} is not defined in AWS_CREDENTIALS"
                .format(cred, bucket_name))

        # only require region when we're not specifying an
        # s3-compatible endpoint URL (ex: no need for region when using cleversafe)
        if not region and not bucket_details.get("endpoint_url"):
            # FIX: message typo "make lack" -> "may lack"
            logger.warning(
                "WARNING: no region for S3_BUCKET: {}. Providing the region will reduce"
                " response time and avoid a call to GetBucketLocation which you may lack the AWS ACLs for."
                .format(bucket_name))
            # FIX: guard BEFORE touching app.boto (the original passed
            # app.boto to get_credential_to_access_bucket first and only
            # then checked it), and give getattr a default — without one a
            # missing attribute raises instead of triggering the guard
            if not getattr(app, "boto", None):
                logger.warning(
                    "WARNING: boto not setup for app, probably b/c "
                    "nothing in AWS_CREDENTIALS. Cannot attempt to get "
                    "bucket regions.")
                return
            credential = S3IndexedFileLocation.get_credential_to_access_bucket(
                bucket_name,
                aws_creds,
                config.get("MAX_PRESIGNED_URL_TTL", 3600),
                app.boto,
            )
            region = app.boto.get_bucket_region(bucket_name, credential)
            config["S3_BUCKETS"][bucket_name]["region"] = region

    # the audit-log SQS credential must also resolve to a known AWS credential
    cred = config["PUSH_AUDIT_LOGS_CONFIG"].get("aws_sqs_config",
                                                {}).get("aws_cred")
    if cred and cred not in aws_creds:
        raise ValueError(
            "Credential {} for PUSH_AUDIT_LOGS_CONFIG.aws_sqs_config.aws_cred is not defined in AWS_CREDENTIALS"
            .format(cred))
def test_update_visa_empty_visa_returned(
    mock_discovery,
    mock_get_token,
    mock_userinfo,
    config,
    db_session,
    rsa_private_key,
    kid,
    kid_2,
):
    """
    Test to check if the db is emptied if the ras userinfo sends back an
    empty visa
    """
    mock_discovery.return_value = "https://ras/token_endpoint"
    new_token = "refresh12345abcdefg"
    token_response = {
        "access_token": "abcdef12345",
        "id_token": "id12345abcdef",
        "refresh_token": new_token,
    }
    mock_get_token.return_value = token_response
    userinfo_response = {
        "sub": "abcd-asdj-sajpiasj12iojd-asnoin",
        "name": "",
        "preferred_username": "******",
        "UID": "",
        "UserID": "admin_user",
        "email": "",
    }
    # RAS sends back an empty passport list
    userinfo_response["ga4gh_passport_v1"] = []
    mock_userinfo.return_value = userinfo_response

    test_user = add_test_user(db_session)
    add_visa_manually(db_session, test_user, rsa_private_key, kid)
    add_refresh_token(db_session, test_user)

    # a visa must exist before the update
    visa_query = db_session.query(GA4GHVisaV1).filter_by(
        user=test_user).first()
    initial_visa = visa_query.ga4gh_visa
    assert initial_visa

    oidc = config.get("OPENID_CONNECT", {})
    ras_client = RASClient(
        oidc["ras"],
        HTTP_PROXY=config.get("HTTP_PROXY"),
        logger=logger,
    )
    ras_client.update_user_visas(test_user)

    # the stale visa should be gone; FIX: identity comparison with None
    # (`is None`) instead of `== None` (PEP 8 / E711)
    query_visa = db_session.query(GA4GHVisaV1).first()
    assert query_visa is None
def logout_endpoint():
    """Validate the ``next`` redirect target, then log the user out."""
    root = config.get("BASE_URL", "")
    request_next = flask.request.args.get("next", root)
    # absolute URLs (http/https) are taken verbatim; anything else is
    # joined onto ROOT_URL ("https..." also starts with "http", so one
    # prefix test covers both schemes)
    if request_next.startswith("http"):
        next_url = request_next
    else:
        next_url = build_redirect_url(config.get("ROOT_URL", ""), request_next)
    # only redirect to explicitly allowed hosts
    if domain(next_url) not in allowed_login_redirects():
        raise UserError("invalid logout redirect URL: {}".format(next_url))
    return logout(next_url=next_url)
def get(self):
    """
    Start ORCID login: stash the optional ``redirect`` in the session, then
    either perform a mocked login (MOCK_ORCID_AUTH) or redirect to ORCID.
    """
    # FIX: use a local variable instead of assigning to the shared `flask`
    # module object (`flask.redirect_url`), which is not request-safe
    redirect_url = flask.request.args.get("redirect")
    if redirect_url:
        flask.session["redirect"] = redirect_url
    if config.get("MOCK_ORCID_AUTH", False):
        # prefer dev cookie for the mocked ORCID id, fallback on a default
        orcid = flask.request.cookies.get(
            config.get("DEV_LOGIN_COOKIE_NAME"), "0000-0002-2601-8132")
        return _login(orcid)
    return flask.redirect(flask.current_app.orcid_client.get_auth_url())
def get(self):
    """
    Start Microsoft login: stash the optional ``redirect`` in the session,
    then either perform a mocked login (MOCK_MICROSOFT_AUTH) or redirect to
    Microsoft.
    """
    # FIX: use a local variable instead of assigning to the shared `flask`
    # module object (`flask.redirect_url`), which is not request-safe
    redirect_url = flask.request.args.get("redirect")
    if redirect_url:
        flask.session["redirect"] = redirect_url
    if config.get("MOCK_MICROSOFT_AUTH", False):
        # prefer dev cookie for the mocked email, fallback on a default
        email = flask.request.cookies.get(
            config.get("DEV_LOGIN_COOKIE_NAME"), "*****@*****.**"
        )
        return _login(email)
    return flask.redirect(flask.current_app.microsoft_client.get_auth_url())
def test_valid_session_valid_access_token(app, db_session, test_user_a,
                                          test_user_b, monkeypatch):
    """
    With a valid session cookie and a matching valid access token cookie,
    GET /user succeeds and identifies the logged-in user.
    """
    monkeypatch.setitem(config, "MOCK_AUTH", False)
    user = db_session.query(User).filter_by(id=test_user_a["user_id"]).first()

    keypair = app.keypairs[0]
    test_session_jwt = create_session_token(
        keypair,
        config.get("SESSION_TIMEOUT"),
        context={
            "username": user.username,
            "provider": "google"
        },
    )
    test_access_jwt = generate_signed_access_token(
        kid=keypair.kid,
        private_key=keypair.private_key,
        user=user,
        expires_in=config["ACCESS_TOKEN_EXPIRES_IN"],
        scopes=["openid", "user"],
        iss=config.get("BASE_URL"),
        forced_exp_time=None,
        client_id=None,
        linked_google_email=None,
    ).token

    # Test that once the session is started, we have access to
    # the username
    with app.test_client() as client:
        # manually set cookie for initial session
        client.set_cookie(
            "localhost",
            config["SESSION_COOKIE_NAME"],
            test_session_jwt,
            httponly=True,
            samesite="Lax",
        )
        client.set_cookie(
            "localhost",
            config["ACCESS_TOKEN_COOKIE_NAME"],
            test_access_jwt,
            httponly=True,
            samesite="Lax",
        )

        response = client.get("/user")
        # newer responses use "sub" where older ones used "user_id"
        user_id = response.json.get("user_id") or response.json.get("sub")
        assert response.status_code == 200
        assert user_id == user.id
def test_google_login_http_headers_are_less_than_4k_for_user_with_many_projects(
        app, client, monkeypatch, db_session):
    """
    Test that when the current user has access to a large number of projects,
    the http headers of the response from a GET to /login/google/login are
    less than 4k bytes in size.
    """
    monkeypatch.setitem(config, "MOCK_GOOGLE_AUTH", True)

    test_session_jwt = create_session_token(
        app.keypairs[0],
        config.get("SESSION_TIMEOUT"),
        context={
            "redirect":
            "https://localhost/user/oauth2/authorize?client_id=7f7kAS4MJraUuo77d7RWHr4mZ6bvGtuzup7hw46I&response_type=id_token&redirect_uri=https://webapp.example/fence&scope=openid+user+data+google_credentials&nonce=randomvalue"
        },
    )
    client.set_cookie(
        "localhost",
        config["SESSION_COOKIE_NAME"],
        test_session_jwt,
        httponly=True,
        samesite="Lax",
    )

    # grant the user full permissions on 20 projects so any cookie/header
    # that encodes project access would be large
    user_projects = {
        "test": {
            f"project{x}": {
                "read",
                "read-storage",
                "update",
                "upload",
                "create",
                "write-storage",
                "delete",
            }
            for x in range(20)
        }
    }
    user_info = {
        "test": {
            "tags": {},
        }
    }
    dbGaP = os.environ.get("dbGaP") or config.get("dbGaP")
    syncer = UserSyncer(dbGaP, config["DB"], {})
    syncer.sync_to_db_and_storage_backend(user_projects, user_info, db_session)

    resp = client.get("/login/google/login")
    # 4096 bytes is a common proxy/server header-size limit
    assert len(str(resp.headers)) < 4096
    assert resp.status_code == 302
def _setup_oidc_clients(app):
    """
    Instantiate an OIDC client on ``app`` for each provider configured in
    OPENID_CONNECT; multi-tenant fence additionally requires the "fence"
    idp to be enabled.
    """
    if config["LOGIN_OPTIONS"]:
        enabled_idp_ids = [option["idp"] for option in config["LOGIN_OPTIONS"]]
    else:
        # fall back on "providers"
        enabled_idp_ids = list(
            config.get("ENABLED_IDENTITY_PROVIDERS", {}).get("providers",
                                                             {}).keys())

    oidc = config.get("OPENID_CONNECT", {})

    # every provider below takes (settings, HTTP_PROXY=..., logger=...) and
    # is attached to the app as "<idp>_client"
    client_classes = {
        "google": GoogleClient,
        "orcid": ORCIDClient,
        "ras": RASClient,
        "synapse": SynapseClient,
        "microsoft": MicrosoftClient,
        "cognito": CognitoClient,
    }
    for idp, client_class in client_classes.items():
        if idp in oidc:
            setattr(
                app,
                "{}_client".format(idp),
                client_class(
                    oidc[idp],
                    HTTP_PROXY=config.get("HTTP_PROXY"),
                    logger=logger,
                ),
            )

    # Add OIDC client for multi-tenant fence if configured AND enabled.
    if "fence" in oidc and "fence" in enabled_idp_ids:
        app.fence_client = OAuthClient(**config["OPENID_CONNECT"]["fence"])
def remove_white_listed_service_account_ids(sa_ids):
    """
    Remove any service account emails that should be ignored when
    determining validity: the configured whitelist plus the monitoring
    service account.

    Args:
        sa_ids (List[str]): Service account emails

    Returns:
        List[str]: the same list with ignored emails removed (mutated in
            place and returned)
    """
    white_listed_sa_emails = config.get("WHITE_LISTED_SERVICE_ACCOUNT_EMAILS",
                                        [])

    logger.debug(
        "Removing whitelisted SAs {} from the SAs on the project.".format(
            white_listed_sa_emails))

    # the monitoring SA is always ignored on top of the configured whitelist
    ignored_emails = [get_monitoring_service_account_email()]
    ignored_emails.extend(white_listed_sa_emails)
    for ignored in ignored_emails:
        if ignored in sa_ids:
            sa_ids.remove(ignored)

    return sa_ids
def index_document(self):
    """
    Fetch this file's record from indexd.

    Returns:
        dict: the indexd document for ``self.file_id``

    Raises:
        UnavailableError: indexd could not be reached, or returned an
            unexpected status
        InternalError: the 200 response was malformed or missing "urls"
        NotFound: indexd has no record for this id (404)
    """
    indexd_server = config.get("INDEXD") or config["BASE_URL"] + "/index"
    url = indexd_server + "/index/"
    try:
        res = requests.get(url + self.file_id)
    except Exception as e:
        logger.error(
            "failed to reach indexd at {0}: {1}".format(url + self.file_id, e)
        )
        raise UnavailableError("Fail to reach id service to find data location")
    if res.status_code == 200:
        try:
            json_response = res.json()
            if "urls" not in json_response:
                logger.error(
                    "URLs are not included in response from "
                    "indexd: {}".format(url + self.file_id)
                )
                raise InternalError("URLs and metadata not found")
            # FIX: return the already-parsed body; the original called
            # res.json() a second time here
            return json_response
        # FIX: let the specific "URLs and metadata not found" error
        # propagate — previously the broad handler below swallowed it and
        # re-raised with a generic message
        except InternalError:
            raise
        except Exception as e:
            logger.error(
                "indexd response missing JSON field {}".format(url + self.file_id)
            )
            raise InternalError("internal error from indexd: {}".format(e))
    elif res.status_code == 404:
        logger.error(
            "Not Found. indexd could not find {}: {}".format(
                url + self.file_id, res.text
            )
        )
        raise NotFound("No indexed document found with id {}".format(self.file_id))
    else:
        raise UnavailableError(res.text)
def get_signed_url_for_file(action, file_id, file_name=None):
    """
    Build a (pre)signed URL for the indexed file ``file_id`` and return it
    as ``{"url": ...}``; request args control protocol, requester-pays
    project, expiration, and whether public objects are force-signed.
    """
    requested_protocol = flask.request.args.get("protocol", None)
    r_pays_project = flask.request.args.get("userProject", None)

    # default to signing the url even if it's a public object
    # this will work so long as we're provided a user token
    no_force_sign_param = flask.request.args.get("no_force_sign")
    force_signed_url = not (no_force_sign_param
                            and no_force_sign_param.lower() == "true")

    indexed_file = IndexedFile(file_id)

    # cap the requested expiration at the configured maximum
    default_expires_in = config.get("MAX_PRESIGNED_URL_TTL", 3600)
    expires_in = get_valid_expiration_from_request(
        max_limit=default_expires_in,
        default=default_expires_in,
    )

    signed_url = indexed_file.get_signed_url(
        requested_protocol,
        action,
        expires_in,
        force_signed_url=force_signed_url,
        r_pays_project=r_pays_project,
        file_name=file_name,
    )
    return {"url": signed_url}
def get(self): """ Complete the shibboleth login. """ shib_header = config.get("SHIBBOLETH_HEADER") if not shib_header: raise InternalError("Missing shibboleth header configuration") # eppn stands for eduPersonPrincipalName username = flask.request.headers.get("eppn") entityID = flask.session.get("entityID") # if eppn not available or logging in through NIH if not username or not entityID or entityID == "urn:mace:incommon:nih.gov": persistent_id = flask.request.headers.get(shib_header) username = persistent_id.split("!")[-1] if persistent_id else None if not username: # some inCommon providers are not returning eppn # or persistent_id. See PXP-4309 # print("shib_header", shib_header) # print("flask.request.headers", flask.request.headers) raise Unauthorized("Unable to retrieve username") idp = IdentityProvider.itrust if entityID: idp = entityID login_user(flask.request, username, idp) if flask.session.get("redirect"): return flask.redirect(flask.session.get("redirect")) return "logged in"
def test_valid_session_modified(app):
    """A live session exposes the username and accepts in-place mutation."""
    username = "******"
    modified_username = "******"

    session_jwt = create_session_token(app.keypairs[0],
                                       config.get("SESSION_TIMEOUT"),
                                       context={"username": username})

    # Once the session is started we can read the username, overwrite it,
    # and observe the new value on the next transaction.
    with app.test_client() as client:
        # manually set cookie for initial session
        client.set_cookie(
            "localhost",
            config["SESSION_COOKIE_NAME"],
            session_jwt,
            httponly=True,
            samesite="Lax",
        )

        with client.session_transaction() as session:
            assert session["username"] == username
            session["username"] = modified_username

        with client.session_transaction() as session:
            assert session["username"] == modified_username
def check_csrf():
    """
    Reject non-GET requests that carry a session user but no valid CSRF
    token; token-authenticated (Authorization header) and anonymous
    requests are exempt.
    """
    has_auth = "Authorization" in flask.request.headers
    no_username = not flask.session.get("username")
    # OAuth-token requests and sessions without a user don't need CSRF checks
    if has_auth or no_username:
        return
    if not config.get("ENABLE_CSRF_PROTECTION", True):
        return
    # only state-changing (non-GET) requests are protected
    if flask.request.method != "GET":
        try:
            csrf_header = flask.request.headers.get("x-csrf-token")
            csrf_formfield = flask.request.form.get("csrf_token")
            # validate_csrf checks the input (a signed token) against the raw
            # token stored in session["csrf_token"].
            # (session["csrf_token"] is managed by flask-wtf.)
            # To pass CSRF check, there must exist EITHER an x-csrf-token header
            # OR a csrf_token form field that matches the token in the session.
            assert (csrf_header and validate_csrf(csrf_header) is None
                    or csrf_formfield and validate_csrf(csrf_formfield) is None)
            referer = flask.request.headers.get("referer")
            assert referer, "Referer header missing"
            logger.debug("HTTP REFERER " + str(referer))
        except Exception as e:
            raise UserError(
                "CSRF verification failed: {}. Request aborted".format(e))
def test_expired_session_timeout(app):
    """A session idle past SESSION_TIMEOUT must not expose the username."""
    # make the start time be one timeout in the past (so the
    # session is expired)
    max_inactivity = config.get("SESSION_TIMEOUT")
    now = int(time.time())
    last_active = now - max_inactivity
    username = "******"

    # since we're timetraveling, we have to trick the JWT (since it relies
    # on the current time and this expiration to calculate
    # the actual expiration time). For testing, we'll "expire" it on creation
    jwt_expiration = 0
    test_session_jwt = create_session_token(
        app.keypairs[0],
        jwt_expiration,
        context=dict(session_started=last_active, username=username),
    )

    with app.test_client() as client:
        # manually set cookie for initial session
        client.set_cookie(
            "localhost",
            config["SESSION_COOKIE_NAME"],
            test_session_jwt,
            httponly=True,
            samesite="Lax",
        )
        with client.session_transaction() as session:
            # make sure we don't have the username when opening
            # the session, since it has expired
            assert session.get("username") != username
def generate_api_key(kid, private_key, user_id, expires_in, scopes, client_id):
    """
    Generate a JWT refresh token and output a UTF-8 string of the encoded JWT
    signed with the private key.

    Args:
        kid (str): key id of the keypair used to generate token
        private_key (str): RSA private key to sign and encode the JWT with
        user_id (user id): User id to generate token for
        expires_in (int): seconds until expiration
        scopes (List[str]): oauth scopes for user_id
        client_id (str): value for the ``azp`` claim (empty string if falsy)

    Return:
        str: encoded JWT refresh token signed with ``private_key``
    """
    headers = {"kid": kid}
    iat, exp = issued_and_expiration_times(expires_in)
    # unique token id, used for issuance logging and revocation
    jti = str(uuid.uuid4())
    sub = str(user_id)
    claims = {
        "pur": "api_key",
        "aud": scopes,
        "sub": sub,
        "iss": config.get("BASE_URL"),
        "iat": iat,
        "exp": exp,
        "jti": jti,
        "azp": client_id or "",
    }
    logger.info("issuing JWT API key with id [{}] to [{}]".format(jti, sub))
    logger.debug("issuing JWT API key\n" + json.dumps(claims, indent=4))
    token = jwt.encode(claims, private_key, headers=headers, algorithm="RS256")
    logger.debug(str(token))
    token = to_unicode(token, "UTF-8")
    return JWTResult(token=token, kid=kid, claims=claims)