def test_app_request_logs_5xx_responses_with_error_level_sampled(app_with_mocked_logger):
    """A sampled request should emit the early DEBUG "Received request" log even when the view 500s."""
    _set_request_class_is_sampled(app_with_mocked_logger, True)

    @app_with_mocked_logger.route('/')
    def error_route():
        # sleep briefly so the request has a measurable duration
        time.sleep(0.05)
        return 'error', 500

    app_with_mocked_logger.test_client().get('/')

    anticipated_extra = {
        "url": "http://localhost/",
        "method": "GET",
        "endpoint": "error_route",
        "process_": RestrictedAny(lambda value: isinstance(value, int)),
        "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
    }
    anticipated_call = mock.call(
        logging.DEBUG,
        'Received request {method} {url}',
        extra=anticipated_extra,
    )
    assert anticipated_call in app_with_mocked_logger.logger.log.call_args_list
def test_post_documents_for_framework(self):
    """Posting a communication and a clarification should save both to S3, flash messages, and redirect."""
    response = self.client.post(
        f"/admin/communications/{self.framework_slug}",
        data={
            'communication': (BytesIO(valid_pdf_bytes), 'test-comm.pdf'),
            'clarification': (BytesIO(valid_pdf_bytes), 'test-clar.pdf'),
        },
    )

    # check that we did actually mock-send two files
    expected_s3_calls = [mock.call('flop-slop-slap')]
    for category, filename in (
        ("communications", "test-comm.pdf"),
        ("clarifications", "test-clar.pdf"),
    ):
        expected_s3_calls.append(mock.call().save(
            f'{self.framework_slug}/communications/updates/{category}/{filename}',
            # bind filename as a default arg to avoid late-binding closure surprises
            RestrictedAny(lambda other, filename=filename: other.filename == filename),
            acl='bucket-owner-full-control',
            download_filename=filename,
        ))
    assert self.s3.mock_calls == expected_s3_calls

    self.assert_flashes('New communication was uploaded.')
    self.assert_flashes('New clarification was uploaded.')
    assert response.status_code == 302

    # should basically be redirecting back to ourselves
    expected_url = f"http://localhost/admin/communications/{self.framework_slug}"
    assert urljoin(expected_url, response.location) == expected_url
def test_app_request_logs_responses_with_info_level_sampled(app_with_mocked_logger):
    """A sampled request should emit the early DEBUG "Received request" log on arrival."""
    _set_request_class_is_sampled(app_with_mocked_logger, True)
    app_with_mocked_logger.test_client().get('/')

    anticipated_extra = {
        "url": "http://localhost/",
        "method": "GET",
        # no route is registered for '/', so no endpoint is resolved
        "endpoint": None,
        "process_": RestrictedAny(lambda value: isinstance(value, int)),
        "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
    }
    anticipated_call = mock.call(
        logging.DEBUG,
        'Received request {method} {url}',
        extra=anticipated_extra,
    )
    assert anticipated_call in app_with_mocked_logger.logger.log.call_args_list
def test_list_outcomes_paging(self):
    """Page 2 of 14 outcomes at page size 3 should have all four nav links and the right slice."""
    self.setup_outcomes()
    current_app.config["DM_API_OUTCOMES_PAGE_SIZE"] = 3

    res = self.client.get(f"/outcomes?page=2")
    assert res.status_code == 200
    response_data = json.loads(res.get_data())

    def _outcomes_link(page_qs):
        # matches any url whose path is /outcomes and whose query string is exactly `page_qs`
        return RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", page_qs, "",))

    assert response_data == {
        "links": {
            "next": _outcomes_link("page=3"),
            "self": _outcomes_link("page=2"),
            "prev": _outcomes_link("page=1"),
            "last": _outcomes_link("page=5"),
        },
        "meta": {
            "total": 14,
        },
        "outcomes": [
            Outcome.query.filter(Outcome.external_id == expected_id).one().serialize()
            for expected_id in (100000009, 100000012, 100000010,)
        ],
    }
def test_handle_s3_sns_unfetchable_cert(self, mock_handle_subscription_confirmation, base_body_dict):
    """If the SNS signing certificate cannot be fetched (404), the callback should 400 and log the failure."""
    cert_url = "https://nowhere.amazonaws.com/cert.pem"
    with self.mocked_app_logger_log() as mock_app_log, requests_mock.Mocker() as rmock:
        rmock.register_uri("GET", cert_url, status_code=404)

        client = self.get_authorized_client()
        res = client.post(
            "/callbacks/sns/s3/uploaded",
            data=json.dumps({
                **base_body_dict,
                "Signature": "should_be_irrelevant",
                "SigningCertURL": cert_url,
            }),
            content_type="application/json",
        )
        assert res.status_code == 400

        assert mock_app_log.call_args_list == [
            (
                (logging.INFO, AnyStringMatching(r"Failed to fetch certificate .*404"), ()),
                AnySupersetOf({"extra": AnySupersetOf({
                    "target_url": cert_url,
                })}),
            ),
            (
                (logging.WARNING, AnyStringMatching(r"SNS request body failed "), ()),
                AnySupersetOf({"extra": AnySupersetOf({
                    "validation_error": RestrictedAny(lambda x: isinstance(x, requests.exceptions.HTTPError)),
                })}),
            ),
            (mock.ANY, AnySupersetOf({"extra": AnySupersetOf({"status": 400})})),
        ]
        # the only outbound request should have been the certificate fetch
        assert rmock.request_history == [
            RestrictedAny(lambda r: r.url == cert_url)
        ]
        assert mock_handle_subscription_confirmation.called is False
def test_handle_s3_sns_bad_signature(self, mock_handle_subscription_confirmation, mock_validate, base_body_dict):
    """A body failing SNS signature validation should 400, log a warning, and never touch the network."""
    mock_validate.side_effect = validatesns.ValidationError

    with self.mocked_app_logger_log() as mock_app_log, requests_mock.Mocker() as rmock:
        client = self.get_authorized_client()
        res = client.post(
            "/callbacks/sns/s3/uploaded",
            data=json.dumps(base_body_dict),
            content_type="application/json",
        )
        assert res.status_code == 400

        assert mock_app_log.call_args_list == [
            (
                (logging.WARNING, AnyStringMatching(r".*failed signature validation"), ()),
                AnySupersetOf({"extra": AnySupersetOf({
                    "validation_error": RestrictedAny(lambda x: isinstance(x, validatesns.ValidationError)),
                })}),
            ),
            (mock.ANY, AnySupersetOf({"extra": AnySupersetOf({"status": 400})})),
        ]
        # no outbound requests should have been attempted at all
        assert not rmock.request_history
        assert mock_validate.call_args_list == [((base_body_dict,), AnySupersetOf({}))]
        assert mock_handle_subscription_confirmation.called is False
def test_any_odd(self):
    """RestrictedAny should compare equal exactly where its predicate holds (here: oddness)."""
    any_odd = RestrictedAny(lambda x: x % 2)
    # an odd value in the wildcard position compares equal...
    assert (4, 5, 6,) == (4, any_odd, 6,)
    assert (4, 9, 6,) == (4, any_odd, 6,)
    # ...but the predicate must hold at *every* position the wildcard occupies (6 is even)
    assert not ((4, 9, 6,) == (4, any_odd, any_odd,))
def test_app_request_logs_5xx_responses_with_error_level_not_sampled(app_with_mocked_logger, is_sampled):
    """An unsampled request that 500s should produce exactly one ERROR summary log and nothing else."""
    if is_sampled is not None:
        _set_request_class_is_sampled(app_with_mocked_logger, is_sampled)

    @app_with_mocked_logger.route('/')
    def error_route():
        # sleep so duration_real has a known lower bound we can assert against
        time.sleep(0.05)
        return 'error', 500

    app_with_mocked_logger.test_client().get('/')

    anticipated_extra = {
        "url": "http://localhost/",
        "status": 500,
        "method": "GET",
        "endpoint": "error_route",
        # the view slept 0.05s, so wall-clock duration must be at least that
        "duration_real": RestrictedAny(lambda value: isinstance(value, float) and 0.05 <= value),
        "duration_process": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
        "process_": RestrictedAny(lambda value: isinstance(value, int)),
        "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
    }
    assert app_with_mocked_logger.logger.log.call_args_list == [
        mock.call(logging.ERROR, '{method} {url} {status}', extra=anticipated_extra),
    ]
def test_list_outcomes_empty(self, query_string):
    """With no Outcomes in the database, the listing should 200 with an empty, well-formed page."""
    res = self.client.get(f"/outcomes?{query_string}")
    assert res.status_code == 200

    assert json.loads(res.get_data()) == {
        # the self link's path must be /outcomes and its query string must round-trip unchanged
        "links": {
            "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", query_string, "",)),
        },
        "meta": {
            "total": 0,
        },
        "outcomes": [],
    }
def test_app_request_logs_responses_with_info_level_not_sampled(app_with_mocked_logger, is_sampled):
    """An unsampled request should produce exactly one INFO summary log and no DEBUG pre-log."""
    if is_sampled is not None:
        _set_request_class_is_sampled(app_with_mocked_logger, is_sampled)

    app_with_mocked_logger.test_client().get('/')

    anticipated_extra = {
        "url": "http://localhost/",
        # no route registered for '/', so the request 404s with no resolved endpoint
        "status": 404,
        "method": "GET",
        "endpoint": None,
        "duration_real": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
        "duration_process": RestrictedAny(lambda value: isinstance(value, float) and 0 < value),
        "process_": RestrictedAny(lambda value: isinstance(value, int)),
        "thread_": RestrictedAny(lambda value: isinstance(value, (str, bytes,))),
    }
    anticipated_call = mock.call(logging.INFO, '{method} {url} {status}', extra=anticipated_extra)
    assert app_with_mocked_logger.logger.log.call_args_list == [anticipated_call]
def clamd_instream_func(*args, **kwargs):
    """Stand-in for clamd's "instream": asserts it was called correctly and can mutate tags mid-scan."""
    # if clamd_instream_retval *is* None, we'd be expecting "instream" not to be called at all
    assert clamd_instream_retval is not None
    # the sole positional argument should be a file-like whose content is the body we uploaded
    expected_args = (RestrictedAny(lambda x: x.read() == b"123412341234"),)
    assert args == expected_args
    assert not kwargs

    if concurrent_new_tagset is not None:
        # a very literal "side effect" here - simulating a modification to the object's tags while scanning...
        s3_client.put_object_tagging(
            Bucket=bucket.name,
            Key=objver.Object().key,
            VersionId=objver.id,
            Tagging={"TagSet": tagset_from_dict(concurrent_new_tagset)},
        )

    return clamd_instream_retval
class TestHandleSubscriptionConfirmation(BaseCallbackApplicationTest):
    """Scenarios for _handle_subscription_confirmation: success, topic mismatch, parse failure, request errors."""

    @pytest.mark.parametrize(
        (
            "body_dict",
            "supported_topic_name",
            "rmock_response_kwargs",
            "expected_output",
            "expect_request_made",
            "expected_log_calls",
        ),
        tuple(chain.from_iterable((
            (
                (
                    # body_dict
                    {
                        "SubscribeURL": "https://amz.net",
                        "TopicArn": "arn:aws:sns:howth-west-2:123456789012:Drawers",
                    },
                    # supported_topic_name
                    "Drawers",
                    # rmock_response_kwargs
                    {
                        "text": """<ConfirmSubscriptionResponse xmlns="http://brazenfaced.things">
                            <ConfirmSubscriptionResult><SubscriptionArn>
                            arn:aws:sns:howth-west-2:123456789012:Drawers:bicycles
                            </SubscriptionArn></ConfirmSubscriptionResult>
                            <ResponseMetadata><RequestId>
                            always-skeezing
                            </RequestId></ResponseMetadata>
                            </ConfirmSubscriptionResponse>"""
                    },
                    # expected_output
                    (RestrictedAny(lambda x: isinstance(x, Response)), 200),
                    # expect_request_made
                    True,
                    # expected_log_calls
                    (
                        (
                            (logging.INFO, AnyStringMatching("Made GET "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "target_url": "https://amz.net",
                                    "topic_arn": "arn:aws:sns:howth-west-2:123456789012:Drawers",
                                }),
                            }),
                        ),
                        (
                            (logging.INFO, AnyStringMatching("SubscriptionConfirmation succeeded "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "subscription_arn": "arn:aws:sns:howth-west-2:123456789012:Drawers:bicycles",
                                    "confirmation_request_id": "always-skeezing",
                                }),
                            }),
                        ),
                    ),
                ),
                (
                    # body_dict
                    {"SubscribeURL": "https://butt.bridge", "TopicArn": "premature:decay"},
                    # supported_topic_name
                    "BrilliantFellows",
                    # rmock_response_kwargs
                    {"text": "dummy"},
                    # expected_output
                    BadRequest,
                    # expect_request_made
                    False,
                    # expected_log_calls
                    (
                        (
                            (logging.WARNING, AnyStringMatching(r".*unrecognized topic.*"), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "topic_name": "decay",
                                    "topic_arn": "premature:decay",
                                }),
                            }),
                        ),
                    ),
                ),
                (
                    # body_dict
                    {"SubscribeURL": "https://sister.island.co.uk", "TopicArn": "100M:Played:Out:CoalSeams"},
                    # supported_topic_name
                    "CoalSeams",
                    # rmock_response_kwargs
                    {
                        "text": """<ConfirmSubscriptionResponse xmlns="http://neighbours-across.the/channel">
                            <ConfirmSubscriptionResult><SubscriptionArn>
                            unrelated:naming:scheme
                            </SubscriptionArn></ConfirmSubscriptionResult>
                            </ConfirmSubscriptionResponse>""",
                        "status_code": 210,
                    },
                    # expected_output
                    (RestrictedAny(lambda x: isinstance(x, Response)), 200),
                    # expect_request_made
                    True,
                    # expected_log_calls
                    (
                        (
                            (logging.INFO, AnyStringMatching("Made GET "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "target_url": "https://sister.island.co.uk",
                                    "topic_arn": "100M:Played:Out:CoalSeams",
                                }),
                            }),
                        ),
                        (
                            (logging.INFO, AnyStringMatching("SubscriptionConfirmation succeeded "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "subscription_arn": "unrelated:naming:scheme",
                                }),
                            }),
                        ),
                    ),
                ),
                (
                    # body_dict
                    {"SubscribeURL": "https://disorder.ly.hous.es", "TopicArn": "nice:mixup"},
                    # supported_topic_name
                    "mixup",
                    # rmock_response_kwargs
                    {"text": "<Second drink<does it<"},
                    # expected_output
                    BadRequest,
                    # expect_request_made
                    True,
                    # expected_log_calls
                    (
                        (
                            (logging.INFO, AnyStringMatching("Made GET "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "target_url": "https://disorder.ly.hous.es",
                                    "topic_arn": "nice:mixup",
                                }),
                            }),
                        ),
                        (
                            (logging.WARNING, RestrictedAny(lambda x: isinstance(x, ParseError)), ()),
                            AnySupersetOf({}),
                        ),
                        (
                            (logging.WARNING, "SubscriptionConfirmation response parsing failed", ()),
                            AnySupersetOf({}),
                        ),
                    ),
                ),
            ),
            (
                # the following case should basically have the same results for all request errors, so testing many
                # of these cases
                (
                    # body_dict
                    {"SubscribeURL": "https://wildgoose.chase/this", "TopicArn": "first-class:third:ticket"},
                    # supported_topic_name
                    "ticket",
                    # rmock_response_kwargs
                    rmock_response_kwargs,
                    # expected_output
                    BadRequest,
                    # expect_request_made
                    True,
                    # expected_log_calls
                    (
                        (
                            (logging.INFO, AnyStringMatching("Failed to make GET "), ()),
                            AnySupersetOf({
                                "extra": AnySupersetOf({
                                    "target_url": "https://wildgoose.chase/this",
                                    "topic_arn": "first-class:third:ticket",
                                }),
                            }),
                        ),
                    ),
                ) for rmock_response_kwargs in (
                    {"status_code": 404, "text": "where?"},
                    {"status_code": 500, "text": "what?"},
                    {"status_code": 403, "text": "who?"},
                    {"status_code": 400, "text": "no"},
                    {"exc": requests.exceptions.ConnectTimeout},
                    {"exc": requests.exceptions.SSLError},
                )
            ),
        ))),
    )
    def test_handle_subscription_confirmation(
        self,
        body_dict,
        supported_topic_name,
        rmock_response_kwargs,
        expected_output,
        expect_request_made,
        expected_log_calls,
    ):
        """
        :param body_dict:             request body_dict to pass directly to _handle_subscription_confirmation
        :param supported_topic_name:  supported_topic_name to pass directly to _handle_subscription_confirmation
        :param rmock_response_kwargs: kwargs to pass to register_uri specifying how requests_mock should respond
                                      to a request to the "subscribe" url
        :param expected_output:       either an Exception subclass to expect to be raised or the expected return
                                      value of _handle_subscription_confirmation
        :param expect_request_made:   whether to expect a request to have been made to the "subscribe" url
        :param expected_log_calls:    sequence of expected mock.call()s to have been made to app logger
        """
        with self.mocked_app_logger_log() as mock_app_log:
            with self.app.test_request_context():
                with requests_mock.Mocker() as rmock:
                    rmock.register_uri("GET", body_dict["SubscribeURL"], **rmock_response_kwargs)

                    expect_exception = (
                        isinstance(expected_output, type) and issubclass(expected_output, Exception)
                    )
                    with pytest.raises(expected_output) if expect_exception else null_context_manager():
                        out = _handle_subscription_confirmation(body_dict, supported_topic_name)

                    if not expect_exception:
                        assert out == expected_output
                    assert rmock.called is expect_request_made
                    assert mock_app_log.call_args_list == list(expected_log_calls)
def test_child_span_id_provided(
    self,
    mock_logger,
    onwards_request_headers,
    response_status,
    base_client,
    rmock,
    app,
):
    """The child span id chosen for logging must be the one sent in the onwards request header."""
    rmock.get("http://baseurl/_status", json={"status": "foobar"}, status_code=response_status)
    app.config["DM_SPAN_ID_HEADERS"] = ("X-Brian-Tweedy", "major-tweedy",)

    with app.test_request_context('/'):
        request.get_onwards_request_headers = mock.Mock(return_value=onwards_request_headers)
        try:
            base_client.get_status()
        except HTTPError:
            # it is tested elsewhere whether this exception is raised in the *right* circumstances or not
            pass

        assert rmock.called

        # some of our scenarios test multiple header names differing only by capitalization - we care that the same
        # span id that was chosen for the log message is the same one that was sent in the onwards request header,
        # so we need two distinct values which are acceptable
        either_span_id = RestrictedAny(lambda value: value in ("Amiens Street", "great northern"))

        expected_debug_call = mock.call(
            logging.DEBUG,
            "API request {method} {url}",
            extra={
                "method": "GET",
                "url": "http://baseurl/_status",
                "childSpanId": either_span_id,
            },
        )
        if response_status == 200:
            expected_outcome_call = mock.call(
                logging.INFO,
                "API {api_method} request on {api_url} finished in {api_time}",
                extra={
                    "api_method": "GET",
                    "api_url": "http://baseurl/_status",
                    "api_status": response_status,
                    "api_time": mock.ANY,
                    "childSpanId": either_span_id,
                },
            )
        else:
            expected_outcome_call = mock.call(
                logging.WARNING,
                "API {api_method} request on {api_url} failed with {api_status} '{api_error}'",
                extra={
                    "api_method": "GET",
                    "api_url": "http://baseurl/_status",
                    "api_status": response_status,
                    "api_time": mock.ANY,
                    "api_error": mock.ANY,
                    "childSpanId": either_span_id,
                },
            )
        assert mock_logger.log.call_args_list == [expected_debug_call, expected_outcome_call]

        # both logging calls should have had the *same* childSpanId value
        logged_span_id = mock_logger.log.call_args_list[0][1]["extra"]["childSpanId"]
        assert logged_span_id == mock_logger.log.call_args_list[1][1]["extra"]["childSpanId"]

        # that value should be the same one that was sent in the onwards request header
        assert (
            rmock.last_request.headers.get("x-brian-tweedy")
            or rmock.last_request.headers.get("major-tweedy")
        ) == logged_span_id
def _logging_call_site(app): app.logger.info( "Charming day {ankles}, {underleaves}, {parent_span_id}", extra={"underleaves": "ample"}, ) _set_request_class_is_sampled(app_with_stream_logger, is_sampled) with app.test_request_context('/'): test_extra_log_context = { "ankles": "thinsocked", "span_id": "beesWaxed", } request.get_extra_log_context = mock.Mock(spec_set=[]) request.get_extra_log_context.return_value = test_extra_log_context # we perform the log call in a specifically designated & named function to exercise & be able to reliably # assert the behaviour of the introspective aspects of our logging _logging_call_site(app) all_lines = tuple( json.loads(line) for line in stream.read().splitlines()) assert all_lines == ( AnySupersetOf({ "message": "Missing keys when formatting log message: ('parent_span_id',)", # it may seem foolish and a bit fragile to include the following parameters in our assertions but # properly testing introspective behaviour is always going to get a bit weird and meta in that regard. 
"app_funcName": "_logging_call_site", "app_pathname": os.path.normcase(_logging_call_site.__code__.co_filename), "app_lineno": RestrictedAny(lambda value: isinstance(value, int)), "lineno": RestrictedAny(lambda value: isinstance(value, int)), "pathname": AnyStringMatching(r".+\/dmutils\/logging\.pyc?"), }), AnySupersetOf({ "time": mock.ANY, "application": mock.ANY, "message": "Charming day thinsocked, ample, {parent_span_id: missing key}", "underleaves": "ample", "ankles": "thinsocked", "spanId": "beesWaxed", "parentSpanId": None, "requestId": None, "debugFlag": None, "isSampled": None, # as above, these parameters are included in the assertion to ensure our modifications haven't affected # the regular logging introspection features "lineno": RestrictedAny(lambda value: isinstance(value, int)), "pathname": os.path.normcase(_logging_call_site.__code__.co_filename), **({ "app_funcName": "_logging_call_site", "app_pathname": os.path.normcase(_logging_call_site.__code__.co_filename), "app_lineno": RestrictedAny(lambda value: isinstance(value, int)), } if is_sampled else {}), }), ) if not is_sampled: # AppStackLocationFilter shouldn't have included information in this low-urgency message for unexpected_key in ( "app_funcName", "app_lineno", "app_pathname", ): assert unexpected_key not in all_lines[1] for unexpected_key in ( "span_id", "trace_id", "traceId", "request_id", "debug_flag", "is_sampled", "parent_span_id", # also ensuring "missing key" functionality didn't add a value for this ): assert not any(unexpected_key in line for line in all_lines)
class TestListOutcomes(BaseApplicationTest, FixtureMixin):
    """Tests for the GET /outcomes listing view: empty listing, `completed` filtering, ordering and paging."""

    @pytest.mark.parametrize("query_string", ("", "completed=true", "completed=false",))
    def test_list_outcomes_empty(self, query_string):
        """With no Outcomes in the database, every filter should return an empty, well-formed page."""
        res = self.client.get(
            f"/outcomes?{query_string}",
        )
        assert res.status_code == 200
        assert json.loads(res.get_data()) == {
            # the self link's path must be /outcomes and its query string must round-trip unchanged
            "links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", query_string, "",))},
            "meta": {
                "total": 0,
            },
            "outcomes": [],
        }

    def setup_outcomes(self):
        """Populate the database with a mixture of direct-award- and brief-based Outcomes (14 in total)."""
        user_id = self.setup_dummy_user(id=1, role='buyer')
        self.setup_dummy_suppliers(5)
        # create required objects for direct award-based Outcome
        self.setup_dummy_services(5, model=ArchivedService)

        #
        # create required objects for direct-award-related Outcomes
        #
        projects = tuple(
            DirectAwardProject(
                name=name,
                users=[User.query.get(user_id)],
            ) for name in ("alumno optimo", "palmam ferenti", "vere dignum et iustum est",)
        )
        db.session.add_all(projects)

        searches = tuple(
            DirectAwardSearch(
                project=project,
                created_by=user_id,
                active=True,
                search_url="http://nothing.nowhere",
            ) for project in projects
        )
        db.session.add_all(searches)

        # attach three consecutive archived services to each search
        for i, search in enumerate(searches):
            for archived_service in db.session.query(ArchivedService).filter(
                ArchivedService.service_id.in_([str(j) for j in range(2000000000 + i, 2000000000 + i + 3)])
            ).all():
                search.archived_services.append(archived_service)

        #
        # create required objects for Brief-related Outcomes
        #
        briefs = tuple(
            self.setup_dummy_brief(status="closed", user_id=user_id, data={}) for _ in range(4)
        )
        db.session.add_all(briefs)

        # increasingly many BriefResponses for each Brief in `briefs`: brief i gets i responses.
        # BUGFIX: the clauses were previously reversed (`for j in range(i) for i, brief in enumerate(briefs)`),
        # which only avoided a NameError by picking up the `i` leaked from the enumerate(searches) loop above
        # and gave every brief exactly two responses; this ordering matches the stated intent and still
        # provides every brief_responses[n] the outcomes below reference.
        brief_responses = tuple(
            BriefResponse(
                brief=brief,
                supplier_id=j,
                submitted_at=datetime.datetime.utcnow(),
                data={},
            )
            for i, brief in enumerate(briefs)
            for j in range(i)
        )
        db.session.add_all(brief_responses)

        outcomes = (
            Outcome(
                external_id=100000000,
                direct_award_project=searches[0].project,
                direct_award_search=searches[0],
                direct_award_archived_service=searches[0].archived_services[0],
                result="awarded",
                start_date=datetime.date(2006, 2, 2),
                end_date=datetime.date(2006, 3, 3),
                awarding_organisation_name="Omnium Gatherum",
                award_value=81396,
                completed_at=datetime.datetime(2005, 10, 10, 10, 10, 10),
            ),
            Outcome(
                external_id=100000005,
                direct_award_project=searches[0].project,
                direct_award_search=searches[0],
                direct_award_archived_service=searches[0].archived_services[1],
                result="awarded",
                start_date=datetime.date(2006, 4, 4),
                awarding_organisation_name="Nisus Formativus",
            ),
            Outcome(
                external_id=100000002,
                direct_award_project=searches[0].project,
                result="none-suitable",
            ),
            Outcome(
                external_id=100000011,
                direct_award_project=searches[1].project,
                result="none-suitable",
            ),
            Outcome(
                external_id=100000004,
                direct_award_project=searches[2].project,
                result="cancelled",
                completed_at=datetime.datetime(2005, 10, 9, 9, 9, 9),
            ),
            Outcome(
                external_id=100000001,
                brief=briefs[0],
                result="cancelled",
                completed_at=datetime.datetime(2005, 5, 5, 5, 5, 5),
            ),
            Outcome(
                external_id=100000008,
                brief=briefs[0],
                result="cancelled",
            ),
            Outcome(
                external_id=100000012,
                brief=briefs[1],
                brief_response=briefs[1].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2010, 1, 1),
                end_date=datetime.date(2011, 8, 8),
                awarding_organisation_name="Viridum Toxicum",
                award_value=81396,
                completed_at=datetime.datetime(2005, 11, 11, 11, 11, 11),
            ),
            Outcome(
                external_id=100000006,
                brief=briefs[1],
                brief_response=briefs[1].brief_responses[0],
                result="awarded",
                award_value=83300,
            ),
            Outcome(
                external_id=100000009,
                brief=briefs[2],
                result="none-suitable",
                completed_at=datetime.datetime(2005, 10, 10, 10, 11, 11),
            ),
            Outcome(
                external_id=100000013,
                brief=briefs[2],
                brief_response=briefs[2].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2011, 1, 1),
                end_date=datetime.date(2011, 1, 2),
                award_value=3072,
            ),
            Outcome(
                external_id=100000003,
                brief=briefs[2],
                brief_response=briefs[2].brief_responses[1],
                result="awarded",
            ),
            Outcome(
                external_id=100000007,
                brief=briefs[3],
                result="none-suitable",
            ),
            Outcome(
                external_id=100000010,
                brief=briefs[3],
                brief_response=briefs[3].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2006, 1, 1),
                end_date=datetime.date(2008, 1, 1),
                awarding_organisation_name="Lacus Mortis",
                award_value=4386035,
                completed_at=datetime.datetime(2006, 1, 1, 1, 1, 1),
            ),
        )
        db.session.add_all(outcomes)
        db.session.commit()

    @pytest.mark.parametrize("query_string,expected_response_data", (
        ("", {
            "links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "", "",))},
            "meta": {
                "total": 14,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000001}),
                AnySupersetOf({"id": 100000004}),
                AnySupersetOf({"id": 100000000}),
                AnySupersetOf({"id": 100000009}),
                AnySupersetOf({"id": 100000012}),
                AnySupersetOf({"id": 100000010}),
                AnySupersetOf({"id": 100000002}),
                AnySupersetOf({"id": 100000003}),
                AnySupersetOf({"id": 100000005}),
                AnySupersetOf({"id": 100000006}),
                AnySupersetOf({"id": 100000007}),
                AnySupersetOf({"id": 100000008}),
                AnySupersetOf({"id": 100000011}),
                AnySupersetOf({"id": 100000013}),
            ],
        }),
        ("completed=true", {
            "links": {
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=true", "",)),
            },
            "meta": {
                "total": 6,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000001}),
                AnySupersetOf({"id": 100000004}),
                AnySupersetOf({"id": 100000000}),
                AnySupersetOf({"id": 100000009}),
                AnySupersetOf({"id": 100000012}),
                AnySupersetOf({"id": 100000010}),
            ],
        }),
        ("completed=false", {
            "links": {
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=false", "",)),
            },
            "meta": {
                "total": 8,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000002}),
                AnySupersetOf({"id": 100000003}),
                AnySupersetOf({"id": 100000005}),
                AnySupersetOf({"id": 100000006}),
                AnySupersetOf({"id": 100000007}),
                AnySupersetOf({"id": 100000008}),
                AnySupersetOf({"id": 100000011}),
                AnySupersetOf({"id": 100000013}),
            ],
        }),
    ))
    def test_list_outcomes(self, query_string, expected_response_data):
        """Listing should honour the `completed` filter and the expected ordering, and serialize correctly."""
        self.setup_outcomes()
        res = self.client.get(
            f"/outcomes?{query_string}",
        )
        assert res.status_code == 200
        response_data = json.loads(res.get_data())
        # allow parameter to check its coarse constraints
        assert response_data == expected_response_data
        # now we'll follow that up by checking that outcomes with a particular id match their correct serialization
        assert response_data["outcomes"] == [
            Outcome.query.filter(Outcome.external_id == outcome_dict["id"]).one().serialize()
            for outcome_dict in response_data["outcomes"]
        ]

    def test_list_outcomes_paging(self):
        """Page 2 of 14 outcomes at page size 3 should expose prev/next/last links and the right slice."""
        self.setup_outcomes()
        current_app.config["DM_API_OUTCOMES_PAGE_SIZE"] = 3
        res = self.client.get(
            "/outcomes?page=2",  # no placeholders, so no f-prefix needed
        )
        assert res.status_code == 200
        response_data = json.loads(res.get_data())
        assert response_data == {
            "links": {
                "next": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=3", "",)),
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=2", "",)),
                "prev": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=1", "",)),
                "last": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=5", "",)),
            },
            "meta": {
                "total": 14,
            },
            "outcomes": [
                Outcome.query.filter(Outcome.external_id == expected_id).one().serialize()
                for expected_id in (100000009, 100000012, 100000010,)
            ],
        }
log_level, condition, raise_exception, inject_context, True, # mock_time [ # expected_call_args_list mock.call( log_level, _messages_expected[message][0].get( 'error' if raise_exception else 'success'), exc_info=bool(raise_exception), extra={ "duration_real": RestrictedAny( # a double-closure here to get around python's weird behaviour when capturing iterated # variables (in this case `sleep_time`) (lambda st: lambda val: st * 0.95 < val < st * 1.5)(sleep_time)), "duration_process": mock.ANY, **(inject_context or {}), }, ) ] if _expect_log( is_sampled, sleep_time, message, log_level, condition, raise_exception, inject_context,