    def test_handle_s3_sns_bad_signature(
            self, mock_handle_subscription_confirmation, mock_validate,
            base_body_dict):
        mock_validate.side_effect = validatesns.ValidationError
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(base_body_dict),
                    content_type="application/json",
                )

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.WARNING,
                         AnyStringMatching(r".*failed signature validation"),
                         ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "validation_error":
                                RestrictedAny(lambda x: isinstance(
                                    x, validatesns.ValidationError)),
                            })
                        }),
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((base_body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    def test_handle_s3_sns_weird_request_type(
            self, mock_handle_subscription_confirmation, mock_validate):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                weird_body_dict = {
                    "MessageId": "1234321",
                    "Type": "EuropeanConflagration",
                }
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(weird_body_dict),
                    content_type="application/json",
                )

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.WARNING,
                         AnyStringMatching(r"Unrecognized request type "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "request_type":
                                "EuropeanConflagration",
                            })
                        }),
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((weird_body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    def test_handle_s3_sns_subscription_confirmation(
        self,
        mock_handle_subscription_confirmation,
        mock_validate,
        content_type,
    ):
        # arbitrary sentinel Response
        mock_handle_subscription_confirmation.return_value = Response("Grain supplies"), 200

        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(self._basic_subscription_confirmation_body),
                    content_type=content_type,
                )

                assert res.status_code == 200
                assert res.get_data() == b"Grain supplies"
                assert mock_app_log.call_args_list == [
                    (mock.ANY, AnySupersetOf({"extra": AnySupersetOf({"status": 200})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [
                    ((self._basic_subscription_confirmation_body,), AnySupersetOf({}))
                ]
                assert mock_handle_subscription_confirmation.call_args_list == [
                    ((self._basic_subscription_confirmation_body, "s3_file_upload_notification_development",), {})
                ]

    def test_get_certificate(self, call_params_seq, single_flask_request):
        """
        :param call_params_seq: a sequence of tuples, each representing a call to make to _get_certificate and
            including information about the context it should be called in and the call's expected result. Each
            tuple consists of the parameters:
            (
                url,                    # the url to set up with register_uri() and then pass as _get_certificate's
                                        # argument
                rmock_response_kwargs,  # kwargs to pass to register_uri specifying how requests_mock should respond to
                                        # such a request
                expected_output,        # either an Exception class to expect _get_certificate's invocation to raise
                                        # or the value to compare _get_certificate's return value with
                expect_request_made,    # whether an actual (intercepted) http request should have been made to the url
            )
        :param single_flask_request: whether all calls of ``call_params_seq`` should be performed in a single flask
            test request; otherwise a separate flask test request is used per call
        """
        with self.mocked_app_logger_log() as mock_app_log:
            with (self.app.test_request_context()
                  if single_flask_request else null_context_manager()):
                for url, rmock_response_kwargs, expected_output, expect_request_made in call_params_seq:
                    mock_app_log.reset_mock()
                    with (null_context_manager() if single_flask_request
                          else self.app.test_request_context()):
                        with requests_mock.Mocker() as rmock:
                            rmock.register_uri("GET", url,
                                               **rmock_response_kwargs)

                            expect_exception = isinstance(expected_output, type) and \
                                issubclass(expected_output, Exception)
                            with (pytest.raises(expected_output)
                                  if expect_exception else null_context_manager()):
                                out = _get_certificate(url)

                            if not expect_exception:
                                assert out == expected_output

                            assert rmock.called is expect_request_made
                            # TODO more complete logging testing
                            assert mock_app_log.call_args_list == (
                                [] if not expect_request_made else [
                                    (
                                        (logging.INFO,
                                         AnyStringMatching(
                                             "Failed" if expect_exception else
                                             "Fetched"), ()),
                                        AnySupersetOf({
                                            "extra":
                                            AnySupersetOf({"target_url": url})
                                        }),
                                    ),
                                ])


def test_response_headers_error_response(
        spanid_random_mock,
        traceid_random_mock,
        app,
        extra_config,
        extra_req_headers,
        expected_trace_id,  # unused here
        expect_trace_random_call,
        expected_span_id,  # unused here
        expected_parent_span_id,  # unused here
        expected_is_sampled,  # unused here
        expected_debug_flag,  # unused here
        expected_onwards_req_headers,  # unused here
        expected_resp_headers,
        expected_dm_request_id_header_final_value,  # unused here
):
    app.config.update(extra_config)
    request_id_init_app(app)
    client = app.test_client()

    traceid_random_mock.randrange.side_effect = assert_args_and_return(
        _GENERATED_TRACE_VALUE, 1 << 128)

    @app.route('/')
    def error_route():
        raise Exception()

    with app.app_context():
        response = client.get('/', headers=extra_req_headers)
        assert response.status_code == 500
        assert dict(response.headers) == AnySupersetOf(expected_resp_headers)

    assert traceid_random_mock.randrange.called is expect_trace_random_call
    assert spanid_random_mock.randrange.called is False


def test_response_headers_regular_response(
        spanid_random_mock,
        traceid_random_mock,
        app,
        extra_config,
        extra_req_headers,
        expected_trace_id,  # unused here
        expect_trace_random_call,
        expected_span_id,  # unused here
        expected_parent_span_id,  # unused here
        expected_is_sampled,  # unused here
        expected_debug_flag,  # unused here
        expected_onwards_req_headers,  # unused here
        expected_resp_headers,
        expected_dm_request_id_header_final_value,  # unused here
):
    app.config.update(extra_config)
    request_id_init_app(app)
    client = app.test_client()

    traceid_random_mock.randrange.side_effect = assert_args_and_return(
        _GENERATED_TRACE_VALUE, 1 << 128)

    with app.app_context():
        response = client.get('/', headers=extra_req_headers)
        # note using these mechanisms we're not able to test for the *absence* of a header
        assert dict(response.headers) == AnySupersetOf(expected_resp_headers)

    assert traceid_random_mock.randrange.called is expect_trace_random_call
    assert spanid_random_mock.randrange.called is False


def test_request_header_zero_padded(
    spanid_random_mock,
    traceid_random_mock,
    app,
):
    request_id_init_app(app)

    traceid_random_mock.randrange.side_effect = assert_args_and_return(
        0xbeef, 1 << 128)
    spanid_random_mock.randrange.side_effect = assert_args_and_return(
        0xa, 1 << 64)

    with app.test_request_context():
        assert request.request_id == request.trace_id == "0000000000000000000000000000beef"
        assert request.span_id is None
        assert request.get_onwards_request_headers() == {
            "DM-Request-ID": "0000000000000000000000000000beef",
            "X-B3-TraceId": "0000000000000000000000000000beef",
            "X-B3-SpanId": "000000000000000a",
        }
        assert request.get_extra_log_context() == AnySupersetOf({
            'parent_span_id':
            None,
            'span_id':
            None,
            'trace_id':
            '0000000000000000000000000000beef',
        })

    assert traceid_random_mock.randrange.called is True
    assert spanid_random_mock.randrange.called is True

    def test_superset(self):
        assert [{
            "a": 123,
            "b": 456,
            "less": "predictabananas"
        }, 789] == [AnySupersetOf({
            "a": 123,
            "b": 456
        }), 789]

    def test_handle_s3_sns_test_event(
        self,
        mock_handle_subscription_confirmation,
        mock_validate,
    ):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                body_dict = {
                    "MessageId": "1234321",
                    "Type": "Notification",
                    "Message": '{"Event":"s3:TestEvent","nut":"shell"}',
                }
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(body_dict),
                    content_type="application/json",
                    headers={"X-Amz-Sns-Subscription-Arn": "kcirtaP"},
                )

                assert res.status_code == 200
                assert mock_app_log.call_args_list == [
                    (
                        (logging.INFO,
                         AnyStringMatching(r"Processing message "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "message_id": "1234321",
                                "subscription_arn": "kcirtaP",
                            })
                        }),
                    ), (
                        (logging.INFO, "Received S3 test event", ()),
                        {},
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 200})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    def test_handle_s3_sns_unfetchable_cert(self, mock_handle_subscription_confirmation, base_body_dict):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                rmock.register_uri("GET", "https://nowhere.amazonaws.com/cert.pem", status_code=404)

                client = self.get_authorized_client()
                res = client.post("/callbacks/sns/s3/uploaded", data=json.dumps({
                    **base_body_dict,
                    "Signature": "should_be_irrelevant",
                    "SigningCertURL": "https://nowhere.amazonaws.com/cert.pem",
                }), content_type="application/json")

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.INFO, AnyStringMatching(r"Failed to fetch certificate .*404"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "target_url": "https://nowhere.amazonaws.com/cert.pem",
                        })}),
                    ),
                    (
                        (logging.WARNING, AnyStringMatching(r"SNS request body failed "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "validation_error": RestrictedAny(lambda x: isinstance(x, requests.exceptions.HTTPError)),
                        })}),
                    ),
                    (mock.ANY, AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert rmock.request_history == [
                    RestrictedAny(lambda r: r.url == "https://nowhere.amazonaws.com/cert.pem")
                ]
                assert mock_handle_subscription_confirmation.called is False


def test_logged_duration_real_logger(
    app_with_stream_logger,
    # value to set the is_sampled flag to on the mock request
    is_sampled,
    # how long to sleep in seconds
    sleep_time,
    # message, log_level, condition - values to pass as arguments of logged_duration verbatim
    message,
    log_level,
    condition,
    # exception (class) to raise inside logged_duration, None to raise no exception
    raise_exception,
    # dict to update log_context with inside logged_duration, None to perform no update
    inject_context,
    # whether to use mocked time primitives to speed up the test
    mock_time,
    # sequence of log dicts to expect to be output as json logs
    expected_logs,
):
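    # Purely illustrative (hypothetical values, not taken from the real parametrisation): one
    # parameter combination might be is_sampled=True, sleep_time=0.5,
    # message="slept for {duration_real}s", log_level=logging.INFO, condition=True,
    # raise_exception=None, inject_context=None, mock_time=True, with expected_logs a tuple
    # such as (AnySupersetOf({"levelname": "INFO"}),) matching the emitted JSON log lines.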
    app, stream = app_with_stream_logger

    with app.test_request_context("/", headers={}):
        request.is_sampled = is_sampled

        with (mock_time_functions()
              if mock_time else actual_time_functions()) as (
                  _sleep,
                  _perf_counter,
                  _process_time,
              ):
            with (null_context_manager() if raise_exception is None else
                  pytest.raises(raise_exception)):
                with timing.logged_duration(
                        logger=app.logger.getChild("foobar"),
                        message=message,
                        log_level=log_level,
                        condition=condition,
                ) as log_context:
                    _sleep(sleep_time)
                    if inject_context is not None:
                        log_context.update(inject_context)
                    if raise_exception is not None:
                        raise raise_exception("Boo")

    all_lines = tuple(
        json.loads(line) for line in stream.getvalue().splitlines())

    assert all_lines == (AnySupersetOf({
        "levelname": "INFO",
        "message": "Logging configured"
    }), ) + expected_logs

    def test_update_outcome_scenarios(
        self,
        other_oc_brief_based,
        initial_brief_based,
        other_oc_data,
        initial_data,
        put_values,
        expected_status_code,
        expected_response_data,
    ):
        """
        A number of arguments control the background context this test is run in and the parameters PUT to the
        endpoint. Not all of the combinations make sense together and a caller should not expect the test to pass
        with a nonsensical combination of arguments.

        :param other_oc_brief_based:   whether the "other", existing Outcome should be Brief-based as opposed to
                                       Direct Award-based
        :param initial_brief_based:    whether the target Outcome should initially be set up to be Brief-based as
                                       opposed to Direct Award-based
        :param other_oc_data:          field values to set up the "other" Outcome with, ``None`` for no "other"
                                       Outcome to be created
        :param initial_data:           field values to initially set up the target Outcome with
        :param put_values:             payload dictionary to be PUT to the target endpoint (without the
                                       ``outcome`` wrapper)
        :param expected_status_code:
        :param expected_response_data:
        """
        user_id = self.setup_dummy_user(id=1, role='buyer')
        self.setup_dummy_suppliers(3)

        project = None
        search = None
        chosen_archived_service = other_archived_service = None
        if not (other_oc_brief_based and initial_brief_based):
            # create required objects for direct award-based Outcome
            self.setup_dummy_services(3, model=ArchivedService)

            project = DirectAwardProject(
                name="Lambay Island",
                users=[User.query.get(user_id)],
            )
            db.session.add(project)

            search = DirectAwardSearch(
                project=project,
                created_by=user_id,
                active=True,
                search_url="http://nothing.nowhere",
            )
            db.session.add(search)

            for archived_service in db.session.query(ArchivedService).filter(
                    ArchivedService.service_id.in_((
                        "2000000000",
                        "2000000001",
                    ))).all():
                search.archived_services.append(archived_service)

            chosen_archived_service, other_archived_service = search.archived_services[:2]
        # else skip creating these to save time

        brief = None
        chosen_brief_response = other_brief_response = None
        if other_oc_brief_based or initial_brief_based:
            # create required objects for brief-based Outcome
            brief = self.setup_dummy_brief(status="closed",
                                           user_id=user_id,
                                           data={})
            chosen_brief_response, other_brief_response = (BriefResponse(
                brief=brief,
                supplier_id=i,
                submitted_at=datetime.datetime.utcnow(),
                data={},
            ) for i in (
                1,
                2,
            ))
            db.session.add(chosen_brief_response)
            db.session.add(other_brief_response)
        # else skip creating these to save time

        other_outcome = None
        if other_oc_data is not None:
            # create "other" Outcome for our target one to potentially clash with
            other_outcome = Outcome(
                **({
                    "brief": brief
                } if other_oc_brief_based else {
                    "direct_award_project": project
                }),
                **(
                    {
                        "result":
                        other_oc_data.get("result", "awarded"),
                        **({
                            "brief_response": other_brief_response,
                        } if other_oc_brief_based else {
                               "direct_award_search":
                               search,
                               "direct_award_archived_service":
                               other_archived_service,
                           }),
                    } if other_oc_data.get(
                        "result", "awarded") == "awarded" else {
                            "result": other_oc_data["result"]
                        }),
                **{
                    k: v
                    for k, v in (other_oc_data or {}).items() if k not in (
                        "completed_at",
                        "result",
                    )
                },
            )
            if "completed_at" in other_oc_data:
                other_outcome.completed_at = other_oc_data["completed_at"]
            db.session.add(other_outcome)

        # create our target Outcome in its initial state
        outcome = Outcome(
            **({
                "brief": brief
            } if initial_brief_based else {
                "direct_award_project": project
            }),
            **(
                {
                    "result":
                    initial_data.get("result", "awarded"),
                    **({
                        "brief_response": chosen_brief_response,
                    } if initial_brief_based else {
                           "direct_award_search":
                           search,
                           "direct_award_archived_service":
                           chosen_archived_service,
                       }),
                } if initial_data.get("result", "awarded") == "awarded" else {
                    "result": initial_data["result"]
                }),
            **{
                k: v
                for k, v in (initial_data or {}).items() if k not in (
                    "completed_at",
                    "result",
                )
            },
        )
        if "completed_at" in initial_data:
            # can only set completed_at after other fields have been set
            outcome.completed_at = initial_data["completed_at"]
        db.session.add(outcome)

        # must assign ids before we can lock project
        db.session.flush()
        if project:
            project.locked_at = datetime.datetime.now()

        # make a concrete note of these so we don't have to fetch them back from the database after the request,
        # potentially getting back values which have been inadvertently changed
        outcome_external_id = outcome.external_id
        project_external_id = project and project.external_id
        search_id = search and search.id
        chosen_archived_service_id = chosen_archived_service and chosen_archived_service.id
        chosen_archived_service_service_id = chosen_archived_service and chosen_archived_service.service_id
        brief_id = brief and brief.id
        chosen_brief_response_id = chosen_brief_response and chosen_brief_response.id
        audit_event_count = AuditEvent.query.count()
        db.session.commit()

        # keep a nice concrete representation for later comparison
        outcome_serialization_before = outcome.serialize()

        res = self.client.put(
            f"/outcomes/{outcome.external_id}",
            data=json.dumps({
                "updated_by": "*****@*****.**",
                "outcome": put_values,
            }),
            content_type="application/json",
        )
        assert res.status_code == expected_status_code
        response_data = json.loads(res.get_data())
        assert response_data == expected_response_data

        # allow these to be re-used in this session, "refreshed"
        db.session.add_all(x for x in (
            outcome,
            project,
            search,
            chosen_archived_service,
        ) if x is not None)
        db.session.expire_all()

        if res.status_code != 200:
            # assert change wasn't made, audit event wasn't added
            assert outcome.serialize() == outcome_serialization_before
            assert AuditEvent.query.count() == audit_event_count
        else:
            # an additional check of values we should be able to figure out the "correct" values for
            assert response_data == {
                "outcome": {
                    "id": outcome_external_id,
                    "result": initial_data.get("result", "awarded"),
                    "completed": (
                        bool(outcome_serialization_before.get("completedAt"))
                        or put_values.get("completed") is True
                    ),
                    "completedAt": (
                        outcome_serialization_before.get("completedAt")
                        or (
                            AnyStringMatching(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z")
                            if put_values.get("completed") else None
                        )
                    ),
                    **({
                        "resultOfFurtherCompetition": {
                            "brief": {
                                "id": brief_id,
                            },
                            **({
                                "briefResponse": {
                                    "id": chosen_brief_response_id,
                                },
                            } if initial_data.get("result", "awarded") == "awarded" else {}),
                        },
                    } if initial_brief_based else {
                        "resultOfDirectAward": {
                            "project": {
                                "id": project_external_id,
                            },
                            **({
                                "search": {
                                    "id": search_id,
                                },
                                "archivedService": {
                                    "id": chosen_archived_service_id,
                                    "service": {
                                        "id": chosen_archived_service_service_id,
                                    },
                                },
                            } if initial_data.get("result", "awarded") == "awarded" else {})
                        },
                    }),
                    **({"award": AnySupersetOf({})} if initial_data.get("result", "awarded") == "awarded" else {}),
                }
            }

            # check changes actually got committed
            assert response_data == {
                "outcome": outcome.serialize(),
            }

            # check audit event(s) were saved
            expect_complete_audit_event = put_values.get(
                "completed") is True and not initial_data.get("completed_at")
            n_expected_new_audit_events = 2 if expect_complete_audit_event else 1

            assert AuditEvent.query.count() == audit_event_count + n_expected_new_audit_events
            # grab those most recent (1 or) 2 audit events from the db, re-sorting them to be in a predictable order -
            # we don't care whether the complete_outcome or update_outcome comes out of the db first
            audit_events = sorted(
                db.session.query(AuditEvent).order_by(
                    desc(AuditEvent.created_at),
                    desc(AuditEvent.id),
                )[:n_expected_new_audit_events],
                key=lambda ae: ae.type,
                reverse=True,
            )

            assert audit_events[0].type == "update_outcome"
            assert audit_events[0].object is outcome
            assert audit_events[0].acknowledged is False
            assert audit_events[0].acknowledged_at is None
            assert not audit_events[0].acknowledged_by
            assert audit_events[0].user == "*****@*****.**"
            assert audit_events[0].data == put_values

            if expect_complete_audit_event:
                assert audit_events[1].type == "complete_outcome"
                assert (audit_events[1].created_at == audit_events[0].created_at
                        == outcome.completed_at)
                assert audit_events[1].object is outcome
                assert audit_events[1].acknowledged is False
                assert audit_events[1].acknowledged_at is None
                assert not audit_events[1].acknowledged_by
                assert audit_events[1].user == "*****@*****.**"
                assert audit_events[1].data == {}


class TestUpdateOutcome(BaseApplicationTest, FixtureMixin):
    _test_update_outcome_base_scenarios = (
        (
            # other_oc_data
            {},
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": True,
                    "award": {
                        "awardingOrganisationName": "Omphalos",
                        "awardValue": "314.15",
                        "startDate": "2020-10-10",
                        "endDate": "2020-11-20",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {
                "completed_at": None,
                "result": "none-suitable",
            },
            # put_values
            {
                "completed": True,
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome": AnySupersetOf({
                    "completed": True,
                }),
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": None,
                "result": "cancelled",
            },
            # initial_data
            {
                "completed_at": None,
                "result": "none-suitable",
            },
            # put_values
            {
                "completed": True,
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome": AnySupersetOf({
                    "completed": True,
                }),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": True,
                    "award": {
                        "awardingOrganisationName": "Omphalos",
                        "awardValue": "314.15",
                        "startDate": "2020-10-10",
                        "endDate": "2020-11-20",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # initial_data
            {},
            # put_values
            {
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": False,
                    "award": {
                        "awardingOrganisationName": "Omphalos",
                        "awardValue": "314.15",
                        "startDate": "2020-10-10",
                        "endDate": "2020-11-20",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            {
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": True,
                    "award": {
                        "awardingOrganisationName": "Omphalos",
                        "awardValue": "314.15",
                        "startDate": "2020-10-10",
                        "endDate": "2020-11-20",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Incubator",
                    "awardValue": "271271.2",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "award": {
                        "awardingOrganisationName": "Incubator",
                        "awardValue": "271271.20",
                        "startDate": "2020-10-10",
                        "endDate": "2020-11-20",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {
                "completed_at": None,
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # put_values
            {
                "completed": False,
                "award": {
                    "startDate": None,
                },
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": False,
                    "award": {
                        "awardingOrganisationName": "Lambay Freehold",
                        "awardValue": "54321.00",
                        "startDate": None,
                        "endDate": "2011-12-12",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {
                "completed_at": None,
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 5432.1,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # put_values
            {
                "completed": True,
            },
            # expected_status_code
            200,
            # expected_response_data
            {
                "outcome":
                AnySupersetOf({
                    "completed": True,
                    "award": {
                        "awardingOrganisationName": "Lambay Freehold",
                        "awardValue": "5432.10",
                        "startDate": "2010-12-12",
                        "endDate": "2011-12-12",
                    },
                }),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {
                "completed_at": None,
                "result": "none-suitable",
            },
            # put_values
            {
                "award": {
                    "awardingOrganisationName": "Talbot de Malahide",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                ("awarding_organisation_name cannot be set for Outcomes with result='none-suitable'."
                 " Attempted to set value 'Talbot de Malahide'"),
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": None,
                "result": "cancelled",
            },
            # initial_data
            {
                "completed_at": datetime.datetime(2010, 3, 3, 3, 3, 3),
                "result": "none-suitable",
            },
            # put_values
            {
                "completed": False,
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error": "Can't un-complete outcome",
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": datetime.datetime(2010, 3, 3, 3, 3, 3),
                "result": "cancelled",
            },
            # initial_data
            {
                "completed_at": None,
                "result": "none-suitable",
            },
            # put_values
            {
                "completed": True,
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(
                    r".+ \d+ already has a complete outcome: \d+"),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {},
            # put_values
            {
                "result": "cancelled",
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(r".*json was not a valid format.*",
                                  flags=re.I),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {},
            # put_values
            {
                "resultOfDirectAward": {
                    "projectId": 321,
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(r".*json was not a valid format.*",
                                  flags=re.I),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                # note "award" section flattened here
                "awardingOrganisationName": "Omphalos",
                "awardValue": "00314.1500",
                "startDate": "2020-10-10",
                "endDate": "2020-11-20",
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(r".*json was not a valid format.*",
                                  flags=re.I),
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(
                    r".+ \d+ already has a complete outcome: \d+"),
            },
        ),
        (
            # other_oc_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "result": "cancelled",
            },
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Omphalos",
                    "awardValue": "00314.1500",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(
                    r".+ \d+ already has a complete outcome: \d+", ),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # put_values
            {
                "completed": False,
                "award": {
                    "awardingOrganisationName": "Incubator",
                    "awardValue": "271271.2",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error": "Can't un-complete outcome",
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {
                "completed_at": datetime.datetime(2010, 10, 10, 10, 10, 10),
                "awarding_organisation_name": "Lambay Freehold",
                "award_value": 54321,
                "start_date": datetime.date(2010, 12, 12),
                "end_date": datetime.date(2011, 12, 12),
            },
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "",
                    "awardValue": "271271.2",
                    "startDate": "2020-10-10",
                    "endDate": "2020-11-20",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error":
                AnyStringMatching(r".*\bawarding_organisation_name\b.*"),
            },
        ),
        (
            # other_oc_data
            {},
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Billy Pitt",
                    "awardValue": "314.15",
                    "startDate": "2020-10-10",
                    "endDate": "2020-20-20",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error": AnyStringMatching(r".*\bendDate\b.*"),
            },
        ),
        (
            # other_oc_data
            None,
            # initial_data
            {},
            # put_values
            {
                "completed": True,
                "award": {
                    "awardingOrganisationName": "Martello",
                    "awardValue": "Twelve quid",
                    "startDate": "2020-01-01",
                    "endDate": "2021-12-21",
                },
            },
            # expected_status_code
            400,
            # expected_response_data
            {
                "error": AnyStringMatching(r".*\bawardValue\b.*"),
            },
        ),
    )

    @pytest.mark.parametrize(
        (
            "other_oc_brief_based",
            "initial_brief_based",
            "other_oc_data",
            "initial_data",
            "put_values",
            "expected_status_code",
            "expected_response_data",
        ),
        tuple(
            chain(
                (  # we reproduce here the variants in _test_update_outcome_base_scenarios, once for Briefs, once
                    # for Projects
                    (
                        f_t,
                        f_t,
                    ) + variant_params for f_t, variant_params in product((
                        False,
                        True,
                    ), _test_update_outcome_base_scenarios)),
                (  # and also include some with mixed target-types
                    (
                        # other_oc_brief_based
                        False,
                        # initial_brief_based
                        True,
                        # other_oc_data
                        {
                            "completed_at": datetime.datetime(
                                2007, 7, 7, 7, 7, 7),
                            "result": "none-suitable",
                        },
                        # initial_data
                        {
                            "completed_at": None,
                            "result": "cancelled",
                        },
                        # put_values
                        {
                            "completed": True,
                        },
                        # expected_status_code
                        200,
                        # expected_response_data
                        {
                            "outcome": AnySupersetOf({
                                "completed": True,
                            }),
                        },
                    ),
                    (
                        # other_oc_brief_based
                        True,
                        # initial_brief_based
                        False,
                        # other_oc_data
                        {
                            "completed_at": datetime.datetime(
                                2007, 7, 7, 7, 7, 7),
                            "awarding_organisation_name": "Lambay Freehold",
                            "award_value": 54321,
                            "start_date": datetime.date(2010, 12, 12),
                            "end_date": datetime.date(2011, 12, 12),
                        },
                        # initial_data
                        {},
                        # put_values
                        {
                            "completed": True,
                            "award": {
                                "awardingOrganisationName": "Omphalos",
                                "awardValue": "00314.1500",
                                "startDate": "2020-10-10",
                                "endDate": "2020-11-20",
                            },
                        },
                        # expected_status_code
                        200,
                        # expected_response_data
                        {
                            "outcome":
                            AnySupersetOf({
                                "completed": True,
                                "award": {
                                    "awardingOrganisationName": "Omphalos",
                                    "awardValue": "314.15",
                                    "startDate": "2020-10-10",
                                    "endDate": "2020-11-20",
                                },
                            }),
                        },
                    ),
                ),
            )),
        # help pytest make its printed representation of the parameter set a little more readable
        ids=(lambda val: "EMPTYDCT" if val == {} else None),
    )
    def test_update_outcome_scenarios(
        self,
        other_oc_brief_based,
        initial_brief_based,
        other_oc_data,
        initial_data,
        put_values,
        expected_status_code,
        expected_response_data,
    ):
        """
        A number of arguments control the background context this test is run in and the parameters PUT to the
        endpoint. Not all of the combinations make sense together and a caller should not expect the test to pass
        with a nonsensical combination of arguments.

        :param other_oc_brief_based:   whether the "other", existing Outcome should be Brief-based as opposed to
                                       Direct Award-based
        :param initial_brief_based:    whether the target Outcome should initially be set up to be Brief-based as
                                       opposed to Direct Award-based
        :param other_oc_data:          field values to set up the "other" Outcome with, ``None`` for no "other"
                                       Outcome to be created
        :param initial_data:           field values to initially set up the target Outcome with
        :param put_values:             payload dictionary to be PUT to the target endpoint (without the
                                       ``outcome`` wrapper)
        :param expected_status_code:
        :param expected_response_data:
        """
        user_id = self.setup_dummy_user(id=1, role='buyer')
        self.setup_dummy_suppliers(3)

        project = None
        search = None
        chosen_archived_service = other_archived_service = None
        if not (other_oc_brief_based and initial_brief_based):
            # create required objects for direct award-based Outcome
            self.setup_dummy_services(3, model=ArchivedService)

            project = DirectAwardProject(
                name="Lambay Island",
                users=[User.query.get(user_id)],
            )
            db.session.add(project)

            search = DirectAwardSearch(
                project=project,
                created_by=user_id,
                active=True,
                search_url="http://nothing.nowhere",
            )
            db.session.add(search)

            for archived_service in db.session.query(ArchivedService).filter(
                    ArchivedService.service_id.in_((
                        "2000000000",
                        "2000000001",
                    ))).all():
                search.archived_services.append(archived_service)

            chosen_archived_service, other_archived_service = search.archived_services[:2]
        # else skip creating these to save time

        brief = None
        chosen_brief_response = other_brief_response = None
        if other_oc_brief_based or initial_brief_based:
            # create required objects for brief-based Outcome
            brief = self.setup_dummy_brief(status="closed",
                                           user_id=user_id,
                                           data={})
            chosen_brief_response, other_brief_response = (BriefResponse(
                brief=brief,
                supplier_id=i,
                submitted_at=datetime.datetime.utcnow(),
                data={},
            ) for i in (
                1,
                2,
            ))
            db.session.add(chosen_brief_response)
            db.session.add(other_brief_response)
        # else skip creating these to save time

        other_outcome = None
        if other_oc_data is not None:
            # create "other" Outcome for our target one to potentially clash with
            other_outcome = Outcome(
                **({
                    "brief": brief
                } if other_oc_brief_based else {
                    "direct_award_project": project
                }),
                **(
                    {
                        "result":
                        other_oc_data.get("result", "awarded"),
                        **({
                            "brief_response": other_brief_response,
                        } if other_oc_brief_based else {
                               "direct_award_search":
                               search,
                               "direct_award_archived_service":
                               other_archived_service,
                           }),
                    } if other_oc_data.get(
                        "result", "awarded") == "awarded" else {
                            "result": other_oc_data["result"]
                        }),
                **{
                    k: v
                    for k, v in (other_oc_data or {}).items() if k not in (
                        "completed_at",
                        "result",
                    )
                },
            )
            if "completed_at" in other_oc_data:
                other_outcome.completed_at = other_oc_data["completed_at"]
            db.session.add(other_outcome)

        # create our target Outcome in its initial state
        outcome = Outcome(
            **({"brief": brief} if initial_brief_based else {"direct_award_project": project}),
            **({
                "result": initial_data.get("result", "awarded"),
                **({
                    "brief_response": chosen_brief_response,
                } if initial_brief_based else {
                    "direct_award_search": search,
                    "direct_award_archived_service": chosen_archived_service,
                }),
            } if initial_data.get("result", "awarded") == "awarded" else {
                "result": initial_data["result"]
            }),
            **{
                k: v for k, v in (initial_data or {}).items()
                if k not in ("completed_at", "result",)
            },
        )
        if "completed_at" in initial_data:
            # can only set completed_at after other fields have been set
            outcome.completed_at = initial_data["completed_at"]
        db.session.add(outcome)

        # must assign ids before we can lock project
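        # (flush() emits the pending INSERTs so the autogenerated ids get populated, without committing)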
        db.session.flush()
        if project:
            project.locked_at = datetime.datetime.now()

        # make a concrete note of these so we don't have to fetch them back from the database after the request,
        # potentially getting back values which have been inadvertently changed
        outcome_external_id = outcome.external_id
        project_external_id = project and project.external_id
        search_id = search and search.id
        chosen_archived_service_id = chosen_archived_service and chosen_archived_service.id
        chosen_archived_service_service_id = chosen_archived_service and chosen_archived_service.service_id
        brief_id = brief and brief.id
        chosen_brief_response_id = chosen_brief_response and chosen_brief_response.id
        audit_event_count = AuditEvent.query.count()
        db.session.commit()

        # keep a nice concrete representation for later comparison
        outcome_serialization_before = outcome.serialize()

        res = self.client.put(
            f"/outcomes/{outcome.external_id}",
            data=json.dumps({
                "updated_by": "*****@*****.**",
                "outcome": put_values,
            }),
            content_type="application/json",
        )
        assert res.status_code == expected_status_code
        response_data = json.loads(res.get_data())
        assert response_data == expected_response_data

        # allow these to be re-used in this session, "refreshed"
        db.session.add_all(x for x in (
            outcome,
            project,
            search,
            chosen_archived_service,
        ) if x is not None)
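        # expire_all() marks the instances stale, so the next attribute access re-loads the
        # now-committed values from the database instead of using values cached in the session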
        db.session.expire_all()

        if res.status_code != 200:
            # assert change wasn't made, audit event wasn't added
            assert outcome.serialize() == outcome_serialization_before
            assert AuditEvent.query.count() == audit_event_count
        else:
            # an additional check against values whose "correct" answers we can work out independently
            assert response_data == {
                "outcome": {
                    "id": outcome_external_id,
                    "result": initial_data.get("result", "awarded"),
                    "completed": (
                        bool(outcome_serialization_before.get("completedAt"))
                        or put_values.get("completed") is True
                    ),
                    "completedAt": (
                        outcome_serialization_before.get("completedAt")
                        or (
                            AnyStringMatching(r"\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(\.\d+)?Z")
                            if put_values.get("completed") else None
                        )
                    ),
                    **({
                        "resultOfFurtherCompetition": {
                            "brief": {
                                "id": brief_id,
                            },
                            **({
                                "briefResponse": {
                                    "id": chosen_brief_response_id,
                                },
                            } if initial_data.get("result", "awarded") == "awarded" else {}),
                        },
                    } if initial_brief_based else {
                        "resultOfDirectAward": {
                            "project": {
                                "id": project_external_id,
                            },
                            **({
                                "search": {
                                    "id": search_id,
                                },
                                "archivedService": {
                                    "id": chosen_archived_service_id,
                                    "service": {
                                        "id": chosen_archived_service_service_id,
                                    },
                                },
                            } if initial_data.get("result", "awarded") == "awarded" else {})
                        },
                    }),
                    **({"award": AnySupersetOf({})} if initial_data.get("result", "awarded") == "awarded" else {}),
                }
            }

            # check changes actually got committed
            assert response_data == {
                "outcome": outcome.serialize(),
            }

            # check audit event(s) were saved
            expect_complete_audit_event = (
                put_values.get("completed") is True and not initial_data.get("completed_at")
            )
            n_expected_new_audit_events = 2 if expect_complete_audit_event else 1

            assert AuditEvent.query.count() == audit_event_count + n_expected_new_audit_events
            # grab those most recent (1 or) 2 audit events from the db, re-sorting them to be in a predictable order -
            # we don't care whether the complete_outcome or update_outcome comes out of the db first
            audit_events = sorted(
                db.session.query(AuditEvent).order_by(
                    desc(AuditEvent.created_at),
                    desc(AuditEvent.id),
                )[:n_expected_new_audit_events],
                key=lambda ae: ae.type,
                reverse=True,
            )

            assert audit_events[0].type == "update_outcome"
            assert audit_events[0].object is outcome
            assert audit_events[0].acknowledged is False
            assert audit_events[0].acknowledged_at is None
            assert not audit_events[0].acknowledged_by
            assert audit_events[0].user == "*****@*****.**"
            assert audit_events[0].data == put_values

            if expect_complete_audit_event:
                assert audit_events[1].type == "complete_outcome"
                assert audit_events[1].created_at == audit_events[0].created_at == outcome.completed_at
                assert audit_events[1].object is outcome
                assert audit_events[1].acknowledged is False
                assert audit_events[1].acknowledged_at is None
                assert not audit_events[1].acknowledged_by
                assert audit_events[1].user == "*****@*****.**"
                assert audit_events[1].data == {}

    def test_nonexistent_outcome(self):
        res = self.client.put(
            f"/outcomes/314159",
            data=json.dumps({
                "updated_by": "*****@*****.**",
                "outcome": {
                    "completed": True,
                },
            }),
            content_type="application/json",
        )
        assert res.status_code == 404
        assert json.loads(res.get_data()) == {
            "error": "Outcome 314159 not found",
        }


class TestScanAndTagS3Object(BaseApplicationTest):
    @pytest.mark.parametrize(
        (
            "initial_tagset",
            "concurrent_new_tagset",
            "clamd_instream_retval",
            "expected_retval",
            "expected_log_calls",
            "expected_notify_calls",
            "expected_tagset",
        ),
        (
            (
                # initial_tagset
                {
                    "existing": "tag123",
                    "avStatus.irrelevant": "who is here",
                },
                # concurrent_new_tagset
                {"surprise": "tag234"},
                # clamd_instream_retval
                {"stream": ("OK", "dénouement sufficient",)},
                # expected_retval
                (
                    {},
                    True,
                    {
                        "avStatus.clamdVerStr": "ClamAV 567; first watch",
                        "avStatus.result": "pass",
                        "avStatus.ts": "2010-09-08T07:06:05.040302",
                    },
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {"avStatus.irrelevant": "who is here"},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("OK", "dénouement sufficient"),
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Fetched clamd version "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "clamd_version": "ClamAV 567; first watch",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(put object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
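                # (note the ";" in the clamd version string appears here as "_" - S3 tag values don't
                # allow semicolons, so presumably the code sanitises them before tagging)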
                {
                    "avStatus.result": "pass",
                    "avStatus.clamdVerStr": "ClamAV 567_ first watch",
                    "avStatus.ts": "2010-09-08T07:06:05.040302",
                    "surprise": "tag234",
                },
            ),
            (
                # initial_tagset
                {"existing": "tag123"},
                # concurrent_new_tagset
                {"surprise": "tag234"},
                # clamd_instream_retval
                {"stream": ("FOUND", "After him, Garry!",)},
                # expected_retval
                (
                    {},
                    True,
                    {
                        "avStatus.clamdVerStr": "ClamAV 567; first watch",
                        "avStatus.result": "fail",
                        "avStatus.ts": "2010-09-08T07:06:05.040302",
                    },
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("FOUND", "After him, Garry!"),
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Fetched clamd version "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "clamd_version": "ClamAV 567; first watch",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(put object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (
                    mock.call("not_a_real_key-00000000-fake-uuid-0000-000000000000"),
                    mock.call().send_email(
                        to_email_address="*****@*****.**",
                        personalisation={
                            "bucket_name": "spade",
                            "clamd_output": "FOUND, After him, Garry!",
                            "dm_trace_id": mock.ANY,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "object_key": "sandman/+4321 billy-winks☾.pdf",
                            "object_version": "0",
                            "sns_message_id": "<N/A>",
                        },
                        template_name_or_id="developer_virus_alert",
                    ),
                ),
                # expected_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.clamdVerStr": "ClamAV 567_ first watch",
                    "avStatus.ts": "2010-09-08T07:06:05.040302",
                    "surprise": "tag234",
                }
            ),
            (
                # initial_tagset
                {"existing": "tag123"},
                # concurrent_new_tagset
                None,
                # clamd_instream_retval
                {"stream": ("FOUND", "eicar-test-signature",)},
                # expected_retval
                (
                    {},
                    True,
                    {
                        "avStatus.clamdVerStr": "ClamAV 567; first watch",
                        "avStatus.result": "fail",
                        "avStatus.ts": "2010-09-08T07:06:05.040302",
                    },
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("FOUND", "eicar-test-signature"),
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Fetched clamd version "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "clamd_version": "ClamAV 567; first watch",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(put object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (
                    mock.call("not_a_real_key-00000000-fake-uuid-0000-000000000000"),
                    mock.call().send_email(
                        to_email_address="*****@*****.**",
                        personalisation={
                            "bucket_name": "spade",
                            "clamd_output": "FOUND, eicar-test-signature",
                            "dm_trace_id": mock.ANY,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "object_key": "sandman/+4321 billy-winks☾.pdf",
                            "object_version": "0",
                            "sns_message_id": "<N/A>",
                        },
                        template_name_or_id="developer_virus_alert",
                        reference="eicar-found-4d3daeeb3ea3d90d4d6e7a20a5b483a9-development",
                    ),
                ),
                # expected_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.clamdVerStr": "ClamAV 567_ first watch",
                    "avStatus.ts": "2010-09-08T07:06:05.040302",
                    "existing": "tag123",
                }
            ),
            (
                # initial_tagset
                {"existing": "tag123"},
                # concurrent_new_tagset
                {
                    "surprise": "tag234",
                    "avStatus.ts": "2010-09-08T07:06:05.040302",
                },
                # clamd_instream_retval
                {"stream": ("ERROR", " Some trouble is on here",)},
                # expected_retval
                UnknownClamdError,
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("ERROR", " Some trouble is on here",),
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Failed handling.*UnknownClamdError.*"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
                {
                    "surprise": "tag234",
                    "avStatus.ts": "2010-09-08T07:06:05.040302",
                },
            ),
            (
                # initial_tagset
                {"existing": "tag123"},
                # concurrent_new_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                    "avStatus.irrelevant": "who is here",
                },
                # clamd_instream_retval
                {"stream": ("OK", "Egg two demolished",)},
                # expected_retval
                (
                    {
                        "avStatus.result": "fail",
                        "avStatus.ts": "2010-09-08T07:06:04.010101",
                        "avStatus.irrelevant": "who is here",
                    },
                    False,
                    {
                        "avStatus.clamdVerStr": "ClamAV 567; first watch",
                        "avStatus.result": "pass",
                        "avStatus.ts": "2010-09-08T07:06:05.040302",
                    },
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("OK", "Egg two demolished"),
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Fetched clamd version "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "clamd_version": "ClamAV 567; first watch",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (
                            logging.WARNING,
                            AnyStringMatching(r"Object was tagged.*existing.*unapplied.*"),
                            (),
                        ),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "existing_av_status": {
                                "avStatus.result": "fail",
                                "avStatus.ts": "2010-09-08T07:06:04.010101",
                                "avStatus.irrelevant": "who is here",
                            },
                            "existing_av_status_result": "fail",
                            "unapplied_av_status": {
                                "avStatus.result": "pass",
                                "avStatus.clamdVerStr": "ClamAV 567; first watch",
                                "avStatus.ts": "2010-09-08T07:06:05.040302",
                            },
                            "unapplied_av_status_result": "pass",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                    "avStatus.irrelevant": "who is here",
                },
            ),
            (
                # initial_tagset
                {"existing": "tag123"},
                # concurrent_new_tagset
                {
                    "avStatus.result": "pass",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                    "avStatus.clamdVerStr": "4321_ 7654",
                    "surprise": "789+789",
                },
                # clamd_instream_retval
                {"stream": ("FOUND", "After him, boy!",)},
                # expected_retval
                (
                    {
                        "avStatus.result": "pass",
                        "avStatus.ts": "2010-09-08T07:06:04.010101",
                        "avStatus.clamdVerStr": "4321_ 7654",
                    },
                    False,
                    {
                        "avStatus.clamdVerStr": "ClamAV 567; first watch",
                        "avStatus.result": "fail",
                        "avStatus.ts": "2010-09-08T07:06:05.040302",
                    },
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version .* has no .avStatus\.result. tag "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "av_status": {},
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(initiate object download"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Scanned "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "file_length": 12,
                            "file_name": "too ducky.puddeny-pie.pdf",
                            "clamd_result": ("FOUND", "After him, boy!"),
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Fetched clamd version "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "clamd_version": "ClamAV 567; first watch",
                        })}),
                    ),
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (
                            logging.WARNING,
                            AnyStringMatching(r"Object was tagged.*existing.*unapplied.*"),
                            (),
                        ),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "existing_av_status": {
                                "avStatus.result": "pass",
                                "avStatus.ts": "2010-09-08T07:06:04.010101",
                                "avStatus.clamdVerStr": "4321_ 7654",
                            },
                            "existing_av_status_result": "pass",
                            "unapplied_av_status": {
                                "avStatus.result": "fail",
                                "avStatus.clamdVerStr": "ClamAV 567; first watch",
                                "avStatus.ts": "2010-09-08T07:06:05.040302",
                            },
                            "unapplied_av_status_result": "fail",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
                {
                    "avStatus.result": "pass",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                    "avStatus.clamdVerStr": "4321_ 7654",
                    "surprise": "789+789",
                },
            ),
            (
                # initial_tagset
                {
                    "avStatus.result": "pass",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                },
                # concurrent_new_tagset
                None,
                # clamd_instream_retval
                None,
                # expected_retval
                (
                    {
                        "avStatus.result": "pass",
                        "avStatus.ts": "2010-09-08T07:06:04.010101",
                    },
                    False,
                    None,
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version.*already.*avStatus\.result.*tag.+"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "existing_av_status": {
                                "avStatus.result": "pass",
                                "avStatus.ts": "2010-09-08T07:06:04.010101",
                            },
                            "existing_av_status_result": "pass",
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
                {
                    "avStatus.result": "pass",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                },
            ),
            (
                # initial_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                },
                # concurrent_new_tagset
                None,
                # clamd_instream_retval
                None,
                # expected_retval
                (
                    {
                        "avStatus.result": "fail",
                        "avStatus.ts": "2010-09-08T07:06:04.010101",
                    },
                    False,
                    None,
                ),
                # expected_log_calls
                (
                    (
                        (logging.DEBUG, AnyStringMatching(r"Call to S3 \(get object tagging"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Object version.*already.*avStatus\.result.*tag.+"), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "existing_av_status": {
                                "avStatus.result": "fail",
                                "avStatus.ts": "2010-09-08T07:06:04.010101",
                            },
                            "existing_av_status_result": "fail",
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                    (
                        (logging.INFO, AnyStringMatching(r"Handled bucket "), ()),
                        AnySupersetOf({"extra": AnySupersetOf({
                            "s3_bucket_name": "spade",
                            "s3_object_key": "sandman/+4321 billy-winks☾.pdf",
                            "s3_object_version": "0",
                        })}),
                    ),
                ),
                # expected_notify_calls
                (),
                # expected_tagset
                {
                    "avStatus.result": "fail",
                    "avStatus.ts": "2010-09-08T07:06:04.010101",
                },
            ),
        ),
    )
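    # freeze_time pins "now" so the avStatus.ts values asserted above are deterministic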
    @freeze_time("2010-09-08T07:06:05.040302")
    @mock.patch("app.s3.DMNotifyClient", autospec=True)
    def test_scan_and_tag_s3_object(
        self,
        mock_notify_client,
        bucket_with_file,
        mock_clamd,
        initial_tagset,
        concurrent_new_tagset,
        clamd_instream_retval,
        expected_retval,
        expected_log_calls,
        expected_notify_calls,
        expected_tagset,
    ):
        """
        :param initial_tagset:        tagset (dict) that file in bucket will appear to have initially
        :param concurrent_new_tagset: a tagset (dict) that coincidentally gets set "while" the clam instream process
                                      is running, None to skip this update
        :param clamd_instream_retval: value to return from mock clamd instream(...) call, None to expect no call to
                                      take place
        :param expected_retval:       return value to expect from call, or, if a subclass of Exception, expect to raise
                                      this exception type
        :param expected_log_calls:    sequence of expected mock.call()s to have been made to app logger
        :param expected_notify_calls: sequence of expected mock.call()s to have been made to mock DMNotifyClient
        :param expected_tagset:       tagset (dict) to expect file to have after the request processing has finished
        """
        bucket, objver = bucket_with_file
        s3_client = boto3.client("s3", region_name="howth-west-2")

        if initial_tagset is not None:
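            # tagset_from_dict presumably converts a plain dict into the [{"Key": ..., "Value": ...}]
            # TagSet structure put_object_tagging expects, e.g. {"a": "b"} -> [{"Key": "a", "Value": "b"}]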
            s3_client.put_object_tagging(
                Bucket=bucket.name,
                Key=objver.Object().key,
                VersionId=objver.id,
                Tagging={"TagSet": tagset_from_dict(initial_tagset)},
            )

        def clamd_instream_func(*args, **kwargs):
            # if clamd_instream_retval *is* None, we'd be expecting "instream" not to be called at all
            assert clamd_instream_retval is not None

            assert args == (RestrictedAny(lambda x: x.read() == b"123412341234"),)
            assert kwargs == {}

            if concurrent_new_tagset is not None:
                # a very literal "side effect" here - simulating a modification to the object's tags while scanning...
                s3_client.put_object_tagging(
                    Bucket=bucket.name,
                    Key=objver.Object().key,
                    VersionId=objver.id,
                    Tagging={"TagSet": tagset_from_dict(concurrent_new_tagset)},
                )

            return clamd_instream_retval

        mock_clamd.instream.side_effect = clamd_instream_func
        mock_clamd.version.return_value = "ClamAV 567; first watch"

        with self.mocked_app_logger_log() as mock_app_log:
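            # expected_retval doubles as either a return value or an exception class: if it's an
            # Exception subclass we expect the call to raise it, otherwise we use a do-nothing
            # context manager and compare the return value afterwards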
            with pytest.raises(expected_retval) if (
                isinstance(expected_retval, type)
                and issubclass(expected_retval, Exception)
            ) else null_context_manager():
                with self.app.test_request_context():
                    retval = scan_and_tag_s3_object(
                        s3_client,
                        bucket.name,
                        objver.Object().key,
                        objver.id,
                    )

            if not (isinstance(expected_retval, type) and issubclass(expected_retval, Exception)):
                assert retval == expected_retval

            assert mock_app_log.call_args_list == list(expected_log_calls)
            assert mock_notify_client.mock_calls == list(expected_notify_calls)

            assert dict_from_tagset(
                s3_client.get_object_tagging(
                    Bucket=bucket.name,
                    Key=objver.Object().key,
                    VersionId=objver.id,
                )["TagSet"]
            ) == expected_tagset

    def test_handle_s3_sns_notification(
        self,
        mock_validate,
        mock_scan_and_tag_s3_object,
        content_type,
        bucket_with_file,
    ):
        bucket, objver = bucket_with_file

        with self.mocked_app_logger_log() as mock_app_log:
            client = self.get_authorized_client()
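            # the object key is quote_plus-encoded in the simulated S3 event record, as it would be
            # in a real notification; the un-encoded key asserted against mock_scan_and_tag_s3_object
            # below shows the handler is expected to decode it before passing it on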
            body_dict = {
                **self._basic_notification_body,
                "TopicArn":
                "bull:by:the:horns:123:s3_file_upload_notification_development",
                "Subject":
                "Someone uploaded a file, yeah?",
                "Message":
                json.dumps({
                    "Records": [
                        {
                            "s3": {
                                "bucket": {
                                    "name": bucket.name,
                                },
                                "object": {
                                    "key": quote_plus(objver.Object().key),
                                    "versionId": objver.id,
                                },
                            },
                            "awsRegion": "howth-west-2",
                        },
                    ],
                }),
            }
            subscription_arn = f"{body_dict['TopicArn']}:314159"

            res = client.post(
                "/callbacks/sns/s3/uploaded",
                data=json.dumps(body_dict),
                content_type=content_type,
                headers={"X-Amz-Sns-Subscription-Arn": subscription_arn},
            )

            assert res.status_code == 200
            assert mock_app_log.call_args_list == [
                (
                    (logging.INFO, AnyStringMatching(r"Processing message "),
                     ()),
                    AnySupersetOf({
                        "extra":
                        AnySupersetOf({
                            "message_id":
                            "424344def",
                            "subscription_arn":
                            "bull:by:the:horns:123:s3_file_upload_notification_development:314159",
                        })
                    }),
                ),
                (mock.ANY,
                 AnySupersetOf({"extra": AnySupersetOf({"status": 200})})),
            ]
            assert mock_validate.call_args_list == [((body_dict, ),
                                                     AnySupersetOf({}))]
            assert mock_scan_and_tag_s3_object.call_args_list == [
                mock.call(
                    s3_client=mock.ANY,
                    s3_bucket_name=bucket.name,
                    s3_object_key=objver.Object().key,
                    s3_object_version=objver.id,
                    sns_message_id="424344def",
                ),
            ]
Ejemplo n.º 16
 (
     is_sampled,
     sleep_time,
     message,
     log_level,
     condition,
     raise_exception,
     inject_context,
     True,  # mock_time
     (  # expected_logs
         *(  # in cases where our format string expects an extra parameter that wasn't supplied ("street"
             # here), our log output will be led by a warning about the missing parameter - I feel it's
             # important to include this permutation to prove that we don't end up swallowing a genuine
             # exception if we inadvertently raise an exception while outputting our log message
             (
                 AnySupersetOf({"levelname": "WARNING"}), ) if
             ("street" in str(message) and "street" not in
              (inject_context or {})) else ()),
         AnySupersetOf({
             "name":
             "flask.app.foobar",
             "levelname":
             logging.getLevelName(log_level),
             "message":
             _messages_expected[message][1].get(
                 'error' if raise_exception else 'success'),
             "duration_real":
             RestrictedAny(
                 # a double-closure here to get around python's weird behaviour when capturing iterated
                 # variables (in this case `sleep_time`)
                 (lambda st: lambda val: st * 0.95 < val < st *


class TestListOutcomes(BaseApplicationTest, FixtureMixin):
    @pytest.mark.parametrize("query_string", ("", "completed=true", "completed=false",))
    def test_list_outcomes_empty(self, query_string):
        res = self.client.get(
            f"/outcomes?{query_string}",
        )
        assert res.status_code == 200
        assert json.loads(res.get_data()) == {
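            # the "self" link is matched loosely: any URL whose parsed parts give the "/outcomes"
            # path and the query string we requested with will do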
            "links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", query_string, "",))},
            "meta": {
                "total": 0,
            },
            "outcomes": [],
        }

    def setup_outcomes(self):
        user_id = self.setup_dummy_user(id=1, role='buyer')
        self.setup_dummy_suppliers(5)

        # create required objects for direct award-based Outcome
        self.setup_dummy_services(5, model=ArchivedService)

        #
        # create required objects for direct-award-related Outcomes
        #

        projects = tuple(
            DirectAwardProject(
                name=name,
                users=[User.query.get(user_id)],
            ) for name in ("alumno optimo", "palmam ferenti", "vere dignum et iustum est",)
        )
        db.session.add_all(projects)

        searches = tuple(
            DirectAwardSearch(
                project=project,
                created_by=user_id,
                active=True,
                search_url="http://nothing.nowhere",
            ) for project in projects
        )
        db.session.add_all(searches)

        for i, search in enumerate(searches):
            for archived_service in db.session.query(ArchivedService).filter(
                ArchivedService.service_id.in_([str(j) for j in range(2000000000 + i, 2000000000 + i + 3)])
            ).all():
                search.archived_services.append(archived_service)

        #
        # create required objects for Brief-related Outcomes
        #

        briefs = tuple(
            self.setup_dummy_brief(status="closed", user_id=user_id, data={})
            for _ in range(4)
        )
        db.session.add_all(briefs)
        # increasingly many BriefResponses for each Brief in `briefs`
        brief_responses = tuple(BriefResponse(
            brief=brief,
            supplier_id=j,
            submitted_at=datetime.datetime.utcnow(),
            data={},
        ) for i, brief in enumerate(briefs) for j in range(i))
        db.session.add_all(brief_responses)

        outcomes = (
            Outcome(
                external_id=100000000,
                direct_award_project=searches[0].project,
                direct_award_search=searches[0],
                direct_award_archived_service=searches[0].archived_services[0],
                result="awarded",
                start_date=datetime.date(2006, 2, 2),
                end_date=datetime.date(2006, 3, 3),
                awarding_organisation_name="Omnium Gatherum",
                award_value=81396,
                completed_at=datetime.datetime(2005, 10, 10, 10, 10, 10),
            ),
            Outcome(
                external_id=100000005,
                direct_award_project=searches[0].project,
                direct_award_search=searches[0],
                direct_award_archived_service=searches[0].archived_services[1],
                result="awarded",
                start_date=datetime.date(2006, 4, 4),
                awarding_organisation_name="Nisus Formativus",
            ),
            Outcome(
                external_id=100000002,
                direct_award_project=searches[0].project,
                result="none-suitable",
            ),
            Outcome(
                external_id=100000011,
                direct_award_project=searches[1].project,
                result="none-suitable",
            ),
            Outcome(
                external_id=100000004,
                direct_award_project=searches[2].project,
                result="cancelled",
                completed_at=datetime.datetime(2005, 10, 9, 9, 9, 9),
            ),
            Outcome(
                external_id=100000001,
                brief=briefs[0],
                result="cancelled",
                completed_at=datetime.datetime(2005, 5, 5, 5, 5, 5),
            ),
            Outcome(
                external_id=100000008,
                brief=briefs[0],
                result="cancelled",
            ),
            Outcome(
                external_id=100000012,
                brief=briefs[1],
                brief_response=briefs[1].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2010, 1, 1),
                end_date=datetime.date(2011, 8, 8),
                awarding_organisation_name="Viridum Toxicum",
                award_value=81396,
                completed_at=datetime.datetime(2005, 11, 11, 11, 11, 11),
            ),
            Outcome(
                external_id=100000006,
                brief=briefs[1],
                brief_response=briefs[1].brief_responses[0],
                result="awarded",
                award_value=83300,
            ),
            Outcome(
                external_id=100000009,
                brief=briefs[2],
                result="none-suitable",
                completed_at=datetime.datetime(2005, 10, 10, 10, 11, 11),
            ),
            Outcome(
                external_id=100000013,
                brief=briefs[2],
                brief_response=briefs[2].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2011, 1, 1),
                end_date=datetime.date(2011, 1, 2),
                award_value=3072,
            ),
            Outcome(
                external_id=100000003,
                brief=briefs[2],
                brief_response=briefs[2].brief_responses[1],
                result="awarded",
            ),
            Outcome(
                external_id=100000007,
                brief=briefs[3],
                result="none-suitable",
            ),
            Outcome(
                external_id=100000010,
                brief=briefs[3],
                brief_response=briefs[3].brief_responses[0],
                result="awarded",
                start_date=datetime.date(2006, 1, 1),
                end_date=datetime.date(2008, 1, 1),
                awarding_organisation_name="Lacus Mortis",
                award_value=4386035,
                completed_at=datetime.datetime(2006, 1, 1, 1, 1, 1),
            ),
        )
        db.session.add_all(outcomes)

        db.session.commit()

    @pytest.mark.parametrize("query_string,expected_response_data", (
        ("", {
            "links": {"self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "", "",))},
            "meta": {
                "total": 14,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000001}),
                AnySupersetOf({"id": 100000004}),
                AnySupersetOf({"id": 100000000}),
                AnySupersetOf({"id": 100000009}),
                AnySupersetOf({"id": 100000012}),
                AnySupersetOf({"id": 100000010}),
                AnySupersetOf({"id": 100000002}),
                AnySupersetOf({"id": 100000003}),
                AnySupersetOf({"id": 100000005}),
                AnySupersetOf({"id": 100000006}),
                AnySupersetOf({"id": 100000007}),
                AnySupersetOf({"id": 100000008}),
                AnySupersetOf({"id": 100000011}),
                AnySupersetOf({"id": 100000013}),
            ],
        }),
        ("completed=true", {
            "links": {
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=true", "",)),
            },
            "meta": {
                "total": 6,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000001}),
                AnySupersetOf({"id": 100000004}),
                AnySupersetOf({"id": 100000000}),
                AnySupersetOf({"id": 100000009}),
                AnySupersetOf({"id": 100000012}),
                AnySupersetOf({"id": 100000010}),
            ],
        }),
        ("completed=false", {
            "links": {
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "completed=false", "",)),
            },
            "meta": {
                "total": 8,
            },
            "outcomes": [
                AnySupersetOf({"id": 100000002}),
                AnySupersetOf({"id": 100000003}),
                AnySupersetOf({"id": 100000005}),
                AnySupersetOf({"id": 100000006}),
                AnySupersetOf({"id": 100000007}),
                AnySupersetOf({"id": 100000008}),
                AnySupersetOf({"id": 100000011}),
                AnySupersetOf({"id": 100000013}),
            ],
        }),
    ))
    def test_list_outcomes(self, query_string, expected_response_data):
        self.setup_outcomes()

        res = self.client.get(
            f"/outcomes?{query_string}",
        )

        assert res.status_code == 200
        response_data = json.loads(res.get_data())

        # the parametrized expected_response_data checks the coarse shape of the response
        assert response_data == expected_response_data

        # then check that each listed outcome matches the full serialization of the Outcome with that id
        assert response_data["outcomes"] == [
            Outcome.query.filter(Outcome.external_id == outcome_dict["id"]).one().serialize()
            for outcome_dict in response_data["outcomes"]
        ]

    def test_list_outcomes_paging(self):
        self.setup_outcomes()
        current_app.config["DM_API_OUTCOMES_PAGE_SIZE"] = 3
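        # with 14 outcomes and a page size of 3 there are 5 pages, so page 2 should hold the
        # 4th-6th outcomes and carry prev/next/last links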

        res = self.client.get(
            "/outcomes?page=2",
        )

        assert res.status_code == 200
        response_data = json.loads(res.get_data())

        assert response_data == {
            "links": {
                "next": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=3", "",)),
                "self": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=2", "",)),
                "prev": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=1", "",)),
                "last": RestrictedAny(lambda u: urlparse(u) == (ANY, ANY, "/outcomes", "", "page=5", "",)),
            },
            "meta": {
                "total": 14,
            },
            "outcomes": [
                Outcome.query.filter(Outcome.external_id == expected_id).one().serialize()
                for expected_id in (100000009, 100000012, 100000010,)
            ],
        }


class TestHandleS3Sns(BaseCallbackApplicationTest):
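    # minimal request bodies for the two SNS delivery types exercised below: a
    # SubscriptionConfirmation and a plain Notification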
    _basic_subscription_confirmation_body = {
        "Type": "SubscriptionConfirmation",
        "TopicArn": "54321:cattleTrade",
        "Token": "314159b",
        "Timestamp": "2018-05-05T11:00:01.12345Z",
        "SubscribeURL": "https://laissez.faire/doctrine",
        "MessageId": "abc123",
    }
    _basic_notification_body = {
        "Type": "Notification",
        "TopicArn": "65432:oldIndustries",
        "Timestamp": "2018-05-05T11:00:01.12345Z",
        "MessageId": "424344def",
        "Message": "The way thereof",
    }

    @pytest.mark.parametrize("base_body_dict", (
        _basic_subscription_confirmation_body,
        _basic_notification_body,
    ))
    @freeze_time("2018-05-05T10:00")
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_unfetchable_cert(
            self, mock_handle_subscription_confirmation, base_body_dict):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
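                # the signing certificate fetch 404s, so signature validation should fail
                # before any SNS handling takes place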
                rmock.register_uri("GET",
                                   "https://nowhere.amazonaws.com/cert.pem",
                                   status_code=404)

                client = self.get_authorized_client()
                res = client.post("/callbacks/sns/s3/uploaded",
                                  data=json.dumps({
                                      **base_body_dict,
                                      "Signature":
                                      "should_be_irrelevant",
                                      "SigningCertURL":
                                      "https://nowhere.amazonaws.com/cert.pem",
                                  }),
                                  content_type="application/json")

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.INFO,
                         AnyStringMatching(
                             r"Failed to fetch certificate .*404"), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "target_url":
                                "https://nowhere.amazonaws.com/cert.pem",
                            })
                        }),
                    ),
                    (
                        (logging.WARNING,
                         AnyStringMatching(r"SNS request body failed "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "validation_error":
                                RestrictedAny(lambda x: isinstance(
                                    x, requests.exceptions.HTTPError)),
                            })
                        }),
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert rmock.request_history == [
                    RestrictedAny(lambda r: r.url ==
                                  "https://nowhere.amazonaws.com/cert.pem")
                ]
                assert mock_handle_subscription_confirmation.called is False

    @pytest.mark.parametrize("base_body_dict", (
        _basic_subscription_confirmation_body,
        _basic_notification_body,
    ))
    @mock.patch("validatesns.validate", autospec=True)
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_bad_signature(self,
                                         mock_handle_subscription_confirmation,
                                         mock_validate, base_body_dict):
        mock_validate.side_effect = validatesns.ValidationError
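        # a validatesns rejection should surface as a 400 without any outbound requests being made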
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(base_body_dict),
                    content_type="application/json",
                )

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.WARNING,
                         AnyStringMatching(r".*failed signature validation"),
                         ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "validation_error":
                                RestrictedAny(lambda x: isinstance(
                                    x, validatesns.ValidationError)),
                            })
                        }),
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((base_body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    @mock.patch("validatesns.validate", autospec=True)
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_weird_request_type(
            self, mock_handle_subscription_confirmation, mock_validate):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
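                # a Type that is neither SubscriptionConfirmation nor Notification should be
                # rejected outright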
                weird_body_dict = {
                    "MessageId": "1234321",
                    "Type": "EuropeanConflagration",
                }
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(weird_body_dict),
                    content_type="application/json",
                )

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.WARNING,
                         AnyStringMatching(r"Unrecognized request type "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "request_type":
                                "EuropeanConflagration",
                            })
                        }),
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((weird_body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    _test_handle_s3_sns_unexpected_message_contents_didnt_match_log_args = lambda message: (  # noqa
        (logging.WARNING, AnyStringMatching(r"Message contents didn't match "),
         ()),
        AnySupersetOf(
            {"extra": AnySupersetOf({
                "message_contents": message,
            })}),
    )
    _test_handle_s3_sns_unexpected_message_unrecognized_message_format_log_args = lambda message: (  # noqa
        (logging.WARNING, AnyStringMatching(r"Unrecognized message format "),
         ()),
        AnySupersetOf({"extra": AnySupersetOf({
            "body_message": message,
        })}),
    )

    @pytest.mark.parametrize(
        "message,expected_warning_log_call",
        (
            (
                "mangiD",
                _test_handle_s3_sns_unexpected_message_contents_didnt_match_log_args(
                    "mangiD"),
            ),
            (
                123,
                _test_handle_s3_sns_unexpected_message_contents_didnt_match_log_args(
                    123),
            ),
            (
                None,
                _test_handle_s3_sns_unexpected_message_contents_didnt_match_log_args(
                    None),
            ),
            (
                "",
                _test_handle_s3_sns_unexpected_message_contents_didnt_match_log_args(
                    ""),
            ),
            (
                '{"a":"b"}',
                _test_handle_s3_sns_unexpected_message_unrecognized_message_format_log_args(
                    {"a": "b"}),
            ),
        ),
    )
    @mock.patch("validatesns.validate", autospec=True)
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_unexpected_message(
        self,
        mock_handle_subscription_confirmation,
        mock_validate,
        message,
        expected_warning_log_call,
    ):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                body_dict = {
                    "MessageId": "1234321",
                    "Type": "Notification",
                    "Message": message,
                }
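                # per the parametrization above, a Message that can't be read as a JSON document
                # appears to trigger the contents-didn't-match warning, while valid JSON of an
                # unexpected shape triggers the unrecognized-format warning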
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(body_dict),
                    content_type="application/json",
                    headers={"X-Amz-Sns-Subscription-Arn": "kcirtaP"},
                )

                assert res.status_code == 400
                assert mock_app_log.call_args_list == [
                    (
                        (logging.INFO,
                         AnyStringMatching(r"Processing message "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "message_id": "1234321",
                                "subscription_arn": "kcirtaP",
                            })
                        }),
                    ), expected_warning_log_call,
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 400})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    @mock.patch("validatesns.validate", autospec=True)
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_test_event(
        self,
        mock_handle_subscription_confirmation,
        mock_validate,
    ):
        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                body_dict = {
                    "MessageId": "1234321",
                    "Type": "Notification",
                    "Message": '{"Event":"s3:TestEvent","nut":"shell"}',
                }
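                # S3 sends an s3:TestEvent message when bucket notifications are first configured;
                # the view should acknowledge it with a 200 and do nothing further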
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(body_dict),
                    content_type="application/json",
                    headers={"X-Amz-Sns-Subscription-Arn": "kcirtaP"},
                )

                assert res.status_code == 200
                assert mock_app_log.call_args_list == [
                    (
                        (logging.INFO,
                         AnyStringMatching(r"Processing message "), ()),
                        AnySupersetOf({
                            "extra":
                            AnySupersetOf({
                                "message_id": "1234321",
                                "subscription_arn": "kcirtaP",
                            })
                        }),
                    ), (
                        (logging.INFO, "Received S3 test event", ()),
                        {},
                    ),
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 200})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [((body_dict, ),
                                                         AnySupersetOf({}))]
                assert mock_handle_subscription_confirmation.called is False

    @pytest.mark.parametrize("content_type", (
        "application/json",
        "text/plain",
    ))
    @mock.patch("validatesns.validate", autospec=True)
    @mock.patch("app.callbacks.views.sns._handle_subscription_confirmation",
                autospec=True)
    def test_handle_s3_sns_subscription_confirmation(
        self,
        mock_handle_subscription_confirmation,
        mock_validate,
        content_type,
    ):
        # arbitrary sentinel Response
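        # the view is expected to pass this handler response straight through, hence the body
        # and status assertions below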
        mock_handle_subscription_confirmation.return_value = Response(
            "Grain supplies"), 200

        with self.mocked_app_logger_log() as mock_app_log:
            with requests_mock.Mocker() as rmock:
                client = self.get_authorized_client()
                res = client.post(
                    "/callbacks/sns/s3/uploaded",
                    data=json.dumps(
                        self._basic_subscription_confirmation_body),
                    content_type=content_type,
                )

                assert res.status_code == 200
                assert res.get_data() == b"Grain supplies"
                assert mock_app_log.call_args_list == [
                    (mock.ANY,
                     AnySupersetOf({"extra": AnySupersetOf({"status": 200})}))
                ]
                assert not rmock.request_history
                assert mock_validate.call_args_list == [
                    ((self._basic_subscription_confirmation_body, ),
                     AnySupersetOf({}))
                ]
                assert mock_handle_subscription_confirmation.call_args_list == [
                    ((
                        self._basic_subscription_confirmation_body,
                        "s3_file_upload_notification_development",
                    ), {})
                ]

    @pytest.mark.parametrize("content_type", (
        "application/json",
        "text/plain",
    ))
    @mock.patch("app.callbacks.views.sns.scan_and_tag_s3_object",
                autospec=True)
    @mock.patch("validatesns.validate", autospec=True)
    def test_handle_s3_sns_notification(
        self,
        mock_validate,
        mock_scan_and_tag_s3_object,
        content_type,
        bucket_with_file,
    ):
        bucket, objver = bucket_with_file

        with self.mocked_app_logger_log() as mock_app_log:
            client = self.get_authorized_client()
            body_dict = {
                **self._basic_notification_body,
                "TopicArn":
                "bull:by:the:horns:123:s3_file_upload_notification_development",
                "Subject":
                "Someone uploaded a file, yeah?",
                "Message":
                json.dumps({
                    "Records": [
                        {
                            "s3": {
                                "bucket": {
                                    "name": bucket.name,
                                },
                                "object": {
                                    "key": quote_plus(objver.Object().key),
                                    "versionId": objver.id,
                                },
                            },
                            "awsRegion": "howth-west-2",
                        },
                    ],
                }),
            }
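            # mimics a real S3 event notification: Message is itself a JSON document whose Records
            # entry names the bucket, (URL-quoted) key and version that should be handed on to
            # scan_and_tag_s3_object with the key unquoted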
            subscription_arn = f"{body_dict['TopicArn']}:314159"

            res = client.post(
                "/callbacks/sns/s3/uploaded",
                data=json.dumps(body_dict),
                content_type=content_type,
                headers={"X-Amz-Sns-Subscription-Arn": subscription_arn},
            )

            assert res.status_code == 200
            assert mock_app_log.call_args_list == [
                (
                    (logging.INFO, AnyStringMatching(r"Processing message "),
                     ()),
                    AnySupersetOf({
                        "extra":
                        AnySupersetOf({
                            "message_id":
                            "424344def",
                            "subscription_arn":
                            "bull:by:the:horns:123:s3_file_upload_notification_development:314159",
                        })
                    }),
                ),
                (mock.ANY,
                 AnySupersetOf({"extra": AnySupersetOf({"status": 200})})),
            ]
            assert mock_validate.call_args_list == [((body_dict, ),
                                                     AnySupersetOf({}))]
            assert mock_scan_and_tag_s3_object.call_args_list == [
                mock.call(
                    s3_client=mock.ANY,
                    s3_bucket_name=bucket.name,
                    s3_object_key=objver.Object().key,
                    s3_object_version=objver.id,
                    sns_message_id="424344def",
                ),
            ]


class TestHandleSubscriptionConfirmation(BaseCallbackApplicationTest):
    @pytest.mark.parametrize(
        (
            "body_dict",
            "supported_topic_name",
            "rmock_response_kwargs",
            "expected_output",
            "expect_request_made",
            "expected_log_calls",
        ),
        tuple(
            chain.from_iterable((
                (
                    (
                        # body_dict
                        {
                            "SubscribeURL":
                            "https://amz.net",
                            "TopicArn":
                            "arn:aws:sns:howth-west-2:123456789012:Drawers"
                        },
                        # supported_topic_name
                        "Drawers",
                        # rmock_response_kwargs
                        {
                            "text":
                            """<ConfirmSubscriptionResponse xmlns="http://brazenfaced.things">
                        <ConfirmSubscriptionResult><SubscriptionArn>
                            arn:aws:sns:howth-west-2:123456789012:Drawers:bicycles
                        </SubscriptionArn></ConfirmSubscriptionResult>
                        <ResponseMetadata><RequestId>
                            always-skeezing
                        </RequestId></ResponseMetadata>
                    </ConfirmSubscriptionResponse>"""
                        },
                        # expected_output
                        (RestrictedAny(lambda x: isinstance(x, Response)), 200
                         ),
                        # expect_request_made
                        True,
                        # expected_log_calls
                        (
                            (
                                (logging.INFO, AnyStringMatching("Made GET "),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "target_url":
                                        "https://amz.net",
                                        "topic_arn":
                                        "arn:aws:sns:howth-west-2:123456789012:Drawers",
                                    }),
                                }),
                            ),
                            (
                                (logging.INFO,
                                 AnyStringMatching(
                                     "SubscriptionConfirmation succeeded "),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "subscription_arn":
                                        "arn:aws:sns:howth-west-2:123456789012:Drawers:bicycles",
                                        "confirmation_request_id":
                                        "always-skeezing",
                                    }),
                                }),
                            ),
                        ),
                    ),
                    (
                        # body_dict
                        {
                            "SubscribeURL": "https://butt.bridge",
                            "TopicArn": "premature:decay"
                        },
                        # supported_topic_name
                        "BrilliantFellows",
                        # rmock_response_kwargs
                        {
                            "text": "dummy"
                        },
                        # expected_output
                        BadRequest,
                        # expect_request_made
                        False,
                        # expected_log_calls
                        (
                            (
                                (logging.WARNING,
                                 AnyStringMatching(r".*unrecognized topic.*"),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "topic_name":
                                        "decay",
                                        "topic_arn":
                                        "premature:decay",
                                    }),
                                }),
                            ), ),
                    ),
                    (
                        # body_dict
                        {
                            "SubscribeURL": "https://sister.island.co.uk",
                            "TopicArn": "100M:Played:Out:CoalSeams"
                        },
                        # supported_topic_name
                        "CoalSeams",
                        # rmock_response_kwargs
                        {
                            "text":
                            """<ConfirmSubscriptionResponse xmlns="http://neighbours-across.the/channel">
                        <ConfirmSubscriptionResult><SubscriptionArn>
                            unrelated:naming:scheme
                        </SubscriptionArn></ConfirmSubscriptionResult>
                    </ConfirmSubscriptionResponse>""",
                            "status_code": 210
                        },
                        # expected_output
                        (RestrictedAny(lambda x: isinstance(x, Response)), 200
                         ),
                        # expect_request_made
                        True,
                        # expected_log_calls
                        (
                            (
                                (logging.INFO, AnyStringMatching("Made GET "),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "target_url":
                                        "https://sister.island.co.uk",
                                        "topic_arn":
                                        "100M:Played:Out:CoalSeams",
                                    }),
                                }),
                            ),
                            (
                                (logging.INFO,
                                 AnyStringMatching(
                                     "SubscriptionConfirmation succeeded "),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "subscription_arn":
                                        "unrelated:naming:scheme",
                                    }),
                                }),
                            ),
                        ),
                    ),
                    (
                        # body_dict
                        {
                            "SubscribeURL": "https://disorder.ly.hous.es",
                            "TopicArn": "nice:mixup"
                        },
                        # supported_topic_name
                        "mixup",
                        # rmock_response_kwargs
                        {
                            "text": "<Second drink<does it<"
                        },
                        # expected_output
                        BadRequest,
                        # expect_request_made
                        True,
                        # expected_log_calls
                        (
                            (
                                (logging.INFO, AnyStringMatching("Made GET "),
                                 ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "target_url":
                                        "https://disorder.ly.hous.es",
                                        "topic_arn": "nice:mixup",
                                    }),
                                }),
                            ),
                            (
                                (logging.WARNING,
                                 RestrictedAny(
                                     lambda x: isinstance(x, ParseError)), ()),
                                AnySupersetOf({}),
                            ),
                            (
                                (logging.WARNING,
                                 "SubscriptionConfirmation response parsing failed",
                                 ()),
                                AnySupersetOf({}),
                            ),
                        ),
                    ),
                ),
                (
                    # the following case should produce essentially the same result for any
                    # request error, so we generate several variants of it
                    (
                        # body_dict
                        {
                            "SubscribeURL": "https://wildgoose.chase/this",
                            "TopicArn": "first-class:third:ticket"
                        },
                        # supported_topic_name
                        "ticket",
                        # rmock_response_kwargs
                        rmock_response_kwargs,
                        # expected_output
                        BadRequest,
                        # expect_request_made
                        True,
                        # expected_log_calls
                        (
                            (
                                (logging.INFO,
                                 AnyStringMatching("Failed to make GET "), ()),
                                AnySupersetOf({
                                    "extra":
                                    AnySupersetOf({
                                        "target_url":
                                        "https://wildgoose.chase/this",
                                        "topic_arn":
                                        "first-class:third:ticket",
                                    }),
                                }),
                            ), ),
                    ) for rmock_response_kwargs in (
                        {
                            "status_code": 404,
                            "text": "where?"
                        },
                        {
                            "status_code": 500,
                            "text": "what?"
                        },
                        {
                            "status_code": 403,
                            "text": "who?"
                        },
                        {
                            "status_code": 400,
                            "text": "no"
                        },
                        {
                            "exc": requests.exceptions.ConnectTimeout
                        },
                        {
                            "exc": requests.exceptions.SSLError
                        },
                    )),
            ))),
    )
    def test_handle_subscription_confirmation(
        self,
        body_dict,
        supported_topic_name,
        rmock_response_kwargs,
        expected_output,
        expect_request_made,
        expected_log_calls,
    ):
        """
        :param body_dict:             request body_dict to pass directly to _handle_subscription_confirmation
        :param supported_topic_name:  supported_topic_name to pass directly to _handle_subscription_confirmation
        :param rmock_response_kwargs: kwargs to pass to register_uri specifying how requests_mock should respond to
                                      a request to the "subscribe" url
        :param expected_output:       either an Exception subclass to expect to be raised or the expected return value
                                      of _handle_subscription_confirmation
        :param expect_request_made:   whether to expect a request to have been made to the "subscribe" url
        :param expected_log_calls:    sequence of expected mock.call()s to have been made to app logger
        """
        with self.mocked_app_logger_log() as mock_app_log:
            with self.app.test_request_context():
                with requests_mock.Mocker() as rmock:
                    rmock.register_uri("GET", body_dict["SubscribeURL"],
                                       **rmock_response_kwargs)

                    expect_exception = isinstance(
                        expected_output, type) and issubclass(
                            expected_output, Exception)
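                    # if expected_output is an exception class we assert it gets raised; otherwise
                    # we run under null_context_manager (presumably a no-op context manager akin to
                    # contextlib.nullcontext) and compare the return value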
                    with pytest.raises(
                            expected_output
                    ) if expect_exception else null_context_manager():
                        out = _handle_subscription_confirmation(
                            body_dict, supported_topic_name)

                    if not expect_exception:
                        assert out == expected_output

                    assert rmock.called is expect_request_made
                    assert mock_app_log.call_args_list == list(
                        expected_log_calls)

    # NOTE: the test name and signature below are assumed reconstructions, inferred from the
    # fixture/parameter names used in the body; app_with_stream_logger is assumed to yield an (app, stream) pair
    @pytest.mark.parametrize("is_sampled", (False, True,))
    def test_logging_call_site_missing_keys(self, app_with_stream_logger, is_sampled):
        app, stream = app_with_stream_logger

        def _logging_call_site(app):
            app.logger.info(
                "Charming day {ankles}, {underleaves}, {parent_span_id}",
                extra={"underleaves": "ample"},
            )

        _set_request_class_is_sampled(app, is_sampled)

        with app.test_request_context('/'):
            test_extra_log_context = {
                "ankles": "thinsocked",
                "span_id": "beesWaxed",
            }

            request.get_extra_log_context = mock.Mock(spec_set=[])
            request.get_extra_log_context.return_value = test_extra_log_context

            # the log call is made from a specifically designated, named function so that the
            # introspective aspects of our logging can be exercised and asserted reliably
            _logging_call_site(app)

        all_lines = tuple(
            json.loads(line) for line in stream.read().splitlines())

        assert all_lines == (
            AnySupersetOf({
                "message":
                "Missing keys when formatting log message: ('parent_span_id',)",
                # it may seem foolish and a bit fragile to include the following parameters in our assertions but
                # properly testing introspective behaviour is always going to get a bit weird and meta in that regard.
                "app_funcName":
                "_logging_call_site",
                "app_pathname":
                os.path.normcase(_logging_call_site.__code__.co_filename),
                "app_lineno":
                RestrictedAny(lambda value: isinstance(value, int)),
                "lineno":
                RestrictedAny(lambda value: isinstance(value, int)),
                "pathname":
                AnyStringMatching(r".+\/dmutils\/logging\.pyc?"),
            }),
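            # the second emitted line is the application's own log message, with the missing
            # parent_span_id substituted by a "missing key" placeholder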
            AnySupersetOf({
                "time":
                mock.ANY,
                "application":
                mock.ANY,
                "message":
                "Charming day thinsocked, ample, {parent_span_id: missing key}",
                "underleaves":
                "ample",
                "ankles":
                "thinsocked",
                "spanId":
                "beesWaxed",
                "parentSpanId":
                None,
                "requestId":
                None,
                "debugFlag":
                None,
                "isSampled":
                None,
                # as above, these parameters are included in the assertion to ensure our modifications haven't affected
                # the regular logging introspection features
                "lineno":
                RestrictedAny(lambda value: isinstance(value, int)),
                "pathname":
                os.path.normcase(_logging_call_site.__code__.co_filename),
                **({
                    "app_funcName":
                    "_logging_call_site",
                    "app_pathname":
                    os.path.normcase(_logging_call_site.__code__.co_filename),
                    "app_lineno":
                    RestrictedAny(lambda value: isinstance(value, int)),
                } if is_sampled else {}),
            }),
        )

        if not is_sampled:
            # AppStackLocationFilter shouldn't have included information in this low-urgency message
            for unexpected_key in (
                    "app_funcName",
                    "app_lineno",
                    "app_pathname",
            ):
                assert unexpected_key not in all_lines[1]

        for unexpected_key in (
                "span_id",
                "trace_id",
                "traceId",
                "request_id",
                "debug_flag",
                "is_sampled",
                "parent_span_id",  # also ensuring "missing key" functionality didn't add a value for this
        ):
            assert not any(unexpected_key in line for line in all_lines)