def test_send_email_replace_address_constructor(
    self, app, notify_send_email, redirect_app_config, redirect_constructor, expected_redirected_address,
):
    """Constructor-supplied redirect mapping should win over (or combine with) the app-config one."""
    with app.app_context():
        app.config["DM_NOTIFY_REDIRECT_DOMAINS_TO_ADDRESS"] = redirect_app_config
        client = DMNotifyClient(_test_api_key, redirect_domains_to_address=redirect_constructor)

        with mock.patch(self.client_class_str + ".send_email_notification") as mocked_send:
            mocked_send.return_value = notify_send_email
            client.send_email(self.email_address, self.template_id)

            if expected_redirected_address is not None:
                expected_to_address = expected_redirected_address
            else:
                expected_to_address = self.email_address

            mocked_send.assert_called_with(
                expected_to_address,
                self.template_id,
                personalisation=None,
                # the reference is unaffected by any address redirection
                reference='niC4qhMflcnl8MkY82N7Gqze2ZA7ed1pSBTGnxeDPj0=',
                email_reply_to_id=None
            )
def send_supplier_emails(email_api_key, email_addresses, supplier_context, logger):
    """Send the supplier notification email to every address in `email_addresses`."""
    notify_client = DMNotifyClient(email_api_key, logger=logger)
    for recipient in email_addresses:
        notify_client.send_email(
            to_email_address=recipient,
            template_name_or_id=EMAIL_TEMPLATE_ID,
            personalisation=get_template_personalisation(supplier_context),
        )
def test_send_email_can_use_templates_from_app_config(self, app):
    """A template *name* present in NOTIFY_TEMPLATES is resolved to its template id before sending."""
    app.config["NOTIFY_TEMPLATES"] = {"template_name": "template-id"}
    with app.app_context():
        client = DMNotifyClient(_test_api_key)
        with mock.patch(self.client_class_str + ".send_email_notification") as mocked_send:
            client.send_email("*****@*****.**", "template_name")
            mocked_send.assert_called_with(
                "*****@*****.**",
                "template-id",
                personalisation=None,
                reference=mock.ANY,
                email_reply_to_id=None,
            )
def test_send_email_behaviour_outside_flask_app_context(self):
    """Supplying a logger explicitly means no flask app context is required."""
    client = DMNotifyClient(
        _test_api_key,
        logger=mock.Mock(),
    )
    with mock.patch(self.client_class_str + ".send_email_notification") as mocked_send:
        client.send_email("*****@*****.**", "template_id")
        mocked_send.assert_called_with(
            "*****@*****.**",
            "template_id",
            personalisation=None,
            reference=mock.ANY,
            email_reply_to_id=None,
        )
def notify_users(email_api_key, stage, brief):
    """Email every active user of `brief` a link to the brief's responses page.

    :param email_api_key: Notify API key used to construct the email client
    :param stage: deployment stage name, used to derive the web front-end base URL
    :param brief: brief dict; reads 'id', 'title', 'lotSlug', 'frameworkSlug' and 'users'
    :return: True when every email was sent, False when sending failed with an
        ``EmailError``; implicitly None when the brief has no users
    :raises EmailTemplateError: re-raised immediately — a broken template will fail
        for every recipient, so there is no point continuing
    """
    logger.info("Notifying users about brief ID: {brief_id} - '{brief_title}'", extra={
        'brief_title': brief['title'],
        'brief_id': brief['id']
    })
    email_client = DMNotifyClient(email_api_key, logger=logger)
    if brief['users']:
        try:
            # Fix: the format string has no {brief_title} placeholder, so the previously
            # supplied brief_title kwarg was dead and has been removed.
            brief_responses_url = \
                "{base_url}/buyers/frameworks/{framework_slug}/requirements/{lot_slug}/{brief_id}/responses".format(
                    base_url=get_web_url_from_stage(stage),
                    brief_id=brief["id"],
                    lot_slug=brief["lotSlug"],
                    framework_slug=brief["frameworkSlug"],
                )
            for email_address in (user['emailAddress'] for user in brief['users'] if user['active']):
                email_client.send_email(
                    email_address,
                    template_name_or_id=EMAIL_TEMPLATE_ID,
                    personalisation={
                        "brief_title": brief["title"],
                        "brief_responses_url": brief_responses_url,
                    },
                    # allow_resend=False: presumably guards against duplicate sends on
                    # re-runs of this job — confirm against DMNotifyClient behaviour
                    allow_resend=False,
                )
            return True
        except EmailError as e:
            logger.error("Email failed to send for brief_id: {brief_id}", extra={
                'error': e,
                'brief_id': brief['id']
            })
            if isinstance(e, EmailTemplateError):
                raise  # do not try to continue
            return False
def test_constructor_can_retrieve_api_key_from_app_config(self, app):
    """With no explicit key, the constructor falls back to DM_NOTIFY_API_KEY in the app config."""
    api_key = "notify-api-key-" + _test_api_key
    app.config["DM_NOTIFY_API_KEY"] = api_key
    with mock.patch("dmutils.email.dm_notify.DMNotifyClient._client_class") as client_class_mock, \
            app.app_context():
        DMNotifyClient()
    client_class_mock.assert_called_with(api_key, mock.ANY)
def test_send_email_allows_resend_to_replacement_addresses(self, app):
    """
    Two different recipients that redirect to the same replacement address must not be
    treated as resends of one another, even with allow_resend=False.
    """
    with app.app_context():
        client = DMNotifyClient(
            _test_api_key,
            redirect_domains_to_address={"example.gov.uk": "*****@*****.**"},
        )
        with mock.patch(self.client_class_str + ".send_email_notification") as mocked_send, \
                mock.patch(self.client_class_str + ".get_all_notifications") as mocked_get_all:
            mocked_send.return_value = {'id': 'example-id'}
            mocked_get_all.return_value = {"notifications": []}

            # The first send_email call primes the sent-reference cache via get_all_notifications.
            client.send_email("*****@*****.**", self.template_id, allow_resend=False)
            client.send_email("*****@*****.**", self.template_id, allow_resend=False)

            expected_calls = [
                mock.call(
                    "*****@*****.**",
                    self.template_id,
                    personalisation=None,
                    # references differ because they're derived from the original, pre-redirect address
                    reference="oq2Xi6D6ymviEtVK8Gr9I0675Q8KcjfAz3IO9sfX8a0=",
                    email_reply_to_id=None,
                ),
                mock.call(
                    "*****@*****.**",
                    self.template_id,
                    personalisation=None,
                    reference='Q_0wRa57Pj4BEIWGop9gOLoxhkCsVMsE2UOZeOnZyas=',
                    email_reply_to_id=None,
                ),
            ]
            assert mocked_send.call_args_list == expected_calls
def send_brief_clarification_question(data_api_client, brief, clarification_question):
    """Email a supplier's clarification question to the brief owners, audit it, and
    send the supplier a confirmation copy.

    Aborts the request with a 503 if the email to any brief owner fails; a failure
    of the supplier's confirmation copy is only logged (the question has already
    been delivered and audited by then).

    :param data_api_client: API client used to record the audit event
    :param brief: brief dict; reads 'framework', 'lotSlug', 'id', 'title' and
        'clarificationQuestionsPublishedBy'
    :param clarification_question: raw question text (HTML-escaped before sending)
    """
    questions_url = (
        get_web_url_from_stage(current_app.config["DM_ENVIRONMENT"])
        + url_for('external.supplier_questions',
                  framework_slug=brief["framework"]['slug'],
                  lot_slug=brief["lotSlug"],
                  brief_id=brief["id"]))

    notify_client = DMNotifyClient(current_app.config['DM_NOTIFY_API_KEY'])

    # Email the question to brief owners
    for email_address in get_brief_user_emails(brief):
        try:
            notify_client.send_email(
                email_address,
                template_name_or_id=current_app.config['NOTIFY_TEMPLATES']['clarification_question'],
                personalisation={
                    "brief_title": brief['title'],
                    "brief_name": brief['title'],
                    # escape the user-supplied question before it goes into the email body
                    "message": escape(clarification_question),
                    "publish_by_date": dateformat(brief['clarificationQuestionsPublishedBy']),
                    "questions_url": questions_url
                },
                reference="clarification-question-{}".format(hash_string(email_address)))
        except EmailError as e:
            current_app.logger.error(
                "Brief question email failed to send. error={error} supplier_id={supplier_id} brief_id={brief_id}",
                extra={
                    'error': six.text_type(e),
                    'supplier_id': current_user.supplier_id,
                    'brief_id': brief['id']
                })
            # owners not being notified is fatal for this request
            abort(503, "Clarification question email failed to send")

    data_api_client.create_audit_event(
        audit_type=AuditTypes.send_clarification_question,
        user=current_user.email_address,
        object_type="briefs",
        object_id=brief['id'],
        data={
            "question": clarification_question,
            "briefId": brief['id'],
            "supplierId": current_user.supplier_id
        })

    brief_url = (
        get_web_url_from_stage(current_app.config["DM_ENVIRONMENT"])
        + url_for('external.get_brief_by_id',
                  framework_family=brief['framework']['family'],
                  brief_id=brief['id']))

    # Send the supplier a copy of the question
    try:
        notify_client.send_email(
            current_user.email_address,
            template_name_or_id=current_app.config["NOTIFY_TEMPLATES"]["clarification_question_confirmation"],
            personalisation={
                "brief_name": brief['title'],
                "message": escape(clarification_question),
                "brief_url": brief_url,
            },
            reference="clarification-question-confirmation-{}".format(hash_string(current_user.email_address)))
    except EmailError as e:
        # best-effort: log but do not abort — the question itself was already sent and audited
        current_app.logger.error(
            "Brief question supplier email failed to send. error={error} supplier_id={supplier_id} "
            "brief_id={brief_id}",
            extra={
                'error': six.text_type(e),
                'supplier_id': current_user.supplier_id,
                'brief_id': brief['id']
            })
def dm_notify_client(app):
    """Fixture: a DMNotifyClient constructed inside an app context."""
    with app.app_context():
        client = DMNotifyClient(_test_api_key)
    return client
def scan_and_tag_s3_object(
    s3_client,
    s3_bucket_name,
    s3_object_key,
    s3_object_version,
    sns_message_id=None,
):
    """Virus-scan a single S3 object version with clamd and record the verdict as object tags.

    If the version already carries an ``avStatus.result`` tag it is not re-scanned. Otherwise
    the object body is streamed through clamd, the result is written back as a set of
    ``avStatus.``-prefixed tags (unless another process tagged it while we were scanning), and
    a developer alert email is sent for any non-"OK" result.

    :param s3_client: boto3-style S3 client (get/put object tagging, get_object)
    :param s3_bucket_name: bucket containing the object
    :param s3_object_key: key of the object
    :param s3_object_version: specific version to scan
    :param sns_message_id: optional id of the triggering SNS message, included in alert emails
    :return: 3-tuple ``(av_status, applied, new_av_status)``: the pre-existing
        ``avStatus.`` tag values, whether this call wrote its own tags, and the tag
        dict this call computed (None when no scan was performed)
    :raises UnknownClamdError: if clamd reports an "ERROR" result for the stream
    """
    with logged_duration(
        logger=current_app.logger,
        message=lambda _: (
            "Handled bucket {s3_bucket_name} key {s3_object_key} version {s3_object_version}"
            if sys.exc_info()[0] is None else
            # need to literally format() the exception into message as it's difficult to get it injected into extra
            "Failed handling {{s3_bucket_name}} key {{s3_object_key}} version {{s3_object_version}}: {!r}".format(
                sys.exc_info()[1])),
        log_level=logging.INFO,
        condition=True,
    ) as log_context:
        # common logging fields reused by every inner logging context below
        base_log_context = {
            "s3_bucket_name": s3_bucket_name,
            "s3_object_key": s3_object_key,
            "s3_object_version": s3_object_version,
        }
        log_context.update(base_log_context)

        # TODO abort if file too big?

        with log_external_request(
            "S3",
            "get object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            tagging_tag_set = s3_client.get_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )["TagSet"]

        av_status = _prefixed_tag_values_from_tag_set(tagging_tag_set, "avStatus.")

        if av_status.get("avStatus.result") is None:
            current_app.logger.info(
                "Object version {s3_object_version} has no 'avStatus.result' tag - will scan...",
                extra={
                    **base_log_context,
                    "av_status": av_status,
                },
            )
        else:
            # already scanned by a previous invocation - nothing to do
            current_app.logger.info(
                "Object version {s3_object_version} already has 'avStatus.result' "
                "tag: {existing_av_status_result!r}",
                extra={
                    **base_log_context,
                    "existing_av_status_result": av_status["avStatus.result"],
                    "existing_av_status": av_status,
                },
            )
            return av_status, False, None

        clamd_client = get_clamd_socket()
        # first check our clamd is available - there's no point in going and fetching the object if we can't do
        # anything with it. Allow a raised exception to bubble up as a 500, which seems the most appropriate thing
        # in this case
        clamd_client.ping()

        # the following two requests (to S3 for the file contents and to clamd for scanning) don't really happen
        # sequentially as we're going to attempt to stream the data received from one into the other (by passing
        # the StreamingBody file-like object from this response into .instream(...)), so these logged_duration
        # sections do NOT *directly* correspond to the file being downloaded and then the file being scanned. The
        # two activities will overlap in time, something that isn't expressible with logged_duration
        with log_external_request(
            "S3",
            "initiate object download [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            s3_object = s3_client.get_object(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )

        file_name = _filename_from_content_disposition(s3_object.get("ContentDisposition") or "")

        with logged_duration(
            logger=current_app.logger,
            message=lambda _: (
                "Scanned {file_length}byte file '{file_name}', result {clamd_result}"
                if sys.exc_info()[0] is None else
                # need to literally format() exception into message as it's difficult to get it injected into extra
                "Failed scanning {{file_length}}byte file '{{file_name}}': {!r}".format(
                    sys.exc_info()[1])),
            log_level=logging.INFO,
            condition=True,
        ) as log_context_clamd:
            log_context_clamd.update({
                "file_length": s3_object["ContentLength"],
                "file_name": file_name or "<unknown>",
            })
            clamd_result = clamd_client.instream(s3_object["Body"])["stream"]
            log_context_clamd["clamd_result"] = clamd_result

        if clamd_result[0] == "ERROR":
            # let's hope this was a transient error and a later attempt may succeed. hard to know what else to do
            # in this case - tagging a file with "ERROR" would prevent further attempts.
            raise UnknownClamdError(f"clamd did not successfully scan file: {clamd_result!r}")

        with logged_duration(
            logger=current_app.logger,
            message=lambda _: (
                "Fetched clamd version string: {clamd_version}"
                if sys.exc_info()[0] is None else
                # need to literally format() exception into message as it's difficult to get it injected into extra
                "Failed fetching clamd version string: {!r}".format(
                    sys.exc_info()[1])),
            log_level=logging.DEBUG,
        ) as log_context_clamd:
            # hypothetically there is a race condition between the time of scanning the file and fetching the
            # version here when freshclam could give us a new definition file, making this information incorrect,
            # but it's a very small possibility
            clamd_version = clamd_client.version()
            log_context_clamd.update({"clamd_version": clamd_version})

        # we namespace all keys set as part of an avStatus update with an "avStatus." prefix, intending that all
        # of these keys are only ever set or removed together as they are all information about the same scanning
        # decision
        new_av_status = {
            "avStatus.result": "pass" if clamd_result[0] == "OK" else "fail",
            "avStatus.clamdVerStr": clamd_version,
            "avStatus.ts": datetime.datetime.utcnow().isoformat(),
        }

        # Now we briefly re-check the object's tags to ensure they weren't set by something else while we were
        # scanning. Note the impossibility of avoiding all possible race conditions as S3's API doesn't allow any
        # form of locking. What we *can* do is make the possible time period between check-tags and set-tags as
        # small as possible...
        with log_external_request(
            "S3",
            "get object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            tagging_tag_set = s3_client.get_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )["TagSet"]

        av_status = _prefixed_tag_values_from_tag_set(tagging_tag_set, "avStatus.")

        if av_status.get("avStatus.result") is not None:
            # lost the race: keep the other writer's verdict, report ours as unapplied
            current_app.logger.warning(
                "Object was tagged with new 'avStatus.result' ({existing_av_status_result!r}) while we were "
                "scanning. Not applying our own 'avStatus' ({unapplied_av_status_result!r})",
                extra={
                    "existing_av_status_result": av_status["avStatus.result"],
                    "unapplied_av_status_result": new_av_status["avStatus.result"],
                    "existing_av_status": av_status,
                    "unapplied_av_status": new_av_status,
                },
            )
            return av_status, False, new_av_status

        # replace any partial/stale avStatus.* tags wholesale with our new set
        tagging_tag_set = _tag_set_updated_with_dict(
            _tag_set_omitting_prefixed(tagging_tag_set, "avStatus."),
            new_av_status,
        )

        with log_external_request(
            "S3",
            "put object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            s3_client.put_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
                Tagging={"TagSet": tagging_tag_set},
            )

        if clamd_result[0] != "OK":
            # TODO? attempt to rectify the situation:
            # TODO? if this is (still) current version of object:
            # TODO? S3: find most recent version of object which is tagged "good"
            # TODO? if there is no such version:
            # TODO? S3: upload fail whale?
            # TODO? else copy that version to become new "current" ver for this key, ensuring to copy its tags
            # TODO? note the impossibility of doing this without some race conditions

            notify_client = DMNotifyClient(current_app.config["DM_NOTIFY_API_KEY"])

            if (len(clamd_result) >= 2 and clamd_result[1].lower() in map(
                    str.lower, current_app.config["DM_EICAR_TEST_SIGNATURE_RESULT_STRINGS"])):
                notify_kwargs = {
                    # we'll use the s3 ETag of the file as the notify ref - it will be the only piece of information
                    # that will be shared knowledge between a functional test and the application yet also allow the
                    # test to differentiate the results of its different test runs, allowing it to easily check for
                    # the message being sent
                    "reference": "eicar-found-{}-{}".format(
                        _normalize_hex(s3_object["ETag"]),
                        current_app.config["DM_ENVIRONMENT"],
                    ),
                    "to_email_address": current_app.config["DM_EICAR_TEST_SIGNATURE_VIRUS_ALERT_EMAIL"],
                }
            else:
                notify_kwargs = {
                    "to_email_address": current_app.config["DM_DEVELOPER_VIRUS_ALERT_EMAIL"],
                }

            try:
                notify_client.send_email(
                    template_name_or_id="developer_virus_alert",
                    personalisation={
                        "bucket_name": s3_bucket_name,
                        "object_key": s3_object_key,
                        "object_version": s3_object_version,
                        "file_name": file_name or "<unknown>",
                        "clamd_output": ", ".join(clamd_result),
                        "sns_message_id": sns_message_id or "<N/A>",
                        "dm_trace_id": getattr(request, "trace_id", None) or "<unknown>",
                    },
                    **notify_kwargs,
                )
            except EmailError as e:
                current_app.logger.error(
                    "Failed to send developer_virus_alert email after scanning "
                    "{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}: {e}",
                    extra={
                        **base_log_context,
                        "e": str(e),
                    },
                )
                # however we probably don't want this to cause a 500 because the main task has been completed -
                # retrying it won't work and e.g. we will want to signify to SNS that it should not attempt to
                # re-send this message

        return av_status, True, new_av_status