def send_email(self,
               to_email_address,
               template_name_or_id,
               personalisation=None,
               allow_resend=True,
               reference=None,
               reply_to_address_id=None):
    """
    Method to send an email using the Notify api.

    :param to_email_address: String email address for recipient
    :param template_name_or_id: Template accessible on the Notify account, can either be a key to the
        `templates` dictionary or a Notify template ID.
    :param personalisation: The template variables, dict
    :param allow_resend: if False instantiate the delivered reference cache and ensure we are not sending
        duplicates
    :param reference: optional explicit Notify reference; when omitted a deterministic one is derived
        from the recipient, template and personalisation
    :param reply_to_address_id: String id of reply-to email address. Must be set up in Notify config before use
    :return: response from the api (None if a duplicate send was suppressed). For more information see
        https://github.com/alphagov/notifications-python-client
    :raises EmailError: if the Notify API call fails with an HTTPError
    """
    # Resolve a friendly template name to its Notify template ID; a value not present in the
    # mapping is assumed to already be a template ID.
    template_id = self.templates.get(template_name_or_id, template_name_or_id)
    # Deterministic reference used below for duplicate detection.
    reference = reference or self.get_reference(to_email_address, template_id, personalisation)

    if not allow_resend and self.has_been_sent(reference):
        self.logger.info(
            "Email with reference '{reference}' has already been sent",
            extra=dict(
                client=self.client.__class__,
                # email address is hashed so we don't log PII
                to_email_address=hash_string(to_email_address),
                template_name_or_id=template_name_or_id,
                reference=reference,
                reply_to_address_id=reply_to_address_id,
            ),
        )
        return

    # NOTE how the potential replacement of the email address happens *after* the has_been_sent check and
    # reference generation
    final_email_address = (
        self._redirect_domains_to_address
        and self._redirect_domains_to_address.get(
            # splitting at rightmost @ should reliably give us the domain
            to_email_address.rsplit("@", 1)[-1].lower()
        )
    ) or to_email_address

    try:
        with log_external_request(service='Notify'):
            response = self.client.send_email_notification(
                final_email_address,
                template_id,
                personalisation=personalisation,
                reference=reference,
                email_reply_to_id=reply_to_address_id)
    except HTTPError as e:
        self._log_email_error_message(to_email_address, template_name_or_id, reference, e)
        raise EmailError(str(e))

    # record the reference so a later allow_resend=False call can detect the duplicate
    self._update_cache(reference)
    return response
def subscribe_new_emails_to_list(self, list_id: str, email_addresses: Sequence[str]) -> bool:
    """Subscribe each address in ``email_addresses`` to the Mailchimp list ``list_id``.

    Failures for individual addresses do not stop the loop - the remaining
    addresses are still attempted and the failure is reflected in the result.

    :param list_id: Mailchimp list (audience) id.
    :param email_addresses: the email addresses to subscribe.
        (Annotation fixed: this was declared ``str`` but is iterated as a
        collection of addresses.)
    :return: True only if every subscription call reported success.
    """
    success = True
    for email_address in email_addresses:
        with log_external_request(service='Mailchimp'):
            if not self.subscribe_new_email_to_list(list_id, email_address):
                success = False
    return success
def wrapper(*args, **kwargs):
    """Invoke ``method``, retrying on gateway-timeout (504) responses.

    Makes up to ``1 + self.retries`` attempts. Any HTTPError whose response
    status is not 504 is re-raised immediately; if every attempt times out,
    the last 504 error is re-raised once the attempts are exhausted.
    """
    attempts_remaining = 1 + self.retries
    while attempts_remaining > 0:
        attempts_remaining -= 1
        try:
            with log_external_request(service='Mailchimp'):
                return method(*args, **kwargs)
        except HTTPError as e:
            exception = e
            # only a gateway timeout is considered transient and retried
            if exception.response.status_code != 504:
                raise exception
    raise exception
def get_lists_for_email(self, email_address: str) -> Sequence[Mapping]:
    """
    Returns a sequence of all lists the email_address has an association with (note: even if that
    association is "unsubscribed" or "cleaned").
    """
    with log_external_request(service='Mailchimp'):
        matching_lists = self._client.lists.all(get_all=True, email=email_address)["lists"]
        return tuple(
            {"list_id": mailing_list["id"], "name": mailing_list["name"]}
            for mailing_list in matching_lists
        )
def send_campaign(self, campaign_id: str):
    """Ask Mailchimp to send the campaign ``campaign_id``.

    :return: True on success; False (after logging the error) if the
        Mailchimp call fails.
    """
    try:
        with log_external_request(service='Mailchimp'):
            self._client.campaigns.actions.send(campaign_id)
    except (RequestException, MailChimpError) as e:
        # log-and-signal rather than raise: callers only need a boolean outcome
        self.logger.error(
            "Mailchimp failed to send campaign id '{0}'".format(campaign_id),
            extra={
                "error": str(e),
                "mailchimp_response": get_response_from_exception(e),
            })
        return False
    return True
def set_campaign_content(self, campaign_id: str, content_data: Mapping):
    """Replace the content of campaign ``campaign_id`` with ``content_data``.

    :return: the Mailchimp API response on success; False (after logging
        the error) if the call fails.
    """
    try:
        with log_external_request(service='Mailchimp'):
            updated = self._client.campaigns.content.update(campaign_id, content_data)
    except (RequestException, MailChimpError) as e:
        self.logger.error(
            "Mailchimp failed to set content for campaign id '{0}'".format(campaign_id),
            extra={
                "error": str(e),
                "mailchimp_response": get_response_from_exception(e),
            },
        )
        return False
    return updated
def create_campaign(self, campaign_data: Mapping) -> Union[str, bool]:
    """Create a Mailchimp campaign described by ``campaign_data``.

    :return: the new campaign's id string on success; False (after logging
        the error) if the call fails.
    """
    try:
        with log_external_request(service='Mailchimp'):
            new_campaign = self._client.campaigns.create(campaign_data)
    except (RequestException, MailChimpError) as e:
        self.logger.error(
            "Mailchimp failed to create campaign for '{campaign_title}'".format(
                campaign_title=campaign_data.get("settings", {}).get("title")),
            extra={
                "error": str(e),
                "mailchimp_response": get_response_from_exception(e),
            },
        )
        return False
    return cast(str, new_campaign['id'])
def permanently_remove_email_from_list(self, email_address: str, list_id: str) -> bool:
    """ Permanently (very permanently) erases all trace of an email address from a given list """
    hashed_email = self.get_email_hash(email_address)
    try:
        with log_external_request(service='Mailchimp'):
            # Mailchimp addresses members by the hash of their email, not the address itself
            self._client.lists.members.delete_permanent(
                list_id=list_id,
                subscriber_hash=hashed_email,
            )
    except (RequestException, MailChimpError) as e:
        self.logger.error(
            f"Mailchimp failed to permanently remove user ({hashed_email}) from list ({list_id})",
            extra={
                "error": str(e),
                "mailchimp_response": get_response_from_exception(e)
            },
        )
        return False
    return True
def get_all_notifications(self, **kwargs):
    """Wrapper for notifications_python_client.notifications.NotificationsAPIClient::get_all_notifications"""
    with log_external_request(service='Notify'):
        # unwrap the API envelope and hand back just the notifications list
        api_response = self.client.get_all_notifications(**kwargs)
        return api_response['notifications']
def subscribe_new_email_to_list(self, list_id: str, email_address: str):
    """
    Will subscribe email address to list if they do not already exist in that list else do nothing.

    Possible return values:
      True:          User error, e.g. already on list, fake/invalid email (status 4xx)
      False:         Unexpected error, e.g. cannot connect to Mailchimp (status 5xx)
      deleted_user:  Mailchimp can't subscribe a deleted user (status 400)
      anything else: User successfully subscribed (status 200)

    NOTE(review): the implementation below actually returns a dict in every branch (with "status",
    "error_type" and "status_code" keys); the True/False/deleted_user values described above look
    like a legacy description - confirm against callers before relying on either shape.
    """
    # Mailchimp addresses members by the hash of their email, not the address itself
    hashed_email = self.get_email_hash(email_address)
    try:
        with log_external_request(service='Mailchimp'):
            resp = self._client.lists.members.create_or_update(
                list_id, hashed_email, {
                    "email_address": email_address,
                    "status_if_new": "subscribed"
                })
            # annotate the successful Mailchimp response so callers get a uniform shape
            resp.update({
                "status": "success",
                "error_type": None,
                "status_code": 200
            })
            return resp
    except (RequestException, MailChimpError) as e:
        # Some errors we don't care about but do want to log. Find and log them here.
        # Each branch matches on the exact error message text Mailchimp returns in "detail".
        response = get_response_from_exception(e)
        if "looks fake or invalid, please enter a real email address." in response.get("detail", ""):
            # As defined in mailchimp API documentation, this particular error message may arise if a user has
            # requested mailchimp to never add them to mailchimp lists. In this case, we resort to allowing a
            # failed API call (but log) as a user of this method would unlikely be able to do anything as we have
            # no control over this behaviour.
            self.logger.warning(
                f"Expected error: Mailchimp failed to add user ({hashed_email}) to list ({list_id}). "
                "API error: The email address looks fake or invalid, please enter a real email address.",
                extra={
                    "error": str(e),
                    "mailchimp_response": response
                })
            return {
                "status": "error",
                "error_type": "invalid_email",
                "status_code": 400
            }
        elif 'is already a list member.' in response.get("detail", ""):
            # If a user is already a list member we receive a 400 error as documented in the tests for this error
            self.logger.warning(
                f"Expected error: Mailchimp failed to add user ({hashed_email}) to list ({list_id}). "
                "API error: This email address is already subscribed.",
                extra={
                    "error": str(e),
                    "mailchimp_response": response
                })
            return {
                "status": "error",
                "error_type": "already_subscribed",
                "status_code": 400
            }
        elif 'The contact must re-subscribe to get back on the list.' in response.get('detail', ''):
            # User has been deleted and cannot be programmatically resubscribed
            self.logger.warning(
                f"Expected error: Mailchimp cannot automatically subscribe user ({hashed_email}) to list "
                f"({list_id}) as the user has been permanently deleted.",
                extra={
                    "error": str(e),
                    "mailchimp_response": response
                })
            return {
                "status": "error",
                "error_type": "deleted_user",
                "status_code": 400
            }
        elif response.get('status') == 400:
            # Some other validation error
            self.logger.warning(
                f"Expected error: Mailchimp failed to add user ({hashed_email}) to list ({list_id}). "
                "API error: The email address was invalid.",
                extra={
                    "error": str(e),
                    "mailchimp_response": response
                })
            return {
                "status": "error",
                "error_type": "invalid_email",
                "status_code": 400
            }

        # Otherwise this was an unexpected error and should be logged as such
        self.logger.error(
            f"Mailchimp failed to add user ({hashed_email}) to list ({list_id})",
            extra={
                "error": str(e),
                "mailchimp_response": response
            })
        return {
            "status": "error",
            "error_type": "unexpected_error",
            "status_code": 500
        }
def send_email(self,
               to_email_address,
               template_name_or_id,
               personalisation=None,
               allow_resend=True,
               reference=None,
               reply_to_address_id=None,
               use_recent_cache=True):
    """
    Method to send an email using the Notify api.

    :param to_email_address: String email address for recipient
    :param template_name_or_id: Template accessible on the Notify account, can either be a key to the
        `templates` dictionary or a Notify template ID.
    :param personalisation: The template variables, dict
    :param allow_resend: if False instantiate the delivered reference cache and ensure we are not sending
        duplicates
    :param reference: optional explicit Notify reference; when omitted a deterministic one is derived
        from the recipient, template and personalisation
    :param reply_to_address_id: String id of reply-to email address. Must be set up in Notify config before use
    :param use_recent_cache: Use the client's cache of recently sent references. If set to False, any
        has_been_sent() calls will check the reference in the Notify API directly
    :return: response from the api (None if a duplicate send was suppressed). For more information see
        https://github.com/alphagov/notifications-python-client
    :raises EmailTemplateError: if Notify reports missing personalisation for the template
    :raises EmailError: for any other HTTPError from the Notify API
    """
    # Resolve a friendly template name to its Notify template ID; a value not present in the
    # mapping is assumed to already be a template ID.
    template_id = self.templates.get(template_name_or_id, template_name_or_id)
    # Deterministic reference used for duplicate detection and Notify-side lookups.
    reference = reference or self.get_reference(to_email_address, template_id, personalisation)
    email_obj = DMNotifyEmail(to_email_address, template_name_or_id, reference, personalisation)

    if not allow_resend and self.has_been_sent(reference, use_recent_cache=use_recent_cache):
        self._log(
            logging.WARNING,
            "Email with reference '{reference}' has already been sent",
            email_obj,
        )
        return

    # NOTE how the potential replacement of the email address happens *after* the has_been_sent check and
    # reference generation
    final_email_address = (
        self._redirect_domains_to_address
        and self._redirect_domains_to_address.get(
            # splitting at rightmost @ should reliably give us the domain
            to_email_address.rsplit("@", 1)[-1].lower()
        )
    ) or to_email_address

    try:
        with log_external_request(service='Notify'):
            response = self.client.send_email_notification(
                final_email_address,
                template_id,
                personalisation=personalisation,
                reference=reference,
                email_reply_to_id=reply_to_address_id)
    except HTTPError as e:
        self._log_email_error_message(email_obj, e)
        # A "Missing personalisation" message from Notify means the supplied template variables
        # don't match the template - surface that as the more specific EmailTemplateError.
        if isinstance(e.message, list) and \
                any(msg["message"].startswith("Missing personalisation") for msg in e.message):
            raise EmailTemplateError(str(e))
        raise EmailError(str(e))

    self._log(
        logging.INFO,
        f"Email with reference '{reference}' sent to Notify successfully",
        email_obj)

    # record the reference so a later allow_resend=False call can detect the duplicate
    self._update_cache(reference)
    return response
def scan_and_tag_s3_object(
    s3_client,
    s3_bucket_name,
    s3_object_key,
    s3_object_version,
    sns_message_id=None,
):
    """
    Virus-scan a specific version of an S3 object with clamd and record the verdict as S3 object tags.

    The object's existing ``avStatus.*`` tags are checked first: if a result is already present the
    object is left untouched. Otherwise the object body is streamed through clamd and the verdict
    ("pass"/"fail"), the clamd version string and a timestamp are written back as ``avStatus.``-prefixed
    tags. If the file failed the scan, a "developer_virus_alert" email is sent via Notify.

    :param s3_client: boto3-style S3 client used for object download and get/put object tagging
    :param s3_bucket_name: bucket containing the object
    :param s3_object_key: key of the object
    :param s3_object_version: specific version id of the object to scan
    :param sns_message_id: optional id of the triggering SNS message, included in the alert email
    :return: 3-tuple ``(av_status, applied, new_av_status)``: the pre-existing ``avStatus.`` tag dict,
        whether our new tags were actually written, and the tag dict we determined (None if we
        returned before scanning).
    :raises UnknownClamdError: if clamd reports "ERROR" for the scan
    """
    with logged_duration(
        logger=current_app.logger,
        message=lambda _: (
            "Handled bucket {s3_bucket_name} key {s3_object_key} version {s3_object_version}"
            if sys.exc_info()[0] is None else
            # need to literally format() the exception into message as it's difficult to get it injected into extra
            "Failed handling {{s3_bucket_name}} key {{s3_object_key}} version {{s3_object_version}}: {!r}".format(
                sys.exc_info()[1])),
        log_level=logging.INFO,
        condition=True,
    ) as log_context:
        base_log_context = {
            "s3_bucket_name": s3_bucket_name,
            "s3_object_key": s3_object_key,
            "s3_object_version": s3_object_version,
        }
        log_context.update(base_log_context)

        # TODO abort if file too big?

        with log_external_request(
            "S3",
            "get object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            tagging_tag_set = s3_client.get_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )["TagSet"]

        av_status = _prefixed_tag_values_from_tag_set(tagging_tag_set, "avStatus.")

        if av_status.get("avStatus.result") is None:
            current_app.logger.info(
                "Object version {s3_object_version} has no 'avStatus.result' tag - will scan...",
                extra={
                    **base_log_context,
                    "av_status": av_status,
                },
            )
        else:
            # a verdict has already been recorded for this version - nothing for us to do
            current_app.logger.info(
                "Object version {s3_object_version} already has 'avStatus.result' "
                "tag: {existing_av_status_result!r}",
                extra={
                    **base_log_context,
                    "existing_av_status_result": av_status["avStatus.result"],
                    "existing_av_status": av_status,
                },
            )
            return av_status, False, None

        clamd_client = get_clamd_socket()
        # first check our clamd is available - there's no point in going and fetching the object if we can't do
        # anything with it. allow a raised exception to bubble up as a 500, which seems the most appropriate thing
        # in this case
        clamd_client.ping()

        # the following two requests (to S3 for the file contents and to clamd for scanning) don't really happen
        # sequentially as we're going to attempt to stream the data received from one into the other (by passing
        # the StreamingBody file-like object from this response into .instream(...)), so these logged_duration
        # sections do NOT *directly* correspond to the file being downloaded and then the file being scanned. The
        # two activities will overlap in time, something that isn't expressible with logged_duration
        with log_external_request(
            "S3",
            "initiate object download [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            s3_object = s3_client.get_object(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )

        file_name = _filename_from_content_disposition(s3_object.get("ContentDisposition") or "")

        with logged_duration(
            logger=current_app.logger,
            message=lambda _: (
                "Scanned {file_length}byte file '{file_name}', result {clamd_result}"
                if sys.exc_info()[0] is None else
                # need to literally format() exception into message as it's difficult to get it injected into extra
                "Failed scanning {{file_length}}byte file '{{file_name}}': {!r}".format(
                    sys.exc_info()[1])),
            log_level=logging.INFO,
            condition=True,
        ) as log_context_clamd:
            log_context_clamd.update({
                "file_length": s3_object["ContentLength"],
                "file_name": file_name or "<unknown>",
            })
            clamd_result = clamd_client.instream(s3_object["Body"])["stream"]
            log_context_clamd["clamd_result"] = clamd_result

        if clamd_result[0] == "ERROR":
            # let's hope this was a transient error and a later attempt may succeed. hard to know what else to do
            # in this case - tagging a file with "ERROR" would prevent further attempts.
            raise UnknownClamdError(f"clamd did not successfully scan file: {clamd_result!r}")

        with logged_duration(
            logger=current_app.logger,
            message=lambda _: (
                "Fetched clamd version string: {clamd_version}"
                if sys.exc_info()[0] is None else
                # need to literally format() exception into message as it's difficult to get it injected into extra
                "Failed fetching clamd version string: {!r}".format(
                    sys.exc_info()[1])),
            log_level=logging.DEBUG,
        ) as log_context_clamd:
            # hypothetically there is a race condition between the time of scanning the file and fetching the
            # version here when freshclam could give us a new definition file, making this information incorrect,
            # but it's a very small possibility
            clamd_version = clamd_client.version()
            log_context_clamd.update({"clamd_version": clamd_version})

        # we namespace all keys set as part of an avStatus update with an "avStatus." prefix, intending that all
        # of these keys are only ever set or removed together as they are all information about the same scanning
        # decision
        new_av_status = {
            "avStatus.result": "pass" if clamd_result[0] == "OK" else "fail",
            "avStatus.clamdVerStr": clamd_version,
            "avStatus.ts": datetime.datetime.utcnow().isoformat(),
        }

        # Now we briefly re-check the object's tags to ensure they weren't set by something else while we were
        # scanning. Note the impossibility of avoiding all possible race conditions as S3's API doesn't allow any
        # form of locking. What we *can* do is make the possible time period between check-tags and set-tags as
        # small as possible...
        with log_external_request(
            "S3",
            "get object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            tagging_tag_set = s3_client.get_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
            )["TagSet"]

        av_status = _prefixed_tag_values_from_tag_set(tagging_tag_set, "avStatus.")
        if av_status.get("avStatus.result") is not None:
            # we lost the race: leave the other writer's tags in place and report ours as unapplied
            current_app.logger.warning(
                "Object was tagged with new 'avStatus.result' ({existing_av_status_result!r}) while we were "
                "scanning. Not applying our own 'avStatus' ({unapplied_av_status_result!r})",
                extra={
                    "existing_av_status_result": av_status["avStatus.result"],
                    "unapplied_av_status_result": new_av_status["avStatus.result"],
                    "existing_av_status": av_status,
                    "unapplied_av_status": new_av_status,
                },
            )
            return av_status, False, new_av_status

        # merge our new avStatus tags over the object's non-avStatus tags
        tagging_tag_set = _tag_set_updated_with_dict(
            _tag_set_omitting_prefixed(tagging_tag_set, "avStatus."),
            new_av_status,
        )

        with log_external_request(
            "S3",
            "put object tagging [{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}]",
            logger=current_app.logger,
        ) as log_context_s3:
            log_context_s3.update(base_log_context)
            s3_client.put_object_tagging(
                Bucket=s3_bucket_name,
                Key=s3_object_key,
                VersionId=s3_object_version,
                Tagging={"TagSet": tagging_tag_set},
            )

        if clamd_result[0] != "OK":
            # TODO? attempt to rectify the situation:
            # TODO?   if this is (still) current version of object:
            # TODO?     S3: find most recent version of object which is tagged "good"
            # TODO?     if there is no such version:
            # TODO?       S3: upload fail whale?
            # TODO?     else copy that version to become new "current" ver for this key, ensuring to copy its tags
            # TODO? note the impossibility of doing this without some race conditions

            notify_client = DMNotifyClient(current_app.config["DM_NOTIFY_API_KEY"])

            if (len(clamd_result) >= 2 and clamd_result[1].lower() in map(
                    str.lower, current_app.config["DM_EICAR_TEST_SIGNATURE_RESULT_STRINGS"])):
                notify_kwargs = {
                    # we'll use the s3 ETag of the file as the notify ref - it will be the only piece of information
                    # that will be shared knowledge between a functional test and the application yet also allow the
                    # test to differentiate the results of its different test runs, allowing it to easily check for
                    # the message being sent
                    "reference": "eicar-found-{}-{}".format(
                        _normalize_hex(s3_object["ETag"]),
                        current_app.config["DM_ENVIRONMENT"],
                    ),
                    "to_email_address": current_app.config["DM_EICAR_TEST_SIGNATURE_VIRUS_ALERT_EMAIL"],
                }
            else:
                notify_kwargs = {
                    "to_email_address": current_app.config["DM_DEVELOPER_VIRUS_ALERT_EMAIL"],
                }

            try:
                notify_client.send_email(
                    template_name_or_id="developer_virus_alert",
                    personalisation={
                        "bucket_name": s3_bucket_name,
                        "object_key": s3_object_key,
                        "object_version": s3_object_version,
                        "file_name": file_name or "<unknown>",
                        "clamd_output": ", ".join(clamd_result),
                        "sns_message_id": sns_message_id or "<N/A>",
                        "dm_trace_id": getattr(request, "trace_id", None) or "<unknown>",
                    },
                    **notify_kwargs,
                )
            except EmailError as e:
                current_app.logger.error(
                    "Failed to send developer_virus_alert email after scanning "
                    "{s3_bucket_name}/{s3_object_key} versionId {s3_object_version}: {e}",
                    extra={
                        **base_log_context,
                        "e": str(e),
                    },
                )
                # however we probably don't want this to cause a 500 because the main task has been completed - retrying
                # it won't work and e.g. we will want to signify to SNS that it should not attempt to re-send this
                # message

        return av_status, True, new_av_status