Example #1
def my_update_subtask_status(entry_id, current_task_id, new_subtask_status):
    """
    Check whether a subtask has been updated before really updating.

    Check whether a subtask which has been retried
    has had the retry already write its results here before the code
    that was invoking the retry had a chance to update this status.

    This is the norm in "eager" mode (used by tests) where the retry is called
    and run to completion before control is returned to the code that
    invoked the retry.  If the retries eventually end in failure (e.g. due to
    a maximum number of retries being attempted), the "eager" code will return
    the error for each retry as it is popped off the stack.  We want to just ignore
    the later updates that are called as the result of the earlier retries.

    This should not be an issue in production, where status is updated before
    a task is retried, and is then updated afterwards if the retry fails.
    """
    entry = InstructorTask.objects.get(pk=entry_id)
    subtask_dict = json.loads(entry.subtasks)
    subtask_status_info = subtask_dict['status']
    current_subtask_status = SubtaskStatus.from_dict(
        subtask_status_info[current_task_id])
    current_retry_count = current_subtask_status.get_retry_count()
    new_retry_count = new_subtask_status.get_retry_count()
    if current_retry_count <= new_retry_count:
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
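
The guard in the last two lines is the whole point: an update is applied only when it carries at least as many retries as the entry has already recorded, so stale updates popped off the eager-retry stack are dropped. A standalone sketch of just that comparison (the counts are illustrative):

def should_apply(current_retry_count, new_retry_count):
    """Apply an update only if it is at least as recent as what is stored."""
    return current_retry_count <= new_retry_count

# The retry already recorded attempt 2, so the attempt-1 update that
# unwinds afterwards in eager mode is stale and gets ignored:
assert should_apply(current_retry_count=1, new_retry_count=2)
assert not should_apply(current_retry_count=2, new_retry_count=1)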
Example #2
 def test_send_email_retried_subtask(self):
     # test at a lower level, to ensure that the course gets checked down below too.
     entry = InstructorTask.create(self.course.id, "task_type", "task_key",
                                   "task_input", self.instructor)
     entry_id = entry.id
     subtask_id = "subtask-id-value"
     initialize_subtask_info(entry, "emailed", 100, [subtask_id])
     subtask_status = SubtaskStatus.create(subtask_id,
                                           state=RETRY,
                                           retried_nomax=2)
     update_subtask_status(entry_id, subtask_id, subtask_status)
     bogus_email_id = 1001
     to_list = ['*****@*****.**']
     global_email_context = {'course_title': 'dummy course'}
     # try running with a clean subtask:
     new_subtask_status = SubtaskStatus.create(subtask_id)
     with self.assertRaisesRegex(DuplicateTaskException, 'already retried'):
         send_course_email(entry_id, bogus_email_id,
                           to_list, global_email_context,
                           new_subtask_status.to_dict())
     # try again, with a retried subtask with lower count:
     new_subtask_status = SubtaskStatus.create(subtask_id,
                                               state=RETRY,
                                               retried_nomax=1)
     with self.assertRaisesRegex(DuplicateTaskException, 'already retried'):
         send_course_email(entry_id, bogus_email_id,
                           to_list, global_email_context,
                           new_subtask_status.to_dict())
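
Both failing calls trip the same duplicate check: an incoming status whose total retry count is lower than what the entry has already recorded. A standalone sketch of that comparison, assuming get_retry_count() sums the retried_nomax and retried_withmax counters described in Examples #8 and #9:

class StaleRetryError(Exception):
    """Stand-in for DuplicateTaskException('already retried')."""

def check_not_stale(recorded_counts, incoming_counts):
    # Each argument is a (retried_nomax, retried_withmax) pair.
    if sum(incoming_counts) < sum(recorded_counts):
        raise StaleRetryError('already retried')

check_not_stale((2, 0), (2, 0))      # equal counts: allowed
try:
    check_not_stale((2, 0), (0, 0))  # clean status after two retries: stale
except StaleRetryError as err:
    print(err)
try:
    check_not_stale((2, 0), (1, 0))  # lower retry count: stale
except StaleRetryError as err:
    print(err)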
Example #3
 def test_send_email_running_subtask(self):
     # test at a lower level, to ensure that the course gets checked down below too.
     entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
     entry_id = entry.id
     subtask_id = "subtask-id-value"
     initialize_subtask_info(entry, "emailed", 100, [subtask_id])
     subtask_status = SubtaskStatus.create(subtask_id)
     update_subtask_status(entry_id, subtask_id, subtask_status)
     check_subtask_is_valid(entry_id, subtask_id, subtask_status)
     bogus_email_id = 1001
     to_list = ['*****@*****.**']
     global_email_context = {'course_title': 'dummy course'}
     with self.assertRaisesRegex(DuplicateTaskException, 'already being executed'):
         send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
Example #7
 def dummy_update_subtask_status(entry_id, _current_task_id,
                                 new_subtask_status):
     """Passes a bad value for task_id to test update_subtask_status"""
     bogus_task_id = "this-is-bogus"
     update_subtask_status(entry_id, bogus_task_id, new_subtask_status)
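
The bogus id exercises the failure path of update_subtask_status: the id is used as a key into the entry's per-subtask status dict (the same lookup Example #1 performs), so an unknown id surfaces as a KeyError. A minimal illustration, assuming the real function does a comparable lookup:

# The entry's subtasks JSON keeps one status record per known subtask id:
subtask_status_info = {'subtask-id-value': {'state': 'QUEUING'}}

try:
    subtask_status_info['this-is-bogus']
except KeyError as err:
    print('unknown subtask id rejected:', err)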
Example #8
def _submit_for_retry(entry_id, email_id, to_list, global_email_context,
                      current_exception, subtask_status, skip_retry_max=False):
    """
    Helper function to requeue a task for retry, using the new version of arguments provided.

    Inputs are the same as for running a task, plus two extra indicating the state at the time of retry.
    These include the `current_exception` that the task encountered that is causing the retry attempt,
    and the `subtask_status` that is to be returned.  A third extra argument `skip_retry_max`
    indicates whether the current retry should be subject to a maximum test.

    Returns a tuple of two values:
      * First value is a dict which represents current progress.  Keys are:

        'task_id' : id of subtask.  This is used to pass task information across retries.
        'attempted' : number of attempts -- should equal succeeded plus failed
        'succeeded' : number that succeeded in processing
        'skipped' : number that were not processed.
        'failed' : number that failed during processing
        'retried_nomax' : number of times the subtask has been retried for conditions that
            should not have a maximum count applied
        'retried_withmax' : number of times the subtask has been retried for conditions that
            should have a maximum count applied
        'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

      * Second value is an exception returned by the innards of the method.  If the retry was
        successfully submitted, this value will be the RetryTaskError that retry() returns.
        Otherwise, it ought to be the `current_exception` passed in.
    """
    task_id = subtask_status.task_id
    log.info("Task %s: Successfully sent to %s users; failed to send to %s users (and skipped %s users)",
             task_id, subtask_status.succeeded, subtask_status.failed, subtask_status.skipped)

    # Calculate time until we retry this task (in seconds):
    # The value for max_retries is increased by the number of times an "infinite-retry" exception
    # has been retried.  We want the regular retries to trigger max-retry checking, but not these
    # special retries.  So we count them separately.
    max_retries = _get_current_task().max_retries + subtask_status.retried_nomax
    base_delay = _get_current_task().default_retry_delay
    if skip_retry_max:
        # once we reach five retries, don't increase the countdown further.
        retry_index = min(subtask_status.retried_nomax, 5)
        exception_type = 'sending-rate'
        # if we have a cap, after all, apply it now:
        if hasattr(settings, 'BULK_EMAIL_INFINITE_RETRY_CAP'):
            retry_cap = settings.BULK_EMAIL_INFINITE_RETRY_CAP + subtask_status.retried_withmax
            max_retries = min(max_retries, retry_cap)
    else:
        retry_index = subtask_status.retried_withmax
        exception_type = 'transient'

    # Skew the new countdown value by a random factor, so that not all
    # retries are deferred by the same amount.
    countdown = ((2 ** retry_index) * base_delay) * random.uniform(.75, 1.25)

    log.warning(('Task %s: email with id %d not delivered due to %s error %s, '
                 'retrying send to %d recipients in %s seconds (with max_retry=%s)'),
                task_id, email_id, exception_type, current_exception, len(to_list), countdown, max_retries)

    # we make sure that we update the InstructorTask with the current subtask status
    # *before* actually calling retry(), to be sure that there is no race
    # condition between this update and the update made by the retried task.
    update_subtask_status(entry_id, task_id, subtask_status)

    # Now attempt the retry.  If it succeeds, it returns a RetryTaskError that
    # needs to be returned back to Celery.  If it fails, we return the existing
    # exception.
    try:
        retry_task = send_course_email.retry(
            args=[
                entry_id,
                email_id,
                to_list,
                global_email_context,
                subtask_status.to_dict(),
            ],
            exc=current_exception,
            countdown=countdown,
            max_retries=max_retries,
            throw=True,
        )
        raise retry_task
    except RetryTaskError as retry_error:
        # If the retry call is successful, update with the current progress:
        log.info(
            'Task %s: email with id %d caused send_course_email task to retry again.',
            task_id,
            email_id
        )
        return subtask_status, retry_error
    except Exception as retry_exc:  # pylint: disable=broad-except
        # If there are no more retries, because the maximum has been reached,
        # we expect the original exception to be raised.  We catch it here
        # (and put it in retry_exc just in case it's different, but it shouldn't be),
        # and update status as if it were any other failure.  That means that
        # the recipients still in the to_list are counted as failures.
        log.exception('Task %s: email with id %d caused send_course_email task to fail to retry. To list: %s',
                      task_id, email_id, [i['email'] for i in to_list])
        num_failed = len(to_list)
        subtask_status.increment(failed=num_failed, state=FAILURE)
        return subtask_status, retry_exc
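
The countdown computed above is ordinary exponential backoff with +/-25% jitter. A standalone sketch of that arithmetic (the base delay is illustrative, not a value read from edx-platform settings, and the index cap of five mirrors only the sending-rate branch above):

import random

def retry_countdown(retry_index, base_delay=15, max_index=5):
    """Exponential backoff with +/-25 percent jitter, index capped."""
    index = min(retry_index, max_index)
    return (2 ** index) * base_delay * random.uniform(0.75, 1.25)

for attempt in range(4):
    print(attempt, round(retry_countdown(attempt), 1))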
Example #9
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
    """
    Sends an email to a list of recipients.

    Inputs are:
      * `entry_id`: id of the InstructorTask object to which progress should be recorded.
      * `email_id`: id of the CourseEmail model that is to be emailed.
      * `to_list`: list of recipients.  Each is represented as a dict with the following keys:
        - 'profile__name': full name of User.
        - 'email': email address of User.
        - 'pk': primary key of User model.
      * `global_email_context`: dict containing values that are unique for this email but the same
        for all recipients of this email.  This dict is to be used to fill in slots in email
        template.  It does not include 'name' and 'email', which will be provided by the to_list.
      * `subtask_status_dict` : dict containing values representing current status.  Keys are:

        'task_id' : id of subtask.  This is used to pass task information across retries.
        'attempted' : number of attempts -- should equal succeeded plus failed
        'succeeded' : number that succeeded in processing
        'skipped' : number that were not processed.
        'failed' : number that failed during processing
        'retried_nomax' : number of times the subtask has been retried for conditions that
            should not have a maximum count applied
        'retried_withmax' : number of times the subtask has been retried for conditions that
            should have a maximum count applied
        'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

        Most values will be zero on initial call, but may be different when the task is
        invoked as part of a retry.

    Sends to all addresses contained in to_list that are not also in the Optout table.
    Emails are sent multi-part, in both plain text and html.  Updates InstructorTask object
    with status information (sends, failures, skips) and updates number of subtasks completed.
    """
    subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
    current_task_id = subtask_status.task_id
    num_to_send = len(to_list)
    log.info(("Preparing to send email %s to %d recipients as subtask %s "
              "for instructor task %d: context = %s, status=%s, time=%s"),
             email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status, datetime.now())

    # Check that the requested subtask is actually known to the current InstructorTask entry.
    # If this fails, it throws an exception, which should fail this subtask immediately.
    # This can happen when the parent task has been run twice, and results in duplicate
    # subtasks being created for the same InstructorTask entry.  This can happen when Celery
    # loses its connection to its broker, and any current tasks get requeued.
    # We hope to catch this condition in perform_delegate_email_batches() when it's the parent
    # task that is resubmitted, but just in case we fail to do so there, we check here as well.
    # There is also a possibility that this task will be run twice by Celery, for the same reason.
    # To deal with that, we need to confirm that the task has not already been completed.
    check_subtask_is_valid(entry_id, current_task_id, subtask_status)

    send_exception = None
    new_subtask_status = None
    try:
        start_time = time.time()
        new_subtask_status, send_exception = _send_course_email(
            entry_id,
            email_id,
            to_list,
            global_email_context,
            subtask_status,
        )
        log.info(
            "BulkEmail ==> _send_course_email completed in : %s for task : %s with recipient count: %s",
            time.time() - start_time,
            subtask_status.task_id,
            len(to_list)
        )
    except Exception:
        # Unexpected exception. Try to write out the failure to the entry before failing.
        log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
        # We got here for really unexpected reasons.  Since we don't know how far
        # the task got in emailing, we count all recipients as having failed.
        # It at least keeps the counts consistent.
        subtask_status.increment(failed=num_to_send, state=FAILURE)
        update_subtask_status(entry_id, current_task_id, subtask_status)
        raise

    if send_exception is None:
        # Update the InstructorTask object that is storing its progress.
        log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
    elif isinstance(send_exception, RetryTaskError):
        # If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the progress made before the retry condition
        # was encountered has already been updated before the retry call was made,
        # so we only log here.
        log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
        raise send_exception  # pylint: disable=raising-bad-type
    else:
        log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
        raise send_exception  # pylint: disable=raising-bad-type

    # return status in a form that can be serialized by Celery into JSON:
    log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
    return new_subtask_status.to_dict()
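
The to_dict()/from_dict() round-trip at the task boundaries matters because Celery serializes task arguments and results into JSON, so the status must survive a plain-dict trip. A toy stand-in for SubtaskStatus showing just that contract (the real class lives in the instructor task app and carries more fields):

import json

class MiniStatus:
    """Toy stand-in for SubtaskStatus: plain dict in, plain dict out."""
    def __init__(self, task_id, succeeded=0, failed=0, state='QUEUING'):
        self.task_id = task_id
        self.succeeded = succeeded
        self.failed = failed
        self.state = state

    @classmethod
    def from_dict(cls, status_dict):
        return cls(**status_dict)

    def to_dict(self):
        return {'task_id': self.task_id, 'succeeded': self.succeeded,
                'failed': self.failed, 'state': self.state}

status = MiniStatus('subtask-id-value', succeeded=98, failed=2, state='SUCCESS')
round_tripped = MiniStatus.from_dict(json.loads(json.dumps(status.to_dict())))
assert round_tripped.to_dict() == status.to_dict()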
Example #10
 def mock_update_subtask_status(entry_id, current_task_id, new_subtask_status):
     """Increments count of number of emails sent."""
     self.emails_sent += new_subtask_status.succeeded
     return update_subtask_status(entry_id, current_task_id, new_subtask_status)
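
A wrapper like this only counts anything if it is installed where the task code looks the name up. A self-contained sketch of that patching pattern with unittest.mock (a toy module stands in for the real task module; the actual patch target path depends on the edx-platform layout):

import types
from unittest import mock

# Toy module standing in for the task module that calls update_subtask_status:
tasks = types.ModuleType('tasks')
tasks.update_subtask_status = lambda entry_id, task_id, status: None

emails_sent = 0

def counting_update(entry_id, task_id, status):
    global emails_sent
    emails_sent += status['succeeded']

with mock.patch.object(tasks, 'update_subtask_status', counting_update):
    tasks.update_subtask_status(1, 'subtask-id-value', {'succeeded': 5})

assert emails_sent == 5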
Example #14
def send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status_dict):
    """
    Sends an email to a list of recipients.

    Inputs are:
      * `entry_id`: id of the InstructorTask object to which progress should be recorded.
      * `email_id`: id of the CourseEmail model that is to be emailed.
      * `to_list`: list of recipients.  Each is represented as a dict with the following keys:
        - 'profile__name': full name of User.
        - 'email': email address of User.
        - 'pk': primary key of User model.
      * `global_email_context`: dict containing values that are unique for this email but the same
        for all recipients of this email.  This dict is to be used to fill in slots in email
        template.  It does not include 'name' and 'email', which will be provided by the to_list.
      * `subtask_status_dict` : dict containing values representing current status.  Keys are:

        'task_id' : id of subtask.  This is used to pass task information across retries.
        'attempted' : number of attempts -- should equal succeeded plus failed
        'succeeded' : number that succeeded in processing
        'skipped' : number that were not processed.
        'failed' : number that failed during processing
        'retried_nomax' : number of times the subtask has been retried for conditions that
            should not have a maximum count applied
        'retried_withmax' : number of times the subtask has been retried for conditions that
            should have a maximum count applied
        'state' : celery state of the subtask (e.g. QUEUING, PROGRESS, RETRY, FAILURE, SUCCESS)

        Most values will be zero on initial call, but may be different when the task is
        invoked as part of a retry.

    Sends to all addresses contained in to_list that are not also in the Optout table.
    Emails are sent multi-part, in both plain text and html.  Updates InstructorTask object
    with status information (sends, failures, skips) and updates number of subtasks completed.
    """
    subtask_status = SubtaskStatus.from_dict(subtask_status_dict)
    current_task_id = subtask_status.task_id
    num_to_send = len(to_list)
    log.info((u"Preparing to send email %s to %d recipients as subtask %s "
              u"for instructor task %d: context = %s, status=%s"),
             email_id, num_to_send, current_task_id, entry_id, global_email_context, subtask_status)

    # Check that the requested subtask is actually known to the current InstructorTask entry.
    # If this fails, it throws an exception, which should fail this subtask immediately.
    # This can happen when the parent task has been run twice, and results in duplicate
    # subtasks being created for the same InstructorTask entry.  This can happen when Celery
    # loses its connection to its broker, and any current tasks get requeued.
    # We hope to catch this condition in perform_delegate_email_batches() when it's the parent
    # task that is resubmitted, but just in case we fail to do so there, we check here as well.
    # There is also a possibility that this task will be run twice by Celery, for the same reason.
    # To deal with that, we need to confirm that the task has not already been completed.
    check_subtask_is_valid(entry_id, current_task_id, subtask_status)

    send_exception = None
    new_subtask_status = None
    try:
        course_title = global_email_context['course_title']
        with dog_stats_api.timer('course_email.single_task.time.overall', tags=[_statsd_tag(course_title)]):
            new_subtask_status, send_exception = _send_course_email(
                entry_id,
                email_id,
                to_list,
                global_email_context,
                subtask_status,
            )
    except Exception:
        # Unexpected exception. Try to write out the failure to the entry before failing.
        log.exception("Send-email task %s for email %s: failed unexpectedly!", current_task_id, email_id)
        # We got here for really unexpected reasons.  Since we don't know how far
        # the task got in emailing, we count all recipients as having failed.
        # It at least keeps the counts consistent.
        subtask_status.increment(failed=num_to_send, state=FAILURE)
        update_subtask_status(entry_id, current_task_id, subtask_status)
        raise

    if send_exception is None:
        # Update the InstructorTask object that is storing its progress.
        log.info("Send-email task %s for email %s: succeeded", current_task_id, email_id)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
    elif isinstance(send_exception, RetryTaskError):
        # If retrying, a RetryTaskError needs to be returned to Celery.
        # We assume that the progress made before the retry condition
        # was encountered has already been updated before the retry call was made,
        # so we only log here.
        log.warning("Send-email task %s for email %s: being retried", current_task_id, email_id)
        raise send_exception  # pylint: disable=raising-bad-type
    else:
        log.error("Send-email task %s for email %s: failed: %s", current_task_id, email_id, send_exception)
        update_subtask_status(entry_id, current_task_id, new_subtask_status)
        raise send_exception  # pylint: disable=raising-bad-type

    # return status in a form that can be serialized by Celery into JSON:
    log.info("Send-email task %s for email %s: returning status %s", current_task_id, email_id, new_subtask_status)
    return new_subtask_status.to_dict()
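
This older variant of Example #9 times the send inside a dog_stats_api.timer context manager instead of logging elapsed time directly. A minimal standalone equivalent of such a timer, for illustration only (not the dogapi implementation):

import time
from contextlib import contextmanager

@contextmanager
def timer(metric, tags=()):
    """Report wall-clock time spent inside the block, tagged like a metric."""
    start = time.time()
    try:
        yield
    finally:
        print('%s %s: %.3fs' % (metric, list(tags), time.time() - start))

with timer('course_email.single_task.time.overall', tags=['course:dummy']):
    time.sleep(0.01)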