Example 1
def test_process_sanitised_letter_puts_letter_into_technical_failure_if_max_retries_exceeded(
    mocker,
    sample_letter_notification,
):
    mocker.patch('app.celery.letters_pdf_tasks.update_letter_pdf_status',
                 side_effect=Exception())
    mocker.patch('app.celery.letters_pdf_tasks.process_sanitised_letter.retry',
                 side_effect=MaxRetriesExceededError())

    sample_letter_notification.status = NOTIFICATION_PENDING_VIRUS_CHECK
    encrypted_data = encryption.encrypt({
        'page_count': 2,
        'message': None,
        'invalid_pages': None,
        'validation_status': 'passed',
        'filename': 'NOTIFY.{}'.format(sample_letter_notification.reference),
        'notification_id': str(sample_letter_notification.id),
        'address': None
    })

    with pytest.raises(NotificationTechnicalFailureException):
        process_sanitised_letter(encrypted_data)

    assert sample_letter_notification.status == NOTIFICATION_TECHNICAL_FAILURE
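
A minimal sketch of the delivery-task pattern that tests like this exercise: retry on provider errors and, once retries are exhausted, park the notification in technical failure and raise a dedicated exception. The names below (notify_celery, send_to_provider, update_notification_status_by_id) are assumed for illustration, not taken from the codebase under test.

from celery.exceptions import MaxRetriesExceededError


@notify_celery.task(bind=True, max_retries=48, default_retry_delay=300)
def deliver(self, notification_id):
    try:
        send_to_provider(notification_id)  # assumed helper; may raise
    except Exception:
        try:
            # Re-queue this task; Celery raises Retry here while attempts remain.
            self.retry(queue="retry-tasks")
        except MaxRetriesExceededError:
            # Retries exhausted: record the terminal state and surface a
            # dedicated exception, which is what the assertions above check for.
            update_notification_status_by_id(notification_id, "technical-failure")
            raise NotificationTechnicalFailureException(
                f"Retry max reached for notification {notification_id}"
            )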
Example 2
def test_should_go_into_technical_error_if_exceeds_retries_on_deliver_sms_task(
    sample_notification,
    mocker,
    sms_method,
    sms_method_name,
):
    mocker.patch(
        "app.delivery.send_to_providers.send_sms_to_provider",
        side_effect=Exception("EXPECTED"),
    )
    mocker.patch(
        f"app.celery.provider_tasks.{sms_method_name}.retry",
        side_effect=MaxRetriesExceededError(),
    )
    queued_callback = mocker.patch(
        "app.celery.provider_tasks._check_and_queue_callback_task")

    with pytest.raises(NotificationTechnicalFailureException) as e:
        sms_method(sample_notification.id)
    assert str(sample_notification.id) in str(e.value)

    getattr(provider_tasks,
            sms_method_name).retry.assert_called_with(queue="retry-tasks",
                                                      countdown=0)

    assert sample_notification.status == "technical-failure"
    queued_callback.assert_called_once_with(sample_notification)
Example 3
def send_grade_to_credentials(self, username, course_run_key, verified,
                              letter_grade, percent_grade):
    """ Celery task to notify the Credentials IDA of a grade change via POST. """
    logger.info(
        f"Running task send_grade_to_credentials for username {username} and course {course_run_key}"
    )

    countdown = 2**self.request.retries
    course_key = CourseKey.from_string(course_run_key)

    try:
        credentials_client = get_credentials_api_client(
            User.objects.get(username=settings.CREDENTIALS_SERVICE_USERNAME),
            org=course_key.org,
        )

        credentials_client.grades.post({
            'username': username,
            'course_run': str(course_key),
            'letter_grade': letter_grade,
            'percent_grade': percent_grade,
            'verified': verified,
        })

        logger.info(
            f"Sent grade for course {course_run_key} to user {username}")

    except Exception as exc:  # lint-amnesty, pylint: disable=unused-variable
        error_msg = f"Failed to send grade for course {course_run_key} to user {username}."
        logger.exception(error_msg)
        exception = MaxRetriesExceededError(
            f"Failed to send grade to credentials. Reason: {error_msg}")
        raise self.retry(exc=exception,
                         countdown=countdown,
                         max_retries=MAX_RETRIES)
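
The countdown = 2**self.request.retries line above implements exponential backoff by hand (1 s, 2 s, 4 s, ... per attempt), and passing a MaxRetriesExceededError as exc means that descriptive message is what finally propagates once max_retries is spent. For comparison, a hedged sketch of Celery's declarative equivalent; the task name and post_grade helper are illustrative only.

@app.task(
    bind=True,
    autoretry_for=(Exception,),  # retry whenever the body raises
    retry_backoff=True,          # exponential countdown: 1, 2, 4, 8, ... seconds
    retry_backoff_max=600,       # cap the delay
    retry_jitter=True,           # randomise delays to avoid thundering herds
    max_retries=MAX_RETRIES,
)
def send_grade_declarative(self, username, course_run_key):
    post_grade(username, course_run_key)  # assumed helper; may raise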
Example 4
    def test_maximum_retries(self, mock_retry, mock_request):
        mock_response = JsonResponse({"balance": 250}, status=400)
        mock_request.return_value = mock_response
        mock_retry.side_effect = MaxRetriesExceededError()

        ussd_client = self.get_ussd_client()
        ussd_client.send('mwas')
Example 5
def update_certificate_visible_date_on_course_update(
        self, course_key, certificate_available_date):
    """
    This task is designed to be called whenever a course is updated with
    certificate_available_date so that visible_date is updated on credential
    service as well.

    It will get all users within the course that have a certificate and call
    the credentials API to update all these certificates visible_date value
    to keep certificates in sync on both sides.

    If this function is moved, make sure to update its entry in
    EXPLICIT_QUEUES in the settings files so it runs in the correct queue.

    Arguments:
        course_key (str): The course identifier
        certificate_available_date (str): The date to update the certificate availability date to. It's a string
            representation of a datetime object because task parameters must be JSON-able.

    Returns:
        None

    """
    countdown = 2**self.request.retries
    # If the credentials config model is disabled for this
    # feature, it may indicate a condition where processing of such tasks
    # has been temporarily disabled.  Since this is a recoverable situation,
    # mark this task for retry instead of failing it altogether.
    if not CredentialsApiConfig.current().is_learner_issuance_enabled:
        error_msg = (
            "Task update_certificate_visible_date_on_course_update cannot be executed when credentials issuance is "
            "disabled in API config")
        LOGGER.info(error_msg)
        exception = MaxRetriesExceededError(
            f"Failed to update certificate availability date for course {course_key}. Reason: {error_msg}"
        )
        raise self.retry(exc=exception,
                         countdown=countdown,
                         max_retries=MAX_RETRIES)
    # update the course certificate with the new certificate available date if:
    # - The course is not self paced
    # - The certificates_display_behavior is "end_with_date"
    course_overview = CourseOverview.get_from_id(course_key)
    if (course_overview.self_paced is False
            and course_overview.certificates_display_behavior
            == CertificatesDisplayBehaviors.END_WITH_DATE):
        update_credentials_course_certificate_configuration_available_date.delay(
            str(course_key), certificate_available_date)
    users_with_certificates_in_course = GeneratedCertificate.eligible_available_certificates.filter(
        course_id=course_key).values_list('user__username', flat=True)

    LOGGER.info(
        "Task update_certificate_visible_date_on_course_update resending course certificates "
        f"for {len(users_with_certificates_in_course)} users in course {course_key}."
    )
    for user in users_with_certificates_in_course:
        award_course_certificate.delay(
            user,
            str(course_key),
            certificate_available_date=certificate_available_date)
Example 6
    # Nested helper: defined inside a bound Celery task, which is why it can
    # reference the enclosing task's `self`.
    def _retry_with_custom_exception(username, course_key, reason, countdown):
        exception = MaxRetriesExceededError(
            f"Failed to revoke program certificate for user {username} for course {course_key}. Reason: {reason}"
        )
        return self.retry(exc=exception,
                          countdown=countdown,
                          max_retries=MAX_RETRIES)
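
A hedged sketch of how a nested helper like this is typically invoked from the enclosing task's except blocks; the surrounding names (credentials_client, revoke_program_certificate, program_uuid) are assumed for illustration.

    # Assumed call site inside the enclosing bound task: log the failure, then
    # let the helper drive the retry with a descriptive exception attached.
    try:
        revoke_program_certificate(credentials_client, username, program_uuid)
    except Exception as exc:
        LOGGER.exception("Failed to revoke program certificate for user %s", username)
        raise _retry_with_custom_exception(
            username, course_key, reason=exc, countdown=2 ** self.request.retries
        )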
Example 7
def test_feed_update_fail(celery_worker, mocker, requests_mock,
                          authenticated_user):
    """
    Verify user is notified when a feed update fails
    """
    rss_url = "https://test.com/rss"
    requests_mock.get(rss_url,
                      exc=requests.exceptions.RequestException("error"))
    mocker.patch("apps.feeds.tasks.get_feed.retry",
                 side_effect=MaxRetriesExceededError())

    # Create test feed
    feed = Feed.objects.create(
        title="test",
        link="https://test.com",
        description="testing...",
        subscriber=authenticated_user,
        rss_url=rss_url,
    )
    # Verify feed has no existing items
    assert not feed.items.exists()

    # Verify user has no (unread) notifications
    assert authenticated_user.notifications.count() == 0

    update_feed_items.delay(feed.pk)
    sleep(8)

    feed.refresh_from_db()
    assert not feed.items.exists()
    assert authenticated_user.notifications.count() == 1
    notification = authenticated_user.notifications.first()
    assert feed.title in notification.title
    assert notification.details == feed.get_update_url()
    assert notification.unread
Example 8
def test_should_go_into_technical_error_if_exceeds_retries_on_deliver_email_task(sample_notification, mocker):
    mocker.patch('app.delivery.send_to_providers.send_email_to_provider', side_effect=Exception("EXPECTED"))
    mocker.patch('app.celery.provider_tasks.deliver_email.retry', side_effect=MaxRetriesExceededError())

    with pytest.raises(NotificationTechnicalFailureException) as e:
        deliver_email(sample_notification.id)
    assert str(sample_notification.id) in str(e.value)

    provider_tasks.deliver_email.retry.assert_called_with(queue="retry-tasks")
    assert sample_notification.status == 'technical-failure'
Example 9
def test_sanitise_letter_puts_letter_into_technical_failure_if_max_retries_exceeded(sample_letter_notification, mocker):
    mocker.patch('app.celery.letters_pdf_tasks.notify_celery.send_task', side_effect=Exception())
    mocker.patch('app.celery.letters_pdf_tasks.sanitise_letter.retry', side_effect=MaxRetriesExceededError())

    filename = 'NOTIFY.{}'.format(sample_letter_notification.reference)
    sample_letter_notification.status = NOTIFICATION_PENDING_VIRUS_CHECK

    with pytest.raises(NotificationTechnicalFailureException):
        sanitise_letter(filename)

    assert sample_letter_notification.status == NOTIFICATION_TECHNICAL_FAILURE
Example 10
    def test_periodic_update_feeds_and_items_with_invalid_data_with_raising_max_retry_exception(
            self, mock_send_notification, task_retry, mocked_parser):

        task_retry.side_effect = MaxRetriesExceededError()
        self.assertTrue(self.feed.is_auto_updated)
        self.assertEqual(mocked_parser.call_count, 0)
        self.assertEqual(mock_send_notification.call_count, 0)

        with raises(MaxRetriesExceededError):
            periodic_update_feeds_and_items.delay()

        self.feed.refresh_from_db()
        self.assertEqual(mocked_parser.call_count, 1)
        self.assertEqual(mock_send_notification.call_count, 1)
        self.assertFalse(self.feed.is_auto_updated)
Example 11
def test_send_webhook_request_async_when_max_retries_exceeded(
    mocked_send_response,
    mocked_task_retry,
    event_delivery,
    webhook_response_failed,
):
    mocked_send_response.return_value = webhook_response_failed
    mocked_task_retry.side_effect = MaxRetriesExceededError()

    send_webhook_request_async(event_delivery.pk)

    attempt = EventDeliveryAttempt.objects.filter(
        delivery=event_delivery).first()
    delivery = EventDelivery.objects.get(id=event_delivery.pk)
    assert attempt.status == EventDeliveryStatus.FAILED
    assert delivery.status == EventDeliveryStatus.FAILED
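
This test pins down a different failure policy from the notification tasks above: when retries run out, the task records FAILED statuses instead of re-raising. A hedged sketch of that shape, with helper names assumed rather than taken from the real module.

from celery.exceptions import MaxRetriesExceededError


@app.task(bind=True, max_retries=5)
def send_webhook_request_async(self, event_delivery_pk):
    delivery = EventDelivery.objects.get(pk=event_delivery_pk)
    response = send_webhook_using_http(delivery)  # assumed helper
    record_attempt(delivery, response)            # assumed helper; stores a FAILED attempt row
    if response.status == EventDeliveryStatus.FAILED:
        try:
            self.retry(countdown=10)
        except MaxRetriesExceededError:
            # Out of retries: leave a FAILED delivery record instead of raising.
            delivery.status = EventDeliveryStatus.FAILED
            delivery.save(update_fields=["status"])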
Example 12
    def test_error_fetching_from_s3_saves(self, cached_pdf, html_to_pdf,
                                          post_submission, retry):
        self.comments[1]['files'].append(
            {"name": "file_name.png", "key": "someKey"})
        retry.return_value = MaxRetriesExceededError()
        with mock.patch('regulations.tasks.boto3') as boto3:
            client = boto3.Session.return_value.client.return_value
            client.download_file.side_effect = botocore.exceptions.ClientError
            submit_comment(self.comments, self.form, self.meta)
        saved_submission = FailedCommentSubmission.objects.all()[0]
        self.assertEqual(
            json.dumps({
                'comments': self.comments,
                'form_data': self.form,
            }),
            saved_submission.body,
        )
Example 13
    def test_failed_submit_maximum_retries(self, cache_pdf, html_to_pdf,
                                           post_submission, retry):
        cache_pdf.return_value = SignedUrl(
            'pdf', 'https://s3.amazonaws.com/bucket/pdf')
        html_to_pdf.return_value.__enter__.return_value = self.file_handle

        post_submission.side_effect = RequestException

        retry.return_value = MaxRetriesExceededError()

        submit_comment(self.comments, self.form, self.meta)
        saved_submission = FailedCommentSubmission.objects.all()[0]
        self.assertEqual(
            json.dumps({
                'comments': self.comments,
                'form_data': self.form,
            }),
            saved_submission.body,
        )
Example 14
def update_certificate_visible_date_on_course_update(self, course_key):
    """
    This task is designed to be called whenever a course is updated with
    certificate_available_date so that visible_date is updated on credential
    service as well.

    It will get all users within the course that have a certificate and call
    the credentials API to update all these certificates visible_date value
    to keep certificates in sync on both sides.

    Args:
        course_key (str): The course identifier

    Returns:
        None

    """
    countdown = 2**self.request.retries
    # If the credentials config model is disabled for this
    # feature, it may indicate a condition where processing of such tasks
    # has been temporarily disabled.  Since this is a recoverable situation,
    # mark this task for retry instead of failing it altogether.
    if not CredentialsApiConfig.current().is_learner_issuance_enabled:
        error_msg = (
            "Task update_certificate_visible_date_on_course_update cannot be executed when credentials issuance is "
            "disabled in API config")
        LOGGER.info(error_msg)
        exception = MaxRetriesExceededError(
            f"Failed to update certificate availability date for course {course_key}. Reason: {error_msg}"
        )
        raise self.retry(exc=exception,
                         countdown=countdown,
                         max_retries=MAX_RETRIES)

    users_with_certificates_in_course = GeneratedCertificate.eligible_available_certificates.filter(
        course_id=course_key).values_list('user__username', flat=True)

    LOGGER.info(
        "Task update_certificate_visible_date_on_course_update resending course certificates"
        f"for {len(users_with_certificates_in_course)} users in course {course_key}."
    )
    for user in users_with_certificates_in_course:
        award_course_certificate.delay(user, str(course_key))
Example 15
def test_job_context_fail(database, job_constants):
    """When a job raises an exception and has no retries left, it should be
    marked as failed"""
    sess = database.session
    job = JobFactory(
        job_status=sess.query(JobStatus).filter_by(name='running').one(),
        job_type=sess.query(JobType).filter_by(name='validation').one(),
        file_type=sess.query(FileType).filter_by(name='sub_award').one(),
    )
    sess.add(job)
    sess.commit()

    task = Mock()
    task.retry.return_value = MaxRetriesExceededError()
    with jobQueue.job_context(task, job.job_id):
        raise Exception('This failed!')

    sess.refresh(job)
    assert job.job_status.name == 'failed'
    assert job.error_message == 'This failed!'
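
A hedged sketch of a job_context helper that would satisfy this test: run the block, ask the task to retry on failure, and mark the job failed once retries are exhausted. The implementation and the mark_job_failed helper are assumed, not the project's actual code.

from contextlib import contextmanager

from celery.exceptions import MaxRetriesExceededError


@contextmanager
def job_context(task, job_id):
    """Assumed sketch of the context manager the test above drives."""
    try:
        yield
    except Exception as exc:
        try:
            # In the test, task.retry is a Mock returning MaxRetriesExceededError,
            # so this raise surfaces that error immediately.
            raise task.retry(exc=exc)
        except MaxRetriesExceededError:
            mark_job_failed(job_id, error_message=str(exc))  # assumed helper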
Example 16
def retry_email(task, err) -> None:
    """Using an exponential retry so we can wait for the server to start
    working again soon on the recommendation of [1].

    Args:
        task: the Celery task instance (i.e. setting bind=True)
        err: The error message returned from the exception.

    Returns:
        None
    """
    # Find the number of attempts so far
    num_retries = task.request.retries
    seconds_to_wait = 2.0**num_retries
    try:
        task.retry(countdown=seconds_to_wait, exc=err)
    except MaxRetriesExceededError as e:
        message = 'Number of retries exceeded.'
        log_message = {'message': message, 'error': e}
        logger.error(log_message)
        raise MaxRetriesExceededError(e)
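
A hedged sketch of a caller for retry_email; the task name, deliver_email helper, and retry budget are assumptions.

from smtplib import SMTPException


# Illustrative caller (assumed names): binding the task lets retry_email read
# task.request.retries and call task.retry with the exponential countdown.
@app.task(bind=True, max_retries=5)
def send_notification_email(self, recipient, subject, body):
    try:
        deliver_email(recipient, subject, body)  # assumed helper; may raise
    except SMTPException as err:
        retry_email(self, err)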
Example 17
def send_grade_to_credentials(self, username, course_run_key, verified,
                              letter_grade, percent_grade):
    """
    Celery task to notify the Credentials IDA of a grade change via POST.
    """
    logger.info(
        f"Running task send_grade_to_credentials for username {username} and course {course_run_key}"
    )

    countdown = 2**self.request.retries
    course_key = CourseKey.from_string(course_run_key)

    try:
        credentials_client = get_credentials_api_client(
            User.objects.get(username=settings.CREDENTIALS_SERVICE_USERNAME))
        api_url = urljoin(
            f"{get_credentials_api_base_url(org=course_key.org)}/", "grades/")
        response = credentials_client.post(api_url,
                                           data={
                                               'username': username,
                                               'course_run': str(course_key),
                                               'letter_grade': letter_grade,
                                               'percent_grade': percent_grade,
                                               'verified': verified,
                                           })
        response.raise_for_status()

        logger.info(
            f"Sent grade for course {course_run_key} to user {username}")

    except Exception:  # lint-amnesty, pylint: disable=W0703
        grade_str = f'(percent: {percent_grade} letter: {letter_grade})'
        error_msg = f'Failed to send grade {grade_str} for course {course_run_key} to user {username}.'
        logger.exception(error_msg)
        exception = MaxRetriesExceededError(
            f"Failed to send grade to credentials. Reason: {error_msg}")
        raise self.retry(exc=exception,
                         countdown=countdown,
                         max_retries=MAX_RETRIES)  # pylint: disable=raise-missing-from
Example 18
class TestProcessFileAsync:
    """Test process file async."""

    @patch('cern_search_rest_api.modules.cernsearch.tasks.current_processors.get_processor')
    @patch('cern_search_rest_api.modules.cernsearch.tasks.ObjectVersion.get')
    def test_process_file_async_success(
            self,
            object_version_get_mock,
            get_processor_mock,
            appctx,
            object_version
    ):
        """Test process file calls."""
        get_processor_mock.return_value.process.return_value = 'Processed'
        object_version_get_mock.return_value = object_version

        process_file_async('00000000-0000-0000-0000-000000000000', 'test.pdf')

        object_version_get_mock.assert_called_once_with('00000000-0000-0000-0000-000000000000', 'test.pdf')
        get_processor_mock.assert_called_once_with(name=TikaProcessor.id())
        get_processor_mock.return_value.process.assert_called_once_with(object_version)

    @patch('cern_search_rest_api.modules.cernsearch.tasks.ObjectVersion.get', side_effect=Exception())
    @patch('cern_search_rest_api.modules.cernsearch.tasks.process_file_async.retry', side_effect=Retry())
    def test_process_file_async_retry(self, process_file_async_retry, object_version_get_mock, object_version):
        """Test process file calls."""
        with raises(Retry):
            process_file_async('00000000-0000-0000-0000-000000000000', 'test.pdf')

    @patch('cern_search_rest_api.modules.cernsearch.tasks.ObjectVersion.get', side_effect=Exception())
    @patch('cern_search_rest_api.modules.cernsearch.tasks.process_file_async.retry',
           side_effect=MaxRetriesExceededError())
    def test_process_file_async_failure(self, process_file_async_retry_mock, object_version):
        """Test process file calls."""
        with raises(Reject):
            process_file_async('00000000-0000-0000-0000-000000000000', 'test.pdf')
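
The two retry tests above imply a task body that lets Retry propagate while attempts remain and converts MaxRetriesExceededError into Reject once they run out. A sketch consistent with those tests (decorator options assumed, not the module's actual implementation).

from celery import shared_task
from celery.exceptions import MaxRetriesExceededError, Reject


@shared_task(bind=True, max_retries=3, acks_late=True)
def process_file_async(self, bucket_id, key):
    try:
        obj = ObjectVersion.get(bucket_id, key)
        current_processors.get_processor(name=TikaProcessor.id()).process(obj)
    except Exception as exc:
        try:
            # self.retry raises Retry while attempts remain.
            raise self.retry(exc=exc)
        except MaxRetriesExceededError as retries_exc:
            # Dead-letter the message instead of retrying forever.
            raise Reject(retries_exc, requeue=False)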
Example 19
def award_program_certificates(self, username):
    """
    This task is designed to be called whenever a student's completion status
    changes with respect to one or more courses (primarily, when a course
    certificate is awarded).

    It will consult with a variety of APIs to determine whether or not the
    specified user should be awarded a certificate in one or more programs, and
    use the credentials service to create said certificates if so.

    This task may also be invoked independently of any course completion status
    change - for example, to backpopulate missing program credentials for a
    student.

    Args:
        username (str): The username of the student

    Returns:
        None

    """
    LOGGER.info('Running task award_program_certificates for username %s',
                username)

    countdown = 2**self.request.retries

    # If the credentials config model is disabled for this
    # feature, it may indicate a condition where processing of such tasks
    # has been temporarily disabled.  Since this is a recoverable situation,
    # mark this task for retry instead of failing it altogether.

    if not CredentialsApiConfig.current().is_learner_issuance_enabled:
        LOGGER.warning(
            'Task award_program_certificates cannot be executed when credentials issuance is disabled in API config',
        )
        raise self.retry(countdown=countdown, max_retries=MAX_RETRIES)

    try:
        try:
            student = User.objects.get(username=username)
        except User.DoesNotExist:
            LOGGER.exception(
                'Task award_program_certificates was called with invalid username %s',
                username)
            # Don't retry for this case - just conclude the task.
            return
        program_uuids = []
        for site in Site.objects.all():
            program_uuids.extend(get_completed_programs(site, student))
        if not program_uuids:
            # No reason to continue beyond this point unless/until this
            # task gets updated to support revocation of program certs.
            LOGGER.info(
                'Task award_program_certificates was called for user %s with no completed programs',
                username)
            return

        # Determine which program certificates the user has already been awarded, if any.
        existing_program_uuids = get_certified_programs(student)

    except Exception as exc:
        LOGGER.exception(
            'Failed to determine program certificates to be awarded for user %s',
            username)
        raise self.retry(exc=exc, countdown=countdown, max_retries=MAX_RETRIES)

    # For each completed program for which the student doesn't already have a
    # certificate, award one now.
    #
    # This logic is important, because we will retry the whole task if awarding any particular program cert fails.
    #
    # N.B. the list is sorted to facilitate deterministic ordering, e.g. for tests.
    new_program_uuids = sorted(
        list(set(program_uuids) - set(existing_program_uuids)))
    if new_program_uuids:
        try:
            credentials_client = get_credentials_api_client(
                User.objects.get(
                    username=settings.CREDENTIALS_SERVICE_USERNAME), )
        except Exception as exc:
            LOGGER.exception(
                'Failed to create a credentials API client to award program certificates'
            )
            # Retry because a misconfiguration could be fixed
            raise self.retry(exc=exc,
                             countdown=countdown,
                             max_retries=MAX_RETRIES)

        failed_program_certificate_award_attempts = []
        for program_uuid in new_program_uuids:
            try:
                award_program_certificate(credentials_client, username,
                                          program_uuid)
                LOGGER.info('Awarded certificate for program %s to user %s',
                            program_uuid, username)
            except exceptions.HttpNotFoundError:
                LOGGER.exception(
                    """Certificate for program {uuid} could not be found. Unable to award certificate to user
                    {username}. The program might not be configured.""".format(
                        uuid=program_uuid, username=username))
            except exceptions.HttpClientError as exc:
                # Grab the status code from the client error, because our API
                # client handles all 4XX errors the same way. In the future,
                # we may want to fork slumber, add 429 handling, and use that
                # in edx_rest_api_client.
                if exc.response.status_code == 429:  # pylint: disable=no-member
                    rate_limit_countdown = 60
                    LOGGER.info(
                        """Rate limited. Retrying task to award certificates to user {username} in {countdown}
                        seconds""".format(username=username,
                                          countdown=rate_limit_countdown))
                    # Retry after 60 seconds, when we should be in a new throttling window
                    raise self.retry(exc=exc,
                                     countdown=rate_limit_countdown,
                                     max_retries=MAX_RETRIES)
                else:
                    LOGGER.exception(
                        """Unable to award certificate to user {username} for program {uuid}. The program might not be
                        configured.""".format(username=username,
                                              uuid=program_uuid))
            except Exception:  # pylint: disable=broad-except
                # keep trying to award other certs, but retry the whole task to fix any missing entries
                LOGGER.warning(
                    'Failed to award certificate for program {uuid} to user {username}.'
                    .format(uuid=program_uuid, username=username))
                failed_program_certificate_award_attempts.append(program_uuid)

        if failed_program_certificate_award_attempts:
            # N.B. This logic assumes that this task is idempotent
            LOGGER.info(
                'Retrying task to award failed certificates to user %s',
                username)
            # The error message may change on each reattempt but will never be raised until
            # the max number of retries have been exceeded. It is unlikely that this list
            # will change by the time it reaches its maximum number of attempts.
            exception = MaxRetriesExceededError(
                "Failed to award certificate for user {} for programs {}".
                format(username, failed_program_certificate_award_attempts))
            raise self.retry(exc=exception,
                             countdown=countdown,
                             max_retries=MAX_RETRIES)
    else:
        LOGGER.info('User %s is not eligible for any new program certificates',
                    username)

    LOGGER.info(
        'Successfully completed the task award_program_certificates for username %s',
        username)
Example 20
class TestCeleryTasks(MasuTestCase):
    """Test cases for Celery tasks."""

    @patch("masu.celery.tasks.Orchestrator")
    def test_check_report_updates(self, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()
        tasks.check_report_updates()

        mock_orchestrator.assert_called()
        mock_orch.prepare.assert_called()

    @patch("masu.celery.tasks.Orchestrator")
    @patch("masu.external.date_accessor.DateAccessor.today")
    def test_remove_expired_data(self, mock_date, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()

        mock_date_string = "2018-07-25 00:00:30.993536"
        mock_date_obj = datetime.strptime(mock_date_string, "%Y-%m-%d %H:%M:%S.%f")
        mock_date.return_value = mock_date_obj

        tasks.remove_expired_data()

        mock_orchestrator.assert_called()
        mock_orch.remove_expired_report_data.assert_called()

    @patch("masu.celery.tasks.Orchestrator")
    @patch("masu.celery.tasks.query_and_upload_to_s3")
    @patch("masu.external.date_accessor.DateAccessor.today")
    @override_settings(ENABLE_S3_ARCHIVING=True)
    def test_upload_normalized_data(self, mock_date, mock_upload, mock_orchestrator):
        """Test that the scheduled task uploads the correct normalized data."""
        test_export_setting = TableExportSetting(
            provider="test", output_name="test", sql="test_sql", iterate_daily=False
        )
        schema_name = "acct10001"
        provider_uuid = uuid.uuid4()

        mock_date.return_value = date(2015, 1, 5)

        mock_orchestrator.get_accounts.return_value = (
            [{"schema_name": schema_name, "provider_uuid": provider_uuid}],
            [],
        )

        current_month_start = date(2015, 1, 1)
        current_month_end = date(2015, 1, 31)
        prev_month_start = date(2014, 12, 1)
        prev_month_end = date(2014, 12, 31)

        call_curr_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            current_month_start,
            current_month_end,
        )
        call_prev_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            prev_month_start,
            prev_month_end,
        )

        with patch("masu.celery.tasks.table_export_settings", [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

        mock_date.return_value = date(2012, 3, 31)
        current_month_start = date(2012, 3, 1)
        current_month_end = date(2012, 3, 31)
        prev_month_start = date(2012, 2, 1)
        prev_month_end = date(2012, 2, 29)

        call_curr_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            current_month_start,
            current_month_end,
        )
        call_prev_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            prev_month_start,
            prev_month_end,
        )

        with patch("masu.celery.tasks.table_export_settings", [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

    @patch("masu.celery.tasks.DataExportRequest")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_success(self, mock_sync, mock_data_export_request):
        """Test that the scheduled task correctly calls the sync function."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()

    @patch("masu.celery.tasks.LOG")
    @patch("masu.celery.tasks.DataExportRequest")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_fail_exc(self, mock_sync, mock_data_export_request, mock_log):
        """Test that the scheduled task correctly calls the sync function, which explodes."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        mock_sync.return_value.sync_bucket.side_effect = ClientError(
            error_response={"error": fake.word()}, operation_name=fake.word()
        )

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()
        mock_log.exception.assert_called_once()

    @patch("masu.celery.tasks.DataExportRequest.objects")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_cold_storage_retry(self, mock_sync, mock_data_export_request):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()
        with self.assertRaises(Retry):
            tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.WAITING)

    @patch("masu.celery.tasks.sync_data_to_customer.retry", side_effect=MaxRetriesExceededError())
    @patch("masu.celery.tasks.DataExportRequest.objects")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_max_retry(self, mock_sync, mock_data_export_request, mock_retry):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()

        tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.ERROR)

    @override_settings(ENABLE_S3_ARCHIVING=True)
    def test_delete_archived_data_bad_inputs_exception(self):
        """Test that delete_archived_data raises an exception when given bad inputs."""
        schema_name, provider_type, provider_uuid = "", "", ""
        with self.assertRaises(TypeError) as e:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        self.assertIn("schema_name", str(e.exception))
        self.assertIn("provider_type", str(e.exception))
        self.assertIn("provider_uuid", str(e.exception))

    @patch("masu.util.aws.common.boto3.resource")
    @override_settings(ENABLE_S3_ARCHIVING=False)
    def test_delete_archived_data_archiving_disabled_noop(self, mock_resource):
        """Test that delete_archived_data returns early when feature is disabled."""
        schema_name, provider_type, provider_uuid = fake.slug(), Provider.PROVIDER_AWS, fake.uuid4()
        tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_not_called()

    @patch("masu.util.aws.common.boto3.resource")
    @override_settings(ENABLE_S3_ARCHIVING=True)
    @override_settings(S3_BUCKET_PATH="")
    def test_delete_archived_data_missing_bucket_path_exception(self, mock_resource):
        """Test that delete_archived_data raises an exception with an empty bucket path."""
        schema_name, provider_type, provider_uuid = fake.slug(), Provider.PROVIDER_AWS, fake.uuid4()
        with self.assertRaises(ImproperlyConfigured):
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_not_called()

    @override_settings(ENABLE_S3_ARCHIVING=True)
    @override_settings(S3_BUCKET_PATH="data_archive")
    @patch("masu.util.aws.common.boto3.resource")
    def test_delete_archived_data_success(self, mock_resource):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = "acct10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = "00000000-0000-0000-0000-000000000001"
        expected_prefix = "data_archive/acct10001/aws/00000000-0000-0000-0000-000000000001/"

        # Generate enough fake objects to expect calling the S3 delete api twice.
        mock_bucket = mock_resource.return_value.Bucket.return_value
        bucket_objects = [DummyS3Object(key=fake.file_path()) for _ in range(1234)]
        expected_keys = [{"Key": bucket_object.key} for bucket_object in bucket_objects]

        # Leave one object mysteriously not deleted to cover the LOG.warning use case.
        mock_bucket.objects.filter.side_effect = [bucket_objects, bucket_objects[:1]]

        with self.assertLogs("masu.celery.tasks", "WARNING") as captured_logs:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_called()
        mock_bucket.delete_objects.assert_has_calls(
            [call(Delete={"Objects": expected_keys[:1000]}), call(Delete={"Objects": expected_keys[1000:]})]
        )
        mock_bucket.objects.filter.assert_has_calls([call(Prefix=expected_prefix), call(Prefix=expected_prefix)])
        self.assertIn("Found 1 objects after attempting", captured_logs.output[-1])

    @override_settings(ENABLE_S3_ARCHIVING=False)
    def test_delete_archived_data_archiving_false(self):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = "acct10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = "00000000-0000-0000-0000-000000000001"

        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
            self.assertIn("Skipping delete_archived_data. Upload feature is disabled.", captured_logs.output[0])

    @patch("masu.celery.tasks.vacuum_schema")
    def test_vacuum_schemas(self, mock_vacuum):
        """Test that the vacuum_schemas scheduled task runs for all schemas."""
        schema_one = "acct123"
        schema_two = "acct456"
        with connection.cursor() as cursor:
            cursor.execute(
                """
                INSERT INTO api_tenant (schema_name)
                VALUES (%s), (%s)
                """,
                [schema_one, schema_two],
            )

        tasks.vacuum_schemas()

        for schema_name in [schema_one, schema_two]:
            mock_vacuum.delay.assert_any_call(schema_name)

    @patch("masu.celery.tasks.Config")
    @patch("masu.external.date_accessor.DateAccessor.get_billing_months")
    def test_clean_volume(self, mock_date, mock_config):
        """Test that the clean volume function is cleaning the appropriate files"""
        # create a manifest
        mock_date.return_value = ["2020-02-01"]
        manifest_dict = {
            "assembly_id": "1234",
            "billing_period_start_datetime": "2020-02-01",
            "num_total_files": 2,
            "provider_uuid": self.aws_provider_uuid,
        }
        manifest_accessor = ReportManifestDBAccessor()
        manifest = manifest_accessor.add(**manifest_dict)
        # create three files on the temporary volume, one with a prefix matching
        #  the assembly_id in the manifest above
        with tempfile.TemporaryDirectory() as tmpdirname:
            mock_config.PVC_DIR = tmpdirname
            mock_config.VOLUME_FILE_RETENTION = 60 * 60 * 24
            old_matching_file = os.path.join(tmpdirname, "%s.csv" % manifest.assembly_id)
            new_no_match_file = os.path.join(tmpdirname, "newfile.csv")
            old_no_match_file = os.path.join(tmpdirname, "oldfile.csv")
            filepaths = [old_matching_file, new_no_match_file, old_no_match_file]
            for path in filepaths:
                open(path, "a").close()
                self.assertEqual(os.path.exists(path), True)

            # Update timestamps for the old files
            datehelper = DateHelper()
            now = datehelper.now
            old_datetime = now - timedelta(seconds=mock_config.VOLUME_FILE_RETENTION * 2)
            oldtime = old_datetime.timestamp()
            os.utime(old_matching_file, (oldtime, oldtime))
            os.utime(old_no_match_file, (oldtime, oldtime))

            # now run the clean volume task
            tasks.clean_volume()
            # make sure that the file with the matching id still exists and that
            # the file with the other id is gone
            self.assertEqual(os.path.exists(old_matching_file), True)
            self.assertEqual(os.path.exists(new_no_match_file), True)
            self.assertEqual(os.path.exists(old_no_match_file), False)
            # now edit the manifest to say that all the files have been processed
            # and rerun the clean_volumes task
            manifest.num_processed_files = manifest_dict.get("num_total_files")
            manifest.save()
            tasks.clean_volume()
            # ensure that the original file is deleted from the volume
            self.assertEqual(os.path.exists(old_matching_file), False)
            self.assertEqual(os.path.exists(new_no_match_file), True)

        # assert the tempdir is cleaned up
        self.assertEqual(os.path.exists(tmpdirname), False)
        # test no files found for codecov
        tasks.clean_volume()
Example 21
class TestCeleryTasks(MasuTestCase):
    """Test cases for Celery tasks."""

    @patch("masu.celery.tasks.Orchestrator")
    def test_check_report_updates(self, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()
        tasks.check_report_updates()

        mock_orchestrator.assert_called()
        mock_orch.prepare.assert_called()

    @patch("masu.celery.tasks.Orchestrator")
    @patch("masu.external.date_accessor.DateAccessor.today")
    def test_remove_expired_data(self, mock_date, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()

        mock_date_string = "2018-07-25 00:00:30.993536"
        mock_date_obj = datetime.strptime(mock_date_string, "%Y-%m-%d %H:%M:%S.%f")
        mock_date.return_value = mock_date_obj

        tasks.remove_expired_data()

        mock_orchestrator.assert_called()
        mock_orch.remove_expired_report_data.assert_called()

    @patch("masu.celery.tasks.DataExportRequest")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_success(self, mock_sync, mock_data_export_request):
        """Test that the scheduled task correctly calls the sync function."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()

    @patch("masu.celery.tasks.LOG")
    @patch("masu.celery.tasks.DataExportRequest")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_fail_exc(self, mock_sync, mock_data_export_request, mock_log):
        """Test that the scheduled task correctly calls the sync function, which explodes."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        mock_sync.return_value.sync_bucket.side_effect = ClientError(
            error_response={"error": fake.word()}, operation_name=fake.word()
        )

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()
        mock_log.exception.assert_called_once()

    @patch("masu.celery.tasks.DataExportRequest.objects")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_cold_storage_retry(self, mock_sync, mock_data_export_request):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()
        with self.assertRaises(Retry):
            tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.WAITING)

    @patch("masu.celery.tasks.sync_data_to_customer.retry", side_effect=MaxRetriesExceededError())
    @patch("masu.celery.tasks.DataExportRequest.objects")
    @patch("masu.celery.tasks.AwsS3Syncer")
    def test_sync_data_to_customer_max_retry(self, mock_sync, mock_data_export_request, mock_retry):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()

        tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.ERROR)

    # Check to see if exchange rates are being created or updated
    def test_get_currency_conversion_rates(self):
        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.get_daily_currency_rates()
            self.assertIn("Creating the exchange rate" or "Updating currency", str(captured_logs))

    # Check to see if Error is raised on wrong URL
    @patch("masu.celery.tasks.requests")
    def test_error_get_currency_conversion_rates(self, mock_requests):
        mock_requests.get.side_effect = Exception("error")
        with self.assertRaises(Exception) as e:
            tasks.get_daily_currency_rates()
        self.assertIn("Couldn't pull latest conversion rates", str(e.exception))

    @patch("masu.celery.tasks.requests")
    def test_get_currency_conversion_rates_successful(self, mock_requests):
        beforeRows = ExchangeRates.objects.count()
        self.assertEqual(beforeRows, 0)
        mock_requests.get.return_value = Mock(
            status_code=201, json=lambda: {"result": "success", "rates": {"AUD": 1.37, "CAD": 1.25, "CHF": 0.928}}
        )
        tasks.get_daily_currency_rates()
        afterRows = ExchangeRates.objects.count()
        self.assertEqual(afterRows, 3)

    @patch("masu.celery.tasks.requests")
    def test_get_currency_conversion_rates_unsupported_currency(self, mock_requests):
        beforeRows = ExchangeRates.objects.count()
        self.assertEqual(beforeRows, 0)
        mock_requests.get.return_value = Mock(
            status_code=201,
            json=lambda: {"result": "success", "rates": {"AUD": 1.37, "CAD": 1.25, "CHF": 0.928, "FOO": 12.34}},
        )
        tasks.get_daily_currency_rates()
        afterRows = ExchangeRates.objects.count()
        self.assertEqual(afterRows, 3)

    @override_settings(ENABLE_S3_ARCHIVING=True)
    def test_delete_archived_data_bad_inputs_exception(self):
        """Test that delete_archived_data raises an exception when given bad inputs."""
        schema_name, provider_type, provider_uuid = "", "", ""
        with self.assertRaises(TypeError) as e:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        self.assertIn("schema_name", str(e.exception))
        self.assertIn("provider_type", str(e.exception))
        self.assertIn("provider_uuid", str(e.exception))

    @patch("masu.util.aws.common.boto3.resource")
    @override_settings(ENABLE_S3_ARCHIVING=False)
    def test_delete_archived_data_archiving_disabled_noop(self, mock_resource):
        """Test that delete_archived_data returns early when feature is disabled."""
        schema_name, provider_type, provider_uuid = fake.slug(), Provider.PROVIDER_AWS, fake.uuid4()
        tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_not_called()

    @override_settings(ENABLE_S3_ARCHIVING=True)
    @patch("masu.celery.tasks.get_s3_resource")
    def test_deleted_archived_with_prefix_success(self, mock_resource):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        expected_prefix = "data/csv/10001/00000000-0000-0000-0000-000000000001/"

        # Generate enough fake objects to expect calling the S3 delete api twice.
        mock_bucket = mock_resource.return_value.Bucket.return_value
        bucket_objects = [DummyS3Object(key=fake.file_path()) for _ in range(1234)]
        expected_keys = [{"Key": bucket_object.key} for bucket_object in bucket_objects]

        # Leave one object mysteriously not deleted to cover the LOG.warning use case.
        mock_bucket.objects.filter.side_effect = [bucket_objects, bucket_objects[:1]]

        with self.assertLogs("masu.celery.tasks", "WARNING") as captured_logs:
            tasks.deleted_archived_with_prefix(mock_bucket, expected_prefix)
        mock_resource.assert_called()
        mock_bucket.delete_objects.assert_has_calls(
            [call(Delete={"Objects": expected_keys[:1000]}), call(Delete={"Objects": expected_keys[1000:]})]
        )
        mock_bucket.objects.filter.assert_has_calls([call(Prefix=expected_prefix), call(Prefix=expected_prefix)])
        self.assertIn("Found 1 objects after attempting", captured_logs.output[-1])

    @override_settings(ENABLE_S3_ARCHIVING=True)
    @patch("masu.celery.tasks.deleted_archived_with_prefix")
    def test_delete_archived_data_success(self, mock_delete):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = "acct10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = "00000000-0000-0000-0000-000000000001"

        tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_delete.assert_called()

    @override_settings(ENABLE_S3_ARCHIVING=False)
    def test_delete_archived_data_archiving_false(self):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = "acct10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = "00000000-0000-0000-0000-000000000001"

        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
            self.assertIn("Skipping delete_archived_data. Upload feature is disabled.", captured_logs.output[0])

    @override_settings(ENABLE_S3_ARCHIVING=True, SKIP_MINIO_DATA_DELETION=True)
    def test_delete_archived_data_minio(self):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = "acct10001"
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = "00000000-0000-0000-0000-000000000001"

        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
            self.assertIn("Skipping delete_archived_data. MinIO in use.", captured_logs.output[0])

    @patch("masu.celery.tasks.Config")
    @patch("masu.external.date_accessor.DateAccessor.get_billing_months")
    def test_clean_volume(self, mock_date, mock_config):
        """Test that the clean volume function is cleaning the appropriate files"""
        # create a manifest
        mock_date.return_value = ["2020-02-01"]
        manifest_dict = {
            "assembly_id": "1234",
            "billing_period_start_datetime": "2020-02-01",
            "num_total_files": 2,
            "provider_uuid": self.aws_provider_uuid,
        }
        manifest_accessor = ReportManifestDBAccessor()
        manifest = manifest_accessor.add(**manifest_dict)
        # create three files on the temporary volume, one with a prefix matching
        #  the assembly_id in the manifest above
        with tempfile.TemporaryDirectory() as tmpdirname:
            mock_config.PVC_DIR = tmpdirname
            mock_config.VOLUME_FILE_RETENTION = 60 * 60 * 24
            old_matching_file = os.path.join(tmpdirname, "%s.csv" % manifest.assembly_id)
            new_no_match_file = os.path.join(tmpdirname, "newfile.csv")
            old_no_match_file = os.path.join(tmpdirname, "oldfile.csv")
            filepaths = [old_matching_file, new_no_match_file, old_no_match_file]
            for path in filepaths:
                open(path, "a").close()
                self.assertEqual(os.path.exists(path), True)

            # Update timestamps for the old files
            datehelper = DateHelper()
            now = datehelper.now
            old_datetime = now - timedelta(seconds=mock_config.VOLUME_FILE_RETENTION * 2)
            oldtime = old_datetime.timestamp()
            os.utime(old_matching_file, (oldtime, oldtime))
            os.utime(old_no_match_file, (oldtime, oldtime))

            # now run the clean volume task
            tasks.clean_volume()
            # make sure that the file with the matching id still exists and that
            # the file with the other id is gone
            self.assertEqual(os.path.exists(old_matching_file), True)
            self.assertEqual(os.path.exists(new_no_match_file), True)
            self.assertEqual(os.path.exists(old_no_match_file), False)
            # now edit the manifest to say that all the files have been processed
            # and rerun the clean_volumes task
            manifest.num_processed_files = manifest_dict.get("num_total_files")
            manifest_helper = ManifestCreationHelper(
                manifest.id, manifest_dict.get("num_total_files"), manifest_dict.get("assembly_id")
            )
            manifest_helper.generate_test_report_files()
            manifest_helper.process_all_files()

            manifest.save()
            tasks.clean_volume()
            # ensure that the original file is deleted from the volume
            self.assertEqual(os.path.exists(old_matching_file), False)
            self.assertEqual(os.path.exists(new_no_match_file), True)

        # assert the tempdir is cleaned up
        self.assertEqual(os.path.exists(tmpdirname), False)
        # test no files found for codecov
        tasks.clean_volume()

    @patch("masu.celery.tasks.AWSOrgUnitCrawler")
    def test_crawl_account_hierarchy_with_provider_uuid(self, mock_crawler):
        """Test that only accounts associated with the provider_uuid are polled."""
        mock_crawler.crawl_account_hierarchy.return_value = True
        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.crawl_account_hierarchy(self.aws_test_provider_uuid)
            expected_log_msg = "Account hierarchy crawler found %s accounts to scan" % ("1")
            self.assertIn(expected_log_msg, captured_logs.output[0])

    @patch("masu.celery.tasks.AWSOrgUnitCrawler")
    def test_crawl_account_hierarchy_without_provider_uuid(self, mock_crawler):
        """Test that all polling accounts for user are used when no provider_uuid is provided."""
        _, polling_accounts = Orchestrator.get_accounts()
        mock_crawler.crawl_account_hierarchy.return_value = True
        with self.assertLogs("masu.celery.tasks", "INFO") as captured_logs:
            tasks.crawl_account_hierarchy()
            expected_log_msg = "Account hierarchy crawler found %s accounts to scan" % (len(polling_accounts))
            self.assertIn(expected_log_msg, captured_logs.output[0])

    @patch("masu.celery.tasks.celery_app")
    def test_collect_queue_len(self, mock_celery_app):
        """Test that the collect queue len function runs correctly."""
        mock_celery_app.pool.acquire(block=True).default_channel.client.llen.return_value = 2
        with self.assertLogs("masu.celery.tasks", "DEBUG") as captured_logs:
            tasks.collect_queue_metrics()
            expected_log_msg = "Celery queue backlog info: "
            self.assertIn(expected_log_msg, captured_logs.output[0])

    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    def test_delete_provider_async_not_found(self):
        """Test that delete_provider_async does not raise unhandled error on missing Provider."""
        provider_uuid = "00000000-0000-0000-0000-000000000001"
        with self.assertLogs("masu.celery.tasks", "WARNING") as captured_logs:
            tasks.delete_provider_async("fake name", provider_uuid, "fake_schema")
            expected_log_msg = "does not exist"
            self.assertIn(expected_log_msg, captured_logs.output[0])

    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    def test_out_of_order_source_delete_async_not_found(self):
        """Test that out_of_order_source_delete_async does not raise unhandled error or missing Source."""
        source_id = 0
        with self.assertLogs("masu.celery.tasks", "WARNING") as captured_logs:
            tasks.out_of_order_source_delete_async(source_id)
            expected_log_msg = "does not exist"
            self.assertIn(expected_log_msg, captured_logs.output[0])

    @override_settings(CELERY_TASK_ALWAYS_EAGER=True)
    def test_missing_source_delete_async_not_found(self):
        """Test that missing_source_delete_async does not raise unhandled error on missing Source."""
        source_id = 0
        with self.assertLogs("masu.celery.tasks", "WARNING") as captured_logs:
            tasks.missing_source_delete_async(source_id)
            expected_log_msg = "does not exist"
            self.assertIn(expected_log_msg, captured_logs.output[0])
    def run(self, message_id, **kwargs):
        """
        Load and construct the message and send it off
        """
        l = self.get_logger(**kwargs)

        error_retry_count = kwargs.get('error_retry_count', 0)
        if error_retry_count >= self.max_error_retries:
            raise MaxRetriesExceededError(
                "Can't retry {0}[{1}] args:{2} kwargs:{3}".format(
                    self.name, self.request.id, self.request.args, kwargs))

        l.info("Loading Outbound Message <%s>" % message_id)
        try:
            message = Outbound.objects.select_related('channel').get(
                id=message_id)
        except ObjectDoesNotExist:
            logger.error('Missing Outbound message', exc_info=True)
            return

        if message.attempts < settings.MESSAGE_SENDER_MAX_RETRIES:
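            # Pick the retry delay: a nonzero error_retry_count means a previous
            # transport error, so back off progressively (calculate_retry_delay is
            # assumed to grow with the retry count); otherwise use the task default.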
            if error_retry_count > 0:
                retry_delay = calculate_retry_delay(error_retry_count)
            else:
                retry_delay = self.default_retry_delay
            l.info("Attempts: %s" % message.attempts)
            # send or resend
            try:
                if not message.channel:
                    channel = Channel.objects.get(default=True)
                else:
                    channel = message.channel

                sender = self.get_client(channel)
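                # Apply the per-channel concurrency limit before sending
                # (ConcurrencyLimiter.manage_limit is assumed to delay or requeue
                # when the channel is saturated).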
                ConcurrencyLimiter.manage_limit(self, channel)

                if not message.to_addr and message.to_identity:
                    message.to_addr = get_identity_address(
                        message.to_identity, use_communicate_through=True)

                if message.to_addr and not message.to_identity:
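                    # Try to resolve an existing identity for this address; if none
                    # is found, register a new identity with this msisdn as its
                    # default address.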
                    result = get_identity_by_address(message.to_addr)

                    if result:
                        message.to_identity = result[0]['id']
                    else:
                        identity = {
                            'details': {
                                'default_addr_type': 'msisdn',
                                'addresses': {
                                    'msisdn': {
                                        message.to_addr: {
                                            'default': True
                                        }
                                    }
                                }
                            }
                        }
                        identity = create_identity(identity)
                        message.to_identity = identity['id']

                if "voice_speech_url" in message.metadata:
                    # OBD number of tries metric
                    fire_metric.apply_async(
                        kwargs={
                            "metric_name": 'vumimessage.obd.tries.sum',
                            "metric_value": 1.0
                        })

                    # Voice message
                    speech_url = message.metadata["voice_speech_url"]
                    vumiresponse = sender.send_voice(voice_to_addr_formatter(
                        message.to_addr),
                                                     message.content,
                                                     speech_url=speech_url,
                                                     session_event="new")
                    l.info("Sent voice message to <%s>" % message.to_addr)
                else:
                    # Plain content
                    vumiresponse = sender.send_text(text_to_addr_formatter(
                        message.to_addr),
                                                    message.content,
                                                    session_event="new")
                    l.info("Sent text message to <%s>" % (message.to_addr, ))

                message.last_sent_time = datetime.now()
                message.attempts += 1
                message.vumi_message_id = vumiresponse["message_id"]
                message.save()
                fire_metric.apply_async(kwargs={
                    "metric_name": 'vumimessage.tries.sum',
                    "metric_value": 1.0
                })
            except requests_exceptions.ConnectionError as exc:
                l.info('Connection Error sending message')
                fire_metric.delay('sender.send_message.connection_error.sum',
                                  1)
                kwargs['error_retry_count'] = error_retry_count + 1
                self.retry(exc=exc,
                           countdown=retry_delay,
                           args=(message_id, ),
                           kwargs=kwargs)
            except requests_exceptions.Timeout as exc:
                l.info('Sending message failed due to timeout')
                fire_metric.delay('sender.send_message.timeout.sum', 1)
                kwargs['error_retry_count'] = error_retry_count + 1
                self.retry(exc=exc,
                           countdown=retry_delay,
                           args=(message_id, ),
                           kwargs=kwargs)
            except requests_exceptions.HTTPError as exc:
                # retry message sending if in 500 range (3 default
                # retries)
                l.info('Sending message failed due to status: %s' %
                       exc.response.status_code)
                metric_name = ('sender.send_message.http_error.%s.sum' %
                               exc.response.status_code)
                fire_metric.delay(metric_name, 1)
                kwargs['error_retry_count'] = error_retry_count + 1
                self.retry(exc=exc,
                           countdown=retry_delay,
                           args=(message_id, ),
                           kwargs=kwargs)

            # If we've gotten this far the message send was successful.
            fire_metric.apply_async(kwargs={
                "metric_name": 'message.sent.sum',
                "metric_value": 1.0
            })
            return vumiresponse

        else:
            # This is for retries based on async nacks from the transport.
            l.info("Message <%s> at max retries." % str(message_id))
            message.to_addr = ''
            message.save(update_fields=['to_addr'])
            fire_metric.apply_async(kwargs={
                "metric_name": 'vumimessage.maxretries.sum',
                "metric_value": 1.0
            })
            # Count failures on exhausted tries.
            fire_metric.apply_async(kwargs={
                "metric_name": 'message.failures.sum',
                "metric_value": 1.0
            })
Example no. 23
def revoke_program_certificates(self, username, course_key):
    """
    This task is designed to be called whenever a student's course certificate is
    revoked.

    It will consult with a variety of APIs to determine whether or not the
    specified user's certificate should be revoked in one or more programs, and
    use the credentials service to revoke the said certificates if so.

    Args:
        username (str): The username of the student
        course_key (str|CourseKey): The course identifier

    Returns:
        None

    """
    countdown = 2**self.request.retries
    # If the credentials config model is disabled for this
    # feature, it may indicate a condition where processing of such tasks
    # has been temporarily disabled.  Since this is a recoverable situation,
    # mark this task for retry instead of failing it altogether.

    if not CredentialsApiConfig.current().is_learner_issuance_enabled:
        LOGGER.warning(
            'Task revoke_program_certificates cannot be executed when credentials issuance is disabled in API config',
        )
        raise self.retry(countdown=countdown, max_retries=MAX_RETRIES)

    try:
        student = User.objects.get(username=username)
    except User.DoesNotExist:
        LOGGER.exception(
            u'Task revoke_program_certificates was called with invalid username %s',
            username)
        # Don't retry for this case - just conclude the task.
        return

    try:
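        # inverted_programs maps course run keys to the programs that contain them
        # for this learner (inferred from the lookup below).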
        inverted_programs = get_inverted_programs(student)
        course_specific_programs = inverted_programs.get(str(course_key))
        if not course_specific_programs:
            # No reason to continue beyond this point
            LOGGER.info(
                u'Task revoke_program_certificates was called for user %s and course %s with no engaged programs',
                username, course_key)
            return

        # Determine which program certificates the user has already been awarded, if any.
        program_uuids_to_revoke = get_revokable_program_uuids(
            course_specific_programs, student)
    except Exception as exc:
        LOGGER.exception(
            u'Failed to determine program certificates to be revoked for user %s with course %s',
            username, course_key)
        raise self.retry(exc=exc, countdown=countdown, max_retries=MAX_RETRIES)

    if program_uuids_to_revoke:
        try:
            credentials_client = get_credentials_api_client(
                User.objects.get(
                    username=settings.CREDENTIALS_SERVICE_USERNAME), )
        except Exception as exc:
            LOGGER.exception(
                'Failed to create a credentials API client to revoke program certificates'
            )
            # Retry because a misconfiguration could be fixed
            raise self.retry(exc=exc,
                             countdown=countdown,
                             max_retries=MAX_RETRIES)

        failed_program_certificate_revoke_attempts = []
        for program_uuid in program_uuids_to_revoke:
            try:
                revoke_program_certificate(credentials_client, username,
                                           program_uuid)
                LOGGER.info(u'Revoked certificate for program %s for user %s',
                            program_uuid, username)
            except exceptions.HttpNotFoundError:
                LOGGER.exception(
                    u"""Certificate for program {uuid} could not be found. Unable to revoke certificate for user
                    {username}.""".format(uuid=program_uuid,
                                          username=username))
            except exceptions.HttpClientError as exc:
                # Grab the status code from the client error, because our API
                # client handles all 4XX errors the same way. In the future,
                # we may want to fork slumber, add 429 handling, and use that
                # in edx_rest_api_client.
                if exc.response.status_code == 429:  # pylint: disable=no-member, no-else-raise
                    rate_limit_countdown = 60
                    LOGGER.info(
                        u"""Rate limited. Retrying task to revoke certificates for user {username} in {countdown}
                        seconds""".format(username=username,
                                          countdown=rate_limit_countdown))
                    # Retry after 60 seconds, when we should be in a new throttling window
                    raise self.retry(exc=exc,
                                     countdown=rate_limit_countdown,
                                     max_retries=MAX_RETRIES)
                else:
                    LOGGER.exception(
                        u"Unable to revoke certificate for user {username} for program {uuid}."
                        .format(username=username, uuid=program_uuid))
            except Exception:  # pylint: disable=broad-except
                # keep trying to revoke other certs, but retry the whole task to fix any missing entries
                LOGGER.warning(
                    u'Failed to revoke certificate for program {uuid} of user {username}.'
                    .format(uuid=program_uuid, username=username))
                failed_program_certificate_revoke_attempts.append(program_uuid)

        if failed_program_certificate_revoke_attempts:
            # N.B. This logic assumes that this task is idempotent
            LOGGER.info(
                u'Retrying task to revoke failed certificates to user %s',
                username)
            # The error message may change on each reattempt but will never be raised until
            # the max number of retries has been exceeded. It is unlikely that this list
            # will change by the time it reaches its maximum number of attempts.
            exception = MaxRetriesExceededError(
                u"Failed to revoke certificate for user {} for programs {}".
                format(username, failed_program_certificate_revoke_attempts))
            raise self.retry(exc=exception,
                             countdown=countdown,
                             max_retries=MAX_RETRIES)
    else:
        LOGGER.info(u'There is no program certificates for user %s to revoke',
                    username)

    LOGGER.info(
        u'Successfully completed the task revoke_program_certificates for username %s',
        username)
Example no. 24
class TestCeleryTasks(MasuTestCase):
    """Test cases for Celery tasks."""

    @patch('masu.celery.tasks.Orchestrator')
    def test_check_report_updates(self, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()
        tasks.check_report_updates()

        mock_orchestrator.assert_called()
        mock_orch.prepare.assert_called()

    @patch('masu.celery.tasks.Orchestrator')
    @patch('masu.external.date_accessor.DateAccessor.today')
    def test_remove_expired_data(self, mock_date, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()

        mock_date_string = '2018-07-25 00:00:30.993536'
        mock_date_obj = datetime.strptime(mock_date_string, '%Y-%m-%d %H:%M:%S.%f')
        mock_date.return_value = mock_date_obj

        tasks.remove_expired_data()

        mock_orchestrator.assert_called()
        mock_orch.remove_expired_report_data.assert_called()

    @patch('masu.celery.tasks.Orchestrator')
    @patch('masu.celery.tasks.query_and_upload_to_s3')
    @patch('masu.external.date_accessor.DateAccessor.today')
    def test_upload_normalized_data(self, mock_date, mock_upload, mock_orchestrator):
        """Test that the scheduled task uploads the correct normalized data."""

        test_export_setting = {
            'provider': 'test',
            'table_name': 'test',
            'sql': 'test_sql',
        }
        schema_name = 'acct10001'
        provider_uuid = uuid.uuid4()

        mock_date.return_value = date(2015, 1, 5)

        mock_orchestrator.get_accounts.return_value = (
            [{'schema_name': schema_name, 'provider_uuid': provider_uuid}],
            [],
        )

        current_month_start = date(2015, 1, 1)
        current_month_end = date(2015, 1, 31)
        prev_month_start = date(2014, 12, 1)
        prev_month_end = date(2014, 12, 31)

        call_curr_month = call(
            schema_name,
            provider_uuid,
            test_export_setting,
            (current_month_start, current_month_end),
        )
        call_prev_month = call(
            schema_name,
            provider_uuid,
            test_export_setting,
            (prev_month_start, prev_month_end),
        )

        with patch('masu.celery.tasks.table_export_settings', [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

        mock_date.return_value = date(2012, 3, 31)
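        # 2012 is a leap year, so the previous month (February) ends on the 29th.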
        current_month_start = date(2012, 3, 1)
        current_month_end = date(2012, 3, 31)
        prev_month_start = date(2012, 2, 1)
        prev_month_end = date(2012, 2, 29)

        call_curr_month = call(
            schema_name,
            provider_uuid,
            test_export_setting,
            (current_month_start, current_month_end),
        )
        call_prev_month = call(
            schema_name,
            provider_uuid,
            test_export_setting,
            (prev_month_start, prev_month_end),
        )

        with patch('masu.celery.tasks.table_export_settings', [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

    @patch('masu.celery.tasks.DataExportRequest')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_success(self, mock_sync, mock_data_export_request):
        """Test that the scheduled task correctly calls the sync function."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()

    @patch('masu.celery.tasks.LOG')
    @patch('masu.celery.tasks.DataExportRequest')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_fail_exc(self, mock_sync, mock_data_export_request, mock_log):
        """Test that the scheduled task correctly calls the sync function, which explodes."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        mock_sync.return_value.sync_bucket.side_effect = ClientError(
            error_response={'error': fake.word()}, operation_name=fake.word())

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()
        mock_log.exception.assert_called_once()

    @patch('masu.celery.tasks.DataExportRequest.objects')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_cold_storage_retry(self, mock_sync, mock_data_export_request):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()
        with self.assertRaises(Retry):
            tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.WAITING)

    @patch('masu.celery.tasks.sync_data_to_customer.retry', side_effect=MaxRetriesExceededError())
    @patch('masu.celery.tasks.DataExportRequest.objects')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_max_retry(self, mock_sync, mock_data_export_request, mock_retry):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()

        tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.ERROR)
Example no. 25
class TestCeleryTasks(MasuTestCase):
    """Test cases for Celery tasks."""

    @patch('masu.celery.tasks.Orchestrator')
    def test_check_report_updates(self, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()
        tasks.check_report_updates()

        mock_orchestrator.assert_called()
        mock_orch.prepare.assert_called()

    @patch('masu.celery.tasks.Orchestrator')
    @patch('masu.external.date_accessor.DateAccessor.today')
    def test_remove_expired_data(self, mock_date, mock_orchestrator):
        """Test that the scheduled task calls the orchestrator."""
        mock_orch = mock_orchestrator()

        mock_date_string = '2018-07-25 00:00:30.993536'
        mock_date_obj = datetime.strptime(mock_date_string, '%Y-%m-%d %H:%M:%S.%f')
        mock_date.return_value = mock_date_obj

        tasks.remove_expired_data()

        mock_orchestrator.assert_called()
        mock_orch.remove_expired_report_data.assert_called()

    @patch('masu.celery.tasks.Orchestrator')
    @patch('masu.celery.tasks.query_and_upload_to_s3')
    @patch('masu.external.date_accessor.DateAccessor.today')
    def test_upload_normalized_data(self, mock_date, mock_upload, mock_orchestrator):
        """Test that the scheduled task uploads the correct normalized data."""
        test_export_setting = TableExportSetting(
            provider='test',
            output_name='test',
            sql='test_sql',
            iterate_daily=False
        )
        schema_name = 'acct10001'
        provider_uuid = uuid.uuid4()

        mock_date.return_value = date(2015, 1, 5)

        mock_orchestrator.get_accounts.return_value = (
            [{'schema_name': schema_name, 'provider_uuid': provider_uuid}],
            [],
        )

        current_month_start = date(2015, 1, 1)
        current_month_end = date(2015, 1, 31)
        prev_month_start = date(2014, 12, 1)
        prev_month_end = date(2014, 12, 31)

        call_curr_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            current_month_start,
            current_month_end,
        )
        call_prev_month = call.delay(
            schema_name, provider_uuid,
            dictify_table_export_settings(test_export_setting),
            prev_month_start,
            prev_month_end,
        )

        with patch('masu.celery.tasks.table_export_settings', [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

        mock_date.return_value = date(2012, 3, 31)
        current_month_start = date(2012, 3, 1)
        current_month_end = date(2012, 3, 31)
        prev_month_start = date(2012, 2, 1)
        prev_month_end = date(2012, 2, 29)

        call_curr_month = call.delay(
            schema_name,
            provider_uuid,
            dictify_table_export_settings(test_export_setting),
            current_month_start, current_month_end,
        )
        call_prev_month = call.delay(
            schema_name, provider_uuid,
            dictify_table_export_settings(test_export_setting),
            prev_month_start, prev_month_end,
        )

        with patch('masu.celery.tasks.table_export_settings', [test_export_setting]):
            tasks.upload_normalized_data()
            mock_upload.assert_has_calls([call_curr_month, call_prev_month])

    @patch('masu.celery.tasks.DataExportRequest')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_success(self, mock_sync, mock_data_export_request):
        """Test that the scheduled task correctly calls the sync function."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()

    @patch('masu.celery.tasks.LOG')
    @patch('masu.celery.tasks.DataExportRequest')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_fail_exc(self, mock_sync, mock_data_export_request, mock_log):
        """Test that the scheduled task correctly calls the sync function, which explodes."""
        mock_data_export_request.uuid = fake.uuid4()
        mock_data_get = mock_data_export_request.objects.get
        mock_data_save = mock_data_get.return_value.save

        mock_sync.return_value.sync_bucket.side_effect = ClientError(
            error_response={'error': fake.word()}, operation_name=fake.word()
        )

        tasks.sync_data_to_customer(mock_data_export_request.uuid)

        mock_data_get.assert_called_once_with(uuid=mock_data_export_request.uuid)
        self.assertEqual(mock_data_save.call_count, 2)
        mock_sync.assert_called_once()
        mock_sync.return_value.sync_bucket.assert_called_once()
        mock_log.exception.assert_called_once()

    @patch('masu.celery.tasks.DataExportRequest.objects')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_cold_storage_retry(self, mock_sync, mock_data_export_request):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()
        with self.assertRaises(Retry):
            tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.WAITING)

    @patch('masu.celery.tasks.sync_data_to_customer.retry', side_effect=MaxRetriesExceededError())
    @patch('masu.celery.tasks.DataExportRequest.objects')
    @patch('masu.celery.tasks.AwsS3Syncer')
    def test_sync_data_to_customer_max_retry(self, mock_sync, mock_data_export_request, mock_retry):
        """Test that the sync task retries syncer fails with a cold storage error."""
        data_export_object = Mock()
        data_export_object.uuid = fake.uuid4()
        data_export_object.status = APIExportRequest.PENDING

        mock_data_export_request.get.return_value = data_export_object
        mock_sync.return_value.sync_bucket.side_effect = SyncedFileInColdStorageError()

        tasks.sync_data_to_customer(data_export_object.uuid)
        self.assertEqual(data_export_object.status, APIExportRequest.ERROR)

    def test_delete_archived_data_bad_inputs_exception(self):
        """Test that delete_archived_data raises an exception when given bad inputs."""
        schema_name, provider_type, provider_uuid = '', '', ''
        with self.assertRaises(TypeError) as e:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        self.assertIn('schema_name', str(e.exception))
        self.assertIn('provider_type', str(e.exception))
        self.assertIn('provider_uuid', str(e.exception))

    @patch('masu.util.aws.common.boto3.resource')
    @override_settings(ENABLE_S3_ARCHIVING=False)
    def test_delete_archived_data_archiving_disabled_noop(self, mock_resource):
        """Test that delete_archived_data returns early when feature is disabled."""
        schema_name, provider_type, provider_uuid = fake.slug(), Provider.PROVIDER_AWS, fake.uuid4()
        tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_not_called()

    @patch('masu.util.aws.common.boto3.resource')
    @override_settings(S3_BUCKET_PATH='')
    def test_delete_archived_data_missing_bucket_path_exception(self, mock_resource):
        """Test that delete_archived_data raises an exception with an empty bucket path."""
        schema_name, provider_type, provider_uuid = fake.slug(), Provider.PROVIDER_AWS, fake.uuid4()
        with self.assertRaises(ImproperlyConfigured):
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_not_called()

    @patch('masu.util.aws.common.boto3.resource')
    @override_settings(S3_BUCKET_PATH='data_archive')
    def test_delete_archived_data_success(self, mock_resource):
        """Test that delete_archived_data correctly interacts with AWS S3."""
        schema_name = 'acct10001'
        provider_type = Provider.PROVIDER_AWS
        provider_uuid = '00000000-0000-0000-0000-000000000001'
        expected_prefix = 'data_archive/acct10001/aws/00000000-0000-0000-0000-000000000001/'

        # Generate enough fake objects to expect calling the S3 delete api twice.
        mock_bucket = mock_resource.return_value.Bucket.return_value
        bucket_objects = [DummyS3Object(key=fake.file_path()) for _ in range(1234)]
        expected_keys = [{'Key': bucket_object.key} for bucket_object in bucket_objects]
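        # S3 delete_objects accepts at most 1000 keys per request, so the 1234 keys
        # above are expected to be deleted in two batches (1000 + 234).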

        # Leave one object mysteriously not deleted to cover the LOG.warning use case.
        mock_bucket.objects.filter.side_effect = [bucket_objects, bucket_objects[:1]]

        with self.assertLogs('masu.celery.tasks', 'WARNING') as captured_logs:
            tasks.delete_archived_data(schema_name, provider_type, provider_uuid)
        mock_resource.assert_called()
        mock_bucket.delete_objects.assert_has_calls(
            [
                call(Delete={'Objects': expected_keys[:1000]}),
                call(Delete={'Objects': expected_keys[1000:]}),
            ]
        )
        mock_bucket.objects.filter.assert_has_calls(
            [call(Prefix=expected_prefix), call(Prefix=expected_prefix)]
        )
        self.assertIn('Found 1 objects after attempting', captured_logs.output[-1])

    @patch('masu.celery.tasks.vacuum_schema')
    def test_vacuum_schemas(self, mock_vacuum):
        """Test that the vacuum_schemas scheduled task runs for all schemas."""
        schema_one = 'acct123'
        schema_two = 'acct456'
        with connection.cursor() as cursor:
            cursor.execute(
                """
                INSERT INTO api_tenant (schema_name)
                VALUES (%s), (%s)
                """,
                [schema_one, schema_two]
            )

        tasks.vacuum_schemas()

        for schema_name in [schema_one, schema_two]:
            mock_vacuum.delay.assert_any_call(schema_name)
# NOTE: this excerpt begins mid-test; the signature below is an assumed,
# illustrative reconstruction so the snippet reads as a complete test.
def test_should_not_retry_when_email_is_invalid(sample_notification, mocker):
    mocker.patch('app.delivery.send_to_providers.send_email_to_provider',
                 side_effect=InvalidEmailError('bad email'))
    mocker.patch('app.celery.provider_tasks.deliver_email.retry')

    with pytest.raises(NotificationTechnicalFailureException):
        deliver_email(sample_notification.id)

    assert provider_tasks.deliver_email.retry.called is False
    assert sample_notification.status == 'technical-failure'


@pytest.mark.parametrize('exception_class', [
    Exception(),
    AwsSesClientException(),
    AwsSesClientThrottlingSendRateException(),
    MaxRetriesExceededError()
])
def test_should_go_into_technical_error_if_exceeds_retries_on_deliver_email_task(
        sample_notification, mocker, exception_class):
    mocker.patch('app.delivery.send_to_providers.send_email_to_provider',
                 side_effect=exception_class)
    mocker.patch('app.celery.provider_tasks.deliver_email.retry',
                 side_effect=MaxRetriesExceededError())

    with pytest.raises(NotificationTechnicalFailureException) as e:
        deliver_email(sample_notification.id)
    assert str(sample_notification.id) in str(e.value)

    provider_tasks.deliver_email.retry.assert_called_with(queue="retry-tasks")
    assert sample_notification.status == 'technical-failure'