def test_populate_daily_metrics_enrollment_data_error(transactional_db,
                                                      monkeypatch, caplog):
    # Needs to be 'today' so that enrollment data update gets called
    date_for = date.today()
    site = SiteFactory()

    def fake_populate_daily_metrics_for_site(**_kwargs):
        pass

    def fake_update_enrollment_data_fails(**kwargs):
        # TODO: test with different exceptions
        # At least one with and without `message_dict`
        raise FakeException('Hey!')

    monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
                        fake_populate_daily_metrics_for_site)
    monkeypatch.setattr('figures.tasks.update_enrollment_data',
                        fake_update_enrollment_data_fails)

    populate_daily_metrics(date_for=date_for)

    last_log = caplog.records[-1]
    expected_msg = ('{prefix}:FAIL figures.tasks update_enrollment_data '
                    ' unhandled exception. site[{site_id}]:{domain}').format(
                        prefix=FPD_LOG_PREFIX,
                        site_id=site.id,
                        domain=site.domain)
    assert last_log.message == expected_msg
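# A hedged sketch of the follow-up suggested by the TODO above: parametrize the
# failure over exceptions with and without a `message_dict` attribute. It
# assumes `import pytest` and `from django.core.exceptions import ValidationError`
# at module top (ValidationError built from a dict exposes `message_dict`).
# The exact log text per exception type is an assumption, so only the FAIL
# marker is asserted here.
@pytest.mark.parametrize('exception', [
    FakeException('Hey!'),                          # no message_dict
    ValidationError({'enrollment': ['bad data']}),  # has message_dict
])
def test_populate_daily_metrics_enrollment_data_error_variants(
        transactional_db, monkeypatch, caplog, exception):
    date_for = date.today()
    SiteFactory()  # ensure at least one extra site exists

    monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
                        lambda **_kwargs: None)

    def fake_update_enrollment_data_fails(**_kwargs):
        raise exception

    monkeypatch.setattr('figures.tasks.update_enrollment_data',
                        fake_update_enrollment_data_fails)

    populate_daily_metrics(date_for=date_for)

    assert 'FAIL' in caplog.records[-1].message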
    def handle(self, *args, **options):
        '''
        Note the '# pragma: no cover' markers below: they exist because we do
        not yet mock celery for test coverage.
        '''
        print('populating Figures metrics...')

        kwargs = dict(
            date_for=options['date'],
            force_update=options['force_update'],
            )

        experimental = options.pop('experimental')

        if experimental:
            if options['no_delay']:
                experimental_populate_daily_metrics(**kwargs)
            else:
                experimental_populate_daily_metrics.delay(**kwargs)  # pragma: no cover
        else:
            if options['no_delay']:
                populate_daily_metrics(**kwargs)
            else:
                populate_daily_metrics.delay(**kwargs)  # pragma: no cover

        # TODO: improve this message to say 'today' when options['date'] is None
        print('Management command populate_figures_metrics complete. date_for: {}'.format(
            options['date']))
        print('Done.')
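# A brief usage sketch for the command above. Assumptions: the command is
# registered as 'populate_figures_metrics' (per the completion message) and its
# add_arguments() supplies defaults for 'date', 'force_update', 'experimental'
# and 'no_delay'. Passing no_delay=True runs the task synchronously, which
# sidesteps the celery mocking gap noted in the docstring.
from django.core.management import call_command

call_command('populate_figures_metrics', no_delay=True, date='2020-01-02')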
def test_enable_populate_daily_metrics(caplog):
    """Test figures.tasks.populate_daily_metrics

    Tests that when WAFFLE_DISABLE_PIPELINE is not active, the disabled warning msg is not logged
    """
    with override_switch('figures.disable_pipeline', active=False):
        populate_daily_metrics()
        assert 'disabled' not in caplog.text
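# A hedged companion sketch for the disabled path: with the switch active the
# task is expected to short-circuit and log a warning containing 'disabled'.
# The exact warning text is an assumption; this mirrors the enable test above.
def test_disable_populate_daily_metrics(caplog):
    with override_switch('figures.disable_pipeline', active=True):
        populate_daily_metrics()
        assert 'disabled' in caplog.text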
def test_populate_daily_metrics_multisite(transactional_db, monkeypatch):
    # Stand up test data
    date_for = '2019-01-02'
    site_links = []
    for domain in ['alpha.domain', 'bravo.domain']:
        site_links.append(
            dict(
                site=SiteFactory(domain=domain),
                courses=[CourseOverviewFactory() for _ in range(2)],
            ))

    # Run the task once, after the per-site test data is in place
    populate_daily_metrics(date_for=date_for)
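# A hedged sketch of how the multisite test above could assert that every site
# is processed: reuse the monkeypatch recorder pattern from
# test_populate_daily_metrics_site_level_error below and check that both new
# sites show up in the recorded site ids.
def test_populate_daily_metrics_multisite_processes_each_site(transactional_db,
                                                              monkeypatch):
    sites = [SiteFactory(domain=domain)
             for domain in ['charlie.domain', 'delta.domain']]
    processed_site_ids = []

    def fake_populate_daily_metrics_for_site(site_id, **_kwargs):
        processed_site_ids.append(site_id)

    monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
                        fake_populate_daily_metrics_for_site)

    populate_daily_metrics(date_for='2019-01-02')

    assert {site.id for site in sites} <= set(processed_site_ids)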
    def handle(self, *args, **options):
        '''
        Note the '# pragma: no cover' markers below: they exist because we do
        not yet mock celery for test coverage.

        The 'mau' conditional check in this method is a quick hack to run the
        MAU task from this command. What we probably want is a 'figures_cli'
        command with subcommands.
        '''
        print('populating Figures metrics...')

        kwargs = dict(
            date_for=options['date'],
            force_update=options['force_update'],
        )

        if options['mau']:
            if options['no_delay']:
                populate_all_mau()
            else:
                populate_all_mau.delay()  # pragma: no cover
        else:
            experimental = options.pop('experimental')

            if experimental:
                if options['no_delay']:
                    experimental_populate_daily_metrics(**kwargs)
                else:
                    experimental_populate_daily_metrics.delay(
                        **kwargs)  # pragma: no cover
            else:
                if options['no_delay']:
                    populate_daily_metrics(**kwargs)
                else:
                    populate_daily_metrics.delay(**kwargs)  # pragma: no cover

        # TODO: improve this message to say 'today' when options['date'] is None
        print(
            'Management command populate_figures_metrics complete. date_for: {}'
            .format(options['date']))
        print('Done.')
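# A brief usage sketch for the 'mau' branch above. Assumption: the command
# exposes a boolean 'mau' option alongside 'no_delay'; with no_delay=True,
# populate_all_mau runs synchronously instead of being queued through celery.
from django.core.management import call_command

call_command('populate_figures_metrics', mau=True, no_delay=True)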
def test_populate_daily_metrics_site_level_error(transactional_db, monkeypatch,
                                                 caplog):
    """
    Generic test that the first site fails but we can process the second site
    """
    assert Site.objects.count() == 1  # Because we always have 'example.com'

    good_site = Site.objects.first()
    bad_site = SiteFactory()
    populated_site_ids = []
    failed_site_ids = []
    date_for = date.today()

    def fake_populate_daily_metrics_for_site(site_id, **_kwargs):
        """
        """
        if site_id == bad_site.id:
            failed_site_ids.append(site_id)
            raise FakeException('Hey!')
        else:
            populated_site_ids.append(site_id)

    monkeypatch.setattr('figures.tasks.populate_daily_metrics_for_site',
                        fake_populate_daily_metrics_for_site)

    populate_daily_metrics(date_for=date_for)
    assert set(populated_site_ids) == {good_site.id}
    assert set(failed_site_ids) == {bad_site.id}

    last_log = caplog.records[-1]
    expected_msg = ('{prefix}:FAIL populate_daily_metrics unhandled site level'
                    ' exception for site[{site_id}]={domain}').format(
                        prefix=FPD_LOG_PREFIX,
                        site_id=bad_site.id,
                        domain=bad_site.domain)
    assert last_log.message == expected_msg
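# A simplified sketch (not the actual figures.tasks implementation) of the
# per-site error isolation this test exercises: each site is processed in its
# own try/except so one failing site cannot abort the rest, and the failure is
# logged with the FPD_LOG_PREFIX marker the assertions above look for. Assumes
# a module-level `logger = logging.getLogger(__name__)`.
def _populate_daily_metrics_per_site_sketch(date_for):
    for site in Site.objects.all():
        try:
            populate_daily_metrics_for_site(site_id=site.id, date_for=date_for)
        except Exception:  # pylint: disable=broad-except
            logger.exception(
                '{prefix}:FAIL populate_daily_metrics unhandled site level'
                ' exception for site[{site_id}]={domain}'.format(
                    prefix=FPD_LOG_PREFIX, site_id=site.id, domain=site.domain))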