def test_extract(self, monkeypatch):
    """Extract returns metrics carrying the stubbed average progress.

    `bulk_calculate_course_progress_data` is monkeypatched so the test
    does not depend on real learner progress data.
    """
    # NOTE(review): another `test_extract` appears later in this file —
    # confirm the two live in different test classes, otherwise one
    # silently shadows the other and only one of them runs.
    course_id = self.course_enrollments[0].course_id
    monkeypatch.setattr(figures.pipeline.course_daily_metrics,
                        'bulk_calculate_course_progress_data',
                        lambda **_kwargs: dict(average_progress=0.5))
    results = pipeline_cdm.CourseDailyMetricsExtractor().extract(course_id)
    assert results
    # Pin the stubbed value (as `test_extract_default` does) instead of
    # asserting truthiness only
    assert results['average_progress'] == 0.5
def test_extract_ed_next(self, monkeypatch, prog_func, ed_next):
    """The selected progress calculator (default or alternate) is invoked.

    `prog_func` and `ed_next` are supplied by fixtures/parametrization;
    the calculator named by `prog_func` is replaced with a mock so we can
    verify it was called during extraction.
    """
    course_id = self.course_enrollments[0].course_id
    patch_target = 'figures.pipeline.course_daily_metrics.{}'.format(
        prog_func)
    with mock.patch(patch_target) as patched_calculator:
        results = pipeline_cdm.CourseDailyMetricsExtractor().extract(
            course_id, self.date_for, ed_next=ed_next)
    # `called` persists after the patch context exits
    assert patched_calculator.called
    assert results
def test_extract_default(self, monkeypatch):
    """The default progress calculator is used when `ed_next` is not set.

    The default calculator is stubbed to return a sentinel value, which
    must flow through to the extracted results.
    """
    sentinel_progress = 'fake-average-progress'
    course_id = self.course_enrollments[0].course_id
    monkeypatch.setattr(
        figures.pipeline.course_daily_metrics,
        'bulk_calculate_course_progress_data',
        lambda **_kwargs: {'average_progress': sentinel_progress})
    extractor = pipeline_cdm.CourseDailyMetricsExtractor()
    results = extractor.extract(course_id, self.date_for)
    assert results['average_progress'] == sentinel_progress
def test_when_bulk_calculate_course_progress_data_fails(
        self, monkeypatch, caplog):
    """Extraction survives a progress-calculation failure and logs it.

    When the default calculator raises, `extract` must still return a
    result, log a 'FIGURES:FAIL ...' message, and leave
    `average_progress` empty/falsy.
    """
    course_id = self.course_enrollments[0].course_id

    def failing_bulk_calc(**_kwargs):
        raise Exception('fake exception')

    monkeypatch.setattr(figures.pipeline.course_daily_metrics,
                        'bulk_calculate_course_progress_data',
                        failing_bulk_calc)
    extractor = pipeline_cdm.CourseDailyMetricsExtractor()
    results = extractor.extract(course_id)
    newest_record = caplog.records[-1]
    assert newest_record.message.startswith(
        'FIGURES:FAIL bulk_calculate_course_progress_data')
    assert not results['average_progress']
def test_when_calculate_course_progress_data_fails(self, monkeypatch,
                                                   caplog, prog_func,
                                                   ed_next):
    """A failing calculator is logged and leaves progress falsy.

    Parametrized over both calculators (`prog_func`) and the `ed_next`
    flag: whichever calculator is selected, a raised exception must be
    caught, logged as 'FIGURES:FAIL <calculator>', and reflected as an
    empty `average_progress`.
    """
    course_id = self.course_enrollments[0].course_id

    def failing_calculator(**_kwargs):
        raise Exception('fake exception')

    monkeypatch.setattr(figures.pipeline.course_daily_metrics,
                        prog_func, failing_calculator)
    results = pipeline_cdm.CourseDailyMetricsExtractor().extract(
        course_id, self.date_for, ed_next)
    expected_prefix = 'FIGURES:FAIL {}'.format(prog_func)
    assert caplog.records[-1].message.startswith(expected_prefix)
    assert not results['average_progress']
def test_extract(self):
    """Smoke test: extraction produces a truthy result for an enrollment."""
    # NOTE(review): an earlier `test_extract` also appears in this file —
    # confirm the two belong to different test classes, otherwise one
    # shadows the other.
    course_id = self.course_enrollments[0].course_id
    extractor = pipeline_cdm.CourseDailyMetricsExtractor()
    assert extractor.extract(course_id)