Example #1
    def test_simulate_ai_grading_error(self):
        # Run the command
        cmd = simulate_ai_grading_error.Command()
        cmd.handle(self.COURSE_ID.encode('utf-8'),
                   self.ITEM_ID.encode('utf-8'), self.NUM_SUBMISSIONS, "fake")

        # Check that the correct number of incomplete workflows
        # were created.  These workflows should still have
        # a classifier set, though, because otherwise they
        # wouldn't have been scheduled for grading
        # (that is, the submissions were made before classifier
        # training completed).
        incomplete_workflows = AIGradingWorkflow.objects.filter(
            classifier_set__isnull=False, completed_at__isnull=True)
        num_errors = incomplete_workflows.count()
        self.assertEqual(self.NUM_SUBMISSIONS, num_errors)

        # Verify that we can complete the workflows successfully
        # (that is, make sure the classifier data is valid)
        # We're calling a Celery task method here,
        # but we're NOT using `apply_async`, so this will
        # execute synchronously.
        for workflow in incomplete_workflows:
            grade_essay(workflow.uuid)

        # Now there should be no incomplete workflows
        remaining_incomplete = AIGradingWorkflow.objects.filter(
            classifier_set__isnull=False, completed_at__isnull=True).count()
        self.assertEqual(remaining_incomplete, 0)
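The test above calls grade_essay directly so that it runs synchronously in the test process. In production the same work would normally be queued through Celery instead; a minimal sketch of that, assuming grade_essay is registered as a Celery task as the comment implies, would be:

        # Queue the grading task for a worker instead of running it inline.
        # apply_async (and the delay shorthand) are standard Celery task API;
        # the loop mirrors the one in the test above.
        for workflow in incomplete_workflows:
            grade_essay.apply_async(args=[workflow.uuid])
            # equivalent shorthand: grade_essay.delay(workflow.uuid)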
Example #2
    def test_params_missing_criterion_for_valid_scores(self, mock_call):
        mock_call.return_value = {
            'essay_text': 'test',
            'classifier_set': {
                u"vøȼȺƀᵾłȺɍɏ": {},
                u"ﻭɼค๓๓คɼ": {}
            },
            'algorithm_id': ALGORITHM_ID,
            'valid_scores': {}
        }
        with self.assert_retry(grade_essay, AIGradingInternalError):
            grade_essay(self.workflow_uuid)
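For contrast, a well-formed parameter dictionary would carry an entry in 'valid_scores' for every criterion that appears in 'classifier_set'; the empty dict above is what triggers the retry. The point values below are illustrative only, not taken from the project:

        mock_call.return_value = {
            'essay_text': 'test',
            'classifier_set': {
                u"vøȼȺƀᵾłȺɍɏ": {},
                u"ﻭɼค๓๓คɼ": {}
            },
            'algorithm_id': ALGORITHM_ID,
            # One list of rubric point values per criterion (illustrative).
            'valid_scores': {
                u"vøȼȺƀᵾłȺɍɏ": [0, 1, 2],
                u"ﻭɼค๓๓คɼ": [0, 1, 2]
            }
        }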
Example #3
    def test_skip_completed_workflow(self):
        # Mark the grading workflow as complete
        workflow = AIGradingWorkflow.objects.get(uuid=self.workflow_uuid)
        workflow.mark_complete_and_save()

        # The grading task should short-circuit immediately, skipping calls
        # to get parameters for the task.
        actual_call = ai_worker_api.get_grading_task_params
        patched = 'openassessment.assessment.worker.grading.ai_worker_api.get_grading_task_params'
        with mock.patch(patched) as mock_call:
            mock_call.side_effect = actual_call
            grade_essay(self.workflow_uuid)
            self.assertFalse(mock_call.called)
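Note the patching pattern here: assigning the real function as the mock's side_effect keeps normal behavior if the call were ever made, while the mock still records whether it was invoked, so assertFalse(mock_call.called) verifies the short-circuit without masking a real failure.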
Example #4
    def test_algorithm_gives_invalid_score(self, mock_create_assessment):
        # If an algorithm provides a score that isn't in the rubric,
        # we should choose the closest valid score.
        self._set_algorithm_id(INVALID_SCORE_ALGORITHM_ID)

        # The first score given by the algorithm should be below the minimum valid score
        # The second score will be between two valid scores (0 and 1), rounding up
        grade_essay(self.workflow_uuid)
        expected_scores = {u"vøȼȺƀᵾłȺɍɏ": 0, u"ﻭɼค๓๓คɼ": 1}
        mock_create_assessment.assert_called_with(self.workflow_uuid,
                                                  expected_scores)

        # The third score will be between two valid scores (1 and 2), rounding down
        # The final score will be greater than the maximum score
        self._reset_workflow()
        grade_essay(self.workflow_uuid)
        expected_scores = {u"vøȼȺƀᵾłȺɍɏ": 1, u"ﻭɼค๓๓คɼ": 2}
        mock_create_assessment.assert_called_with(self.workflow_uuid,
                                                  expected_scores)
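The comments above describe a "closest valid score" rule: raw algorithm scores are clamped to the rubric's range and snapped to the nearest defined point value. A minimal self-contained sketch of such a rule (the worker's actual implementation and its tie-breaking may differ) is:

    def closest_valid_score(score, valid_scores):
        # Snap a raw algorithm score to the nearest rubric point value;
        # scores below the minimum or above the maximum clamp to the ends.
        return min(valid_scores, key=lambda valid: abs(valid - score))

    # closest_valid_score(-0.3, [0, 1, 2]) -> 0
    # closest_valid_score(3.7, [0, 1, 2])  -> 2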
Example #5
    def test_create_assessment_error(self, mock_call):
        mock_call.side_effect = AIGradingInternalError
        with self.assert_retry(grade_essay, AIGradingInternalError):
            grade_essay(self.workflow_uuid)
Example #6
    def test_algorithm_score_error(self):
        self._set_algorithm_id(ERROR_STUB_ALGORITHM_ID)
        with self.assert_retry(grade_essay, ScoreError):
            grade_essay(self.workflow_uuid)
Example #7
    def test_unknown_algorithm_id_error(self):
        # Since we're not overriding settings, the algorithm ID won't be recognized
        with self.assert_retry(grade_essay, UnknownAlgorithm):
            grade_essay(self.workflow_uuid)
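The comment implies that the worker resolves algorithm IDs through Django settings. For illustration, a test that does want the ID recognized would typically override that setting; the setting name, class path, and test below are assumptions, not taken from this page:

    from django.test.utils import override_settings

    @override_settings(ORA2_AI_ALGORITHMS={ALGORITHM_ID: 'path.to.FakeAIAlgorithm'})
    def test_recognized_algorithm_id(self):
        # Hypothetical counterpart to the test above: with the mapping in
        # place, grade_essay should not raise UnknownAlgorithm.
        grade_essay(self.workflow_uuid)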
Example #8
    def test_retrieve_params_error(self, mock_call):
        mock_call.side_effect = AIGradingInternalError("Test error")
        with self.assert_retry(grade_essay, AIGradingInternalError):
            grade_essay(self.workflow_uuid)
Example #9
    def test_check_complete_error(self):
        with self.assert_retry(grade_essay, AIGradingRequestError):
            grade_essay("no such workflow uuid")