def test_create_assessment_workflow_already_complete(self):
    """Submitting scores twice for one workflow creates only one assessment."""
    # Fire the same create call twice in a row; the second should be a no-op.
    for _ in range(2):
        ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)

    # Only the first call should have persisted an assessment.
    created = Assessment.objects.filter(submission_uuid=self.submission_uuid)
    self.assertEqual(created.count(), 1)
def test_create_assessment_workflow_already_complete(self):
    """Duplicate create calls for a completed workflow must not add assessments."""
    # First call completes the workflow; the repeat should be ignored.
    ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)
    ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)

    # Exactly one assessment should exist for this submission.
    count = Assessment.objects.filter(
        submission_uuid=self.submission_uuid
    ).count()
    self.assertEqual(1, count)
def grade_essay(workflow_uuid):
    """
    Asynchronous task to grade an essay using a text classifier
    (trained using a supervised ML algorithm).

    If the task could not be completed successfully,
    it will be retried a few times; if it continues to fail,
    it is left incomplete.  Incomplete tasks can be rescheduled
    manually through the AI API.

    Args:
        workflow_uuid (str): The UUID of the workflow associated
            with this grading task.

    Returns:
        None

    Raises:
        AIError: An error occurred while making an AI worker API call.
        AIAlgorithmError: An error occurred while retrieving or using an AI algorithm.

    """
    # NOTE(review): `raise grade_essay.retry()` is the celery task-retry idiom
    # (retry() raises a Retry exception) — presumably this function is decorated
    # as a celery task elsewhere in the file; confirm.

    # Short-circuit if the workflow is already marked complete
    # This is an optimization, but grading tasks could still
    # execute multiple times depending on when they get picked
    # up by workers and marked complete.
    try:
        if ai_worker_api.is_grading_workflow_complete(workflow_uuid):
            return
    except AIError:
        msg = (
            u"An unexpected error occurred while checking the "
            u"completion of grading workflow with UUID {uuid}"
        ).format(uuid=workflow_uuid)
        logger.exception(msg)
        raise grade_essay.retry()

    # Retrieve the task parameters
    # KeyError is treated the same as an API error: missing keys mean the
    # server sent back malformed parameters, so the task is retried.
    try:
        params = ai_worker_api.get_grading_task_params(workflow_uuid)
        essay_text = params['essay_text']
        classifier_set = params['classifier_set']
        algorithm_id = params['algorithm_id']
        valid_scores = params['valid_scores']
    except (AIError, KeyError):
        msg = (
            u"An error occurred while retrieving the AI grading task "
            u"parameters for the workflow with UUID {}"
        ).format(workflow_uuid)
        logger.exception(msg)
        raise grade_essay.retry()

    # Validate that we have valid scores for each criterion.
    # A missing or empty score list is a server-side data problem, so this
    # raises AIGradingInternalError instead of retrying.
    for criterion_name in classifier_set.keys():
        msg = None
        if criterion_name not in valid_scores:
            msg = (
                u"Could not find {criterion} in the list of valid scores "
                u"for grading workflow with UUID {uuid}"
            ).format(criterion=criterion_name, uuid=workflow_uuid)
        elif len(valid_scores[criterion_name]) == 0:
            msg = (
                u"Valid scores for {criterion} is empty for "
                u"grading workflow with UUID {uuid}"
            ).format(criterion=criterion_name, uuid=workflow_uuid)
        if msg:
            logger.exception(msg)
            raise AIGradingInternalError(msg)

    # Retrieve the AI algorithm
    try:
        algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
    except AIAlgorithmError:
        msg = (
            u"An error occurred while retrieving "
            u"the algorithm ID (grading workflow UUID {})"
        ).format(workflow_uuid)
        logger.exception(msg)
        raise grade_essay.retry()

    # Use the algorithm to evaluate the essay for each criterion
    # Provide an in-memory cache so the algorithm can re-use
    # results for multiple rubric criteria.
    # Raw algorithm output is snapped to the nearest allowed score via
    # _closest_valid_score, so only valid rubric points are recorded.
    try:
        cache = dict()
        scores_by_criterion = {
            criterion_name: _closest_valid_score(
                algorithm.score(essay_text, classifier, cache),
                valid_scores[criterion_name]
            )
            for criterion_name, classifier in classifier_set.iteritems()
        }
    except AIAlgorithmError:
        msg = (
            u"An error occurred while scoring essays using "
            u"an AI algorithm (worker workflow UUID {})"
        ).format(workflow_uuid)
        logger.exception(msg)
        raise grade_essay.retry()

    # Create the assessment and mark the workflow complete
    try:
        ai_worker_api.create_assessment(workflow_uuid, scores_by_criterion)
    except AIError:
        msg = (
            u"An error occurred while creating assessments "
            u"for the AI grading workflow with UUID {uuid}. "
            u"The assessment scores were: {scores}"
        ).format(uuid=workflow_uuid, scores=scores_by_criterion)
        logger.exception(msg)
        raise grade_essay.retry()
def test_create_assessment_database_error_complete_workflow(self, mock_call):
    """A database failure while completing the workflow surfaces as an internal error."""
    # Make the patched call blow up at the database layer.
    mock_call.side_effect = DatabaseError("KABOOM!")

    # The API should translate the DatabaseError into AIGradingInternalError.
    with self.assertRaises(AIGradingInternalError):
        ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)
def test_create_assessment_no_workflow(self):
    """An unknown workflow UUID is rejected as a request error."""
    # No workflow exists for this UUID, so the call must fail fast.
    with self.assertRaises(AIGradingRequestError):
        ai_worker_api.create_assessment("invalid_uuid", self.SCORES)
def test_create_assessment(self):
    """A successful create call records an assessment worth one point."""
    ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)

    # Fetch the stored assessment and verify the earned points.
    stored = Assessment.objects.get(submission_uuid=self.submission_uuid)
    self.assertEqual(stored.points_earned, 1)
def test_create_assessment_database_error_complete_workflow(
        self, mock_call):
    """Database errors during workflow completion become AIGradingInternalError."""
    # Arrange: the patched dependency raises at the database layer.
    mock_call.side_effect = DatabaseError("KABOOM!")
    # Act / assert: the failure is wrapped in the API's internal-error type.
    with self.assertRaises(AIGradingInternalError):
        ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)
def test_create_assessment(self):
    """Creating an assessment stores it with the expected points earned."""
    # Create the assessment through the worker API.
    ai_worker_api.create_assessment(self.workflow_uuid, self.SCORES)

    # It should be retrievable by submission UUID with one earned point.
    result = Assessment.objects.get(
        submission_uuid=self.submission_uuid
    )
    self.assertEqual(1, result.points_earned)