def test_get_training_example_num_queries(self):
    """Pin the database query counts for each stage of retrieving training examples."""
    # Run through the training example once using a different submission
    # Training examples and rubrics will be cached and shared for other
    # students working on the same problem.
    self._warm_cache(RUBRIC, EXAMPLES)

    # First training example
    # This will need to create the student training workflow and the first item
    # NOTE: we *could* cache the rubric model to reduce the number of queries here,
    # but we're selecting it by content hash, which is indexed and should be plenty fast.
    with self.assertNumQueries(8):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Without assessing the first training example, try to retrieve a training example.
    # This should return the same example as before, so we won't need to create
    # any workflows or workflow items.
    with self.assertNumQueries(5):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Assess the current training example
    training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    # Retrieve the next training example, which requires us to create
    # a new workflow item (but not a new workflow).
    with self.assertNumQueries(8):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
def test_get_training_example_num_queries(self):
    """Pin the database query counts for each stage of retrieving training examples."""
    # Run through the training example once using a different submission
    # Training examples and rubrics will be cached and shared for other
    # students working on the same problem.
    self._warm_cache(RUBRIC, EXAMPLES)

    # First training example
    # This will need to create the student training workflow and the first item
    # NOTE: we *could* cache the rubric model to reduce the number of queries here,
    # but we're selecting it by content hash, which is indexed and should be plenty fast.
    with self.assertNumQueries(6):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Without assessing the first training example, try to retrieve a training example.
    # This should return the same example as before, so we won't need to create
    # any workflows or workflow items.
    with self.assertNumQueries(3):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Assess the current training example
    training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    # Retrieve the next training example, which requires us to create
    # a new workflow item (but not a new workflow).
    with self.assertNumQueries(4):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
def test_assess_training_example_no_workflow(self):
    """
    Assessing before any example has been retrieved (retrieval is what
    implicitly creates the workflow) should raise a request error.
    """
    selected = EXAMPLES[0]['options_selected']
    with self.assertRaises(StudentTrainingRequestError):
        training_api.assess_training_example(self.submission_uuid, selected)
def test_training_workflow(self):
    """Walk the full training workflow: correct, incorrect, then corrected."""
    # Before any assessments the student is on step 0 of 2
    self._assert_workflow_status(self.submission_uuid, 0, 2)

    # Fetch the first example and score it exactly as the instructor did
    self._assert_get_example(self.submission_uuid, 0, EXAMPLES, RUBRIC)
    corrections = training_api.assess_training_example(
        self.submission_uuid, EXAMPLES[0]['options_selected'])
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(self.submission_uuid, 1, 2)

    # Fetch the second example, but score it differently from the instructor
    self._assert_get_example(self.submission_uuid, 1, EXAMPLES, RUBRIC)
    wrong_scores = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"𝓰𝓸𝓸𝓭",
    }
    corrections = training_api.assess_training_example(
        self.submission_uuid, wrong_scores)

    # The student is corrected and stays on the same example
    self.assertItemsEqual(corrections, EXAMPLES[1]['options_selected'])
    self._assert_workflow_status(self.submission_uuid, 1, 2)

    # Re-assess matching the instructor; this completes both steps
    corrections = training_api.assess_training_example(
        self.submission_uuid, EXAMPLES[1]['options_selected'])
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(self.submission_uuid, 2, 2)
def test_assess_training_example_database_error(self, mock_db):
    """A database failure during assessment surfaces as an internal error."""
    # Fetch an example first so a training workflow exists
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Make the mocked database lookup blow up on the next call
    mock_db.side_effect = DatabaseError("Kaboom!")
    selected = EXAMPLES[0]['options_selected']
    with self.assertRaises(StudentTrainingInternalError):
        training_api.assess_training_example(self.submission_uuid, selected)
def test_get_num_completed_num_queries(self):
    """Pin the query count for retrieving the number of completed examples."""
    # Complete the first training example
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    # Check the number completed
    with self.assertNumQueries(2):
        training_api.get_num_completed(self.submission_uuid)
def test_assess_training_example_num_queries(self):
    """Pin the query count for assessing a training example with a warm cache."""
    # Populate the cache with training examples and rubrics
    self._warm_cache(RUBRIC, EXAMPLES)
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    with self.assertNumQueries(3):
        training_api.assess_training_example(
            self.submission_uuid, EXAMPLES[0]['options_selected'])
def test_assess_training_example_no_workflow(self):
    """
    Assessing without first retrieving an example must fail: retrieving an
    example is what implicitly creates the training workflow.
    """
    options = EXAMPLES[0]['options_selected']
    with self.assertRaises(StudentTrainingRequestError):
        training_api.assess_training_example(self.submission_uuid, options)
def test_submitter_is_finished_num_queries(self):
    """Pin the query count for checking whether the submitter has finished training."""
    # Complete the first training example
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    # Check whether we've completed the requirements
    requirements = {'num_required': 2}
    with self.assertNumQueries(2):
        training_api.submitter_is_finished(self.submission_uuid, requirements)
def test_assess_training_example_completed_workflow(self):
    """Assessing after all examples are done should raise a request error."""
    uuid = self.submission_uuid

    # Work through every example so the training workflow is complete
    for example in EXAMPLES:
        training_api.get_training_example(uuid, RUBRIC, EXAMPLES)
        training_api.assess_training_example(uuid, example['options_selected'])

    # Any further assessment attempt is rejected
    with self.assertRaises(StudentTrainingRequestError):
        training_api.assess_training_example(uuid, EXAMPLES[0]['options_selected'])
def test_submitter_is_finished_num_queries(self):
    """Pin the query count for checking whether the submitter has finished training."""
    # Complete the first training example
    training_api.on_start(self.submission_uuid)
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

    # Check whether we've completed the requirements
    requirements = {'num_required': 2}
    with self.assertNumQueries(2):
        training_api.submitter_is_finished(self.submission_uuid, requirements)
def test_assess_training_example_database_error(self):
    """A database failure while looking up the workflow surfaces as an internal error."""
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Patch the workflow lookup so it raises once we try to assess
    target = 'openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get'
    with patch(target) as mock_db:
        mock_db.side_effect = DatabaseError("Kaboom!")
        with self.assertRaises(StudentTrainingInternalError):
            training_api.assess_training_example(
                self.submission_uuid, EXAMPLES[0]['options_selected'])
def test_get_training_example_none_available(self):
    """Once every example has been assessed, requesting another returns None."""
    # Complete the entire training workflow
    for example in EXAMPLES:
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        training_api.assess_training_example(self.submission_uuid, example['options_selected'])

    # Now we should be complete
    self._assert_workflow_status(self.submission_uuid, 2, 2)

    # ... so there is no further example to hand back
    next_example = training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    self.assertIsNone(next_example)
def _warm_cache(self, rubric, examples):
    """
    Create a submission and complete student training.
    This will populate the cache with training examples and rubrics,
    which are immutable and shared for all students training on a particular problem.

    Args:
        rubric (dict): Serialized rubric model.
        examples (list of dict): Serialized training examples

    Returns:
        None

    """
    pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
    uuid = pre_submission['uuid']
    for example in examples:
        training_api.get_training_example(uuid, rubric, examples)
        training_api.assess_training_example(uuid, example['options_selected'])
def _warm_cache(self, rubric, examples):
    """
    Create a submission and complete student training.
    This will populate the cache with training examples and rubrics,
    which are immutable and shared for all students training on a particular problem.

    Args:
        rubric (dict): Serialized rubric model.
        examples (list of dict): Serialized training examples

    Returns:
        None

    """
    pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
    uuid = pre_submission['uuid']
    training_api.on_start(uuid)
    for example in examples:
        training_api.get_training_example(uuid, rubric, examples)
        training_api.assess_training_example(uuid, example['options_selected'])
def test_assess_without_update(self):
    """With update_workflow=False a correct assessment does not advance the workflow."""
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # Match the instructor's scores, but do NOT advance the workflow
    corrections = training_api.assess_training_example(
        self.submission_uuid,
        EXAMPLES[0]['options_selected'],
        update_workflow=False
    )

    # No corrections, and we are still on the first step
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(self.submission_uuid, 0, 2)
def test_training_workflow(self):
    """Walk the full training workflow: correct, then incorrect, then corrected."""
    uuid = self.submission_uuid

    # Nothing assessed yet: step 0 of 2
    self._assert_workflow_status(uuid, 0, 2)

    # First example, scored exactly as the instructor scored it
    self._assert_get_example(uuid, 0, EXAMPLES, RUBRIC)
    corrections = training_api.assess_training_example(
        uuid, EXAMPLES[0]['options_selected'])
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(uuid, 1, 2)

    # Second example, scored differently from the instructor
    self._assert_get_example(uuid, 1, EXAMPLES, RUBRIC)
    wrong_scores = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"𝓰𝓸𝓸𝓭",
    }
    corrections = training_api.assess_training_example(uuid, wrong_scores)

    # We get corrected and remain on the same example
    self.assertItemsEqual(corrections, EXAMPLES[1]['options_selected'])
    self._assert_workflow_status(uuid, 1, 2)

    # Re-assess matching the instructor; both steps are now complete
    corrections = training_api.assess_training_example(
        uuid, EXAMPLES[1]['options_selected'])
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(uuid, 2, 2)
def test_assess_without_update(self):
    """A correct assessment with update_workflow=False leaves the step unchanged."""
    # Fetch the first example, then score it the way the instructor did
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    selected = EXAMPLES[0]['options_selected']
    corrections = training_api.assess_training_example(
        self.submission_uuid, selected, update_workflow=False)

    # The assessment was correct, yet we are still on the first step
    self.assertEqual(corrections, dict())
    self._assert_workflow_status(self.submission_uuid, 0, 2)
def training_assess(self, data, suffix=''):  # pylint:disable=W0613
    """
    Compare the scores given by the student with those given by the course author.
    If they match, update the training workflow.  The client can then reload this
    step to view the next essay or the completed step.

    Currently, we return a boolean indicating whether the student assessed correctly
    or not.  However, the student training API provides the exact criteria that the
    student scored incorrectly, as well as the "correct" options for those criteria.
    In the future, we may expose this in the UI to provide more detailed feedback.

    Args:
        data (dict): Must have the following keys:
            options_selected (dict): Dictionary mapping criterion names to option values.

    Returns:
        Dict with keys:
            * "success" (bool) indicating success or error
            * "msg" (unicode) containing additional information if an error occurs.
            * "corrections" (dict) mapping criterion names to the instructor's
              option for each criterion the student scored incorrectly
              (empty when the student's assessment matched the instructor's).

    """
    # Validate the request payload before touching the assessment API.
    if 'options_selected' not in data:
        return {'success': False, 'msg': _(u"Missing options_selected key in request")}
    if not isinstance(data['options_selected'], dict):
        return {'success': False, 'msg': _(u"options_selected must be a dictionary")}

    # Check the student's scores against the course author's scores.
    # This implicitly updates the student training workflow (which example essay is shown)
    # as well as the assessment workflow (training/peer/self steps).
    try:
        corrections = student_training.assess_training_example(
            self.submission_uuid, data['options_selected']
        )
    except (student_training.StudentTrainingRequestError,
            student_training.StudentTrainingInternalError) as ex:
        return {
            'success': False,
            'msg': _(u"Your scores could not be checked: {error}.").format(error=ex)
        }
    except Exception:  # pylint:disable=broad-except
        # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt are
        # not swallowed; any other unexpected failure still yields a generic error.
        return {
            'success': False,
            'msg': _(u"An unexpected error occurred.")
        }
    else:
        return {
            'success': True,
            'msg': u'',
            'corrections': corrections,
        }
def training_assess(self, data, suffix=''):  # pylint:disable=W0613
    """
    Compare the scores given by the student with those given by the course author.
    If they match, update the training workflow.  The client can then reload this
    step to view the next essay or the completed step.

    Currently, we return a boolean indicating whether the student assessed correctly
    or not.  However, the student training API provides the exact criteria that the
    student scored incorrectly, as well as the "correct" options for those criteria.
    In the future, we may expose this in the UI to provide more detailed feedback.

    Args:
        data (dict): Must have the following keys:
            options_selected (dict): Dictionary mapping criterion names to option values.

    Returns:
        Dict with keys:
            * "success" (bool) indicating success or error
            * "msg" (unicode) containing additional information if an error occurs.
            * "corrections" (dict) mapping criterion names to the instructor's
              option for each criterion the student scored incorrectly
              (empty when the student's assessment matched the instructor's).

    """
    # Validate the request payload before touching the assessment API.
    if 'options_selected' not in data:
        return {
            'success': False,
            'msg': self._(u"Missing options_selected key in request")
        }
    if not isinstance(data['options_selected'], dict):
        return {
            'success': False,
            'msg': self._(u"options_selected must be a dictionary")
        }

    # Check the student's scores against the course author's scores.
    # This implicitly updates the student training workflow (which example essay is shown)
    # as well as the assessment workflow (training/peer/self steps).
    try:
        corrections = student_training.assess_training_example(
            self.submission_uuid, data['options_selected'])
        self.runtime.publish(
            self,
            "openassessment.student_training_assess_example",
            {
                "submission_uuid": self.submission_uuid,
                "options_selected": data["options_selected"],
                "corrections": corrections
            })
    except student_training.StudentTrainingRequestError:
        msg = (u"Could not check student training scores for "
               u"the student with submission UUID {uuid}").format(
                   uuid=self.submission_uuid)
        logger.warning(msg, exc_info=True)
        return {
            'success': False,
            'msg': self._(u"Your scores could not be checked.")
        }
    except student_training.StudentTrainingInternalError:
        # Previously swallowed silently; log it so internal failures are visible.
        logger.exception(
            u"Internal error while checking student training scores for "
            u"the student with submission UUID %s", self.submission_uuid
        )
        return {
            'success': False,
            'msg': self._(u"Your scores could not be checked.")
        }
    except Exception:  # pylint:disable=broad-except
        # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt are
        # not swallowed; any other unexpected failure still yields a generic error.
        logger.exception(u"Unexpected error while assessing a training example")
        return {
            'success': False,
            'msg': self._(u"An unexpected error occurred.")
        }
    else:
        # Advance the overall assessment workflow (training/peer/self steps).
        try:
            self.update_workflow_status()
        except AssessmentWorkflowError:
            msg = self._('Could not update workflow status.')
            logger.exception(msg)
            return {'success': False, 'msg': msg}
        return {
            'success': True,
            'msg': u'',
            'corrections': corrections,
        }
def training_assess(self, data, suffix=''):  # pylint:disable=W0613
    """
    Compare the scores given by the student with those given by the course author.
    If they match, update the training workflow.  The client can then reload this
    step to view the next essay or the completed step.

    Currently, we return a boolean indicating whether the student assessed correctly
    or not.  However, the student training API provides the exact criteria that the
    student scored incorrectly, as well as the "correct" options for those criteria.
    In the future, we may expose this in the UI to provide more detailed feedback.

    Args:
        data (dict): Must have the following keys:
            options_selected (dict): Dictionary mapping criterion names to option values.

    Returns:
        Dict with keys:
            * "success" (bool) indicating success or error
            * "msg" (unicode) containing additional information if an error occurs.
            * "corrections" (dict) mapping criterion names to the instructor's
              option for each criterion the student scored incorrectly
              (empty when the student's assessment matched the instructor's).

    """
    # Validate the request payload before touching the assessment API.
    if 'options_selected' not in data:
        return {'success': False, 'msg': self._(u"Missing options_selected key in request")}
    if not isinstance(data['options_selected'], dict):
        return {'success': False, 'msg': self._(u"options_selected must be a dictionary")}

    # Check the student's scores against the course author's scores.
    # This implicitly updates the student training workflow (which example essay is shown)
    # as well as the assessment workflow (training/peer/self steps).
    try:
        corrections = student_training.assess_training_example(
            self.submission_uuid, data['options_selected']
        )
        self.runtime.publish(
            self,
            "openassessment.student_training_assess_example",
            {
                "submission_uuid": self.submission_uuid,
                "options_selected": data["options_selected"],
                "corrections": corrections
            }
        )
    except student_training.StudentTrainingRequestError:
        msg = (
            u"Could not check student training scores for "
            u"the student with submission UUID {uuid}"
        ).format(uuid=self.submission_uuid)
        logger.warning(msg, exc_info=True)
        return {
            'success': False,
            'msg': self._(u"Your scores could not be checked.")
        }
    except student_training.StudentTrainingInternalError:
        # Previously swallowed silently; log it so internal failures are visible.
        logger.exception(
            u"Internal error while checking student training scores for "
            u"the student with submission UUID %s", self.submission_uuid
        )
        return {
            'success': False,
            'msg': self._(u"Your scores could not be checked.")
        }
    except Exception:  # pylint:disable=broad-except
        # Narrowed from a bare ``except:`` so SystemExit / KeyboardInterrupt are
        # not swallowed; any other unexpected failure still yields a generic error.
        logger.exception(u"Unexpected error while assessing a training example")
        return {
            'success': False,
            'msg': self._(u"An unexpected error occurred.")
        }
    else:
        # Advance the overall assessment workflow (training/peer/self steps).
        try:
            self.update_workflow_status()
        except AssessmentWorkflowError:
            msg = self._('Could not update workflow status.')
            logger.exception(msg)
            return {'success': False, 'msg': msg}
        return {
            'success': True,
            'msg': u'',
            'corrections': corrections,
        }
def test_assess_training_example_num_queries(self):
    """Pin the query count for assessing a training example with a warm cache."""
    # Populate the cache with training examples and rubrics
    self._warm_cache(RUBRIC, EXAMPLES)
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    with self.assertNumQueries(4):
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
def test_assess_training_example_database_error(self, mock_db):
    """A simulated database failure should be wrapped as an internal error."""
    # Fetch an example first so that a training workflow exists
    training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

    # The next workflow lookup will raise
    mock_db.side_effect = DatabaseError("Kaboom!")
    with self.assertRaises(StudentTrainingInternalError):
        training_api.assess_training_example(
            self.submission_uuid, EXAMPLES[0]['options_selected'])