    def test_get_training_example_invalid_rubric(self):
        # Rubric is missing a very important key!
        invalid_rubric = copy.deepcopy(RUBRIC)
        del invalid_rubric['criteria']

        with self.assertRaises(StudentTrainingRequestError):
            training_api.get_training_example(self.submission_uuid, invalid_rubric, EXAMPLES)
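RUBRIC and EXAMPLES are module-level fixtures that these tests reference but that this listing never shows. A minimal sketch of the shapes the training API expects; the criterion and option names, prompts, and point values here are all hypothetical:

# Hypothetical fixtures -- the real test module defines its own values.
RUBRIC = {
    'prompt': "Write an essay about dinosaurs.",
    'criteria': [
        {
            'order_num': 0,
            'name': 'vocabulary',
            'prompt': "How varied is the vocabulary?",
            'options': [
                {'order_num': 0, 'name': 'good', 'points': 1, 'explanation': ""},
                {'order_num': 1, 'name': 'excellent', 'points': 2, 'explanation': ""},
            ],
        },
    ],
}

# Two examples, matching the two-step training workflow these tests assert.
EXAMPLES = [
    {
        'answer': "Dinosaurs were large reptiles that lived long ago.",
        'options_selected': {'vocabulary': 'good'},
    },
    {
        'answer': "Dinosaurs: the thunder lizards of the Mesozoic.",
        'options_selected': {'vocabulary': 'excellent'},
    },
]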
    # Patch target assumed from the context-manager variant of this test below.
    @patch('openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get')
    def test_assess_training_example_database_error(self, mock_db):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        mock_db.side_effect = DatabaseError("Kaboom!")
        with self.assertRaises(StudentTrainingInternalError):
            training_api.assess_training_example(
                self.submission_uuid, EXAMPLES[0]['options_selected'])
    def test_get_num_completed_num_queries(self):
        # Complete the first training example
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Check the number completed
        with self.assertNumQueries(2):
            training_api.get_num_completed(self.submission_uuid)
    def test_get_same_example(self):
        # Retrieve a training example
        retrieved = training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # If we retrieve an example without completing the current example,
        # we should get the same one.
        next_retrieved = training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        self.assertEqual(retrieved, next_retrieved)
    def test_assess_training_example_num_queries(self):
        # Populate the cache with training examples and rubrics
        self._warm_cache(RUBRIC, EXAMPLES)
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        with self.assertNumQueries(3):
            training_api.assess_training_example(
                self.submission_uuid, EXAMPLES[0]['options_selected'])
    def test_submitter_is_finished_num_queries(self):
        # Complete the first training example
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Check whether we've completed the requirements
        requirements = {'num_required': 2}
        with self.assertNumQueries(2):
            training_api.submitter_is_finished(self.submission_uuid, requirements)
    def test_assess_training_example_completed_workflow(self):
        for example in EXAMPLES:
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
            training_api.assess_training_example(self.submission_uuid, example['options_selected'])

        # Try to assess again, and expect an error
        with self.assertRaises(StudentTrainingRequestError):
            training_api.assess_training_example(
                self.submission_uuid, EXAMPLES[0]['options_selected']
            )
    def test_submitter_is_finished_num_queries(self):
        # Complete the first training example
        training_api.on_start(self.submission_uuid)
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Check whether we've completed the requirements
        requirements = {'num_required': 2}
        with self.assertNumQueries(2):
            training_api.submitter_is_finished(self.submission_uuid, requirements)
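A sketch of the semantics the two submitter_is_finished tests rely on, not the actual implementation: the training step counts as finished once the learner has assessed at least 'num_required' examples.

# Hypothetical restatement of the check; the real logic lives inside the API.
def submitter_is_finished_sketch(num_completed, requirements):
    return num_completed >= requirements.get('num_required', 0)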
    def test_assess_training_example_database_error(self):
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        with patch(
                'openassessment.assessment.models.student_training.StudentTrainingWorkflow.objects.get'
        ) as mock_db:
            mock_db.side_effect = DatabaseError("Kaboom!")
            with self.assertRaises(StudentTrainingInternalError):
                training_api.assess_training_example(
                    self.submission_uuid, EXAMPLES[0]['options_selected'])
    def test_get_training_example_none_available(self):
        for example in EXAMPLES:
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
            training_api.assess_training_example(self.submission_uuid, example['options_selected'])

        # Now we should be complete
        self._assert_workflow_status(self.submission_uuid, 2, 2)

        # ... and if we try to get another example, we should get None
        self.assertIs(
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES),
            None
        )
    def test_assess_without_update(self):
        # Assess the first training example the same way the instructor did
        # but do NOT update the workflow
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        corrections = training_api.assess_training_example(
            self.submission_uuid,
            EXAMPLES[0]['options_selected'],
            update_workflow=False
        )

        # Expect that we're still on the first step
        self.assertEqual(corrections, dict())
        self._assert_workflow_status(self.submission_uuid, 0, 2)
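The corrections dict asserted above is empty because the learner's selections matched the instructor's. A hedged caller-side sketch, assuming the non-empty case maps each mismatched criterion name to the option name the instructor selected; the helper name is hypothetical:

def show_training_feedback(submission_uuid, options_selected):
    # Hypothetical helper illustrating how a caller might consume corrections.
    corrections = training_api.assess_training_example(submission_uuid, options_selected)
    if not corrections:
        return 'correct'  # Selections matched; the workflow advances.
    # Otherwise re-show the same example, flagging each criterion the learner missed.
    for criterion_name, correct_option in corrections.items():
        print("{}: the instructor selected '{}'".format(criterion_name, correct_option))
    return 'try_again'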
    def _warm_cache(self, rubric, examples):
        """
        Create a submission and complete student training.
        This will populate the cache with training examples and rubrics,
        which are immutable and shared for all students training on a particular problem.

        Args:
            rubric (dict): Serialized rubric model.
            examples (list of dict): Serialized training examples.

        Returns:
            None

        """
        pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        for example in examples:
            training_api.get_training_example(pre_submission['uuid'], rubric, examples)
            training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
    def _warm_cache(self, rubric, examples):
        """
        Create a submission and complete student training.
        This will populate the cache with training examples and rubrics,
        which are immutable and shared for all students training on a particular problem.

        Args:
            rubric (dict): Serialized rubric model.
            examples (list of dict): Serialized training examples.

        Returns:
            None

        """
        pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        training_api.on_start(pre_submission['uuid'])
        for example in examples:
            training_api.get_training_example(pre_submission['uuid'], rubric, examples)
            training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
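These tests assume a setUp that creates the submission under test. A minimal sketch under that assumption; the class name is hypothetical, and STUDENT_ITEM and ANSWER mirror the fixtures _warm_cache already uses:

    def setUp(self):
        # Hypothetical setUp; the real test case may do more here.
        super(StudentTrainingAssessmentTest, self).setUp()
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        self.submission_uuid = submission['uuid']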
    def test_get_training_example_num_queries(self):

        # Run through the training example once using a different submission
        # Training examples and rubrics will be cached and shared for other
        # students working on the same problem.
        self._warm_cache(RUBRIC, EXAMPLES)

        # First training example
        # This will need to create the student training workflow and the first item
        # NOTE: we *could* cache the rubric model to reduce the number of queries here,
        # but we're selecting it by content hash, which is indexed and should be plenty fast.
        with self.assertNumQueries(8):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Without assessing the first training example, try to retrieve a training example.
        # This should return the same example as before, so we won't need to create
        # any workflows or workflow items.
        with self.assertNumQueries(5):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Assess the current training example
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Retrieve the next training example, which requires us to create
        # a new workflow item (but not a new workflow).
        with self.assertNumQueries(8):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    def test_get_training_example_num_queries(self):

        # Run through the training example once using a different submission
        # Training examples and rubrics will be cached and shared for other
        # students working on the same problem.
        self._warm_cache(RUBRIC, EXAMPLES)

        # First training example
        # This will need to create the student training workflow and the first item
        # NOTE: we *could* cache the rubric model to reduce the number of queries here,
        # but we're selecting it by content hash, which is indexed and should be plenty fast.
        with self.assertNumQueries(6):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Without assessing the first training example, try to retrieve a training example.
        # This should return the same example as before, so we won't need to create
        # any workflows or workflow items.
        with self.assertNumQueries(3):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)

        # Assess the current training example
        training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])

        # Retrieve the next training example, which requires us to create
        # a new workflow item (but not a new workflow).
        with self.assertNumQueries(4):
            training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
    def _assert_get_example(self, submission_uuid, order_num, input_examples, input_rubric):
        """
        Check the training example we get from the API.

        Args:
            submission_uuid (str): The submission UUID associated with the student being trained.
            order_num (int): The order number of the example we expect to retrieve.
            input_examples (list of dict): The examples we used to configure the training workflow.
            input_rubric (dict): The rubric we used to configure the training workflow.

        Returns:
            None

        Raises:
            AssertionError

        """
        example = training_api.get_training_example(submission_uuid, input_rubric, input_examples)
        expected_example = self._expected_example(input_examples[order_num], input_rubric)
        # assertItemsEqual exists only on Python 2; the variant below uses
        # six.assertCountEqual for Python 2/3 compatibility.
        self.assertItemsEqual(example, expected_example)
    def _assert_get_example(self, submission_uuid, order_num, input_examples, input_rubric):
        """
        Check the training example we get from the API.

        Args:
            submission_uuid (str): The submission UUID associated with the student being trained.
            order_num (int): The order number of the example we expect to retrieve.
            input_examples (list of dict): The examples we used to configure the training workflow.
            input_rubric (dict): The rubric we used to configure the training workflow.

        Returns:
            None

        Raises:
            AssertionError

        """
        example = training_api.get_training_example(submission_uuid, input_rubric, input_examples)
        expected_example = self._expected_example(input_examples[order_num], input_rubric)
        six.assertCountEqual(self, example, expected_example)
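A hedged sketch of how a test might drive this helper, stepping through the configured examples in order; the test name is hypothetical:

    def test_examples_returned_in_order(self):
        # Each completed assessment should advance the workflow to the next example.
        for order_num, example in enumerate(EXAMPLES):
            self._assert_get_example(self.submission_uuid, order_num, EXAMPLES, RUBRIC)
            training_api.assess_training_example(self.submission_uuid, example['options_selected'])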
    def training_path_and_context(self):
        """
        Return the template path and context used to render the student training step.

        Returns:
            tuple of `(path, context)` where `path` is the path to the template and
                `context` is a dict.

        """
        # Retrieve the status of the workflow.
        # If no submissions have been created yet, the status will be None.
        workflow_status = self.get_workflow_info().get('status')
        problem_closed, reason, start_date, due_date = self.is_closed(step="student-training")

        context = {}
        template = 'openassessmentblock/student_training/student_training_unavailable.html'

        # add allow_latex field to the context
        context['allow_latex'] = self.allow_latex

        if not workflow_status:
            return template, context

        # If the student has completed the training step, then show that the step is complete.
        # We put this condition first so that if a student has completed the step, it *always*
        # shows as complete.
        # We're assuming here that the training step always precedes the other assessment steps
        # (peer/self) -- we may need to make this more flexible later.
        if workflow_status == 'cancelled':
            template = 'openassessmentblock/student_training/student_training_cancelled.html'
        elif workflow_status and workflow_status != "training":
            template = 'openassessmentblock/student_training/student_training_complete.html'

        # If the problem is closed, then do not allow students to access the training step
        elif problem_closed and reason == 'start':
            context['training_start'] = start_date
            template = 'openassessmentblock/student_training/student_training_unavailable.html'
        elif problem_closed and reason == 'due':
            context['training_due'] = due_date
            template = 'openassessmentblock/student_training/student_training_closed.html'

        # If we're on the training step, show the student an example
        # We do this last so we can avoid querying the student training API if possible.
        else:
            training_module = self.get_assessment_module('student-training')
            if not training_module:
                return template, context

            if due_date < DISTANT_FUTURE:
                context['training_due'] = due_date

            # Report progress in the student training workflow (completed X out of Y)
            context['training_num_available'] = len(training_module["examples"])
            context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
            context['training_num_current'] = context['training_num_completed'] + 1

            # Retrieve the example essay for the student to submit
            # This will contain the essay text, the rubric, and the options the instructor selected.
            examples = convert_training_examples_list_to_dict(training_module["examples"])
            example = student_training.get_training_example(
                self.submission_uuid,
                {
                    'prompt': self.prompt,
                    'criteria': self.rubric_criteria_with_labels
                },
                examples
            )
            if example:
                context['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
                context['training_rubric'] = {
                    'criteria': example['rubric']['criteria'],
                    'points_possible': example['rubric']['points_possible']
                }
                template = 'openassessmentblock/student_training/student_training.html'
            else:
                logger.error(
                    "No training example was returned from the API for student "
                    "with Submission UUID {}".format(self.submission_uuid)
                )
                template = "openassessmentblock/student_training/student_training_error.html"

        return template, context
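For context, a sketch of how the XBlock might consume the (path, context) pair; the handler name and the render_assessment helper follow the usual ORA2 pattern but are assumptions, not taken from this listing:

    def render_student_training(self, data, suffix=''):
        # Hypothetical handler: render whichever training template applies.
        path, context = self.training_path_and_context()
        return self.render_assessment(path, context_dict=context)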
    def test_get_training_example_no_submission(self):
        with self.assertRaises(StudentTrainingRequestError):
            training_api.get_training_example("no_such_submission", RUBRIC, EXAMPLES)
    def test_assess_training_example_num_queries(self):
        # Populate the cache with training examples and rubrics
        self._warm_cache(RUBRIC, EXAMPLES)
        training_api.get_training_example(self.submission_uuid, RUBRIC, EXAMPLES)
        with self.assertNumQueries(4):
            training_api.assess_training_example(self.submission_uuid, EXAMPLES[0]['options_selected'])
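Taken together, these tests distinguish StudentTrainingRequestError (bad input: an unknown submission, an invalid rubric, assessing past the end of the workflow) from StudentTrainingInternalError (unexpected failures such as database errors). A hedged sketch of mapping them to caller-facing outcomes; the wrapper is hypothetical:

def get_example_or_error(submission_uuid, rubric, examples):
    # Hypothetical wrapper turning API exceptions into (example, error_message).
    try:
        return training_api.get_training_example(submission_uuid, rubric, examples), None
    except StudentTrainingRequestError:
        return None, "The training request was invalid; check the rubric and submission."
    except StudentTrainingInternalError:
        return None, "An internal error occurred; please try again later."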