Example #1
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(
            workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(
            workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(
            student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
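
The tuple returned here is (template_path, context), and a caller is expected to feed it to a template renderer. Below is a minimal consumption sketch, assuming Django's template loader is available; render_assessment is a hypothetical helper, not part of the example above.

from django.template.loader import render_to_string

def render_assessment(template_path, context):
    # Hypothetical helper: render the template produced by
    # render_grade_complete() with its accompanying context dict.
    return render_to_string(template_path, context)

# Usage sketch:
# template_path, context = block.render_grade_complete(workflow)
# html = render_assessment(template_path, context)
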
Example #2
 def test_create_feedback_on_an_assessment(self):
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
     assessment = peer_api.create_assessment(
         bob_sub["uuid"],
         bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
     peer_api.create_assessment(
         tim_sub["uuid"],
         tim["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_score(
         tim_sub["uuid"],
         {
             'must_grade': 1,
             'must_be_graded_by': 1
         }
     )
     feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNone(feedback)
     peer_api.set_assessment_feedback(
         {
             'submission_uuid': tim_sub['uuid'],
             'feedback_text': 'Bob is a jerk!',
             'options': [
                 'I disliked this assessment',
                 'I felt this assessment was unfair',
             ]
         }
     )
     saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNot(saved_feedback, None)
     self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
     self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
     self.assertItemsEqual(saved_feedback['options'], [
         {'text': 'I disliked this assessment'},
         {'text': 'I felt this assessment was unfair'},
     ])
     self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
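
Stripped of the test scaffolding above, the feedback round-trip is just two peer API calls: set_assessment_feedback takes a single dict, and get_assessment_feedback returns None until something has been saved. A minimal sketch, assuming the import path shown and a submission_uuid whose peer step is already complete:

from openassessment.assessment import peer_api  # import path is an assumption

def leave_feedback(submission_uuid, text, options):
    # Record the learner's feedback on the assessments they received.
    peer_api.set_assessment_feedback({
        'submission_uuid': submission_uuid,
        'feedback_text': text,
        'options': options,
    })
    # Read it back; each option comes back as a dict like {'text': ...}.
    return peer_api.get_assessment_feedback(submission_uuid)

# feedback = leave_feedback(tim_sub['uuid'], 'Bob is a jerk!',
#                           ['I disliked this assessment'])
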
Example #3
    def test_submit_feedback(self, xblock):
        # Create submissions and assessments
        self._create_submission_and_assessments(xblock, self.SUBMISSION,
                                                self.PEERS, self.ASSESSMENTS,
                                                self.ASSESSMENTS[0])

        # Submit feedback on the assessments
        payload = json.dumps({
            'feedback_text': u'I disliked my assessment',
            'feedback_options': [u'Option 1', u'Option 2'],
        })
        resp = self.request(xblock,
                            'submit_feedback',
                            payload,
                            response_format='json')
        self.assertTrue(resp['success'])

        # Verify that the feedback was created in the database
        feedback = peer_api.get_assessment_feedback(xblock.submission_uuid)
        self.assertIsNot(feedback, None)
        self.assertEqual(feedback['feedback_text'],
                         u'I disliked my assessment')
        self.assertItemsEqual(feedback['options'], [
            {'text': u'Option 1'},
            {'text': u'Option 2'},
        ])
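
On the XBlock side, the 'submit_feedback' handler exercised here presumably decodes the payload and forwards it to peer_api.set_assessment_feedback. The following is only a hypothetical sketch of such a handler; the signature, error type, and 'msg' key are assumptions rather than the actual implementation.

def submit_feedback(self, data, suffix=''):  # handler signature is assumed
    # Persist the learner's feedback on the peer assessments they received.
    try:
        peer_api.set_assessment_feedback({
            'submission_uuid': self.submission_uuid,
            'feedback_text': data['feedback_text'],
            'options': data['feedback_options'],
        })
    except peer_api.PeerAssessmentInternalError:  # exception name is an assumption
        return {'success': False, 'msg': 'Error occurred submitting feedback'}
    return {'success': True, 'msg': ''}
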
Example #4
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #5
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        context = {
            'score': workflow['score'],
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
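
The score placed into the context is taken verbatim from the serialized workflow. If it needs to be shown as a percentage, a small helper like the one below would do; note that the 'points_earned' and 'points_possible' keys are an assumption about the score dict's shape, not something the examples above demonstrate.

def score_percentage(score):
    # score is the dict stored at workflow['score']; key names are assumed.
    earned = score['points_earned']
    possible = score['points_possible']
    return 100.0 * earned / possible if possible else 0.0

# e.g. score_percentage({'points_earned': 8, 'points_possible': 10}) -> 80.0
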
Example #6
    def test_submit_feedback_no_options(self, xblock):
        # Create submissions and assessments
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, self.ASSESSMENTS, self.ASSESSMENTS[0]
        )

        # Submit feedback on the assessments with no options specified
        payload = json.dumps({
            'feedback_text': u'I disliked my assessment',
            'feedback_options': [],
        })
        resp = self.request(xblock, 'submit_feedback', payload, response_format='json')
        self.assertTrue(resp['success'])

        # Verify that the feedback was created in the database
        feedback = peer_api.get_assessment_feedback(xblock.submission_uuid)
        self.assertIsNot(feedback, None)
        self.assertItemsEqual(feedback['options'], [])
Example #7
 def test_get_assessment_feedback_error(self, mock_filter):
     mock_filter.side_effect = DatabaseError("Oh no.")
     tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
     peer_api.get_assessment_feedback(tim_answer['uuid'])
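
This test forces a DatabaseError inside the feedback lookup. A caller that wants to degrade gracefully would trap the peer API's own error type rather than the raw database exception; a minimal sketch, assuming the API re-raises database failures as peer_api.PeerAssessmentInternalError (the exact exception name is an assumption):

def safe_get_feedback(submission_uuid):
    # Return None instead of raising if the feedback lookup hits a database error.
    try:
        return peer_api.get_assessment_feedback(submission_uuid)
    except peer_api.PeerAssessmentInternalError:  # exception name is an assumption
        return None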