Example No. 1
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
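
For orientation, here is a minimal sketch of how the returned pair might be consumed by the surrounding XBlock. The get_workflow_info and render_assessment helper names are assumptions, not shown in the example above:

    # Hypothetical caller (helper names assumed): render_grade_complete returns
    # the template path first and the context second, so the pair unpacks
    # directly into a template-rendering call.
    def render_grade(self, data, suffix=''):
        workflow = self.get_workflow_info()  # assumed helper returning the serialized workflow dict
        template_path, context = self.render_grade_complete(workflow)
        return self.render_assessment(template_path, context)  # assumed rendering helper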
Example No. 2
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1
        )

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
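
The test above dereferences exactly three keys of ASSESSMENT_DICT. An illustrative fixture consistent with that usage (and with the rubric sketch after Example No. 3) might look like this; all criterion and option names are assumptions:

# Illustrative fixture only; the real test fixture is not shown in the snippet.
ASSESSMENT_DICT = {
    # criterion name -> chosen option name, one entry per rubric criterion
    'options_selected': {'secret': 'yes', 'giveup': 'unwilling', 'clarity': 'clear'},
    # optional free-text comments keyed by criterion name
    'criterion_feedback': {'secret': 'Nicely hidden.'},
    # free-text comment on the submission as a whole
    'overall_feedback': 'Solid work overall.',
}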
Example No. 3
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 1)
        assessment = peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            1
        )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
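
Between them, Examples 2 and 3 pin down part of the rubric fixture: the 'secret' criterion tops out at 1 point, 'giveup' at 10, and the rubric as a whole at 14. Below is a RUBRIC_DICT sketch consistent with those numbers; the filler criterion and all option labels are assumptions:

# Sketch of a rubric matching the assertions above. A criterion's max score is
# its highest option value, so 'secret' yields 1, 'giveup' yields 10, and the
# assumed 'clarity' filler brings points_possible to 14.
RUBRIC_DICT = {
    'prompt': 'Test prompt',
    'criteria': [
        {
            'name': 'secret',
            'prompt': 'Did the writer keep it secret?',
            'options': [
                {'name': 'no', 'points': 0, 'explanation': ''},
                {'name': 'yes', 'points': 1, 'explanation': ''},
            ],
        },
        {
            'name': 'giveup',
            'prompt': 'How willing is the writer to give up?',
            'options': [
                {'name': 'unwilling', 'points': 0, 'explanation': ''},
                {'name': 'eager', 'points': 10, 'explanation': ''},
            ],
        },
        {
            'name': 'clarity',  # assumed filler criterion so the total is 14
            'prompt': 'Is the answer clear?',
            'options': [
                {'name': 'unclear', 'points': 0, 'explanation': ''},
                {'name': 'clear', 'points': 3, 'explanation': ''},
            ],
        },
    ],
}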
Example No. 4
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        context = {
            'score': workflow['score'],
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
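
The copy.deepcopy call above is what makes the mutation loop safe: median_score and total_value are written into the criterion dicts, and without a deep copy those writes would land in the XBlock's stored rubric_criteria field itself. A toy illustration of the hazard (names are illustrative):

import copy

stored = [{'name': 'secret'}]           # stands in for self.rubric_criteria
display = copy.deepcopy(stored)         # what render_grade_complete does
display[0]['median_score'] = 5          # annotate the copy for display
assert 'median_score' not in stored[0]  # the stored field is untouched

aliased = list(stored)                  # a shallow copy would NOT be enough:
aliased[0]['median_score'] = 5          # same inner dict, so this writes through
assert 'median_score' in stored[0]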
Example No. 5
    def test_max_score_db_error(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
        peer_api.get_rubric_max_scores(tim["uuid"])
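
This test drives a DatabaseError through the mocked query, so callers should expect get_rubric_max_scores to fail on storage problems. A defensive-caller sketch, assuming the peer API re-raises such failures as PeerAssessmentInternalError; the import paths and exception name follow the API's naming convention and are not shown in the snippet:

from openassessment.assessment import peer_api                              # assumed import path
from openassessment.assessment.peer_api import PeerAssessmentInternalError  # assumed location

def safe_max_scores(submission_uuid):
    try:
        return peer_api.get_rubric_max_scores(submission_uuid)
    except PeerAssessmentInternalError:
        # Mirror render_grade_complete above, which tolerates max_scores being None.
        return None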