def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of template_path (string), context (dict)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(workflow['submission_uuid'])
    peer_assessments = peer_api.get_assessments(student_submission['uuid'])
    self_assessment = self_api.get_assessment(student_submission['uuid'])

    # NOTE(review): feedback is re-fetched here by workflow['submission_uuid'];
    # if that always equals self.submission_uuid, the `feedback` lookup above
    # could be reused instead of a second API call -- confirm before merging.
    has_submitted_feedback = peer_api.get_assessment_feedback(
        workflow['submission_uuid']
    ) is not None

    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    score = workflow['score']

    context = {
        'score': score,
        'feedback_text': feedback_text,
        'student_submission': student_submission,
        'peer_assessments': peer_assessments,
        'self_assessment': self_assessment,
        'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
        'has_submitted_feedback': has_submitted_feedback,
    }

    # Update the scores we will display to the user.
    # Note that we are updating a *copy* of the rubric criteria stored in
    # the XBlock field.
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(
        student_submission["uuid"]
    )
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of template_path (string), context (dict)
    """
    submission_uuid = workflow['submission_uuid']

    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    feedback_text = feedback.get('feedback', '') if feedback else ''

    submission = sub_api.get_submission(submission_uuid)
    assessments_by_peers = peer_api.get_assessments(submission['uuid'])
    own_assessment = self_api.get_assessment(submission['uuid'])
    feedback_submitted = peer_api.get_assessment_feedback(submission_uuid) is not None

    # The workflow resolves the score for our current submission UUID.
    # Looking the score up by submission UUID (rather than by student item)
    # guarantees the score always matches the rubric.
    context = {
        'score': workflow['score'],
        'feedback_text': feedback_text,
        'student_submission': submission,
        'peer_assessments': assessments_by_peers,
        'self_assessment': own_assessment,
        'rubric_criteria': self._rubric_criteria_with_feedback(assessments_by_peers),
        'has_submitted_feedback': feedback_submitted,
    }

    # Annotate the (copied) rubric criteria with the scores shown to the
    # user; the criteria stored in the XBlock field are left untouched.
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(submission["uuid"])
    if not (median_scores is None or max_scores is None):
        for criterion in context["rubric_criteria"]:
            name = criterion["name"]
            criterion["median_score"] = median_scores[name]
            criterion["total_value"] = max_scores[name]

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of template_path (string), context (dict)
    """
    feedback = peer_api.get_assessment_feedback(self.submission_uuid)
    submission = sub_api.get_submission(workflow['submission_uuid'])
    peer_reviews = peer_api.get_assessments(submission['uuid'])

    context = {
        'score': workflow['score'],
        'feedback_text': feedback.get('feedback', '') if feedback else '',
        'student_submission': submission,
        'peer_assessments': peer_reviews,
        'self_assessment': self_api.get_assessment(submission['uuid']),
        # Deep-copy so the score annotations below never mutate the rubric
        # criteria stored in the XBlock field.
        'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        'has_submitted_feedback': peer_api.get_assessment_feedback(
            workflow['submission_uuid']
        ) is not None,
    }

    # Overlay the median and maximum scores we will display to the user
    # onto the copied criteria.
    max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
    median_scores = peer_api.get_assessment_median_scores(submission["uuid"])
    if median_scores is not None and max_scores is not None:
        for criterion in context["rubric_criteria"]:
            criterion["median_score"] = median_scores[criterion["name"]]
            criterion["total_value"] = max_scores[criterion["name"]]

    return ('openassessmentblock/grade/oa_grade_complete.html', context)
def test_median_score_db_error(self, mock_filter):
    # Simulate the database failing when the median-score query executes.
    # `mock_filter` is presumably injected by a @mock.patch decorator not
    # visible in this chunk -- TODO confirm the patch target.
    mock_filter.side_effect = DatabaseError("Bad things happened")
    tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
    # NOTE(review): no assertion follows -- the test passes as long as
    # get_assessment_median_scores() does not propagate the DatabaseError.
    # Confirm whether it should instead assert that a wrapped exception
    # (e.g. a peer-assessment error) is raised.
    peer_api.get_assessment_median_scores(tim["uuid"])