Example #1
    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")

        # Bob pulls Tim's submission into his grading queue but has not yet
        # assessed it, so he has finished 0 of the 1 required assessments.
        sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)

        # Once Bob submits his assessment, the requirement is met.
        peer_api.create_assessment(
            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)
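These tests call a _create_student_and_submission helper defined elsewhere in the test class. A minimal sketch of what such a helper plausibly looks like, assuming STUDENT_ITEM and STEPS module-level fixtures and the create_submission / create_workflow calls from the submissions and workflow APIs (the exact body and import paths are assumptions, not the project's actual helper):

    # Assumed imports for the sketch:
    # from submissions import api as sub_api
    # from openassessment.workflow import api as workflow_api

    @staticmethod
    def _create_student_and_submission(student, answer, date=None):
        # Each student gets a copy of the shared student-item fixture with
        # their own student_id, then a submission and a workflow for it.
        new_student_item = STUDENT_ITEM.copy()
        new_student_item["student_id"] = student
        submission = sub_api.create_submission(new_student_item, answer, date)
        # Older API versions take only the submission UUID here.
        workflow_api.create_workflow(submission["uuid"], STEPS)
        return submission, new_student_item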
Example #2
    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")

        # This version of the API keys the grading queue on the scorer's own
        # submission UUID rather than on the student item dict.
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)

        # create_assessment now takes the parts of the assessment separately
        # instead of a single ASSESSMENT_DICT.
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1,
        )
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)
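The newer signature consumes the three parts of ASSESSMENT_DICT separately. For illustration, a fixture with that shape might look like the following (criterion and option names are invented here, not the project's actual fixture):

    ASSESSMENT_DICT = {
        # criterion name -> name of the selected option
        "options_selected": {
            "clarity": "somewhat clear",
            "accuracy": "mostly accurate",
        },
        # per-criterion free-text feedback
        "criterion_feedback": {
            "clarity": "Consider tightening the introduction.",
        },
        "overall_feedback": "Solid answer overall.",
    }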
Example #3
    def test_peer_assessment_workflow(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Tim pulls and assesses the required number of peer submissions.
        for i in range(5):
            self.assertEqual(
                (False, i),
                peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED)
            )
            # Pull the next submission into Tim's grading queue, then assess it.
            sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
            peer_api.create_assessment(
                tim_sub["uuid"], tim["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )

        self.assertEqual(
            (True, 5),
            peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED)
        )

        # Tim should not have a score, because his submission does not have
        # enough assessments.
        self.assertIsNone(sub_api.get_score(STUDENT_ITEM))

        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT_FAIL['options_selected'],
            ASSESSMENT_DICT_FAIL['criterion_feedback'],
            ASSESSMENT_DICT_FAIL['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT_PASS['options_selected'],
            ASSESSMENT_DICT_PASS['criterion_feedback'],
            ASSESSMENT_DICT_PASS['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim has met the criteria, and should now be complete.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
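Passing is_complete is what lets the workflow assign a score. A sketch of the natural final step, reusing the update_from_assessments and get_score calls that appear in these APIs (whether this exact invocation applies to this API version is an assumption):

    requirements = {
        "peer": {
            "must_grade": REQUIRED_GRADED,
            "must_be_graded_by": REQUIRED_GRADED_BY,
        }
    }
    # Re-evaluate the workflow now that Tim has both graded enough peers
    # and been graded by enough peers; a score should now exist.
    workflow_api.update_from_assessments(tim_sub["uuid"], requirements)
    self.assertIsNotNone(sub_api.get_score(STUDENT_ITEM))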
Example #4
    def peer_path_and_context(self, continue_grading):
        """
        Return the template path and context for rendering the peer assessment step.

        Args:
            continue_grading (bool): If true, the user has chosen to continue grading.

        Returns:
            tuple of (template_path, context_dict)

        """
        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        finished = False
        problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")

        context_dict = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes"  # TODO: Need to configure this.
        }

        if self.rubric_feedback_prompt is not None:
            context_dict["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context_dict['peer_due'] = due_date

        workflow = self.get_workflow_info()
        if workflow is None:
            return path, context_dict
        continue_grading = continue_grading and workflow["status_details"]["peer"]["complete"]

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            finished, count = peer_api.has_finished_required_evaluating(
                self.submission_uuid,
                assessment["must_grade"]
            )
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if continue_grading:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & review another response"
                )
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move onto next step"
                )
            else:
                context_dict["submit_button_text"] = (
                    "Submit your assessment & move to response #{}"
                ).format(count + 2)

        # Once a student has completed a problem, it stays complete,
        # so this condition needs to be first.
        if (workflow.get('status') == 'done' or finished) and not continue_grading:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        # Allow continued grading even if the problem due date has passed
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                context_dict["peer_submission"] = peer_sub
            else:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
        elif reason == 'due' and problem_closed:
            path = 'openassessmentblock/peer/oa_peer_closed.html'
        elif reason == 'start' and problem_closed:
            context_dict["peer_start"] = start_date
            path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        elif workflow.get("status") == "peer":
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_assessment.html'
                context_dict["peer_submission"] = peer_sub
            else:
                path = 'openassessmentblock/peer/oa_peer_waiting.html'

        return path, context_dict
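A plausible caller pairs this helper with render_assessment, much as the older handler in Example #5 does (the decorator and data.params handling are assumptions carried over from that snapshot, not confirmed for this version):

    @XBlock.handler
    def render_peer_assessment(self, data, suffix=''):
        """Render the peer assessment step by delegating to peer_path_and_context."""
        continue_grading = data.params.get('continue_grading', False)
        path, context_dict = self.peer_path_and_context(continue_grading)
        return self.render_assessment(path, context_dict)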
Example #5
    def render_peer_assessment(self, data, suffix=""):
        """Renders the Peer Assessment HTML section of the XBlock

        Generates the peer assessment HTML for the first section of an Open
        Assessment XBlock. See OpenAssessmentBlock.render_assessment() for
        more information on rendering XBlock sections.

        Args:
            data (dict): May contain an attribute 'continue_grading', which
                allows a student to continue grading peers past the required
                number of assessments.

        """
        path = "openassessmentblock/peer/oa_peer_unavailable.html"
        finished = False
        problem_closed, reason, date = self.is_closed(step="peer-assessment")
        context_dict = {
            "rubric_criteria": self.rubric_criteria,
            "estimated_time": "20 minutes",  # TODO: Need to configure this.
        }

        submissions_closed, __, __ = self.is_closed(step="submission")

        workflow = self.get_workflow_info()
        if workflow is None:
            return self.render_assessment(path, context_dict)
        continue_grading = (
            data.params.get("continue_grading", False)
            and workflow["status_details"]["peer"]["complete"]
            and submissions_closed
        )

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module("peer-assessment")
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            finished, count = peer_api.has_finished_required_evaluating(
                student_item, assessment["must_grade"]
            )
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if continue_grading:
                context_dict["submit_button_text"] = "Submit your assessment & review another response"
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = "Submit your assessment & move onto next step"
            else:
                context_dict["submit_button_text"] = ("Submit your assessment & move to response #{}").format(count + 2)

        if reason == "due" and problem_closed:
            context_dict["peer_due"] = self.format_datetime_string(date)
            path = "openassessmentblock/peer/oa_peer_closed.html"
        elif reason == "start" and problem_closed:
            context_dict["peer_start"] = self.format_datetime_string(date)
            path = "openassessmentblock/peer/oa_peer_unavailable.html"
        elif workflow.get("status") == "peer":
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = "openassessmentblock/peer/oa_peer_assessment.html"
                context_dict["peer_submission"] = peer_sub
            else:
                path = "openassessmentblock/peer/oa_peer_waiting.html"
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = "openassessmentblock/peer/oa_peer_turbo_mode.html"
                context_dict["peer_submission"] = peer_sub
            else:
                path = "openassessmentblock/peer/oa_peer_turbo_mode_waiting.html"
        elif workflow.get("status") == "done" or finished:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        return self.render_assessment(path, context_dict)
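Note that the branch order differs between the two snapshots: Example #4 checks the completed state first (its comment explains why), while this older handler checks it last, so a closed problem can mask a completed one. A standalone sketch of the corrected dispatch, with simplified inputs (every name here is illustrative, not the real XBlock API):

    def choose_peer_template(status, finished, continue_grading,
                             problem_closed, reason, has_peer_sub):
        """Pick a peer-step template path; simplified from the handlers above."""
        # Completion wins: once a student finishes, the step stays complete
        # even after the due date passes.
        if (status == "done" or finished) and not continue_grading:
            return "openassessmentblock/peer/oa_peer_complete.html"
        # Continued grading is allowed even after the due date.
        if continue_grading:
            if has_peer_sub:
                return "openassessmentblock/peer/oa_peer_turbo_mode.html"
            return "openassessmentblock/peer/oa_peer_turbo_mode_waiting.html"
        if problem_closed and reason == "due":
            return "openassessmentblock/peer/oa_peer_closed.html"
        if problem_closed and reason == "start":
            return "openassessmentblock/peer/oa_peer_unavailable.html"
        if status == "peer":
            if has_peer_sub:
                return "openassessmentblock/peer/oa_peer_assessment.html"
            return "openassessmentblock/peer/oa_peer_waiting.html"
        return "openassessmentblock/peer/oa_peer_unavailable.html"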