Example #1
    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1,
        )
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)
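
Stripped of the test harness, the call pattern exercised above reduces to the following. This is a minimal sketch: the submission UUID and the must_grade value are placeholders, not fixtures from the test.

    # Minimal sketch of the call pattern under test. The submission UUID
    # below is a placeholder, not a fixture from the test above.
    from openassessment.assessment.api import peer as peer_api

    submission_uuid = "00000000-0000-0000-0000-000000000000"  # placeholder
    must_grade = 1

    finished, count = peer_api.has_finished_required_evaluating(submission_uuid, must_grade)
    if not finished:
        print("{} peer assessment(s) still required".format(must_grade - count))
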
Example #2
    def peer_path_and_context(self, continue_grading):
        """
        Return the template path and context for rendering the peer assessment step.

        Args:
            continue_grading (bool): If true, the user has chosen to continue grading.

        Returns:
            tuple of (template_path, context_dict)

        """
        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        finished = False
        problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")

        context_dict = {
            "rubric_criteria": self.rubric_criteria_with_labels,
            "estimated_time": "20 minutes"  # TODO: Need to configure this.
        }

        if self.rubric_feedback_prompt is not None:
            context_dict["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context_dict['peer_due'] = due_date

        workflow = self.get_workflow_info()
        peer_complete = workflow.get('status_details', {}).get('peer', {}).get('complete', False)
        continue_grading = continue_grading and peer_complete

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            finished, count = peer_api.has_finished_required_evaluating(
                self.submission_uuid,
                assessment["must_grade"]
            )
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if continue_grading:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment & review another response"
                )
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment & move onto next step"
                )
            else:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment & move to response #{response_number}"
                ).format(response_number=(count + 2))

        # Once a student has completed a problem, it stays complete,
        # so this condition needs to be first.
        if (workflow.get('status') == 'done' or finished) and not continue_grading:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        # Allow continued grading even if the problem due date has passed
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                context_dict["peer_submission"] = peer_sub

                # Determine if file upload is supported for this XBlock.
                context_dict["allow_file_upload"] = self.allow_file_upload
                context_dict["peer_file_url"] = self.get_download_url_from_submission(peer_sub)
            else:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
        elif reason == 'due' and problem_closed:
            path = 'openassessmentblock/peer/oa_peer_closed.html'
        elif reason == 'start' and problem_closed:
            context_dict["peer_start"] = start_date
            path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        elif workflow.get("status") == "peer":
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_assessment.html'
                context_dict["peer_submission"] = peer_sub
                # Determine if file upload is supported for this XBlock.
                context_dict["allow_file_upload"] = self.allow_file_upload
                context_dict["peer_file_url"] = self.get_download_url_from_submission(peer_sub)
                # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
                self.no_peers = False
            else:
                path = 'openassessmentblock/peer/oa_peer_waiting.html'
                # Sets the XBlock boolean to signal to Message that it WAS NOT able to grab a submission
                self.no_peers = True

        return path, context_dict
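
In practice, the (path, context) pair returned here is consumed by a render handler on the same XBlock. A minimal sketch, assuming a render_assessment helper exists on the block; the handler name and the data.params shape are illustrative, not taken from the source:

    # Hedged sketch: one way the (path, context) pair might be consumed.
    # render_assessment and the data.params shape are assumptions.
    def render_peer_assessment(self, data, suffix=''):
        continue_grading = data.params.get('continue_grading', False)
        path, context_dict = self.peer_path_and_context(continue_grading)
        return self.render_assessment(path, context_dict)
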
Example #3
    def test_peer_assessment_workflow(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        for i in range(5):
            self.assertEqual((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
            sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
            peer_api.create_assessment(
                tim_sub["uuid"], tim["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )

        self.assertEqual((True, 5), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))

        # Tim should not have a score, because his submission does not have
        # enough assessments.
        self.assertIsNone(sub_api.get_score(STUDENT_ITEM))

        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT_FAIL['options_selected'],
            ASSESSMENT_DICT_FAIL['criterion_feedback'],
            ASSESSMENT_DICT_FAIL['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT_PASS['options_selected'],
            ASSESSMENT_DICT_PASS['criterion_feedback'],
            ASSESSMENT_DICT_PASS['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim has met the criteria, and should now be complete.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.submitter_is_finished(tim_sub["uuid"], requirements))
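
Both tests lean on a _create_student_and_submission fixture helper. A plausible implementation is sketched below; the STEPS constant and the copied STUDENT_ITEM fields are assumptions about the test module, not code from the source:

    # Sketch of the fixture helper used above. STEPS and STUDENT_ITEM
    # are assumed module-level constants; the exact layout may differ.
    def _create_student_and_submission(self, student, answer):
        student_item = dict(STUDENT_ITEM)
        student_item["student_id"] = student
        submission = sub_api.create_submission(student_item, answer)
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], STEPS)
        return submission, student_item
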
Example #4
    def peer_path_and_context(self, continue_grading):
        """
        Return the template path and context for rendering the peer assessment step.

        Args:
            continue_grading (bool): If true, the user has chosen to continue grading.

        Returns:
            tuple of (template_path, context_dict)

        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        finished = False
        problem_closed, reason, start_date, due_date = self.is_closed(step="peer-assessment")
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))

        context_dict = {
            "rubric_criteria": self.rubric_criteria_with_labels,
            "allow_multiple_files": self.allow_multiple_files,
            "allow_latex": self.allow_latex,
            "prompts_type": self.prompts_type,
            "user_timezone": user_preferences['user_timezone'],
            "user_language": user_preferences['user_language'],
            "xblock_id": self.get_xblock_id(),
        }

        if self.rubric_feedback_prompt is not None:
            context_dict["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        if self.rubric_feedback_default_text is not None:
            context_dict['rubric_feedback_default_text'] = self.rubric_feedback_default_text

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context_dict['peer_due'] = due_date

        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        peer_complete = workflow.get('status_details', {}).get('peer', {}).get('complete', False)
        peer_skipped = workflow.get('status_details', {}).get('peer', {}).get('skipped', False)
        continue_grading = continue_grading and peer_complete

        student_item = self.get_student_item_dict()
        assessment = self.get_assessment_module('peer-assessment')
        if assessment:
            context_dict["must_grade"] = assessment["must_grade"]
            finished, count = peer_api.has_finished_required_evaluating(
                self.submission_uuid,
                assessment["must_grade"]
            )
            context_dict["graded"] = count
            context_dict["review_num"] = count + 1

            if continue_grading:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment and review another response"
                )
            elif assessment["must_grade"] - count == 1:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment and move to next step"
                )
            else:
                context_dict["submit_button_text"] = self._(
                    "Submit your assessment and move to response #{response_number}"
                ).format(response_number=(count + 2))

        if workflow_status == "cancelled":
            path = 'openassessmentblock/peer/oa_peer_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS NOT able to grab a submission
            self.no_peers = True

        # Once a student has completed a problem, it stays complete,
        # so this condition needs to be first.
        elif (workflow.get('status') == 'done' or finished) and not continue_grading:
            path = "openassessmentblock/peer/oa_peer_complete.html"

        # Allow continued grading even if the problem due date has passed
        elif continue_grading and student_item:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode.html'
                context_dict["peer_submission"] = create_submission_dict(peer_sub, self.prompts)

                # Determine if file upload is supported for this XBlock.
                context_dict["file_upload_type"] = self.file_upload_type
                context_dict["peer_file_urls"] = self.get_download_urls_from_submission(peer_sub)
            else:
                path = 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html'
        elif reason == 'due' and problem_closed:
            path = 'openassessmentblock/peer/oa_peer_closed.html'
        elif reason == 'start' and problem_closed:
            context_dict["peer_start"] = start_date
            path = 'openassessmentblock/peer/oa_peer_unavailable.html'
        elif workflow.get("status") == "peer" or peer_skipped:
            peer_sub = self.get_peer_submission(student_item, assessment)
            if peer_sub:
                path = 'openassessmentblock/peer/oa_peer_assessment.html'
                context_dict["peer_submission"] = create_submission_dict(peer_sub, self.prompts)
                # Determine if file upload is supported for this XBlock.
                context_dict["file_upload_type"] = self.file_upload_type
                context_dict["peer_file_urls"] = self.get_download_urls_from_submission(peer_sub)
                # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
                self.no_peers = False
            else:
                path = 'openassessmentblock/peer/oa_peer_waiting.html'
                # Sets the XBlock boolean to signal to Message that it WAS NOT able to grab a submission
                self.no_peers = True

        return path, context_dict
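
Outside the XBlock runtime, the same (path, context_dict) pair can also be rendered directly with Django's template loader. A minimal sketch, assuming Django settings are configured and the openassessmentblock templates are on the template search path:

    # Minimal sketch: rendering the returned pair to an HTML fragment.
    # Assumes Django settings are configured and the template path resolves.
    from django.template.loader import get_template

    def render_fragment(path, context_dict):
        return get_template(path).render(context_dict)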