Example No. 1
    def test_create_assessment_with_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Creating feedback per criterion should require one additional query
        # for each criterion that has feedback.
        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["overall_feedback"])

        # The parts are not guaranteed to be in any particular order,
        # so we need to iterate through and check them by name.
        # If we haven't explicitly set feedback for the criterion, expect
        # that it defaults to an empty string.
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
            self.assertEqual(part['feedback'], expected_feedback)
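
In the example above, the scorer first reserves a peer submission with get_submission_to_assess and only then calls create_assessment with the scorer's own submission UUID. A minimal sketch of that sequence follows; the import path and the fixture names are assumptions, not taken from the example itself:

    # Minimal sketch of the call sequence exercised above; the import path
    # varies between edx-ora2 versions and the fixtures are assumptions.
    from openassessment.assessment import peer_api

    def assess_one_peer(scorer_submission_uuid, scorer_id,
                        options_selected, rubric_dict, graded_by=1):
        # Reserving a submission opens the workflow item that
        # create_assessment() later fills in with the scorer's assessment.
        peer_api.get_submission_to_assess(scorer_submission_uuid, graded_by)
        return peer_api.create_assessment(
            scorer_submission_uuid,   # the scorer's own submission UUID
            scorer_id,                # ID of the student doing the grading
            options_selected,         # {criterion name: selected option name}
            {},                       # per-criterion feedback (optional)
            "",                       # overall feedback
            rubric_dict,              # serialized rubric definition
            graded_by,                # "must be graded by" requirement
        )
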
Example No. 2
    def _submit_peer_assessment(self,
                                xblock,
                                student_id,
                                scorer_id,
                                assessment,
                                expect_failure=False):
        """
        Create submissions for a student and scorer, then create a peer assessment
        from the scorer.

        Args:
            xblock (OpenAssessmentBlock)
            student_id (unicode): The ID of the student being assessed.
            scorer_id (unicode): The ID of the student creating the assessment.
            assessment (dict): Serialized assessment model.

        Kwargs:
            expect_failure (bool): If true, expect a failure response and return None

        Returns:
            dict: The peer assessment retrieved from the API.

        """
        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = student_id
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = scorer_id
        another_submission = xblock.create_submission(another_student,
                                                      self.SUBMISSION)

        # Pull the submission to assess
        peer_api.get_submission_to_assess(another_submission['uuid'], 3)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(assessment)
        resp = self.request(xblock,
                            'peer_assess',
                            json.dumps(assessment),
                            response_format='json')

        if expect_failure:
            self.assertFalse(resp['success'])
            return None
        else:
            self.assertTrue(resp['success'])

            # Retrieve the peer assessment
            retrieved_assessment = peer_api.get_assessments(
                submission['uuid'], scored_only=False)[0]
            return submission['uuid'], retrieved_assessment
Example No. 3
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(
            hal_submission['uuid'],
            hal_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(
            sally_submission['uuid'],
            sally_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
                        "Hal".encode('utf-8') in peer_response.body)
Example No. 4
    def test_create_feedback_on_an_assessment(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
        peer_api.create_assessment(
            tim_sub["uuid"],
            tim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        peer_api.get_score(
            tim_sub["uuid"],
            {
                'must_grade': 1,
                'must_be_graded_by': 1
            }
        )
        feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
        self.assertIsNone(feedback)
        peer_api.set_assessment_feedback(
            {
                'submission_uuid': tim_sub['uuid'],
                'feedback_text': 'Bob is a jerk!',
                'options': [
                    'I disliked this assessment',
                    'I felt this assessment was unfair',
                ]
            }
        )
        saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
        self.assertIsNot(saved_feedback, None)
        self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
        self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
        self.assertItemsEqual(saved_feedback['options'], [
            {'text': 'I disliked this assessment'},
            {'text': 'I felt this assessment was unfair'},
        ])
        self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
Example No. 5
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item,
                                                    u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item,
                                                  u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body
                        or "Hal".encode('utf-8') in peer_response.body)
Example No. 6
    def test_create_assessment_points(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'], dict(), "",
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
Example No. 7
    def get_peer_submission(self, student_item_dict, assessment):
        """
        Retrieve a submission to peer-assess.

        Args:
            student_item_dict (dict): The student item for the student creating the submission.
            assessment (dict): A dict describing the requirements for grading.

        Returns:
            dict: The serialized submission model.

        """
        peer_submission = False
        try:
            peer_submission = peer_api.get_submission_to_assess(
                self.submission_uuid,
                assessment["must_be_graded_by"],
                True
            )
            self.runtime.publish(
                self,
                "openassessmentblock.get_peer_submission",
                {
                    "requesting_student_id": student_item_dict["student_id"],
                    "course_id": student_item_dict["course_id"],
                    "item_id": student_item_dict["item_id"],
                    "submission_returned_uuid": (
                        peer_submission["uuid"] if peer_submission else None
                    )
                }
            )
        except PeerAssessmentWorkflowError as err:
            logger.exception(err)

        return peer_submission
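
Note that the method above returns False rather than raising when no peer submission can be reserved: the PeerAssessmentWorkflowError is caught and logged. A hedged sketch of a caller that branches on the falsy result; the method and context-key names below are placeholders, not taken from the source:

    def _peer_step_context(self, student_item_dict, assessment_requirements):
        # Illustrative helper only; the context keys are placeholder names.
        peer_sub = self.get_peer_submission(student_item_dict, assessment_requirements)
        if peer_sub:
            # A submission was reserved for this learner to assess.
            return {'peer_submission': peer_sub}
        # False means nothing could be reserved (or a workflow error was logged).
        return {'waiting_for_peers': True}
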
Example No. 8
    def test_close_active_assessment(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            buffy_answer["uuid"], "Buffy",
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = PeerWorkflowItem.objects.get(submission_uuid=xander_answer['uuid'])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example No. 9
    def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
        """
        Create submissions for a student and scorer, then create a peer assessment
        from the scorer.

        Args:
            xblock (OpenAssessmentBlock)
            student_id (unicode): The ID of the student being assessed.
            scorer_id (unicode): The ID of the student creating the assessment.
            assessment (dict): Serialized assessment model.

        Kwargs:
            expect_failure (bool): If true, expect a failure response and return None

        Returns:
            dict: The peer assessment retrieved from the API.

        """
        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = student_id
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = scorer_id
        another_submission = xblock.create_submission(another_student, self.SUBMISSION)

        # Pull the submission to assess
        peer_api.get_submission_to_assess(another_submission['uuid'], 3)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(assessment)
        resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')

        if expect_failure:
            self.assertFalse(resp['success'])
            return None
        else:
            self.assertTrue(resp['success'])

            # Retrieve the peer assessment
            retrieved_assessment = peer_api.get_assessments(submission['uuid'], scored_only=False)[0]
            return submission['uuid'], retrieved_assessment
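
For context, a hedged example of how a test case might drive the helper above; the student IDs, the self.ASSESSMENT fixture, and the test name are placeholders rather than names from the snippet:

    def test_peer_assess_roundtrip(self, xblock):
        # Illustrative only: 'Sally', 'Bob', and self.ASSESSMENT are placeholder fixtures.
        submission_uuid, assessment = self._submit_peer_assessment(
            xblock,
            'Sally',          # the student being assessed
            'Bob',            # the scorer creating the assessment
            self.ASSESSMENT,  # serialized assessment payload
        )
        # The stored assessment should reference Sally's submission and credit Bob as scorer.
        self.assertEqual(assessment['submission_uuid'], submission_uuid)
        self.assertEqual(assessment['scorer_id'], 'Bob')
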
Example No. 10
    def test_create_assessment_unknown_criterion_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Create an assessment where the criterion feedback uses
        # a criterion name that isn't in the rubric.
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'unknown': 'Unknown criterion has feedback!'},
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The criterion feedback should be ignored
        for part_num in range(3):
            self.assertEqual(assessment["parts"][part_num]["feedback"], "")
Example No. 11
    def test_create_huge_assessment_fails(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 1)
        with self.assertRaises(peer_api.PeerAssessmentRequestError):
            peer_api.create_assessment(
                sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT_PASS_HUGE,
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
Example No. 12
    def test_get_submission_to_evaluate(self):
        submission, __ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
        self._create_student_and_submission("Bob", "Bob's answer", TUESDAY)
        self._create_student_and_submission(
            "Sally", "Sally's answer", WEDNESDAY
        )
        self._create_student_and_submission("Jim", "Jim's answer", THURSDAY)

        submission = peer_api.get_submission_to_assess(submission['uuid'], 3)
        self.assertIsNotNone(submission)
        self.assertEqual(submission["answer"], u"Bob's answer")
        self.assertEqual(submission["student_item"], 2)
        self.assertEqual(submission["attempt_number"], 1)
Example No. 13
    def test_peer_leases_same_submission(self):
        """
        Tests the scenario where a student pulls a peer's submission for
        assessment, lets the lease expire, then pulls the same peer's submission
        a second time.

        This creates two similar PeerWorkflowItems in the database, and when
        completing the assessment, the latest PeerWorkflowItem should be
        updated.
        """
        yesterday = timezone.now() - datetime.timedelta(days=1)
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Bob", "Bob's answer")
        self._create_student_and_submission("Sally", "Sally's answer")
        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        # And now we cheat; we want to set the clock back such that the lease
        # on this PeerWorkflowItem has expired.
        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        pwis[0].started_at = yesterday
        pwis[0].save()

        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        peer_api.create_assessment(
            tim_sub["uuid"], tim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        self.assertNotEqual(pwis[0].started_at, yesterday)
Example No. 14
    def test_get_assessments(self, assessment_dict):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 3)
        peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            assessment_dict,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
        self.assertEqual(1, len(assessments))
Example No. 15
    def test_peer_assess_handler(self, xblock):

        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'

        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        xblock.create_submission(another_student, self.SUBMISSION)
        peer_api.get_submission_to_assess(another_student, 3)
        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
        self.assertTrue(resp['success'])

        # Retrieve the assessment and check that it matches what we sent
        actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
        self.assertEqual(actual[0]['points_earned'], 5)
        self.assertEqual(actual[0]['points_possible'], 6)
        self.assertEqual(actual[0]['scorer_id'], 'Bob')
        self.assertEqual(actual[0]['score_type'], 'PE')

        self.assertEqual(len(actual[0]['parts']), 2)
        parts = sorted(actual[0]['parts'])
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')

        self.assertEqual(actual[0]['feedback'], assessment['feedback'])
Example No. 16
    def test_create_huge_overall_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge overall feedback text
        assessment_dict = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            dict(),
            ASSESSMENT_DICT_HUGE['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The assessment feedback text should be truncated
        self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)

        # The length of the feedback text in the database should
        # equal what we got from the API.
        assessment = Assessment.objects.get()
        self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
Example No. 17
    def test_create_huge_per_criterion_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge per-criterion feedback text
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            ASSESSMENT_DICT_HUGE['criterion_feedback'],
            "",
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Verify that the feedback has been truncated
        for part in assessment['parts']:
            self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)

        # Verify that the feedback in the database matches what we got back from the API
        for part in AssessmentPart.objects.all():
            self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
Example No. 18
    def test_create_assessment(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 1)
        assessment = peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
Example No. 19
    def test_error_on_get_assessment(self, mock_filter):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 3)
        peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
            MONDAY,
        )
        mock_filter.side_effect = DatabaseError("Bad things happened")
        peer_api.get_assessments(sub["uuid"])
Example No. 20
    def test_get_assessments(self, assessment_dict):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
        peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            assessment_dict['options_selected'],
            assessment_dict['criterion_feedback'],
            assessment_dict['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
        self.assertEqual(1, len(assessments))
Example No. 21
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1
        )

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
Example No. 22
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 1)
        assessment = peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            1
        )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
Example No. 23
    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)
        peer_api.create_assessment(
            sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        finished, count = peer_api.has_finished_required_evaluating(bob, 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)
Example No. 24
    def get_peer_submission(self, student_item_dict, assessment):
        peer_submission = False
        try:
            peer_submission = peer_api.get_submission_to_assess(
                student_item_dict, assessment["must_be_graded_by"], True
            )
            self.runtime.publish(
                self,
                "openassessmentblock.get_peer_submission",
                {
                    "requesting_student_id": student_item_dict["student_id"],
                    "course_id": student_item_dict["course_id"],
                    "item_id": student_item_dict["item_id"],
                    "submission_returned_uuid": (peer_submission["uuid"] if peer_submission else None),
                },
            )
        except PeerAssessmentWorkflowError as err:
            logger.exception(err)

        return peer_submission
Example No. 25
    def test_has_finished_evaluation(self):
        """
        Verify unfinished assessments do not get counted when determining a
        complete workflow.
        """
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertFalse(finished)
        self.assertEqual(count, 0)
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1,
        )
        finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
        self.assertTrue(finished)
        self.assertEqual(count, 1)
Example No. 26
    def test_close_active_assessment(self):
        buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = peer_api._get_latest_workflow(buffy)

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy, 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example No. 27
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item,
                                                    u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item,
                                                  u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'],
                                                      1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'],
                                                    1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If Over Grading is on, this should now return Sally's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # A final over grading will not return anything.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)
        self.assertIn("Peer Assessments Complete", peer_response.body)
Example No. 28
    def test_complex_peer_assessment_workflow(self):
        """
        Intended to mimic a more complicated scenario where people do not
        necessarily always finish their assessments.

        1) Angel submits
        2) Angel waits for Peer Assessments
        3) Bob submits and pulls Angel's submission but never reviews it.
        4) Sally submits
        5) Sally pulls Angel's Submission but never reviews it.
        6) Jim submits
        7) Jim also doesn't care about Angel and does not bother to review.
        8) Buffy comes along and she submits
        9) Buffy cares about Angel, but she won't get Angel's submission;
            it's held by Bob, Sally, and Jim.
        10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
        11) Xander comes along and submits.
        12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
            lazy and doesn't grade Buffy when her submission comes along.
        13) Buffy is waiting in the wings. She pulls Xander's submission and
            grades it.
        14) Spike submits.
        15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
        16) Buffy reviews Spike
        17) Willow comes along and submits
        18) Willow goes to grade, and should get Buffy
        19) Xander comes back and gets Buffy's submission, and grades it.
        20) Buffy should now have a grade.
        """

        # Buffy should not have a score, because she has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }

        # 1) Angel Submits
        angel_sub, _ = self._create_student_and_submission("Angel", "Angel's answer")

        # 2) Angel waits for peers
        sub = peer_api.get_submission_to_assess(angel_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertIsNone(sub)
        # 3) Bob submits
        bob_sub, _ = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 4) Sally submits
        sally_sub, _ = self._create_student_and_submission("Sally", "Sally's answer")

        # 5) Sally pulls Angel's Submission but never reviews it.
        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 6) Jim submits
        jim_sub, _ = self._create_student_and_submission("Jim", "Jim's answer")

        # 7) Jim also doesn't care about Angel and does not bother to review.
        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 8) Buffy comes along and she submits
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")

        # 9) Buffy cares about Angel, but she won't get Angel's submission;
        # it's held by Bob, Sally, and Jim.
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])

        # 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertIsNone(sub)

        # 11) Xander comes along and submits.
        xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # 12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
        # lazy and doesn't grade Buffy when her submission comes along.
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 13) Buffy is waiting in the wings. She pulls Xander's submission and
        # grades it.
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(xander_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 14) Spike submits.
        spike_sub, spike = self._create_student_and_submission("Spike", "Spike's answer")

        # 15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(xander_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 16) Buffy reviews Spike
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(spike_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 17) Willow comes along and submits
        willow_sub, willow = self._create_student_and_submission("Willow", "Willow's answer")

        # 18) Willow goes to grade, and should get Buffy
        sub = peer_api.get_submission_to_assess(willow_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            willow_sub["uuid"], willow["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 19) Xander comes back and gets Buffy's submission, and grades it.
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 20) Buffy should now have a grade.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
Example No. 29
    def test_completeness(self):
        """
        Verify that a submission in the peer workflow is only marked complete
        when we intend it to be. Incomplete assessments should never be
        included in a grade.
        """
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Bob and Sally pull Tim's submission for peer assessment, but do not
        # grade him right away.
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        # Jim pulls Tim's submission, then grades it immediately.
        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim should have no score.
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Tim's workflow should not be fully graded
        self.assertIsNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)

        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim should not have a score, because he did not grade any peers.
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Tim's workflow has enough grades.
        self.assertIsNotNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)
Example No. 30
    def test_peer_assessment_workflow(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        for i in range(5):
            self.assertEquals((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
            sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
            peer_api.create_assessment(
                tim_sub["uuid"], tim["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )

        self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))

        # Tim should not have a score, because his submission does not have
        # enough assessments.
        self.assertIsNone(sub_api.get_score(STUDENT_ITEM))

        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT_FAIL['options_selected'],
            ASSESSMENT_DICT_FAIL['criterion_feedback'],
            ASSESSMENT_DICT_FAIL['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT_PASS['options_selected'],
            ASSESSMENT_DICT_PASS['criterion_feedback'],
            ASSESSMENT_DICT_PASS['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim has met the criteria, and should now be complete.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
Example No. 31
    def _create_submission_and_assessments(self,
                                           xblock,
                                           submission_text,
                                           peers,
                                           peer_assessments,
                                           self_assessment,
                                           waiting_for_peer=False):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Kwargs:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer,
                                                   {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'])

            submission = peer_api.get_submission_to_assess(
                scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module(
                        'peer-assessment')['must_be_graded_by'])

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'], student_id, asmnt['options_selected'],
                asmnt['criterion_feedback'], asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module(
                    'peer-assessment')['must_be_graded_by'])

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(submission['uuid'], student_id,
                                       self_assessment['options_selected'],
                                       {'criteria': xblock.rubric_criteria})
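
    # --- Hedged usage sketch (not part of the original helper). A grade test might
    # drive the full workflow like this. The xblock argument is assumed to be supplied
    # by the same test harness as the other examples, and SUBMISSION / PEERS /
    # PEER_ASSESSMENTS / SELF_ASSESSMENT are assumed fixture names.
    def test_render_grade_sketch(self, xblock):
        # Submit, exchange peer assessments, and self-assess so a grade exists.
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT
        )
        # Rendering the grade step should now produce a non-empty response.
        resp = self.request(xblock, 'render_grade', json.dumps({}))
        self.assertIsNotNone(resp)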
Exemplo n.º 32
0
    def _create_submission_and_assessments(
        self, xblock, submission_text, peers, peer_assessments, self_assessment,
        waiting_for_peer=False
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Kwargs:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'])

            submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                )

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'],
                student_id,
                asmnt['options_selected'],
                asmnt['criterion_feedback'],
                asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
            )

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(
                submission['uuid'], student_id, self_assessment['options_selected'],
                {'criteria': xblock.rubric_criteria}
            )
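
    # --- Hedged sketch (not part of the original helper): exercising the
    # waiting_for_peer=True path. With no peer assessments of her own submission,
    # the user should not have finished the workflow. Fixture names are assumed,
    # as in the sketch above.
    def test_waiting_for_peer_sketch(self, xblock):
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, PEER_ASSESSMENTS, SELF_ASSESSMENT,
            waiting_for_peer=True
        )
        self.assertNotEqual(xblock.get_workflow_info().get('status'), 'done')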
Exemplo n.º 33
0
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

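        # Sally comes and submits a response.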
        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(
            sally_sub['uuid'],
            hal_student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(
            hal_sub['uuid'],
            sally_student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Bob now submits. With over grading (continue_grading) on, the peer step
        # should offer Bob either Sally's or Hal's response to assess.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        hal_response = "Hal".encode('utf-8') in peer_response.body
        sally_response = "Sally".encode('utf-8') in peer_response.body

        # Validate Peer Rendering.
        if hal_response:
            peer_uuid = hal_sub['uuid']
        elif sally_response:
            peer_uuid = sally_sub['uuid']
        else:
            self.fail("Response was neither Hal or Sally's submission.")

        peer_api.create_assessment(
            peer_uuid,
            student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        # Validate Peer Rendering. Check that if Sally or Hal were selected
        # the first time around, the other is selected this time.
        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
            peer_uuid = hal_sub['uuid']
        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
            peer_uuid = sally_sub['uuid']
        else:
            self.fail("Response was neither Hal or Sally's submission.")

        peer_api.create_assessment(
            peer_uuid,
            student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # A final over-grading request should not return anything else to assess.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
        self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
Exemplo n.º 34
0
    def test_no_submissions_to_evaluate_for_tim(self):
        submission, __ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
        submission = peer_api.get_submission_to_assess(submission['uuid'], 3)
        self.assertIsNone(submission)
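
    # --- Hedged sketch (not in the original tests): the complementary case where
    # another student has already submitted, so Tim does receive a submission to
    # assess. TUESDAY is assumed to be a datetime fixture alongside MONDAY.
    def test_submission_to_evaluate_for_tim_sketch(self):
        bob_sub, __ = self._create_student_and_submission("Bob", "Bob's answer", MONDAY)
        tim_sub, __ = self._create_student_and_submission("Tim", "Tim's answer", TUESDAY)
        submission = peer_api.get_submission_to_assess(tim_sub['uuid'], 3)
        self.assertIsNotNone(submission)
        self.assertEqual(submission['answer'], bob_sub['answer'])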