Example #1
 def test_no_open_assessment(self):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     peer_api.create_assessment(
         bob_sub['uuid'], bob['student_id'],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         1
     )
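
Note: these examples reference shared module-level test fixtures (ASSESSMENT_DICT, RUBRIC_DICT, REQUIRED_GRADED, REQUIRED_GRADED_BY, STUDENT_ITEM, MONDAY, and variants such as ASSESSMENT_DICT_FAIL or ASSESSMENT_DICT_HUGE) that are defined in the test modules but not shown here. The sketch below is only a rough, assumed illustration of their shape, based on how they are passed to peer_api.create_assessment in the examples; the actual fixture values (criteria names, options, points) differ.

# Assumed fixture shapes, for orientation only -- not the real test data.
import datetime
import pytz

REQUIRED_GRADED = 5        # peers each student must grade (assumed value)
REQUIRED_GRADED_BY = 3     # peers that must grade each submission (assumed value)
MONDAY = datetime.datetime(2007, 9, 12, tzinfo=pytz.UTC)  # placeholder deadline

STUDENT_ITEM = {
    'student_id': 'Tim',
    'course_id': 'demo_course',
    'item_id': 'item_one',
    'item_type': 'openassessment',
}

# A later example asserts that the rubric's 'secret' criterion is worth at
# most 1 point and 'giveup' at most 10; the remaining criteria are omitted here.
RUBRIC_DICT = {
    'criteria': [
        {
            'name': 'secret',
            'prompt': 'Did the writer keep the secret?',
            'options': [
                {'name': 'no', 'points': 0, 'explanation': ''},
                {'name': 'yes', 'points': 1, 'explanation': ''},
            ],
        },
        # ... additional criteria ('giveup', etc.) with their own options
    ],
}

ASSESSMENT_DICT = {
    'options_selected': {'secret': 'yes', 'giveup': 'eager'},
    'criterion_feedback': {'giveup': 'Needs more commitment.'},
    'overall_feedback': 'Good work overall.',
}
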
Example #2
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(
            hal_submission['uuid'],
            hal_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(
            sally_submission['uuid'],
            sally_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
            "Hal".encode('utf-8') in peer_response.body)
Example #3
 def test_error_on_assessment_creation(self, mock_filter):
     mock_filter.side_effect = DatabaseError("Bad things happened")
     submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
     peer_api.create_peer_workflow(submission["uuid"])
     peer_api.create_assessment(
         submission["uuid"],
         STUDENT_ITEM["student_id"],
         ASSESSMENT_DICT,
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
         MONDAY,
     )
Example #4
 def test_create_huge_assessment_fails(self):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob, 1)
     with self.assertRaises(peer_api.PeerAssessmentRequestError):
         peer_api.create_assessment(
             sub["uuid"],
             bob["student_id"],
             ASSESSMENT_DICT_PASS_HUGE,
             RUBRIC_DICT,
             REQUIRED_GRADED_BY,
         )
Example #5
    def test_assess_before_submitting(self):
        # Create a submission for another student
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)

        # Attempt to create the assessment from another student without first making a submission
        peer_api.create_assessment(
            submission["uuid"],
            "another_student",
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
            MONDAY,
        )
Example #6
 def test_get_assessments(self, assessment_dict):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob, 3)
     peer_api.create_assessment(
         sub["uuid"],
         bob["student_id"],
         assessment_dict,
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
     self.assertEqual(1, len(assessments))
Example #7
 def test_create_feedback_on_an_assessment(self):
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
     assessment = peer_api.create_assessment(
         bob_sub["uuid"],
         bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
     peer_api.create_assessment(
         tim_sub["uuid"],
         tim["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_score(
         tim_sub["uuid"],
         {
             'must_grade': 1,
             'must_be_graded_by': 1
         }
     )
     feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNone(feedback)
     peer_api.set_assessment_feedback(
         {
             'submission_uuid': tim_sub['uuid'],
             'feedback_text': 'Bob is a jerk!',
             'options': [
                 'I disliked this assessment',
                 'I felt this assessment was unfair',
             ]
         }
     )
     saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNot(saved_feedback, None)
     self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
     self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
     self.assertItemsEqual(saved_feedback['options'], [
         {'text': 'I disliked this assessment'},
         {'text': 'I felt this assessment was unfair'},
     ])
     self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
Example #8
 def test_error_on_get_assessment(self, mock_filter):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob, 3)
     peer_api.create_assessment(
         sub["uuid"],
         bob["student_id"],
         ASSESSMENT_DICT,
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
         MONDAY,
     )
     mock_filter.side_effect = DatabaseError("Bad things happened")
     peer_api.get_assessments(sub["uuid"])
Example #9
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item,
                                                    u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item,
                                                  u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body
                        or "Hal".encode('utf-8') in peer_response.body)
Example #10
 def test_no_submission_found_closing_assessment(self):
     """
     Confirm the appropriate error is raised when no submission is open
     for assessment at the time an assessment is submitted.
     """
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
     peer_api.create_assessment(
         tim_sub["uuid"], tim["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
Example #11
 def test_get_assessments(self, assessment_dict):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
     peer_api.create_assessment(
         bob_sub["uuid"],
         bob["student_id"],
         assessment_dict['options_selected'],
         assessment_dict['criterion_feedback'],
         assessment_dict['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
     self.assertEqual(1, len(assessments))
Example #12
    def test_create_assessment_with_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Creating per-criterion feedback should need one additional query
        # for each criterion that has feedback.
        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["overall_feedback"])

        # The parts are not guaranteed to be in any particular order,
        # so we need to iterate through and check them by name.
        # If we haven't explicitly set feedback for the criterion, expect
        # that it defaults to an empty string.
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
            self.assertEqual(part['feedback'], expected_feedback)
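
For reference, the loop above relies on the nested shape of each entry in assessment['parts']. Judging only from the keys these examples access, a single part looks roughly like the sketch below; the concrete values are invented.

# Assumed shape of one element of assessment['parts'], inferred from the
# keys read in these examples (part['feedback'], part['option']['name'],
# part['option']['points'], part['option']['criterion']['name']).
example_part = {
    'feedback': 'Needs more commitment.',
    'option': {
        'name': 'yes',
        'points': 1,
        'criterion': {
            'name': 'secret',
        },
    },
}
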
Example #13
    def test_close_active_assessment(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = peer_api._get_workflow_by_submission_uuid(buffy_answer['uuid'])

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            buffy_answer["uuid"], "Buffy",
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = PeerWorkflowItem.objects.get(submission_uuid=xander_answer['uuid'])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example #14
 def test_has_finished_evaluation(self):
     """
     Verify unfinished assessments do not get counted when determining a
     complete workflow.
     """
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob, REQUIRED_GRADED)
     self.assertEqual(sub["uuid"], tim_sub["uuid"])
     finished, count = peer_api.has_finished_required_evaluating(bob, 1)
     self.assertFalse(finished)
     self.assertEqual(count, 0)
     peer_api.create_assessment(
         sub["uuid"], bob["student_id"], ASSESSMENT_DICT, RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     finished, count = peer_api.has_finished_required_evaluating(bob, 1)
     self.assertTrue(finished)
     self.assertEqual(count, 1)
Example #15
    def test_peer_leases_same_submission(self):
        """
        Tests the scenario where a student pulls a peer's submission for
        assessment, lets the lease expire, then pulls the same peer's submission
        a second time.

        This creates two similar PeerWorkflowItems in the database, and when
        completing the assessment, the latest PeerWorkflowItem should be
        updated.
        """
        yesterday = timezone.now() - datetime.timedelta(days=1)
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Bob", "Bob's answer")
        self._create_student_and_submission("Sally", "Sally's answer")
        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        # And now we cheat; we want to set the clock back such that the lease
        # on this PeerWorkflowItem has expired.
        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        pwis[0].started_at = yesterday
        pwis[0].save()

        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        peer_api.create_assessment(
            tim_sub["uuid"], tim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        self.assertNotEqual(pwis[0].started_at, yesterday)
Example #16
 def test_has_finished_evaluation(self):
     """
     Verify unfinished assessments do not get counted when determining a
     complete workflow.
     """
     tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
     self.assertEqual(sub["uuid"], tim_sub["uuid"])
     finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
     self.assertFalse(finished)
     self.assertEqual(count, 0)
     peer_api.create_assessment(
         bob_sub["uuid"], bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         1,
     )
     finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
     self.assertTrue(finished)
     self.assertEqual(count, 1)
Example #17
 def test_create_assessment(self):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob, 1)
     assessment = peer_api.create_assessment(
         sub["uuid"],
         bob["student_id"],
         ASSESSMENT_DICT,
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     self.assertEqual(assessment["points_earned"], 6)
     self.assertEqual(assessment["points_possible"], 14)
     self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])
Example #18
    def test_create_assessment_points(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'], dict(), "",
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
Example #19
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1
        )

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
Example #20
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 1)
        assessment = peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            1
        )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["feedback"])

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
Example #21
    def test_create_assessment_unknown_criterion_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Create an assessment where the criterion feedback uses
        # a criterion name that isn't in the rubric.
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'unknown': 'Unknown criterion has feedback!'},
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The criterion feedback should be ignored
        for part_num in range(3):
            self.assertEqual(assessment["parts"][part_num]["feedback"], "")
Example #22
    def test_create_huge_per_criterion_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge per-criterion feedback text
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            ASSESSMENT_DICT_HUGE['criterion_feedback'],
            "",
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Verify that the feedback has been truncated
        for part in assessment['parts']:
            self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)

        # Verify that the feedback in the database matches what we got back from the API
        for part in AssessmentPart.objects.all():
            self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
Example #23
    def test_create_huge_overall_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge overall feedback text
        assessment_dict = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            dict(),
            ASSESSMENT_DICT_HUGE['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The assessment feedback text should be truncated
        self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)

        # The length of the feedback text in the database should
        # equal what we got from the API.
        assessment = Assessment.objects.get()
        self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
Example #24
    def test_close_active_assessment(self):
        buffy_answer, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = peer_api._get_latest_workflow(buffy)

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy, 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            xander_answer["uuid"], "Buffy", ASSESSMENT_DICT, RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        peer_api._close_active_assessment(buffy_workflow, xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = peer_api._create_peer_workflow_item(buffy_workflow, xander_answer["uuid"])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example #25
    def peer_assess(self, data, suffix=""):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed at this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `submission_uuid` (string): The unique identifier for the submission being assessed.
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `feedback` (unicode): Written feedback for the submission.

        Returns:
            Dict with keys "success" (bool), indicating success or failure,
            and "msg" (unicode), containing additional information if an error occurs.

        """
        # Validate the request
        if "feedback" not in data:
            return {"success": False, "msg": _("Must provide feedback in the assessment")}

        if "options_selected" not in data:
            return {"success": False, "msg": _("Must provide options selected in the assessment")}

        if "submission_uuid" not in data:
            return {"success": False, "msg": _("Must provide submission uuid for the assessment")}

        assessment_ui_model = self.get_assessment_module("peer-assessment")
        if assessment_ui_model:
            rubric_dict = {"criteria": self.rubric_criteria}
            assessment_dict = {"feedback": data["feedback"], "options_selected": data["options_selected"]}

            try:
                assessment = peer_api.create_assessment(
                    data["submission_uuid"],
                    self.get_student_item_dict()["student_id"],
                    assessment_dict,
                    rubric_dict,
                    assessment_ui_model["must_be_graded_by"],
                )
                # Emit analytics event...
                self.runtime.publish(
                    self,
                    "openassessmentblock.peer_assess",
                    {
                        "feedback": assessment["feedback"],
                        "rubric": {"content_hash": assessment["rubric"]["content_hash"]},
                        "scorer_id": assessment["scorer_id"],
                        "score_type": assessment["score_type"],
                        "scored_at": assessment["scored_at"],
                        "submission_uuid": assessment["submission_uuid"],
                        "parts": [
                            {"option": {"name": part["option"]["name"], "points": part["option"]["points"]}}
                            for part in assessment["parts"]
                        ],
                    },
                )
            except PeerAssessmentRequestError as ex:
                return {"success": False, "msg": ex.message}
            except PeerAssessmentInternalError as ex:
                msg = _("Internal error occurred while creating the assessment")
                logger.exception(msg)
                return {"success": False, "msg": msg}

            # Update both the workflow that the submission we're assessing
            # belongs to, as well as our own (e.g. have we evaluated enough?)
            try:
                self.update_workflow_status(submission_uuid=data["submission_uuid"])
                self.update_workflow_status()
            except workflow_api.AssessmentWorkflowError:
                msg = _("Could not update workflow status.")
                logger.exception(msg)
                return {"success": False, "msg": msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {"success": True, "msg": u""}

        else:
            return {"success": False, "msg": _("Could not load peer assessment.")}
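
For orientation, the handler above validates three keys before calling peer_api.create_assessment, so a request payload would look roughly like the sketch below. The UUID and option names are placeholders, and later versions of this handler may expect additional keys (e.g. per-criterion feedback) that this snippet does not check for.

# Hypothetical JSON payload posted to the peer_assess handler shown above.
example_data = {
    "submission_uuid": "00000000-0000-0000-0000-000000000000",  # placeholder
    "options_selected": {"secret": "yes", "giveup": "eager"},    # criterion -> option name
    "feedback": u"Clear argument, but the conclusion needs more support.",
}
# In the XBlock runtime this dict arrives as the `data` argument of
# peer_assess(); on success the handler returns {"success": True, "msg": u""}.
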
Example #26
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'
            )

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id)

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)

                # Create the peer assessment
                peer_api.create_assessment(
                    scorer_submission_uuid, scorer_id, options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric,
                    self.NUM_PEER_ASSESSMENTS)

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(submission_uuid,
                                       student_item['student_id'],
                                       options_selected, rubric)
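
As a usage note, this command is normally run from the shell with the positional arguments shown in its own usage string (python manage.py create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>). Under the old-style Django command API that this code targets, it can also be driven from Python, as in the sketch below; the course and item IDs are made-up placeholders.

# Sketch: invoking the management command above programmatically.
from django.core.management import call_command

call_command('create_oa_submissions', 'edX/Demo/2014', 'peer_block_1', '5')
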
Example #27
    def _create_submission_and_assessments(self,
                                           xblock,
                                           submission_text,
                                           peers,
                                           peer_assessments,
                                           self_assessment,
                                           waiting_for_peer=False):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Kwargs:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer,
                                                   {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'])

            submission = peer_api.get_submission_to_assess(
                scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module(
                        'peer-assessment')['must_be_graded_by'])

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'], student_id, asmnt['options_selected'],
                asmnt['criterion_feedback'], asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module(
                    'peer-assessment')['must_be_graded_by'])

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(submission['uuid'], student_id,
                                       self_assessment['options_selected'],
                                       {'criteria': xblock.rubric_criteria})
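
A call to this helper from a grade test might look like the sketch below. The peer names and assessment dicts are illustrative placeholders following the argument descriptions in the docstring above; they are not the fixtures used by the real tests.

# Hypothetical invocation of _create_submission_and_assessments().
peers = ['McNulty', 'Moreland']
peer_assessments = [
    {
        'options_selected': {'secret': 'yes', 'giveup': 'eager'},
        'criterion_feedback': {},
        'overall_feedback': u'Solid response.',
    },
    {
        'options_selected': {'secret': 'no', 'giveup': 'reluctant'},
        'criterion_feedback': {},
        'overall_feedback': u'Needs more detail.',
    },
]
self_assessment = {'options_selected': {'secret': 'yes', 'giveup': 'eager'}}

self._create_submission_and_assessments(
    xblock, u"This is my answer.", peers, peer_assessments, self_assessment
)
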
Example #28
    def test_completeness(self):
        """
        Verify that a submission in the peer workflow is only marked complete
        when we intend it to be. Incomplete assessments should never be
        included in a grade.
        """
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
                }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Bob and Sally pull Tim's submission for peer assessment, but do not
        # grade him right away.
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        # Jim pulls Tim's submission, then grades it immediately.
        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])

        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim should have no score.
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Tim's workflow should not be fully graded
        self.assertIsNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)

        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim should not have a score since he did not grade peers.
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        # Tim's workflow has enough grades.
        self.assertIsNotNone(PeerWorkflow.objects.get(student_id=tim["student_id"]).grading_completed_at)
Example #29
    def test_peer_assessment_workflow(self):
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sally_sub, sally = self._create_student_and_submission("Sally", "Sally's answer")
        jim_sub, jim = self._create_student_and_submission("Jim", "Jim's answer")
        self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")

        # Tim should not have a score, because he has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }
        score = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements
        )["score"]
        self.assertIsNone(score)

        for i in range(5):
            self.assertEquals((False, i), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))
            sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
            peer_api.create_assessment(
                tim_sub["uuid"], tim["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )

        self.assertEquals((True, 5), peer_api.has_finished_required_evaluating(tim_sub['uuid'], REQUIRED_GRADED))

        # Tim should not have a score, because his submission does not have
        # enough assessments.
        self.assertIsNone(sub_api.get_score(STUDENT_ITEM))

        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            sally_sub["uuid"], sally["student_id"],
            ASSESSMENT_DICT_FAIL['options_selected'],
            ASSESSMENT_DICT_FAIL['criterion_feedback'],
            ASSESSMENT_DICT_FAIL['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(sub["uuid"], tim_sub["uuid"])
        peer_api.create_assessment(
            jim_sub["uuid"], jim["student_id"],
            ASSESSMENT_DICT_PASS['options_selected'],
            ASSESSMENT_DICT_PASS['criterion_feedback'],
            ASSESSMENT_DICT_PASS['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Tim has met the criteria, and should now be complete.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.is_complete(tim_sub["uuid"], requirements))
Example #30
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_student_item, 1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(
            sally_sub['uuid'],
            hal_student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_student_item, 1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(
            hal_sub['uuid'],
            sally_student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # If Over Grading is on, this should now return Sally's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        hal_response = "Hal".encode('utf-8') in peer_response.body
        sally_response = "Sally".encode('utf-8') in peer_response.body

        # Validate Peer Rendering.
        if hal_response:
            peer_uuid = hal_sub['uuid']
        elif sally_response:
            peer_uuid = sally_sub['uuid']
        else:
            self.fail("Response was neither Hal's nor Sally's submission.")

        peer_api.create_assessment(
            peer_uuid,
            student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        # Validate Peer Rendering. Check that if Sally or Hal were selected
        # the first time around, the other is selected this time.
        if not hal_response and "Hal".encode('utf-8') in peer_response.body:
            peer_uuid = hal_sub['uuid']
        elif not sally_response and "Sally".encode('utf-8') in peer_response.body:
            peer_uuid = sally_sub['uuid']
        else:
            self.fail("Response was neither Hal's nor Sally's submission.")

        peer_api.create_assessment(
            peer_uuid,
            student_item['student_id'],
            assessment,
            {'criteria': xblock.rubric_criteria},
            1
        )

        # A Final over grading will not return anything.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
        self.assertIn("Congratulations".encode('utf-8'), peer_response.body)
Example #31
    def test_complex_peer_assessment_workflow(self):
        """
        Intended to mimic a more complicated scenario where people do not
        necessarily always finish their assessments.

        1) Angel submits
        2) Angel waits for Peer Assessments
        3) Bob submits and pulls Angel's submission but never reviews it.
        4) Sally submits
        5) Sally pulls Angel's Submission but never reviews it.
        6) Jim submits
        7) Jim also doesn't care about Angel and does not bother to review.
        8) Buffy comes along and she submits
        9) Buffy cares about Angel, but she won't get Angel's submission;
            it's held by Bob, Sally, and Jim.
        10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
        11) Xander comes along and submits.
        12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
            lazy and doesn't grade Buffy when her submission comes along.
        13) Buffy is waiting in the wings. She pulls Xander's submission and
            grades it.
        14) Spike submits.
        15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
        16) Buffy reviews Spike
        17) Willow comes along and submits
        18) Willow goes to grade, and should get Buffy
        19) Xander comes back and gets Buffy's submission, and grades it.
        20) Buffy should now have a grade.
        """

        # Buffy should not have a score, because she has not evaluated enough
        # peer submissions.
        requirements = {
            "peer": {
                "must_grade": REQUIRED_GRADED,
                "must_be_graded_by": REQUIRED_GRADED_BY,
            }
        }

        # 1) Angel Submits
        angel_sub, _ = self._create_student_and_submission("Angel", "Angel's answer")

        # 2) Angel waits for peers
        sub = peer_api.get_submission_to_assess(angel_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertIsNone(sub)
        # 3) Bob submits
        bob_sub, _ = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 4) Sally submits
        sally_sub, _ = self._create_student_and_submission("Sally", "Sally's answer")

        # 5) Sally pulls Angel's Submission but never reviews it.
        sub = peer_api.get_submission_to_assess(sally_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 6) Jim submits
        jim_sub, _ = self._create_student_and_submission("Jim", "Jim's answer")

        # 7) Jim also doesn't care about Angel and does not bother to review.
        sub = peer_api.get_submission_to_assess(jim_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(angel_sub["uuid"], sub["uuid"])

        # 8) Buffy comes along and she submits
        buffy_sub, buffy = self._create_student_and_submission("Buffy", "Buffy's answer")

        # 9) Buffy cares about Angel, but she won't get Angel's submission;
        # it's held by Bob, Sally, and Jim.
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])

        # 10) Buffy goes on to review Bob, Sally, and Jim, but needs two more.
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertIsNone(sub)

        # 11) Xander comes along and submits.
        xander_sub, xander = self._create_student_and_submission("Xander", "Xander's answer")

        # 12) Xander means well, so Xander grades Bob, Sally, and Jim, but gets
        # lazy and doesn't grade Buffy when her submission comes along.
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 13) Buffy is waiting in the wings. She pulls Xander's submission and
        # grades it.
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(xander_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 14) Spike submits.
        spike_sub, spike = self._create_student_and_submission("Spike", "Spike's answer")

        # 15) Spike reviews Bob, Sally, Jim, Buffy, and Xander.
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(bob_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(sally_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(jim_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        sub = peer_api.get_submission_to_assess(spike_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(xander_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            spike_sub["uuid"], spike["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 16) Buffy reviews Spike
        sub = peer_api.get_submission_to_assess(buffy_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(spike_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            buffy_sub["uuid"], buffy["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 17) Willow comes along and submits
        willow_sub, willow = self._create_student_and_submission("Willow", "Willow's answer")

        # 18) Willow goes to grade, and should get Buffy
        sub = peer_api.get_submission_to_assess(willow_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            willow_sub["uuid"], willow["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 19) Xander comes back and gets Buffy's submission, and grades it.
        sub = peer_api.get_submission_to_assess(xander_sub['uuid'], REQUIRED_GRADED_BY)
        self.assertEquals(buffy_sub["uuid"], sub["uuid"])
        peer_api.create_assessment(
            xander_sub["uuid"], xander["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # 20) Buffy should now have a grade.
        requirements = {
            'must_grade': REQUIRED_GRADED,
            'must_be_graded_by': REQUIRED_GRADED_BY
        }
        self.assertTrue(peer_api.is_complete(buffy_sub["uuid"], requirements))
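        # --- Not part of the original test: a minimal follow-up sketch. Assuming
        # this peer API also exposes get_score(submission_uuid, requirements), as
        # edx-ora2's peer assessment API does, Buffy's final peer score could be
        # read back using the same requirements dict passed to is_complete() above.
        buffy_score = peer_api.get_score(buffy_sub["uuid"], requirements)
        self.assertIsNotNone(buffy_score)  # expected to include "points_earned" and "points_possible"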
Example #32
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                self._create_dummy_submission(scorer_student_item)

                # Pin the scorer's peer workflow to the submission we want to score.
                # Note that we are NOT using the priority queue
                # (get_submission_to_assess) here, since we know exactly which
                # submission we want to score.
                peer_api.create_peer_workflow_item(scorer_id, submission_uuid)

                # Create the peer assessment
                assessment = {
                    'options_selected': options_selected,
                    'feedback': "  ".join(loremipsum.get_paragraphs(2))
                }
                peer_api.create_assessment(
                    submission_uuid,
                    scorer_id,
                    assessment,
                    rubric,
                    self.NUM_PEER_ASSESSMENTS
                )

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(
                submission_uuid, student_item['student_id'],
                options_selected, rubric
            )
Example #33
    def peer_assess(self, data, suffix=''):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed as this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `submission_uuid` (string): The unique identifier for the submission being assessed.
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `feedback` (unicode): Written feedback for the submission.

        Returns:
            Dict with keys "success" (bool) indicating success/failure.
            and "msg" (unicode) containing additional information if an error occurs.

        """
        # Validate the request
        if 'options_selected' not in data:
            return {'success': False, 'msg': _('Must provide options selected in the assessment')}

        if 'overall_feedback' not in data:
            return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}

        if 'criterion_feedback' not in data:
            return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            rubric_dict = {
                'criteria': self.rubric_criteria
            }

            try:
                # Create the assessment
                assessment = peer_api.create_assessment(
                    self.submission_uuid,
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    self._clean_criterion_feedback(data['criterion_feedback']),
                    data['overall_feedback'],
                    rubric_dict,
                    assessment_ui_model['must_be_graded_by']
                )

                # Emit analytics event...
                self._publish_peer_assessment_event(assessment)
            except PeerAssessmentRequestError as ex:
                return {'success': False, 'msg': ex.message}
            except PeerAssessmentInternalError as ex:
                msg = _("Internal error occurred while creating the assessment")
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Update both the workflow that the submission we're assessing
            # belongs to and our own (e.g. have we assessed enough?)
            try:
                if assessment:
                    self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
                self.update_workflow_status()
            except workflow_api.AssessmentWorkflowError:
                msg = _('Could not update workflow status.')
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {'success': True, 'msg': u''}

        else:
            return {'success': False, 'msg': _('Could not load peer assessment.')}
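
# --- Not part of the original handler: a sketch of the request payload it
# expects. The criterion names and option labels below are hypothetical
# placeholders; in practice they must match the problem's rubric.
example_data = {
    'options_selected': {u'Ideas': u'Good', u'Content': u'Excellent'},
    'criterion_feedback': {u'Ideas': u'Clear thesis, but could use more evidence.'},
    'overall_feedback': u'Solid response overall.',
}
# xblock.peer_assess(example_data) returns {'success': True, 'msg': u''} on
# success, or {'success': False, 'msg': ...} if validation or assessment fails.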
Example #34
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item,
                                                    u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item,
                                                  u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'],
                                                      1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'],
                                                    1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If Over Grading is on, this should now return Sally's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # A final over-grading pass will not return another submission to grade.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)
        self.assertIn("Peer Assessments Complete", peer_response.body)