Example #1
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        criterion_feedback = {
            "Ideas": "Dear diary: Lots of creativity from my dream journal last night at 2 AM,",
            "Content": "Not as insightful as I had thought in the wee hours of the morning!"
        }

        overall_feedback = "I think I should tell more people about how important worms are for the ecosystem."

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            criterion_feedback,
            overall_feedback,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example #2
    def test_staff_debug_student_info_peer_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertIsNone(context['self_assessment'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example #3
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(
            sally_student_item, (u"Sally's answer 1", u"Sally's answer 2"))

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(
            hal_student_item, (u"Hal's answer 1", u"Hal's answer 2"))

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(
            student_item, (u"Bob's answer 1", u"Bob's answer 2"))
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(
            submission["answer"]["parts"][0]["text"].encode('utf-8'),
            peer_response.body)
        self.assertNotIn(
            submission["answer"]["parts"][1]["text"].encode('utf-8'),
            peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body
                        or "Hal".encode('utf-8') in peer_response.body)
Example #4
 def create_peer_assessment(self, scorer_sub, scorer, sub_to_assess,
                            assessment, criteria, grading_requirements):
     """Create a peer assessment of submission sub_to_assess by scorer."""
     peer_api.create_peer_workflow_item(scorer_sub['uuid'],
                                        sub_to_assess['uuid'])
     peer_api.create_assessment(scorer_sub['uuid'], scorer,
                                assessment['options_selected'],
                                assessment['criterion_feedback'],
                                assessment['overall_feedback'],
                                {'criteria': criteria},
                                grading_requirements)
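
A hedged sketch of how a test might call this helper, assuming fixtures like the `_create_student_and_submission` helper, `ASSESSMENT_DICT`, `RUBRIC_DICT` (assumed here to carry a 'criteria' list) and `REQUIRED_GRADED_BY` used in the other examples; the test name and values are illustrative only, not part of the original suite.

 def test_assess_peer_via_helper(self):
     # Hypothetical usage: both learners submit, then Bob assesses Tim via the helper above.
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
     self.create_peer_assessment(
         bob_sub,                      # scorer's own submission
         bob["student_id"],            # scorer id
         tim_sub,                      # submission being assessed
         ASSESSMENT_DICT,              # options_selected / criterion_feedback / overall_feedback
         RUBRIC_DICT['criteria'],      # bare criteria list; the helper wraps it as {'criteria': ...}
         REQUIRED_GRADED_BY,           # grading requirement passed through to peer_api.create_assessment
     )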
Example #5
    def test_load_peer_student_view_with_dates(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, (u"Sally's answer 1", u"Sally's answer 2"))

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, (u"Hal's answer 1", u"Hal's answer 2"))

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(
            hal_submission['uuid'],
            hal_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(
            sally_submission['uuid'],
            sally_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # If Over Grading is on, this should now return Sally or Hal's response to Bob.
        submission = xblock.create_submission(student_item, (u"Bob's answer 1", u"Bob's answer 2"))
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["parts"][0]["text"].encode('utf-8'), peer_response.body)
        self.assertNotIn(submission["answer"]["parts"][1]["text"].encode('utf-8'), peer_response.body)

        # Validate Peer Rendering.
        self.assertTrue("Sally".encode('utf-8') in peer_response.body or
            "Hal".encode('utf-8') in peer_response.body)
Example #6
 def test_no_open_assessment(self):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     peer_api.create_assessment(
         bob_sub['uuid'], bob['student_id'],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         1
     )
Example #7
 def create_peer_assessment(self, scorer_sub, scorer, sub_to_assess, assessment, criteria, grading_requirements):
     """Create a peer assessment of submission sub_to_assess by scorer."""
     peer_api.create_peer_workflow_item(scorer_sub['uuid'], sub_to_assess['uuid'])
     peer_api.create_assessment(
         scorer_sub['uuid'],
         scorer,
         assessment['options_selected'],
         assessment['criterion_feedback'],
         assessment['overall_feedback'],
         {'criteria': criteria},
         grading_requirements
     )
Example #8
 def test_error_on_assessment_creation(self, mock_filter):
     mock_filter.side_effect = DatabaseError("Bad things happened")
     submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
     peer_api.create_peer_workflow(submission["uuid"])
     peer_api.create_assessment(
         submission["uuid"], STUDENT_ITEM["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
         MONDAY,
     )
Example #9
 def test_create_feedback_on_an_assessment(self):
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
     assessment = peer_api.create_assessment(
         bob_sub["uuid"],
         bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
     peer_api.create_assessment(
         tim_sub["uuid"],
         tim["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     peer_api.get_score(
         tim_sub["uuid"],
         {
             'must_grade': 1,
             'must_be_graded_by': 1
         }
     )
     feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNone(feedback)
     peer_api.set_assessment_feedback(
         {
             'submission_uuid': tim_sub['uuid'],
             'feedback_text': 'Bob is a jerk!',
             'options': [
                 'I disliked this assessment',
                 'I felt this assessment was unfair',
             ]
         }
     )
     saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
     self.assertIsNot(saved_feedback, None)
     self.assertEquals(saved_feedback['submission_uuid'], assessment['submission_uuid'])
     self.assertEquals(saved_feedback['feedback_text'], 'Bob is a jerk!')
     self.assertItemsEqual(saved_feedback['options'], [
         {'text': 'I disliked this assessment'},
         {'text': 'I felt this assessment was unfair'},
     ])
     self.assertEquals(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
Example #10
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example #11
 def test_no_submission_found_closing_assessment(self):
     """
     Confirm the appropriate error is raised when no submission is found
     open for assessment, when submitting an assessment.
     """
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
     peer_api.create_assessment(
         tim_sub["uuid"], tim["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
Example #12
    def test_assess_before_submitting(self):
        # Create a submission for another student
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)

        # Attempt to create the assessment from another student without first making a submission
        peer_api.create_assessment(
            submission["uuid"],
            "another_student",
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
            MONDAY,
        )
Example #13
 def test_error_on_get_assessment(self, mock_filter):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
     peer_api.create_assessment(
         bob_sub["uuid"], bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
         MONDAY,
     )
     mock_filter.side_effect = DatabaseError("Bad things happened")
     peer_api.get_assessments(sub["uuid"])
Example #14
 def test_get_assessments(self, assessment_dict):
     self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
     peer_api.create_assessment(
         bob_sub["uuid"],
         bob["student_id"],
         assessment_dict['options_selected'],
         assessment_dict['criterion_feedback'],
         assessment_dict['overall_feedback'],
         RUBRIC_DICT,
         REQUIRED_GRADED_BY,
     )
     assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
     self.assertEqual(1, len(assessments))
Example #15
    def test_create_assessment_with_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Creating feedback per criterion should need one additional query to update
        # for each criterion that has feedback.
        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES + 1):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'],
                ASSESSMENT_DICT['criterion_feedback'],
                ASSESSMENT_DICT['overall_feedback'],
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["feedback"], ASSESSMENT_DICT["overall_feedback"])

        # The parts are not guaranteed to be in any particular order,
        # so we need to iterate through and check them by name.
        # If we haven't explicitly set feedback for the criterion, expect
        # that it defaults to an empty string.
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_feedback = ASSESSMENT_DICT['criterion_feedback'].get(criterion_name, "")
            self.assertEqual(part['feedback'], expected_feedback)
Example #16
    def test_close_active_assessment(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            buffy_answer["uuid"], "Buffy",
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        buffy_workflow.close_active_assessment(xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = PeerWorkflowItem.objects.get(submission_uuid=xander_answer['uuid'])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example #17
 def _create_assessment(self, submission_uuid):
     """
     Creates an assessment for the given submission.
     """
     return peer_api.create_assessment(
         submission_uuid, SCORER_ID, ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'], RUBRIC_DICT, 2)
Example #18
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.create_peer_workflow(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            dict(),
            "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example #19
    def test_staff_area_student_info_peer_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")), ['peer'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        self._create_submission(tim_item, "Tim Answer", ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            dict(),
            "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNone(context['staff_assessment'])
        self.assertEquals(
            "openassessmentblock/staff_area/oa_student_info.html", path)

        # Bob still needs to assess other learners
        self.assertIsNone(context['grade_details'])
Example #20
    def _sally_and_hal_grade_each_other_helper(self, xblock):
        """
        A helper method to set up 2 submissions, one for each of Sally and Hal, and then have each assess the other.
        """
        student_item = xblock.get_student_item_dict()

        # Sally submits a response.
        sally_student_item = copy.deepcopy(student_item)
        sally_student_item["student_id"] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, (u"Sally's answer 1", u"Sally's answer 2"))

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item["student_id"] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, (u"Hal's answer 1", u"Hal's answer 2"))

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission["uuid"], 1)
        peer_api.create_assessment(
            hal_submission["uuid"],
            hal_student_item["student_id"],
            assessment["options_selected"],
            assessment["criterion_feedback"],
            assessment["overall_feedback"],
            {"criteria": xblock.rubric_criteria},
            1,
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission["uuid"], 1)
        peer_api.create_assessment(
            sally_submission["uuid"],
            sally_student_item["student_id"],
            assessment["options_selected"],
            assessment["criterion_feedback"],
            assessment["overall_feedback"],
            {"criteria": xblock.rubric_criteria},
            1,
        )
Example #21
    def _sally_and_hal_grade_each_other_helper(self, xblock):
        """
        A helper method to set up 2 submissions, one for each of Sally and Hal, and then have each assess the other.
        """
        student_item = xblock.get_student_item_dict()

        # Sally submits a response.
        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, (u"Sally's answer 1", u"Sally's answer 2"))

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, (u"Hal's answer 1", u"Hal's answer 2"))

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        peer_api.create_assessment(
            hal_submission['uuid'],
            hal_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        peer_api.create_assessment(
            sally_submission['uuid'],
            sally_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )
Example #22
    def test_peer_leases_same_submission(self):
        """
        Tests the scenario where a student pulls a peer's submission for
        assessment, lets the lease expire, then pulls the same peer's submission
        a second time.

        This creates two similar PeerWorkflowItems in the database, and when
        completing the assessment, the latest PeerWorkflowItem should be
        updated.
        """
        yesterday = timezone.now() - datetime.timedelta(days=1)
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Bob", "Bob's answer")
        self._create_student_and_submission("Sally", "Sally's answer")
        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        # And now we cheat; we want to set the clock back such that the lease
        # on this PeerWorkflowItem has expired.
        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        pwis[0].started_at = yesterday
        pwis[0].save()

        sub = peer_api.get_submission_to_assess(tim_sub['uuid'], REQUIRED_GRADED)
        self.assertEqual(u"Bob's answer", sub['answer'])

        peer_api.create_assessment(
            tim_sub["uuid"], tim["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        pwis = PeerWorkflowItem.objects.filter(submission_uuid=sub['uuid'])
        self.assertEqual(len(pwis), 1)
        self.assertNotEqual(pwis[0].started_at, yesterday)
Example #23
    def test_staff_area_student_info_peer_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['peer']
        )

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        self._create_submission(tim_item, "Tim Answer", ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNone(context['staff_assessment'])
        self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)

        # Bob still needs to assess other learners
        self.assertIsNone(context['grade_details'])
Example #24
 def _create_assessment(self, submission_uuid):
     """
     Creates an assessment for the given submission.
     """
     return peer_api.create_assessment(
         submission_uuid,
         "scorer",
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         2
     )
Example #25
 def test_has_finished_evaluation(self):
     """
     Verify unfinished assessments do not get counted when determining a
     complete workflow.
     """
     tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
     bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
     sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
     self.assertEqual(sub["uuid"], tim_sub["uuid"])
     finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
     self.assertFalse(finished)
     self.assertEqual(count, 0)
     peer_api.create_assessment(
         bob_sub["uuid"], bob["student_id"],
         ASSESSMENT_DICT['options_selected'],
         ASSESSMENT_DICT['criterion_feedback'],
         ASSESSMENT_DICT['overall_feedback'],
         RUBRIC_DICT,
         1,
     )
     finished, count = peer_api.has_finished_required_evaluating(bob_sub['uuid'], 1)
     self.assertTrue(finished)
     self.assertEqual(count, 1)
Example #26
    def test_create_assessment_points(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        with self.assertNumQueries(self.CREATE_ASSESSMENT_NUM_QUERIES):
            assessment = peer_api.create_assessment(
                bob_sub["uuid"],
                bob["student_id"],
                ASSESSMENT_DICT['options_selected'], dict(), "",
                RUBRIC_DICT,
                REQUIRED_GRADED_BY,
            )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
Example #27
    def test_get_max_scores(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"], bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            1
        )

        max_scores = peer_api.get_rubric_max_scores(sub["uuid"])
        self.assertEqual(max_scores['secret'], 1)
        self.assertEqual(max_scores['giveup'], 10)
Example #28
    def test_get_submitted_assessments(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        self.assertEqual(assessment["points_earned"], 6)
        self.assertEqual(assessment["points_possible"], 14)
        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
        self.assertEqual(0, len(submitted_assessments))

        submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
        self.assertEqual(1, len(submitted_assessments))
Example #29
    def test_create_assessment_unknown_criterion_feedback(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Create an assessment where the criterion feedback uses
        # a criterion name that isn't in the rubric.
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'unknown': 'Unknown criterion has feedback!'},
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The criterion feedback should be ignored
        for part_num in range(3):
            self.assertEqual(assessment["parts"][part_num]["feedback"], "")
Example #30
    def test_create_huge_overall_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge overall feedback text
        assessment_dict = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            dict(),
            ASSESSMENT_DICT_HUGE['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # The assessment feedback text should be truncated
        self.assertEqual(len(assessment_dict['feedback']), Assessment.MAXSIZE)

        # The length of the feedback text in the database should
        # equal what we got from the API.
        assessment = Assessment.objects.get()
        self.assertEqual(len(assessment.feedback), Assessment.MAXSIZE)
Example #31
    def test_create_huge_per_criterion_feedback_error(self):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        peer_api.get_submission_to_assess(bob_sub['uuid'], 1)

        # Huge per-criterion feedback text
        assessment = peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT_HUGE['options_selected'],
            ASSESSMENT_DICT_HUGE['criterion_feedback'],
            "",
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )

        # Verify that the feedback has been truncated
        for part in assessment['parts']:
            self.assertEqual(len(part['feedback']), Assessment.MAXSIZE)

        # Verify that the feedback in the database matches what we got back from the API
        for part in AssessmentPart.objects.all():
            self.assertEqual(len(part.feedback), Assessment.MAXSIZE)
Example #32
    def peer_assess(self, data, suffix=''):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed as this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `submission_uuid` (string): The unique identifier for the submission being assessed.
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `overall_feedback` (unicode): Written feedback for the submission as a whole.
                `criterion_feedback` (dict): Written feedback per criterion for the submission.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.

        """
        # Validate the request
        if 'options_selected' not in data:
            return {'success': False, 'msg': self._('Must provide options selected in the assessment')}

        if 'overall_feedback' not in data:
            return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}

        if 'criterion_feedback' not in data:
            return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}

        if self.submission_uuid is None:
            return {'success': False, 'msg': self._('You must submit a response before you can peer-assess.')}

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            try:
                # Create the assessment
                assessment = peer_api.create_assessment(
                    self.submission_uuid,
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    clean_criterion_feedback(self.rubric_criteria_with_labels, data['criterion_feedback']),
                    data['overall_feedback'],
                    create_rubric_dict(self.prompt, self.rubric_criteria_with_labels),
                    assessment_ui_model['must_be_graded_by']
                )

                # Emit analytics event...
                self.publish_assessment_event("openassessmentblock.peer_assess", assessment)

            except (PeerAssessmentRequestError, PeerAssessmentWorkflowError):
                logger.warning(
                    u"Peer API error for submission UUID {}".format(self.submission_uuid),
                    exc_info=True
                )
                return {'success': False, 'msg': self._(u"Your peer assessment could not be submitted.")}
            except PeerAssessmentInternalError:
                logger.exception(
                    u"Peer API internal error for submission UUID: {}".format(self.submission_uuid)
                )
                msg = self._("Your peer assessment could not be submitted.")
                return {'success': False, 'msg': msg}

            # Update both the workflow that the submission we're assessing
            # belongs to, as well as our own (e.g. have we evaluated enough?)
            try:
                if assessment:
                    self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
                self.update_workflow_status()
            except AssessmentWorkflowError:
                logger.exception(
                    u"Workflow error occurred when submitting peer assessment "
                    u"for submission {}".format(self.submission_uuid)
                )
                msg = self._('Could not update workflow status.')
                return {'success': False, 'msg': msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {'success': True, 'msg': u''}

        else:
            return {'success': False, 'msg': self._('Could not load peer assessment.')}
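
Based on the validation at the top of peer_assess above, a minimal sketch of the payload the handler expects from the client; only the keys are checked by the handler, and the criterion/option names below are taken from the earlier examples purely as placeholders.

    # Illustrative peer_assess() payload; the criterion and option names are hypothetical.
    data = {
        'options_selected': {'Ideas': 'Good', 'Content': 'Poor'},
        'criterion_feedback': {'Ideas': 'Creative response.'},
        'overall_feedback': 'Clear and well organized overall.',
    }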
Example #33
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'
            )

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id)

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)

                # Create the peer assessment
                peer_api.create_assessment(
                    scorer_submission_uuid, scorer_id, options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric,
                    self.NUM_PEER_ASSESSMENTS)

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(submission_uuid,
                                       student_item['student_id'],
                                       options_selected, rubric)
Example #34
    def peer_assess(self, data, suffix=''):  # pylint: disable=unused-argument
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed as this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `submission_uuid` (string): The unique identifier for the submission being assessed.
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `overall_feedback` (unicode): Written feedback for the submission as a whole.
                `criterion_feedback` (dict): Written feedback per criterion for the submission.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.

        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        if self.submission_uuid is None:
            return {
                'success': False, 'msg': self._('You must submit a response before you can perform a peer assessment.')
            }

        uuid_server, uuid_client = self._get_server_and_client_submission_uuids(data)
        if uuid_server != uuid_client:
            logger.warning(
                'Irrelevant assessment submission: expected "{uuid_server}", got "{uuid_client}"'.format(
                    uuid_server=uuid_server,
                    uuid_client=uuid_client,
                )
            )
            return {
                'success': False,
                'msg': self._('This feedback has already been submitted or the submission has been cancelled.'),
            }

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            try:
                # Create the assessment
                assessment = peer_api.create_assessment(
                    self.submission_uuid,
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    clean_criterion_feedback(self.rubric_criteria_with_labels, data['criterion_feedback']),
                    data['overall_feedback'],
                    create_rubric_dict(self.prompts, self.rubric_criteria_with_labels),
                    assessment_ui_model['must_be_graded_by']
                )

                # Emit analytics event...
                self.publish_assessment_event("openassessmentblock.peer_assess", assessment)

            except (PeerAssessmentRequestError, PeerAssessmentWorkflowError):
                logger.warning(
                    "Peer API error for submission UUID {}".format(self.submission_uuid),
                    exc_info=True
                )
                return {'success': False, 'msg': self._("Your peer assessment could not be submitted.")}
            except PeerAssessmentInternalError:
                logger.exception(
                    "Peer API internal error for submission UUID: {}".format(self.submission_uuid)
                )
                msg = self._("Your peer assessment could not be submitted.")
                return {'success': False, 'msg': msg}

            # Update both the workflow that the submission we're assessing
            # belongs to, as well as our own (e.g. have we evaluated enough?)
            try:
                if assessment:
                    self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
                self.update_workflow_status()
            except AssessmentWorkflowError:
                logger.exception(
                    "Workflow error occurred when submitting peer assessment "
                    "for submission {}".format(self.submission_uuid)
                )
                msg = self._('Could not update workflow status.')
                return {'success': False, 'msg': msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {'success': True, 'msg': ''}

        return {'success': False, 'msg': self._('Could not load peer assessment.')}
Example #35
    def _create_submission_and_assessments(
        self,
        xblock,
        submission_text,
        peers,
        peer_assessments,
        self_assessment,
        waiting_for_peer=False,
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Keyword Arguments:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer,
                                                   {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)

            submission = peer_api.get_submission_to_assess(
                scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module(
                        'peer-assessment')['must_be_graded_by'])

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'], student_id, asmnt['options_selected'],
                asmnt['criterion_feedback'], asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module(
                    'peer-assessment')['must_be_graded_by'])

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(submission['uuid'], student_id,
                                       self_assessment['options_selected'],
                                       self_assessment['criterion_feedback'],
                                       self_assessment['overall_feedback'],
                                       {'criteria': xblock.rubric_criteria})
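
For reference, the assessment dicts this helper consumes need exactly the three keys read in the loops above. A hedged sketch of the expected shape (names and rubric choices are placeholders, not project fixtures):

peer_assessment = {
    'options_selected': {'Ideas': 'Fair', 'Content': 'Good'},
    'criterion_feedback': {},
    'overall_feedback': u'Solid work overall.',
}
# Inside a test method one might then call:
#     self._create_submission_and_assessments(
#         xblock, u"My answer",
#         peers=[u'Alice', u'Carlos'],
#         peer_assessments=[peer_assessment, peer_assessment],
#         self_assessment=peer_assessment,
#     )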
Example #38
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item, u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item, u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'], 1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(
            hal_submission['uuid'],
            hal_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'], 1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(
            sally_submission['uuid'],
            sally_student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # If over-grading is on, this should now return Sally's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        peer_api.create_assessment(
            submission['uuid'],
            student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)

        peer_api.create_assessment(
            submission['uuid'],
            student_item['student_id'],
            assessment['options_selected'],
            assessment['criterion_feedback'],
            assessment['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
            1
        )

        # A final over-grading request will not return another submission to assess.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'), peer_response.body)
        self.assertIn("Peer Assessments Complete", peer_response.body)
Example #39
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)

                # Create the peer assessment
                peer_api.create_assessment(
                    scorer_submission_uuid,
                    scorer_id,
                    options_selected, {}, "  ".join(loremipsum.get_paragraphs(2)),
                    rubric,
                    self.NUM_PEER_ASSESSMENTS
                )

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(
                submission_uuid, student_item['student_id'],
                options_selected, rubric
            )
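
As the usage string indicates, the command takes three positional arguments. A hedged sketch of invoking it programmatically via Django's call_command (the course and item IDs below are placeholders):

from django.core.management import call_command

call_command(
    'create_oa_submissions',
    'edX/Demo/2014',                          # COURSE_ID
    'i4x://edX/Demo/openassessment/example',  # ITEM_ID
    '5',                                      # NUM_SUBMISSIONS
)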
Example #40
    def test_turbo_grading(self, xblock):
        student_item = xblock.get_student_item_dict()

        sally_student_item = copy.deepcopy(student_item)
        sally_student_item['student_id'] = "Sally"
        sally_submission = xblock.create_submission(sally_student_item,
                                                    u"Sally's answer")

        # Hal comes and submits a response.
        hal_student_item = copy.deepcopy(student_item)
        hal_student_item['student_id'] = "Hal"
        hal_submission = xblock.create_submission(hal_student_item,
                                                  u"Hal's answer")

        # Now Hal will assess Sally.
        assessment = copy.deepcopy(self.ASSESSMENT)
        sally_sub = peer_api.get_submission_to_assess(hal_submission['uuid'],
                                                      1)
        assessment['submission_uuid'] = sally_sub['uuid']
        peer_api.create_assessment(hal_submission['uuid'],
                                   hal_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Now Sally will assess Hal.
        assessment = copy.deepcopy(self.ASSESSMENT)
        hal_sub = peer_api.get_submission_to_assess(sally_submission['uuid'],
                                                    1)
        assessment['submission_uuid'] = hal_sub['uuid']
        peer_api.create_assessment(sally_submission['uuid'],
                                   sally_student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # If over-grading is on, this should now return Sally's response to Bob.
        submission = xblock.create_submission(student_item, u"Bob's answer")
        workflow_info = xblock.get_workflow_info()
        self.assertEqual(workflow_info["status"], u'peer')

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # Validate Submission Rendering.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)

        peer_api.create_assessment(submission['uuid'],
                                   student_item['student_id'],
                                   assessment['options_selected'],
                                   assessment['criterion_feedback'],
                                   assessment['overall_feedback'],
                                   {'criteria': xblock.rubric_criteria}, 1)

        # A final over-grading request will not return another submission to assess.
        request = namedtuple('Request', 'params')
        request.params = {'continue_grading': True}
        peer_response = xblock.render_peer_assessment(request)
        self.assertIsNotNone(peer_response)
        self.assertNotIn(submission["answer"]["text"].encode('utf-8'),
                         peer_response.body)
        self.assertIn("Peer Assessments Complete", peer_response.body)
Example #41
    def _create_submission_and_assessments(
        self, xblock, submission_text, peers, peer_assessments, self_assessment,
        waiting_for_peer=False,
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Keyword Arguments:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)

            submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                )

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'],
                student_id,
                asmnt['options_selected'],
                asmnt['criterion_feedback'],
                asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
            )

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(
                submission['uuid'], student_id, self_assessment['options_selected'],
                self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
                {'criteria': xblock.rubric_criteria}
            )
Example #42
    def peer_assess(self, data, suffix=''):
        """Place a peer assessment into OpenAssessment system

        Assess a Peer Submission.  Performs basic workflow validation to ensure
        that an assessment can be performed as this time.

        Args:
            data (dict): A dictionary containing information required to create
                a new peer assessment.  This dict should have the following attributes:
                `submission_uuid` (string): The unique identifier for the submission being assessed.
                `options_selected` (dict): Dictionary mapping criterion names to option values.
                `feedback` (unicode): Written feedback for the submission.

        Returns:
            Dict with keys "success" (bool) indicating success/failure.
            and "msg" (unicode) containing additional information if an error occurs.

        """
        # Validate the request
        if 'options_selected' not in data:
            return {'success': False, 'msg': _('Must provide options selected in the assessment')}

        if 'overall_feedback' not in data:
            return {'success': False, 'msg': _('Must provide overall feedback in the assessment')}

        if 'criterion_feedback' not in data:
            return {'success': False, 'msg': _('Must provide feedback for criteria in the assessment')}

        assessment_ui_model = self.get_assessment_module('peer-assessment')
        if assessment_ui_model:
            rubric_dict = {
                'criteria': self.rubric_criteria
            }

            try:
                # Create the assessment
                assessment = peer_api.create_assessment(
                    self.submission_uuid,
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    self._clean_criterion_feedback(data['criterion_feedback']),
                    data['overall_feedback'],
                    rubric_dict,
                    assessment_ui_model['must_be_graded_by']
                )

                # Emit analytics event...
                self._publish_peer_assessment_event(assessment)
            except PeerAssessmentRequestError as ex:
                return {'success': False, 'msg': ex.message}
            except PeerAssessmentInternalError as ex:
                msg = _("Internal error occurred while creating the assessment")
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Update both the workflow that the submission we're assessing
            # belongs to, as well as our own (e.g. have we evaluated enough?)
            try:
                if assessment:
                    self.update_workflow_status(submission_uuid=assessment['submission_uuid'])
                self.update_workflow_status()
            except workflow_api.AssessmentWorkflowError:
                msg = _('Could not update workflow status.')
                logger.exception(msg)
                return {'success': False, 'msg': msg}

            # Temp kludge until we fix JSON serialization for datetime
            assessment["scored_at"] = str(assessment["scored_at"])

            return {'success': True, 'msg': u''}

        else:
            return {'success': False, 'msg': _('Could not load peer assessment.')}
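
The three validation checks at the top of the handler define the minimum shape of the incoming data dict. An illustrative payload (criterion names and option labels are placeholders):

data = {
    'options_selected': {'Ideas': 'Fair', 'Content': 'Good'},
    'criterion_feedback': {'Ideas': u'Creative framing.'},
    'overall_feedback': u'Well argued overall.',
}
# On success the handler returns {'success': True, 'msg': u''};
# a missing key short-circuits with {'success': False, 'msg': ...}.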
Example #43
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
            percentage (int or float): Percentage for assessments to be made against submissions.
        """
        if len(args) < 4:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'
            )

        course_id = str(args[0])
        item_id = str(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        try:
            percentage = float(args[3])
            assessments_to_create = (percentage / 100) * num_submissions
        except ValueError:
            raise CommandError(
                'Percentage for completed submissions must be an integer or float'
            )

        print(u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id))

        assessments_created = 0

        for sub_num in range(num_submissions):

            print(u"Creating submission {num}".format(num=sub_num))

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print(u"-- Creating peer-workflow {num}".format(num=num))

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)
                if assessments_created < assessments_to_create:
                    print(u"-- Creating peer-assessment {num}".format(num=num))
                    # Create the peer assessment
                    peer_api.create_assessment(
                        scorer_submission_uuid, scorer_id, options_selected,
                        {}, "  ".join(loremipsum.get_paragraphs(2)), rubric,
                        self.NUM_PEER_ASSESSMENTS)
            assessments_created += 1

            if self.self_assessment_required:
                # Create a self-assessment
                print(u"-- Creating self assessment")
                self_api.create_assessment(
                    submission_uuid, student_item['student_id'],
                    options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric)
        print(u"%s assessments being completed for %s submissions" %
              (assessments_created, num_submissions))
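
The percentage argument controls how many of the generated submissions actually receive peer assessments, via the comparison against assessments_to_create above. A quick worked example of that arithmetic:

num_submissions = 10
percentage = 50.0
assessments_to_create = (percentage / 100) * num_submissions  # 5.0
# With these inputs the first five submissions are peer-assessed;
# the remaining five only get peer workflow items created for them.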