    def test_staff_delete_student_state(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, 'Bob'
        )
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(bob_item, {'text': "Bob Answer"}, ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        request = namedtuple('Request', 'params')
        request.params = {"student_username": '******'}
        # Verify that we can see the student's grade
        resp = xblock.render_student_info(request)
        self.assertIn("final grade", resp.body.lower())

        # Staff user Bob can clear his own submission
        xblock.clear_student_state('Bob', 'test_course', xblock.scope_ids.usage_id, bob_item['student_id'])

        # Verify that the submission was cleared
        resp = xblock.render_student_info(request)
        self.assertIn("response was not found", resp.body.lower())
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        criterion_feedback = {
            "Ideas": "Dear diary: Lots of creativity from my dream journal last night at 2 AM,",
            "Content": "Not as insightful as I had thought in the wee hours of the morning!"
        }

        overall_feedback = "I think I should tell more people about how important worms are for the ecosystem."

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            criterion_feedback,
            overall_feedback,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
    def test_staff_area_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['self']
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNone(context['peer_assessments'])
        self.assertIsNotNone(context['self_assessment'])
        self.assertIsNone(context['staff_assessment'])
        self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)

        grade_details = context['grade_details']
        self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
        self.assertEquals('Self Assessment Grade', grade_details['criteria'][0]['assessments'][0]['title'])
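
The tests above (and most of the examples below) lean on shared module-level fixtures that are not shown on this page: STUDENT_ITEM describes the submitter, and ASSESSMENT_DICT bundles the arguments passed to self_api.create_assessment. A minimal sketch of what such fixtures might look like, inferred from how the snippets use them; the concrete values are illustrative, not the project's actual test data:

# Illustrative fixtures only; the real values live in the test modules these snippets come from.
STUDENT_ITEM = {
    'student_id': 'Bob',            # the submitter/scorer referenced throughout
    'item_id': 'item_one',          # each test overwrites this with xblock.scope_ids.usage_id
    'course_id': 'test_course',
    'item_type': 'openassessment',
}

# Keys mirror the positional arguments passed to self_api.create_assessment above.
ASSESSMENT_DICT = {
    'options_selected': {'Ideas': 'Good', 'Content': 'Poor'},
    'criterion_feedback': {'Ideas': 'Feedback on ideas', 'Content': 'Feedback on content'},
    'overall_feedback': 'Overall feedback on the response',
}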
Example #5
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
        )
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example #7
    def test_completed_and_past_due(self, xblock):
        # Simulate having completed self assessment
        # Even though the problem is closed, we should still see
        # that we completed the step.
        submission = xblock.create_submission(
            xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
        )
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'],
            {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            {}, "Good job!",
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria)
        )

        # This case probably isn't possible, because presumably when we create
        # the self-assessment, the workflow status will be "waiting" or "done".
        # We're checking it anyway to be overly defensive: if the user has made a self-assessment,
        # we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
        self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html',
            {'self_due': datetime.datetime(2000, 1, 1).replace(tzinfo=pytz.utc), 'allow_latex': False},
            workflow_status='self',
            submission_uuid=submission['uuid']
        )
Example #8
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # and we've created a self-assessment
     submission = xblock.create_submission(
         xblock.get_student_item_dict(), (u"Đøɨn' ɨŧ ŧø đɇȺŧħ 1", u"Đøɨn' ɨŧ ŧø đɇȺŧħ 2")
     )
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'],
         {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
         {}, "Good job!",
         create_rubric_dict(xblock.prompts, xblock.rubric_criteria)
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/self/oa_self_complete.html',
         {
             'allow_multiple_files': True,
             'allow_latex': False,
             'prompts_type': 'text',
             'user_timezone': pytz.utc,
             'user_language': 'en'
         },
         workflow_status='self',
         submission_uuid=submission['uuid']
     )
Example #9
    def test_completed_and_past_due(self, xblock):
        # Simulate having completed self assessment
        # Even though the problem is closed, we should still see
        # that we completed the step.
        submission = xblock.create_submission(xblock.get_student_item_dict(),
                                              u"Đøɨn' ɨŧ ŧø đɇȺŧħ")
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'], {
                u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
                u'Form': u'Fair'
            }, {}, "Good job!",
            create_rubric_dict(xblock.prompt, xblock.rubric_criteria))

        # This case probably isn't possible, because presumably when we create
        # the self-assessment, the workflow status will be "waiting" or "done".
        # We're checking it anyway to be overly defensive: if the user has made a self-assessment,
        # we ALWAYS show complete, even if the workflow tells us we still have status 'self'.
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/self/oa_self_complete.html', {
                'self_due': datetime.datetime(2000, 1,
                                              1).replace(tzinfo=pytz.utc)
            },
            workflow_status='self',
            submission_uuid=submission['uuid'])
Example #10
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example #11
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'],
            u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED,
            self.CRITERION_FEEDBACK,
            self.OVERALL_FEEDBACK,
            self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'],
                u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED,
                self.CRITERION_FEEDBACK,
                self.OVERALL_FEEDBACK,
                self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        six.assertCountEqual(self, assessment, retrieved)
Example #12
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer",
                          context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html",
                          path)
Example #13
    def test_staff_area_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")))
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_area/student_info.html",
                          path)
Example #14
 def test_create_assessment_no_submission(self):
     # Attempt to create a self-assessment for a submission that doesn't exist
     with self.assertRaises(SelfAssessmentRequestError):
         create_assessment('invalid_submission_uuid',
                           u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                           self.OPTIONS_SELECTED,
                           self.RUBRIC,
                           scored_at=datetime.datetime(2014, 4, 1))
Example #15
 def test_create_assessment_no_submission(self):
     # Attempt to create a self-assessment for a submission that doesn't exist
     with self.assertRaises(SelfAssessmentRequestError):
         create_assessment(
             'deadbeef-1234-5678-9100-1234deadbeef', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
             self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
             scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
         )
Example #17
 def create_self_assessment(self, submission, student_id, assessment,
                            criteria):
     """Submit a self assessment using the information given."""
     self_api.create_assessment(submission['uuid'], student_id,
                                assessment['options_selected'],
                                assessment['criterion_feedback'],
                                assessment['overall_feedback'],
                                {'criteria': criteria})
Example #18
 def test_create_assessment_no_submission(self):
     # Attempt to create a self-assessment for a submission that doesn't exist
     with self.assertRaises(SelfAssessmentRequestError):
         create_assessment(
             'invalid_submission_uuid', u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
             self.OPTIONS_SELECTED, self.RUBRIC,
             scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
         )
Example #19
 def create_self_assessment(self, submission, student_id, assessment, criteria):
     """Submit a self assessment using the information given."""
     self_api.create_assessment(
         submission['uuid'],
         student_id,
         assessment['options_selected'],
         assessment['criterion_feedback'],
         assessment['overall_feedback'],
         {'criteria': criteria}
     )
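
Both helper variants above simply unpack a pre-built assessment dict into self_api.create_assessment. A hedged usage sketch, assuming the illustrative fixtures from earlier and a scenario object that defines this helper; the names here are placeholders, not the project's API:

# Hypothetical usage of the create_self_assessment helper; 'scenario' stands in
# for whatever test case or page object mixes in the helper above.
submission = sub_api.create_submission(STUDENT_ITEM, {'text': "Bob Answer"})
scenario.create_self_assessment(
    submission,
    STUDENT_ITEM['student_id'],
    ASSESSMENT_DICT,           # supplies options_selected / criterion_feedback / overall_feedback
    xblock.rubric_criteria,    # wrapped as {'criteria': ...} inside the helper
)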
Example #20
    def test_create_assessment_wrong_user(self):
        # Create a submission
        create_submission(self.STUDENT_ITEM, "Test answer")

        # Attempt to create a self-assessment for the submission from a different user
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                'deadbeef-1234-5678-9100-1234deadbeef', u'another user',
                self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #21
    def test_create_assessment_wrong_user(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Attempt to create a self-assessment for the submission from a different user
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                'invalid_submission_uuid', u'another user',
                self.OPTIONS_SELECTED, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #22
    def test_create_assessment_wrong_user(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Attempt to create a self-assessment for the submission from a different user
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                'deadbeef-1234-5678-9100-1234deadbeef', u'another user',
                self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #23
    def test_create_assessment_wrong_user(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Attempt to create a self-assessment for the submission from a different user
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment('invalid_submission_uuid',
                              u'another user',
                              self.OPTIONS_SELECTED,
                              self.RUBRIC,
                              scored_at=datetime.datetime(2014, 4, 1))
Example #24
    def test_create_assessment_database_error(self, mock_complete_assessment):
        mock_complete_assessment.side_effect = DatabaseError

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        with self.assertRaises(SelfAssessmentInternalError):
            # Create a self-assessment for the submission
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example #26
    def test_create_assessment_invalid_criterion_feedback(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the criterion feedback to not include all the appropriate criteria.
        criterion_feedback = {"clarify": "not", "accurate": "sure"}

        # Attempt to create a self-assessment with criterion_feedback that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, criterion_feedback, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #28
    def test_create_assessment_missing_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Delete one of the criterion that's present in the rubric
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        del options['clarity']

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #29
    def test_create_assessment_invalid_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option criterion so it does not match a criterion in the rubric
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['invalid criterion'] = 'very clear'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(submission['uuid'],
                              u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                              options,
                              self.RUBRIC,
                              scored_at=datetime.datetime(2014, 4, 1))
Example #30
    def test_create_assessment_invalid_option(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option so the value does not match an available option
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['clarity'] = 'invalid option'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #31
    def test_create_assessment_invalid_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option criterion so it does not match a criterion in the rubric
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['invalid criterion'] = 'very clear'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #32
    def test_create_assessment_missing_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Delete one of the criterion that's present in the rubric
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        del options['clarity']

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #33
    def test_create_assessment_invalid_option(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option so the value does not match an available option
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['clarity'] = 'invalid option'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                options, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #34
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'],
            u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED,
            self.CRITERION_FEEDBACK,
            self.OVERALL_FEEDBACK,
            self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
Example #35
    def test_create_assessment_criterion_with_zero_options(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Modify the rubric to include a criterion with no options (only written feedback)
        rubric = copy.deepcopy(self.RUBRIC)
        rubric['criteria'].append({
            "name": "feedback only",
            "prompt": "feedback only",
            "options": []
        })

        criterion_feedback = copy.deepcopy(self.CRITERION_FEEDBACK)
        criterion_feedback['feedback only'] = "This is the feedback for the Zero Option Criterion."

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, criterion_feedback, self.OVERALL_FEEDBACK, rubric,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # The self-assessment should have recorded the written feedback
        # for the criterion with no options, and left its option unset
        self.assertEqual(assessment["parts"][2]["option"], None)
        self.assertEqual(assessment["parts"][2]["feedback"], u"This is the feedback for the Zero Option Criterion.")
Example #36
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # and we've created a self-assessment
     submission = xblock.create_submission(xblock.get_student_item_dict(),
                                           u"Đøɨn' ɨŧ ŧø đɇȺŧħ")
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'], {
             u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
             u'Form': u'Fair'
         }, {'criteria': xblock.rubric_criteria})
     self._assert_path_and_context(
         xblock,
         'openassessmentblock/self/oa_self_complete.html', {},
         workflow_status='self',
         submission_uuid=submission['uuid'])
Example #37
    def test_create_assessment_all_criteria_have_zero_options(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Use a rubric with only criteria with no options (only written feedback)
        rubric = copy.deepcopy(self.RUBRIC)
        for criterion in rubric["criteria"]:
            criterion["options"] = []

        # Create a self-assessment for the submission
        # We don't select any options, since none of the criteria have options
        options_selected = {}

        # However, because they don't have options, they need to have criterion feedback.
        criterion_feedback = {
            'clarity': 'I thought it was about as accurate as Scrubs is to the medical profession.',
            'accuracy': 'I thought it was about as accurate as Scrubs is to the medical profession.'
        }

        overall_feedback = ""

        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            options_selected, criterion_feedback, overall_feedback,
            rubric, scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # The self-assessment should have recorded the same written feedback
        # for every criterion, and left each option unset.
        for part in assessment["parts"]:
            self.assertEqual(part["option"], None)
            self.assertEqual(
                part["feedback"], u'I thought it was about as accurate as Scrubs is to the medical profession.'
            )
Example #38
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertCountEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'' + self.OVERALL_FEEDBACK)
        self.assertEqual(assessment['score_type'], u'SE')
Example #39
    def test_create_assessment_all_criteria_have_zero_options(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Use a rubric with only criteria with no options (only written feedback)
        rubric = copy.deepcopy(self.RUBRIC)
        for criterion in rubric["criteria"]:
            criterion["options"] = []

        # Create a self-assessment for the submission
        # We don't select any options, since none of the criteria have options
        options_selected = {}

        # However, because they don't have options, they need to have criterion feedback.
        criterion_feedback = {
            'clarity': 'I thought it was about as accurate as Scrubs is to the medical profession.',
            'accuracy': 'I thought it was about as accurate as Scrubs is to the medical profession.'
        }

        overall_feedback = ""

        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            options_selected, criterion_feedback, overall_feedback,
            rubric, scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # The self-assessment should have recorded the same written feedback
        # for every criterion, and left each option unset.
        for part in assessment["parts"]:
            self.assertEqual(part["option"], None)
            self.assertEqual(
                part["feedback"], u'I thought it was about as accurate as Scrubs is to the medical profession.'
            )
Example #41
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertItemsEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'')
        self.assertEqual(assessment['score_type'], u'SE')
Example #42
 def test_open_completed_self_assessment(self, xblock):
     # Simulate the workflow being in the self assessment step
     # and we've created a self-assessment
     submission = xblock.create_submission(
         xblock.get_student_item_dict(), u"Đøɨn' ɨŧ ŧø đɇȺŧħ"
     )
     self_api.create_assessment(
         submission['uuid'],
         xblock.get_student_item_dict()['student_id'],
         {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
         {'criteria': xblock.rubric_criteria}
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/self/oa_self_complete.html', {},
         workflow_status='self',
         submission_uuid=submission['uuid']
     )
Example #43
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.create_peer_workflow(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            dict(),
            "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example #44
    def self_assess(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """

        if self.submission_uuid is None:
            return {
                'success': False,
                'msg': self._("You must submit a response before you can perform a self-assessment.")
            }

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'],
                clean_criterion_feedback(self.rubric_criteria,
                                         data['criterion_feedback']),
                data['overall_feedback'],
                create_rubric_dict(self.prompts,
                                   self.rubric_criteria_with_labels))
            self.publish_assessment_event("openassessmentblock.self_assess",
                                          assessment)

            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except (self_api.SelfAssessmentRequestError,
                workflow_api.AssessmentWorkflowRequestError):
            logger.warning(
                "An error occurred while submitting a self assessment "
                "for the submission %s",
                self.submission_uuid,
                exc_info=True)
            msg = self._("Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        except (self_api.SelfAssessmentInternalError,
                workflow_api.AssessmentWorkflowInternalError):
            logger.exception(
                "An error occurred while submitting a self assessment "
                "for the submission %s",
                self.submission_uuid,
            )
            msg = self._("Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': ""}
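
self_assess is an XBlock JSON handler, so in production the data dict arrives as the JSON body of a POST to the block's 'self_assess' handler URL. A minimal sketch of a request body that satisfies the docstring above and the key checks in the later variants; the criterion names are placeholders and must match the block's rubric:

import json

# Illustrative payload for the 'self_assess' handler; criterion names are placeholders.
payload = {
    'options_selected': {'Ideas': 'Good', 'Content': 'Poor'},
    'criterion_feedback': {'Ideas': 'Clear thesis.', 'Content': 'Needs sources.'},
    'overall_feedback': 'Solid first draft.',
}
body = json.dumps(payload)  # sent by the front end to the block's handler URL

# On success the handler responds with {'success': True, 'msg': ''};
# on failure, 'success' is False and 'msg' carries a translated error message.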
Example #45
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'options_selected' not in data:
            return {
                'success': False,
                'msg': _(u"Missing options_selected key in request")
            }

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'], {"criteria": self.rubric_criteria})
            self.runtime.publish(
                self, "openassessmentblock.self_assess", {
                    "feedback": assessment["feedback"],
                    "rubric": {
                        "content_hash": assessment["rubric"]["content_hash"],
                    },
                    "scorer_id": assessment["scorer_id"],
                    "score_type": assessment["score_type"],
                    "scored_at": assessment["scored_at"],
                    "submission_uuid": assessment["submission_uuid"],
                    "parts": [{
                        "option": {
                            "name": part["option"]["name"],
                            "points": part["option"]["points"]
                        }
                    } for part in assessment["parts"]]
                })
            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except self_api.SelfAssessmentRequestError as ex:
            msg = _(u"Could not create self assessment: {error}").format(
                error=ex)
            return {'success': False, 'msg': msg}
        except workflow_api.AssessmentWorkflowError as ex:
            msg = _(u"Could not update workflow: {error}").format(error=ex)
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'options_selected' not in data:
            return {'success': False, 'msg': _(u"Missing options_selected key in request")}

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'],
                {"criteria": self.rubric_criteria}
            )
            self.runtime.publish(
                self,
                "openassessmentblock.self_assess",
                {
                    "feedback": assessment["feedback"],
                    "rubric": {
                        "content_hash": assessment["rubric"]["content_hash"],
                    },
                    "scorer_id": assessment["scorer_id"],
                    "score_type": assessment["score_type"],
                    "scored_at": assessment["scored_at"],
                    "submission_uuid": assessment["submission_uuid"],
                    "parts": [
                        {
                            "option": {
                                "name": part["option"]["name"],
                                "points": part["option"]["points"]
                            }
                        }
                        for part in assessment["parts"]
                    ]
                }
            )
            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except self_api.SelfAssessmentRequestError as ex:
            msg = _(u"Could not create self assessment: {error}").format(error=ex)
            return {'success': False, 'msg': msg}
        except workflow_api.AssessmentWorkflowError as ex:
            msg = _(u"Could not update workflow: {error}").format(error=ex)
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'options_selected' not in data:
            return {'success': False, 'msg': self._(u"Missing options_selected key in request")}

        if 'overall_feedback' not in data:
            return {'success': False, 'msg': self._('Must provide overall feedback in the assessment')}

        if 'criterion_feedback' not in data:
            return {'success': False, 'msg': self._('Must provide feedback for criteria in the assessment')}

        if self.submission_uuid is None:
            return {'success': False, 'msg': self._(u"You must submit a response before you can perform a self-assessment.")}

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'],
                clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
                data['overall_feedback'],
                create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            )
            self.publish_assessment_event("openassessmentblock.self_assess", assessment)

            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except (self_api.SelfAssessmentRequestError, workflow_api.AssessmentWorkflowRequestError):
            logger.warning(
                u"An error occurred while submitting a self assessment "
                u"for the submission {}".format(self.submission_uuid),
                exc_info=True
            )
            msg = self._(u"Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        except (self_api.SelfAssessmentInternalError, workflow_api.AssessmentWorkflowInternalError):
            logger.exception(
                u"An error occurred while submitting a self assessment "
                u"for the submission {}".format(self.submission_uuid),
            )
            msg = self._(u"Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example #48
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        self.assertItemsEqual(assessment, retrieved)
    def test_check_all_criteria_assessed(self, data):
        student_item = {
            'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            'item_id': 'test_item',
            'course_id': 'test_course',
            'item_type': 'test_type'
        }
        submission = create_submission(student_item, "Test answer")

        rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
            has_option_selected=data['has_option_selected'],
            has_zero_options=data['has_zero_options'],
            has_feedback=data['has_feedback'])
        error = False
        try:
            create_assessment(submission['uuid'], student_item['student_id'],
                              options_selected, criterion_feedback,
                              "overall feedback", rubric)
        except SelfAssessmentRequestError:
            error = True
        self.assertEqual(data['expected_error'], error)
Example #50
    def test_create_assessment_database_error(self, mock_complete_assessment):
        mock_complete_assessment.side_effect = DatabaseError

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        with self.assertRaises(SelfAssessmentInternalError):
            # Create a self-assessment for the submission
            assessment = create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
                scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
            )
Example #51
    def test_open_completed_self_assessment(self, xblock):
        # Simulate that the workflow is in the self-assessment step
        # and that a self-assessment has already been created
        submission = xblock.create_submission(
            xblock.get_student_item_dict(), (u"Đøɨn' ɨŧ ŧø đɇȺŧħ 1", u"Đøɨn' ɨŧ ŧø đɇȺŧħ 2")
        )
        self_api.create_assessment(
            submission['uuid'],
            xblock.get_student_item_dict()['student_id'],
            {u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ', u'Form': u'Fair'},
            {}, "Good job!",
            create_rubric_dict(xblock.prompts, xblock.rubric_criteria)
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/self/oa_self_complete.html',
            {
                'allow_latex': False,
                'user_timezone': pytz.utc,
                'user_language': 'en'
            },
            workflow_status='self',
            submission_uuid=submission['uuid']
        )
    def test_check_all_criteria_assessed(self, data):
        student_item = {
            'student_id': u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            'item_id': 'test_item',
            'course_id': 'test_course',
            'item_type': 'test_type'
        }
        submission = create_submission(student_item, "Test answer")

        rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
            has_option_selected=data['has_option_selected'],
            has_zero_options=data['has_zero_options'],
            has_feedback=data['has_feedback']
        )
        error = False
        try:
            create_assessment(
                submission['uuid'], student_item['student_id'], options_selected,
                criterion_feedback, "overall feedback", rubric
            )
        except SelfAssessmentRequestError:
            error = True
        self.assertEqual(data['expected_error'], error)
Example #53
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
Example #54
    def test_create_assessment_all_criteria_have_zero_options(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Use a rubric with only criteria with no options (only written feedback)
        rubric = copy.deepcopy(self.RUBRIC)
        for criterion in rubric["criteria"]:
            criterion["options"] = []

        # Create a self-assessment for the submission
        # We don't select any options, since none of the criteria have options
        options_selected = {}
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            options_selected, rubric,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # The self-assessment should have set the feedback for
        # all criteria to an empty string.
        for part in assessment["parts"]:
            self.assertEqual(part["option"], None)
            self.assertEqual(part["feedback"], u"")
Example #55
    def _create_submission_and_assessments(
        self, xblock, submission_text, peers, peer_assessments, self_assessment,
        waiting_for_peer=False,
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Keyword Arguments:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer, {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)

            submission = peer_api.get_submission_to_assess(scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
                )

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'],
                student_id,
                asmnt['options_selected'],
                asmnt['criterion_feedback'],
                asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module('peer-assessment')['must_be_graded_by']
            )

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(
                submission['uuid'], student_id, self_assessment['options_selected'],
                self_assessment['criterion_feedback'], self_assessment['overall_feedback'],
                {'criteria': xblock.rubric_criteria}
            )
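A hedged sketch of how a grade test might drive this helper; the SUBMISSION, PEERS, PEER_ASSESSMENTS, and SELF_ASSESSMENT constants are assumed test-class fixtures and are not defined by the helper itself.

    # Hypothetical usage in a grade test; the uppercase constants are assumed
    # fixtures on the test class and are not part of the helper above.
    def test_render_grade_complete(self, xblock):
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, self.PEERS, self.PEER_ASSESSMENTS, self.SELF_ASSESSMENT
        )
        # With peer and self assessments in place, the user should now receive a grade.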
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
        """
        if len(args) < 3:
            raise CommandError('Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>')

        course_id = unicode(args[0])
        item_id = unicode(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        print u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id
        )

        for sub_num in range(num_submissions):

            print "Creating submission {num}".format(num=sub_num)

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print "-- Creating peer-assessment {num}".format(num=num)

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid, submission_uuid)

                # Create the peer assessment
                peer_api.create_assessment(
                    scorer_submission_uuid,
                    scorer_id,
                    options_selected, {}, "  ".join(loremipsum.get_paragraphs(2)),
                    rubric,
                    self.NUM_PEER_ASSESSMENTS
                )

            # Create a self-assessment
            print "-- Creating self assessment"
            self_api.create_assessment(
                submission_uuid, student_item['student_id'],
                options_selected, rubric
            )
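As a usage note, the command above could be driven from a script or test roughly as follows, assuming the positional-argument interface shown in its usage string; the course and item IDs are placeholders.

# Sketch of invoking the management command programmatically; the IDs below
# are placeholders, and this assumes the positional-args form
# 'create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS>'.
from django.core.management import call_command

call_command('create_oa_submissions', 'edX/Demo/2014', 'i4x://edX/Demo/openassessment/dummy', '5')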
Example #57
    def _create_submission_and_assessments(
        self,
        xblock,
        submission_text,
        peers,
        peer_assessments,
        self_assessment,
        waiting_for_peer=False,
    ):
        """
        Create a submission and peer/self assessments, so that the user can receive a grade.

        Args:
            xblock (OpenAssessmentBlock): The XBlock, loaded for the user who needs a grade.
            submission_text (unicode): Text of the submission from the user.
            peers (list of unicode): List of user IDs of peers who will assess the user.
            peer_assessments (list of dict): List of assessment dictionaries for peer assessments.
            self_assessment (dict): Dict of assessment for self-assessment.

        Keyword Arguments:
            waiting_for_peer (bool): If true, skip creation of peer assessments for the user's submission.

        Returns:
            None

        """
        # Create a submission from the user
        student_item = xblock.get_student_item_dict()
        student_id = student_item['student_id']
        submission = xblock.create_submission(student_item, submission_text)

        # Create submissions and assessments from other users
        scorer_submissions = []
        for scorer_name, assessment in zip(peers, peer_assessments):

            # Create a submission for each scorer for the same problem
            scorer = copy.deepcopy(student_item)
            scorer['student_id'] = scorer_name

            scorer_sub = sub_api.create_submission(scorer,
                                                   {'text': submission_text})
            workflow_api.create_workflow(scorer_sub['uuid'], self.STEPS)

            submission = peer_api.get_submission_to_assess(
                scorer_sub['uuid'], len(peers))

            # Store the scorer's submission so our user can assess it later
            scorer_submissions.append(scorer_sub)

            # Create an assessment of the user's submission
            if not waiting_for_peer:
                peer_api.create_assessment(
                    scorer_sub['uuid'], scorer_name,
                    assessment['options_selected'],
                    assessment['criterion_feedback'],
                    assessment['overall_feedback'],
                    {'criteria': xblock.rubric_criteria},
                    xblock.get_assessment_module(
                        'peer-assessment')['must_be_graded_by'])

        # Have our user make assessments (so she can get a score)
        for asmnt in peer_assessments:
            peer_api.get_submission_to_assess(submission['uuid'], len(peers))
            peer_api.create_assessment(
                submission['uuid'], student_id, asmnt['options_selected'],
                asmnt['criterion_feedback'], asmnt['overall_feedback'],
                {'criteria': xblock.rubric_criteria},
                xblock.get_assessment_module(
                    'peer-assessment')['must_be_graded_by'])

        # Have the user submit a self-assessment (so she can get a score)
        if self_assessment is not None:
            self_api.create_assessment(submission['uuid'], student_id,
                                       self_assessment['options_selected'],
                                       self_assessment['criterion_feedback'],
                                       self_assessment['overall_feedback'],
                                       {'criteria': xblock.rubric_criteria})
Example #58
    def self_assess(self, data, suffix=''):
        """
        Create a self-assessment for a submission.

        Args:
            data (dict): Must have the following keys:
                options_selected (dict): Dictionary mapping criterion names to option values.

        Returns:
            Dict with keys "success" (bool) indicating success/failure
            and "msg" (unicode) containing additional information if an error occurs.
        """
        if 'options_selected' not in data:
            return {
                'success': False,
                'msg': _(u"Missing options_selected key in request")
            }

        if self.submission_uuid is None:
            return {
                'success': False,
                'msg': _(u"You must submit a response before you can perform a self-assessment.")
            }

        try:
            assessment = self_api.create_assessment(
                self.submission_uuid,
                self.get_student_item_dict()['student_id'],
                data['options_selected'], {"criteria": self.rubric_criteria})
            self.runtime.publish(
                self, "openassessmentblock.self_assess",
                {
                    "feedback": assessment["feedback"],
                    "rubric": {
                        "content_hash": assessment["rubric"]["content_hash"],
                    },
                    "scorer_id": assessment["scorer_id"],
                    "score_type": assessment["score_type"],
                    "scored_at": assessment["scored_at"],
                    "submission_uuid": assessment["submission_uuid"],
                    "parts": [
                        {
                            "option": {
                                "name": part["option"]["name"],
                                "points": part["option"]["points"]
                            }
                        }
                        for part in assessment["parts"]
                    ]
                }
            )
            # After we've created the self-assessment, we need to update the workflow.
            self.update_workflow_status()
        except (self_api.SelfAssessmentRequestError,
                workflow_api.AssessmentWorkflowRequestError):
            logger.warning(
                u"An error occurred while submitting a self assessment "
                u"for the submission {}".format(self.submission_uuid),
                exc_info=True)
            msg = _(u"Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        except (self_api.SelfAssessmentInternalError,
                workflow_api.AssessmentWorkflowInternalError):
            logger.exception(
                u"An error occurred while submitting a self assessment "
                u"for the submission {}".format(self.submission_uuid)
            )
            msg = _(u"Your self assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example #59
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions for.
            item_id (unicode): The ID of the item in the course to create submissions for.
            num_submissions (int): Number of submissions to create.
            percentage (int or float): Percentage for assessments to be made against submissions.
        """
        if len(args) < 4:
            raise CommandError(
                'Usage: create_oa_submissions <COURSE_ID> <ITEM_ID> <NUM_SUBMISSIONS> <PERCENTAGE>'
            )

        course_id = str(args[0])
        item_id = str(args[1])

        try:
            num_submissions = int(args[2])
        except ValueError:
            raise CommandError('Number of submissions must be an integer')

        try:
            percentage = float(args[3])
            assessments_to_create = (percentage / 100) * num_submissions
        except ValueError:
            raise CommandError(
                'Percentage for completed submissions must be an integer or float'
            )

        print(u"Creating {num} submissions for {item} in {course}".format(
            num=num_submissions, item=item_id, course=course_id))

        assessments_created = 0

        for sub_num in range(num_submissions):

            print(u"Creating submission {num}".format(num=sub_num))

            # Create a dummy submission
            student_item = {
                'student_id': uuid4().hex[0:10],
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment'
            }
            submission_uuid = self._create_dummy_submission(student_item)
            self._student_items.append(student_item)

            # Create a dummy rubric
            rubric, options_selected = self._dummy_rubric()

            # Create peer assessments
            for num in range(self.NUM_PEER_ASSESSMENTS):
                print(u"-- Creating peer-workflow {num}".format(num=num))

                scorer_id = 'test_{num}'.format(num=num)

                # The scorer needs to make a submission before assessing
                scorer_student_item = copy.copy(student_item)
                scorer_student_item['student_id'] = scorer_id
                scorer_submission_uuid = self._create_dummy_submission(
                    scorer_student_item)

                # Retrieve the submission we want to score
                # Note that we are NOT using the priority queue here, since we know
                # exactly which submission we want to score.
                peer_api.create_peer_workflow_item(scorer_submission_uuid,
                                                   submission_uuid)
                if assessments_created < assessments_to_create:
                    print(u"-- Creating peer-assessment {num}".format(num=num))
                    # Create the peer assessment
                    peer_api.create_assessment(
                        scorer_submission_uuid, scorer_id, options_selected,
                        {}, "  ".join(loremipsum.get_paragraphs(2)), rubric,
                        self.NUM_PEER_ASSESSMENTS)
            assessments_created += 1

            if self.self_assessment_required:
                # Create a self-assessment
                print(u"-- Creating self assessment")
                self_api.create_assessment(
                    submission_uuid, student_item['student_id'],
                    options_selected, {},
                    "  ".join(loremipsum.get_paragraphs(2)), rubric)
        print(u"%s assessments being completed for %s submissions" %
              (assessments_created, num_submissions))