Example 1
    def test_staff_area_student_info_staff_only_no_options(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['staff']
        )

        # Bob assesses himself as staff.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            {},  # no options available
            {"vocabulary": "Good use of vocabulary!"},
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        _, context = xblock.get_student_info_path_and_context("Bob")
        self.assertIn(
            "Good use of vocabulary!",
            self.request(
                xblock,
                "render_student_info",
                urllib.urlencode({"student_username": "******"})
            )
        )
Example 2
    def test_staff_area_student_info_staff_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['staff']
        )

        # Bob assesses himself.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNotNone(context['staff_assessment'])
        self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)

        grade_details = context['grade_details']
        self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
        self.assertEquals('Staff Grade', grade_details['criteria'][0]['assessments'][0]['title'])
Example 3
    def test_retrieve_bulk_workflow_status(self):
        """
        Test that the bulk workflow API retrieves submission information correctly.
        """
        # Create Bob's submission without a staff assessment
        bob_sub, bob_item = TestStaffAssessment._create_student_and_submission(
            "Bob", "Bob's answer")
        # Create Tim's submission with a staff assessment
        tim_sub, tim_item = TestStaffAssessment._create_student_and_submission(
            "Tim", "Tim's answer")
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT["none"]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Retrieve workflow status and test
        workflow_status = staff_api.bulk_retrieve_workflow_status(
            tim_item['course_id'], tim_item['item_id'],
            [tim_sub['uuid'], bob_sub['uuid']])
        self.assertEqual(workflow_status[tim_sub['uuid']], "submitted")
        self.assertEqual(workflow_status[bob_sub['uuid']], "not_submitted")
Example 4
    def test_invalid_rubric_options_exception(self, invalid_reason):
        # Define invalid options_selected
        dict_to_use = copy.deepcopy(OPTIONS_SELECTED_DICT['all']["options"])
        if invalid_reason == "criterion_not_found":
            dict_to_use["invalid"] = RUBRIC_OPTIONS[0]["name"]
        elif invalid_reason == "option_not_found":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = "invalid"
        elif invalid_reason == "missing_criteria":
            del dict_to_use[RUBRIC["criteria"][0]["name"]]
        elif invalid_reason == "some_criteria_not_assessed":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = None

        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Try to staff assess with invalid options selected
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                dict_to_use,
                dict(),
                "",
                RUBRIC,
            )
        self.assertEqual(str(context_manager.exception),
                         u"Invalid options were selected in the rubric.")
Example 5
    def test_staff_area_student_info_staff_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['staff']
        )

        # Bob assesses himself.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNotNone(context['staff_assessment'])
        self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)

        grade_details = context['grade_details']
        self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
        self.assertEquals('Staff Grade', grade_details['criteria'][0]['assessments'][0]['title'])
Example 6
    def test_staff_area_student_info_staff_only_no_options(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")), ['staff'])

        # Bob assesses himself as staff.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            {},  # no options available
            {"vocabulary": "Good use of vocabulary!"},
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        _, context = xblock.get_student_info_path_and_context("Bob")
        self.assertIn(
            "Good use of vocabulary!",
            self.request(xblock, "render_student_info",
                         urllib.urlencode({"student_username": "******"})))
Example 7
    def test_provisionally_done(self):
        """
        Test to ensure that blocking steps, such as peer, are not considered done and do not display a score
        if the submitter's requirements have not yet been met, even if a staff score has been recorded.

        This test also ensures that a user may submit peer assessments after having been staff assessed,
        which was previously broken.
        """
        # Tim (a student) makes a submission for a problem that requires peer assessment
        tim_sub, _ = TestStaffAssessment._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['peer'])
        # Bob (a student) also makes a submission for that problem
        bob_sub, bob = TestStaffAssessment._create_student_and_submission(
            "Bob", "Bob's answer", problem_steps=['peer'])

        # Define peer requirements. Note that neither submission will fulfill must_be_graded_by
        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 2}}

        staff_score = "none"
        # Dumbledore (staff) uses the override ability to provide a score for both submissions
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )
        staff_api.create_assessment(
            bob_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Bob completes his peer assessment duties, Tim does not
        peer_api.get_submission_to_assess(bob_sub["uuid"], 1)
        peer_assess(bob_sub["uuid"], bob["student_id"],
                    OPTIONS_SELECTED_DICT["most"]["options"], dict(), "",
                    RUBRIC, requirements["peer"]["must_be_graded_by"])

        # Verify that Bob's submission is marked done and returns the proper score
        bob_workflow = workflow_api.get_workflow_for_submission(
            bob_sub["uuid"], requirements)
        self.assertEqual(bob_workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        self.assertEqual(bob_workflow["status"], "done")

        # Verify that Tim's submission is not marked done, and he cannot get his score
        tim_workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(tim_workflow["score"], None)
        self.assertNotEqual(tim_workflow["status"], "done")
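Example 8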
    def submit_ora_test_data(self, course_id, submissions_config):
        """
        Run the submit action. For each specified submission, create the submission, create an assessment if specified,
        and create a lock if specified.
        """
        for ora_config in submissions_config:
            log.info('Creating test submissions for course %s', course_id)
            for submission_config in ora_config['submissions']:
                log.info("Creating submission for user %s",
                         submission_config['username'])
                student_item = self.student_item(submission_config['username'],
                                                 course_id,
                                                 ora_config['displayName'])
                # Submissions consist of the username, a line break, and some lorem ipsum text
                text_response = submission_config['username'] + '\n' + generate_lorem_sentences()
                submission = sub_api.create_submission(
                    student_item, {'parts': [{
                        'text': text_response
                    }]})
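                # Create a workflow with a staff step for the new submission and bring its status up to date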
                workflow_api.create_workflow(submission['uuid'], ['staff'])
                workflow_api.update_from_assessments(submission['uuid'], None)
                log.info("Created submission %s for user %s",
                         submission['uuid'], submission_config['username'])

                if submission_config['lockOwner']:
                    log.info("Creating lock on submission %s owned by %s",
                             submission['uuid'],
                             submission_config['lockOwner'])
                    SubmissionGradingLock.claim_submission_lock(
                        submission['uuid'],
                        self.username_to_anonymous_user_id[submission_config['lockOwner']]
                    )

                if submission_config['gradeData']:
                    grade_data = submission_config['gradeData']
                    log.info(
                        "Creating assessment from user %s for submission %s",
                        grade_data['gradedBy'], submission['uuid'])
                    block = self.display_name_to_block[ora_config['displayName']]
                    rubric_dict = create_rubric_dict(
                        block.prompts, block.rubric_criteria_with_labels)
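                    # Convert the configured grade data into the options/feedback format the staff API expects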
                    options_selected, criterion_feedback = self.api_format_criteria(
                        grade_data['criteria'], rubric_dict)
                    staff_api.create_assessment(
                        submission['uuid'],
                        self.username_to_anonymous_user_id[
                            grade_data['gradedBy']],
                        options_selected,
                        criterion_feedback,
                        grade_data['overallFeedback'],
                        rubric_dict,
                    )
                    workflow_api.update_from_assessments(
                        submission['uuid'], None)
Example 9
    def test_provisionally_done(self):
        """
        Test to ensure that blocking steps, such as peer, are not considered done and do not display a score
        if the submitter's requirements have not yet been met, even if a staff score has been recorded.

        This test also ensures that a user may submit peer assessments after having been staff assessed,
        which was previously broken.
        """
        # Tim (a student) makes a submission for a problem that requires peer assessment
        tim_sub, _ = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['peer'])
        # Bob (a student) also makes a submission for that problem
        bob_sub, bob = TestStaffAssessment._create_student_and_submission("Bob", "Bob's answer", problem_steps=['peer'])

        # Define peer requirements. Note that neither submission will fulfill must_be_graded_by
        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 2}}

        staff_score = "none"
        # Dumbledore (staff) uses the override ability to provide a score for both submissions
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"], dict(), "",
            RUBRIC,
        )
        staff_api.create_assessment(
            bob_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"], dict(), "",
            RUBRIC,
        )

        # Bob completes his peer assessment duties, Tim does not
        peer_api.get_submission_to_assess(bob_sub["uuid"], 1)
        peer_assess(
            bob_sub["uuid"],
            bob["student_id"],
            OPTIONS_SELECTED_DICT["most"]["options"], dict(), "",
            RUBRIC,
            requirements["peer"]["must_be_graded_by"]
        )

        # Verify that Bob's submission is marked done and returns the proper score
        bob_workflow = workflow_api.get_workflow_for_submission(bob_sub["uuid"], requirements)
        self.assertEqual(bob_workflow["score"]["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        self.assertEqual(bob_workflow["status"], "done")

        # Verify that Tim's submission is not marked done, and he cannot get his score
        tim_workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], requirements)
        self.assertEqual(tim_workflow["score"], None)
        self.assertNotEqual(tim_workflow["status"], "done")
Example 10
    def test_database_create_error_handling(self, mock_create):
        mock_create.side_effect = DatabaseError("KABOOM!")

        # Try to create a staff assessment, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.create_assessment(
                "000000",
                "Dumbledore",
                OPTIONS_SELECTED_DICT['most']['options'], dict(), "",
                RUBRIC,
            )
        self.assertEqual(
            str(context_manager.exception),
            u"An error occurred while creating an assessment by the scorer with this ID: {}".format("Dumbledore")
        )
Example 11
    def test_database_create_error_handling(self, mock_create):
        mock_create.side_effect = DatabaseError("KABOOM!")

        # Try to create a staff assessment, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.create_assessment(
                "000000",
                "Dumbledore",
                OPTIONS_SELECTED_DICT['most']['options'], {}, "",
                RUBRIC,
            )
        self.assertEqual(
            str(context_manager.exception),
            "An error occurred while creating an assessment by the scorer with this ID: {}".format("Dumbledore")
        )
Example 12
    def test_create_assessment_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=['staff'])

        # Verify that we're still waiting on a staff assessment
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS_WITH_STAFF, expect_done=False)

        # Verify that a StaffWorkflow step has been created and is not complete
        workflow = StaffWorkflow.objects.get(submission_uuid=tim_sub['uuid'])
        self.assertIsNone(workflow.grading_completed_at)

        # Staff assess
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"], dict(), "",
            RUBRIC,
        )

        # Verify assessment made, score updated, and no longer waiting
        self.assertEqual(staff_assessment["points_earned"], OPTIONS_SELECTED_DICT[key]["expected_points"])
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS_WITH_STAFF)
        # Verify that a StaffWorkflow step has been marked as complete
        workflow.refresh_from_db()
        self.assertIsNotNone(workflow.grading_completed_at)
Example 13
    def test_create_assessment_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=['staff'])

        # Verify that we're still waiting on a staff assessment
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS_WITH_STAFF, expect_done=False)

        # Verify that a StaffWorkflow step has been created and is not complete
        workflow = StaffWorkflow.objects.get(submission_uuid=tim_sub['uuid'])
        self.assertIsNone(workflow.grading_completed_at)

        # Staff assess
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"], {}, "",
            RUBRIC,
        )

        # Verify assessment made, score updated, and no longer waiting
        self.assertEqual(staff_assessment["points_earned"], OPTIONS_SELECTED_DICT[key]["expected_points"])
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS_WITH_STAFF)
        # Verify that a StaffWorkflow step has been marked as complete
        workflow.refresh_from_db()
        self.assertIsNotNone(workflow.grading_completed_at)
Example 14
    def test_create_assessment_not_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are not required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Staff assess it
        assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Ensure points are calculated properly
        self.assertEqual(assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
        self.assertEqual(assessment["points_possible"], RUBRIC_POSSIBLE_POINTS)

        # Ensure submission and workflow are marked as finished
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS)
Example 15
    def test_grading_statistics(self):
        _, bob = self._create_student_and_submission("bob", "bob's answer")
        course_id = bob['course_id']
        item_id = bob['item_id']
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Sue", "Sue's answer")
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})

        # Fetch a submission to grade so that there's one 'in-progress'
        tim_to_grade = staff_api.get_submission_to_assess(
            course_id, item_id, tim['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 2, 'in-progress': 1})

        bob_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], bob['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})

        # Grade one of the submissions
        staff_api.create_assessment(
            tim_to_grade["uuid"],
            tim['student_id'],
            OPTIONS_SELECTED_DICT["all"]["options"],
            dict(),
            "",
            RUBRIC,
        )
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 1})

        # When one of the 'locks' times out, verify that the submission is no
        # longer considered in progress and counts as ungraded again.
        workflow = StaffWorkflow.objects.get(scorer_id=bob['student_id'])
        # pylint: disable=unicode-format-string
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))
                     ).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 2, 'in-progress': 0})

        workflow_api.cancel_workflow(bob_to_grade['uuid'], "Test Cancel",
                                     bob['student_id'], {})
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 0})
Example 16
    def test_update_with_override(self):
        """
        Test that, when viewing a submission with a staff override present, the workflow is not updated repeatedly.

        See TNL-6092 for some historical context.
        """
        tim_sub, _ = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['self'])
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT["none"]["options"], {}, "",
            RUBRIC,
        )
        workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
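        # Viewing the workflow a second time should not trigger another score reset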
        with mock.patch('openassessment.workflow.models.sub_api.reset_score') as mock_reset:
            workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
            self.assertFalse(mock_reset.called)
Example 17
    def test_update_with_override(self):
        """
        Test that, when viewing a submission with a staff override present, the workflow is not updated repeatedly.

        See TNL-6092 for some historical context.
        """
        tim_sub, _ = TestStaffAssessment._create_student_and_submission("Tim", "Tim's answer", problem_steps=['self'])
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT["none"]["options"], dict(), "",
            RUBRIC,
        )
        workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
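        # Viewing the workflow a second time should not trigger another score reset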
        with mock.patch('openassessment.workflow.models.sub_api.reset_score') as mock_reset:
            workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
            self.assertFalse(mock_reset.called)
Example 18
    def test_invalid_rubric_exception(self):
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Define invalid rubric
        invalid_rubric = copy.deepcopy(RUBRIC)
        for criterion in invalid_rubric["criteria"]:
            for option in criterion["options"]:
                option["points"] = -1

        # Try to staff assess with invalid rubric
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                OPTIONS_SELECTED_DICT["most"]["options"], dict(), "",
                invalid_rubric,
            )
        self.assertEqual(str(context_manager.exception), u"The rubric definition is not valid.")
Example 19
    def test_invalid_rubric_exception(self):
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Define invalid rubric
        invalid_rubric = copy.deepcopy(RUBRIC)
        for criterion in invalid_rubric["criteria"]:
            for option in criterion["options"]:
                option["points"] = -1

        # Try to staff assess with invalid rubric
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                OPTIONS_SELECTED_DICT["most"]["options"], {}, "",
                invalid_rubric,
            )
        self.assertEqual(str(context_manager.exception), "The rubric definition is not valid.")
Example 20
    def test_grading_statistics(self):
        _, bob = self._create_student_and_submission("bob", "bob's answer")
        course_id = bob['course_id']
        item_id = bob['item_id']
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Sue", "Sue's answer")
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})

        # Fetch a submission to grade so that there's one 'in-progress'
        tim_to_grade = staff_api.get_submission_to_assess(course_id, item_id, tim['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 2, 'in-progress': 1})

        bob_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], bob['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})

        # Grade one of the submissions
        staff_api.create_assessment(
            tim_to_grade["uuid"],
            tim['student_id'],
            OPTIONS_SELECTED_DICT["all"]["options"], dict(), "",
            RUBRIC,
        )
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 1})

        # When one of the 'locks' times out, verify that the submission is no
        # longer considered in progress and counts as ungraded again.
        workflow = StaffWorkflow.objects.get(scorer_id=bob['student_id'])
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 2, 'in-progress': 0})

        workflow_api.cancel_workflow(bob_to_grade['uuid'], "Test Cancel", bob['student_id'], {})
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 0})
Example 21
    def test_create_assessment_does_not_block(self, after_type, after_assess):
        """
        Test to ensure that the presence of an override staff assessment only prevents new scores from being recorded;
        other assessments can still be made.
        """
        # Staff assessments do not block other staff scores from overriding, so skip that test
        if after_type == 'staff':
            return

        requirements = self.STEP_REQUIREMENTS
        if after_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Create a submission
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=[after_type])

        staff_score = "few"
        # Staff assess it
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]['options'],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        # It's impossible to fake self requirements being complete, so we can't get the score for the self after_type
        if after_type != 'self':
            self.assertEqual(
                workflow["score"]["points_earned"],
                OPTIONS_SELECTED_DICT[staff_score]["expected_points"])

        # Now, non-force assess with a 'most' value
        # This was selected to match the value that the ai test will set
        unscored_assessment = OPTIONS_SELECTED_DICT["most"]
        assessment = after_assess(tim_sub["uuid"], tim["student_id"],
                                  unscored_assessment["options"])

        # Verify both assessment and workflow report correct score (workflow should report previous value)
        self.assertEqual(assessment["points_earned"],
                         unscored_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
Example 22
    def staff_assess(self, data, suffix=''):  # pylint: disable=unused-argument
        """
        Create a staff assessment from a staff submission.
        """
        if 'submission_uuid' not in data:
            return {
                'success': False,
                'msg': self._("The submission ID of the submission being assessed was not found.")
            }
        if self.is_team_assignment():
            return self._team_assess(data)
        else:
            try:
                assessment = staff_api.create_assessment(
                    data['submission_uuid'],
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    clean_criterion_feedback(self.rubric_criteria,
                                             data['criterion_feedback']),
                    data['overall_feedback'],
                    create_rubric_dict(self.prompts,
                                       self.rubric_criteria_with_labels))
                assess_type = data.get('assess_type', 'regrade')
                self.publish_assessment_event(
                    "openassessmentblock.staff_assess",
                    assessment,
                    type=assess_type)
                workflow_api.update_from_assessments(
                    assessment["submission_uuid"],
                    None,
                    override_submitter_requirements=(assess_type == 'regrade'))
            except StaffAssessmentRequestError:
                logger.warning(
                    "An error occurred while submitting a staff assessment "
                    "for the submission {}".format(data['submission_uuid']),
                    exc_info=True)
                msg = self._("Your staff assessment could not be submitted.")
                return {'success': False, 'msg': msg}
            except StaffAssessmentInternalError:
                logger.exception(
                    "An error occurred while submitting a staff assessment "
                    "for the submission {}".format(data['submission_uuid']), )
                msg = self._("Your staff assessment could not be submitted.")
                return {'success': False, 'msg': msg}

            return {'success': True, 'msg': ""}
Example 23
    def test_create_assessment_score_overrides(self, key):
        """
        Test to ensure that scores can be overridden by a staff assessment using any value.
        """
        # Initially, self-assess with an 'all' value
        initial_assessment = OPTIONS_SELECTED_DICT["all"]

        # Unless we're trying to override with an 'all' value, in which case start with 'none'
        if key == "all":
            initial_assessment = OPTIONS_SELECTED_DICT["none"]

        # Create a submission
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['self'])

        # Self assess it
        self_assessment = self_assess(
            tim_sub["uuid"],
            tim["student_id"],
            initial_assessment["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(self_assessment["points_earned"],
                         initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"],
                         initial_assessment["expected_points"])

        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
Example 24
    def test_invalid_rubric_options_exception(self, invalid_reason):
        # Define invalid options_selected
        dict_to_use = copy.deepcopy(OPTIONS_SELECTED_DICT['all']["options"])
        if invalid_reason == "criterion_not_found":
            dict_to_use["invalid"] = RUBRIC_OPTIONS[0]["name"]
        elif invalid_reason == "option_not_found":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = "invalid"
        elif invalid_reason == "missing_criteria":
            del dict_to_use[RUBRIC["criteria"][0]["name"]]
        elif invalid_reason == "some_criteria_not_assessed":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = None

        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Try to staff assess with invalid options selected
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                dict_to_use, dict(), "",
                RUBRIC,
            )
        self.assertEqual(str(context_manager.exception), u"Invalid options were selected in the rubric.")
Example 25
    def test_create_assessment_type_overrides(self, initial_type,
                                              initial_assess):
        """
        Test to ensure that any assessment, even a staff assessment, can be overridden by a staff assessment.
        """
        # Initially, assess with a 'most' value
        # This was selected to match the value that the ai test will set
        initial_assessment = OPTIONS_SELECTED_DICT["most"]

        # Create a submission
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=[initial_type])

        # Initially assess it
        assessment = initial_assess(tim_sub["uuid"], tim["student_id"],
                                    initial_assessment["options"])
        # and update workflow with new scores
        requirements = self.STEP_REQUIREMENTS
        if initial_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Verify both assessment and workflow report correct score
        self.assertEqual(assessment["points_earned"],
                         initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         initial_assessment["expected_points"])

        staff_score = "few"
        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
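Example 26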
    def do_staff_assessment(self, data):
        """
        Creates a staff assessment with the given assessment info,
        publishes an openassessmentblock.staff_assess tracking event,
        and updates the assessed submission's workflow.
        """
        if 'submission_uuid' not in data:
            return False, self._(
                "The submission ID of the submission being assessed was not found."
            )
        if self.is_team_assignment():
            return self._team_assess(data)
        else:
            try:
                assessment = staff_api.create_assessment(
                    data['submission_uuid'],
                    self.get_student_item_dict()["student_id"],
                    data['options_selected'],
                    clean_criterion_feedback(self.rubric_criteria,
                                             data['criterion_feedback']),
                    data['overall_feedback'],
                    create_rubric_dict(self.prompts,
                                       self.rubric_criteria_with_labels))
                assess_type = data.get('assess_type', 'regrade')
                self.publish_assessment_event(
                    "openassessmentblock.staff_assess",
                    assessment,
                    type=assess_type)
                workflow_api.update_from_assessments(
                    assessment["submission_uuid"],
                    None,
                    override_submitter_requirements=(assess_type == 'regrade'))
            except StaffAssessmentRequestError:
                logger.warning(
                    "An error occurred while submitting a staff assessment "
                    "for the submission %s",
                    data['submission_uuid'],
                    exc_info=True)
                msg = self._("Your staff assessment could not be submitted.")
                return False, msg
            except StaffAssessmentInternalError:
                logger.exception(
                    "An error occurred while submitting a staff assessment "
                    "for the submission %s", data['submission_uuid'])
                msg = self._("Your staff assessment could not be submitted.")
                return False, msg
        return True, ''
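Example 27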
    def staff_assess(self, data, suffix=''):
        """
        Create a staff assessment from a staff submission.
        """
        if 'submission_uuid' not in data:
            return {
                'success': False, 'msg': self._(u"The submission ID of the submission being assessed was not found.")
            }
        try:
            assessment = staff_api.create_assessment(
                data['submission_uuid'],
                self.get_student_item_dict()["student_id"],
                data['options_selected'],
                clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
                data['overall_feedback'],
                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
            )
            assess_type = data.get('assess_type', 'regrade')
            self.publish_assessment_event("openassessmentblock.staff_assess", assessment, type=assess_type)
            workflow_api.update_from_assessments(assessment["submission_uuid"], None)
            
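            # Look up the learner who owns this submission so they can be notified of the staff assessment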
            student_item = sub_api.get_submission_and_student(data['submission_uuid']).get('student_item', None)

            if student_item:
                student_id = student_item.get('student_id', None)
                if student_id:
                    student_email = self.get_user_email(student_id)
                    send_notification_for_assessment.delay(
                        student_email,
                        'staff',
                        "{0}".format(self.course_id),
                        "{0}".format(self.scope_ids.usage_id),
                    )

        except StaffAssessmentRequestError:
            logger.warning(
                u"An error occurred while submitting a staff assessment "
                u"for the submission {}".format(data['submission_uuid']),
                exc_info=True
            )
            msg = self._(u"Your staff assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        except StaffAssessmentInternalError:
            logger.exception(
                u"An error occurred while submitting a staff assessment "
                u"for the submission {}".format(data['submission_uuid']),
            )
            msg = self._(u"Your staff assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example 28
    def test_create_assessment_does_not_block(self, after_type, after_assess):
        """
        Test to ensure that the presence of an override staff assessment only prevents new scores from being recorded;
        other assessments can still be made.
        """
        # Staff assessments do not block other staff scores from overriding, so skip that test
        if after_type == 'staff':
            return

        requirements = self.STEP_REQUIREMENTS
        if after_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Create a submission
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=[after_type])

        staff_score = "few"
        # Staff assess it
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]['options'], dict(), "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], requirements)
        # It's impossible to fake self requirements being complete, so we can't get the score for the self after_type
        if after_type != 'self':
            self.assertEqual(workflow["score"]["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])

        # Now, non-force assess with a 'most' value
        # This was selected to match the value that the ai test will set
        unscored_assessment = OPTIONS_SELECTED_DICT["most"]
        assessment = after_assess(tim_sub["uuid"], tim["student_id"], unscored_assessment["options"])

        # Verify both assessment and workflow report correct score (workflow should report previous value)
        self.assertEqual(assessment["points_earned"], unscored_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
Example 29
    def test_create_assessment_not_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are not required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Staff assess it
        assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"], dict(), "",
            RUBRIC,
        )

        # Ensure points are calculated properly
        self.assertEqual(assessment["points_earned"], OPTIONS_SELECTED_DICT[key]["expected_points"])
        self.assertEqual(assessment["points_possible"], RUBRIC_POSSIBLE_POINTS)

        # Ensure submission and workflow are marked as finished
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS)
Example 30
    def test_create_assessment_score_overrides(self, key):
        """
        Test to ensure that scores can be overridden by a staff assessment using any value.
        """
        # Initially, self-assess with an 'all' value
        initial_assessment = OPTIONS_SELECTED_DICT["all"]

        # Unless we're trying to override with an 'all' value, in which case start with 'none'
        if key == "all":
            initial_assessment = OPTIONS_SELECTED_DICT["none"]

        # Create a submission
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=['self'])

        # Self assess it
        self_assessment = self_assess(
            tim_sub["uuid"],
            tim["student_id"],
            initial_assessment["options"], dict(), "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(self_assessment["points_earned"], initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"], initial_assessment["expected_points"])

        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"], dict(), "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"], OPTIONS_SELECTED_DICT[key]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"], OPTIONS_SELECTED_DICT[key]["expected_points"])
Example 31
    def test_create_assessment_type_overrides(self, initial_type, initial_assess):
        """
        Test to ensure that any assessment, even a staff assessment, can be overridden by a staff assessment.
        """
        # Initially, assess with a 'most' value
        # This was selected to match the value that the ai test will set
        initial_assessment = OPTIONS_SELECTED_DICT["most"]

        # Create a submission
        tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer", problem_steps=[initial_type])

        # Initially assess it
        assessment = initial_assess(tim_sub["uuid"], tim["student_id"], initial_assessment["options"])
        # and update workflow with new scores
        requirements = self.STEP_REQUIREMENTS
        if initial_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Verify both assessment and workflow report correct score
        self.assertEqual(assessment["points_earned"], initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"], initial_assessment["expected_points"])

        staff_score = "few"
        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"], dict(), "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"], OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
Example 32
class TestStaffAssessment(CacheResetTest):
    """
    Tests for staff assessments made as overrides, when none is required to exist.
    """

    STEP_REQUIREMENTS = {}
    STEP_REQUIREMENTS_WITH_STAFF = {'required': True}

    # This is due to ddt not playing nicely with list comprehensions
    ASSESSMENT_SCORES_DDT = [key for key in OPTIONS_SELECTED_DICT]

    @staticmethod
    def _peer_assess(scores):
        """
        Helper to fulfill peer assessment requirements.
        """
        bob_sub, bob = TestStaffAssessment._create_student_and_submission(
            "Bob", "Bob's answer", problem_steps=['peer'])
        peer_api.get_submission_to_assess(bob_sub["uuid"], 1)
        return peer_assess(bob_sub["uuid"], bob["student_id"], scores, dict(),
                           "", RUBRIC, 1)

    ASSESSMENT_TYPES_DDT = [
        ('self', lambda sub, scorer_id, scores: self_assess(
            sub, scorer_id, scores, dict(), "", RUBRIC)),
        ('peer', lambda sub, scorer_id, scores: TestStaffAssessment._peer_assess(scores)),
        ('staff', lambda sub, scorer_id, scores: staff_api.create_assessment(
            sub, scorer_id, scores, dict(), "", RUBRIC)),
    ]

    def _verify_done_state(self, uuid, requirements, expect_done=True):
        """
        Asserts that a submission and workflow are (or aren't) set to status "done".
        A False value for expect_done will confirm an assessment/workflow are NOT done.
        """
        workflow = workflow_api.get_workflow_for_submission(uuid, requirements)
        if expect_done:
            self.assertTrue(
                staff_api.assessment_is_finished(uuid, requirements))
            self.assertEqual(workflow["status"], "done")
        else:
            self.assertFalse(
                staff_api.assessment_is_finished(uuid, requirements))
            self.assertNotEqual(workflow["status"], "done")

    @data(*ASSESSMENT_SCORES_DDT)
    def test_create_assessment_not_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are not required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Staff assess it
        assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Ensure points are calculated properly
        self.assertEqual(assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
        self.assertEqual(assessment["points_possible"], RUBRIC_POSSIBLE_POINTS)

        # Ensure submission and workflow are marked as finished
        self._verify_done_state(tim_sub["uuid"], self.STEP_REQUIREMENTS)

    @data(*ASSESSMENT_SCORES_DDT)
    def test_create_assessment_required(self, key):
        """
        Simple test to ensure staff assessments are scored properly, for all values of OPTIONS_SELECTED_DICT,
        when staff scores are required.
        """
        # Create a submission
        tim_sub, _ = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['staff'])

        # Verify that we're still waiting on a staff assessment
        self._verify_done_state(tim_sub["uuid"],
                                self.STEP_REQUIREMENTS_WITH_STAFF,
                                expect_done=False)

        # Verify that a StaffWorkflow step has been created and is not complete
        workflow = StaffWorkflow.objects.get(submission_uuid=tim_sub['uuid'])
        self.assertIsNone(workflow.grading_completed_at)

        # Staff assess
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify assessment made, score updated, and no longer waiting
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
        self._verify_done_state(tim_sub["uuid"],
                                self.STEP_REQUIREMENTS_WITH_STAFF)
        # Verify that a StaffWorkflow step has been marked as complete
        workflow.refresh_from_db()
        self.assertIsNotNone(workflow.grading_completed_at)

    @data(*ASSESSMENT_SCORES_DDT)
    def test_create_assessment_score_overrides(self, key):
        """
        Test to ensure that scores can be overridden by a staff assessment using any value.
        """
        # Initially, self-assess with an 'all' value
        initial_assessment = OPTIONS_SELECTED_DICT["all"]

        # Unless we're trying to override with an 'all' value, in which case start with 'none'
        if key == "all":
            initial_assessment = OPTIONS_SELECTED_DICT["none"]

        # Create a submission
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['self'])

        # Self assess it
        self_assessment = self_assess(
            tim_sub["uuid"],
            tim["student_id"],
            initial_assessment["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(self_assessment["points_earned"],
                         initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"],
                         initial_assessment["expected_points"])

        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[key]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], self.STEP_REQUIREMENTS)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[key]["expected_points"])

    @data(*ASSESSMENT_TYPES_DDT)
    @unpack
    def test_create_assessment_type_overrides(self, initial_type,
                                              initial_assess):
        """
        Test to ensure that any assessment, even a staff assessment, can be overridden by a staff assessment.
        """
        # Initially, assess with a 'most' value
        # This was selected to match the value that the AI test will set
        initial_assessment = OPTIONS_SELECTED_DICT["most"]

        # Create assessment
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=[initial_type])

        # Initially assess it
        assessment = initial_assess(tim_sub["uuid"], tim["student_id"],
                                    initial_assessment["options"])
        # and update workflow with new scores
        requirements = self.STEP_REQUIREMENTS
        if initial_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Verify both assessment and workflow report correct score
        self.assertEqual(assessment["points_earned"],
                         initial_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         initial_assessment["expected_points"])

        staff_score = "few"
        # Now override with a staff assessment
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])

    @data(*ASSESSMENT_TYPES_DDT)
    @unpack
    def test_create_assessment_does_not_block(self, after_type, after_assess):
        """
        Test to ensure that the presence of an override staff assessment only prevents new scores from being recorded;
        other assessments can still be made.
        """
        # Staff assessments do not block other staff scores from overriding, so skip that test
        if after_type == 'staff':
            return

        requirements = self.STEP_REQUIREMENTS
        if after_type == 'peer':
            requirements = {"peer": {"must_grade": 0, "must_be_graded_by": 1}}

        # Create assessment
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=[after_type])

        staff_score = "few"
        # Staff assess it
        staff_assessment = staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]['options'],
            dict(),
            "",
            RUBRIC,
        )

        # Verify both assessment and workflow report correct score
        self.assertEqual(staff_assessment["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        # It's impossible to fake the self requirements being complete, so we can't get the score when after_type is 'self'
        if after_type != 'self':
            self.assertEqual(
                workflow["score"]["points_earned"],
                OPTIONS_SELECTED_DICT[staff_score]["expected_points"])

        # Now make a non-override assessment with a 'most' value
        # This was selected to match the value that the AI test will set
        unscored_assessment = OPTIONS_SELECTED_DICT["most"]
        assessment = after_assess(tim_sub["uuid"], tim["student_id"],
                                  unscored_assessment["options"])

        # Verify both assessment and workflow report correct score (workflow should report previous value)
        self.assertEqual(assessment["points_earned"],
                         unscored_assessment["expected_points"])
        workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])

    def test_provisionally_done(self):
        """
        Test to ensure that blocking steps, such as peer, are not considered done and do not display a score
        if the submitter's requirements have not yet been met, even if a staff score has been recorded.

        This test also ensures that a user may submit peer assessments after having been staff assessed, which was
        a bug that had been previously present.
        """
        # Tim (student) makes a submission for a problem that requires peer assessment
        tim_sub, _ = TestStaffAssessment._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['peer'])
        # Bob (student) also makes a submission for that problem
        bob_sub, bob = TestStaffAssessment._create_student_and_submission(
            "Bob", "Bob's answer", problem_steps=['peer'])

        # Define peer requirements. Note that neither submission will fulfill must_be_graded_by
        requirements = {"peer": {"must_grade": 1, "must_be_graded_by": 2}}

        staff_score = "none"
        # Dumbledore (staff) uses the override ability to provide a score for both submissions
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )
        staff_api.create_assessment(
            bob_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT[staff_score]["options"],
            dict(),
            "",
            RUBRIC,
        )

        # Bob completes his peer assessment duties, Tim does not
        peer_api.get_submission_to_assess(bob_sub["uuid"], 1)
        peer_assess(bob_sub["uuid"], bob["student_id"],
                    OPTIONS_SELECTED_DICT["most"]["options"], dict(), "",
                    RUBRIC, requirements["peer"]["must_be_graded_by"])

        # Verify that Bob's submission is marked done and returns the proper score
        bob_workflow = workflow_api.get_workflow_for_submission(
            bob_sub["uuid"], requirements)
        self.assertEqual(bob_workflow["score"]["points_earned"],
                         OPTIONS_SELECTED_DICT[staff_score]["expected_points"])
        self.assertEqual(bob_workflow["status"], "done")

        # Verify that Tim's submission is not marked done, and he cannot get his score
        tim_workflow = workflow_api.get_workflow_for_submission(
            tim_sub["uuid"], requirements)
        self.assertEqual(tim_workflow["score"], None)
        self.assertNotEqual(tim_workflow["status"], "done")

    def test_update_with_override(self):
        """
        Test that, when viewing a submission with a staff override present, the workflow is not updated repeatedly.

        See TNL-6092 for some historical context.
        """
        tim_sub, _ = TestStaffAssessment._create_student_and_submission(
            "Tim", "Tim's answer", problem_steps=['self'])
        staff_api.create_assessment(
            tim_sub["uuid"],
            "Dumbledore",
            OPTIONS_SELECTED_DICT["none"]["options"],
            dict(),
            "",
            RUBRIC,
        )
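        # The first retrieval updates the workflow with the staff override score.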
        workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
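        # A second retrieval should not reset the score again.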
        with mock.patch('openassessment.workflow.models.sub_api.reset_score'
                        ) as mock_reset:
            workflow_api.get_workflow_for_submission(tim_sub["uuid"], {})
            self.assertFalse(mock_reset.called)

    def test_invalid_rubric_exception(self):
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Define invalid rubric
        invalid_rubric = copy.deepcopy(RUBRIC)
        for criterion in invalid_rubric["criteria"]:
            for option in criterion["options"]:
                option["points"] = -1

        # Try to staff assess with invalid rubric
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                OPTIONS_SELECTED_DICT["most"]["options"],
                dict(),
                "",
                invalid_rubric,
            )
        self.assertEqual(str(context_manager.exception),
                         u"The rubric definition is not valid.")

    @data("criterion_not_found", "option_not_found", "missing_criteria",
          "some_criteria_not_assessed")
    def test_invalid_rubric_options_exception(self, invalid_reason):
        # Define invalid options_selected
        dict_to_use = copy.deepcopy(OPTIONS_SELECTED_DICT['all']["options"])
        if invalid_reason == "criterion_not_found":
            dict_to_use["invalid"] = RUBRIC_OPTIONS[0]["name"]
        elif invalid_reason == "option_not_found":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = "invalid"
        elif invalid_reason == "missing_criteria":
            del dict_to_use[RUBRIC["criteria"][0]["name"]]
        elif invalid_reason == "some_criteria_not_assessed":
            dict_to_use[RUBRIC["criteria"][0]["name"]] = None

        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Try to staff assess with invalid options selected
        with self.assertRaises(StaffAssessmentRequestError) as context_manager:
            staff_api.create_assessment(
                tim_sub["uuid"],
                "Dumbledore",
                dict_to_use,
                dict(),
                "",
                RUBRIC,
            )
        self.assertEqual(str(context_manager.exception),
                         u"Invalid options were selected in the rubric.")

    @mock.patch('openassessment.assessment.models.Assessment.objects.filter')
    def test_database_filter_error_handling(self, mock_filter):
        # Have the mocked filter return an empty queryset so that submission setup succeeds
        mock_filter.return_value = Assessment.objects.none()
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Note that we have to define this side effect *after* creating the submission
        mock_filter.side_effect = DatabaseError("KABOOM!")

        # Try to get the latest staff assessment, handle database errors
        with self.assertRaises(
                StaffAssessmentInternalError) as context_manager:
            staff_api.get_latest_staff_assessment(tim_sub["uuid"])
        self.assertEqual(str(context_manager.exception), (
            u"An error occurred while retrieving staff assessments for the submission with UUID {uuid}: {ex}"
        ).format(uuid=tim_sub["uuid"], ex="KABOOM!"))

        # Try to get staff assessment scores by criteria, handle database errors
        with self.assertRaises(
                StaffAssessmentInternalError) as context_manager:
            staff_api.get_assessment_scores_by_criteria(tim_sub["uuid"])
        self.assertEqual(
            str(context_manager.exception),
            u"Error getting staff assessment scores for {}".format(
                tim_sub["uuid"]))

    @mock.patch('openassessment.assessment.models.Assessment.create')
    def test_database_create_error_handling(self, mock_create):
        mock_create.side_effect = DatabaseError("KABOOM!")

        # Try to create a staff assessment, handle database errors
        with self.assertRaises(
                StaffAssessmentInternalError) as context_manager:
            staff_api.create_assessment(
                "000000",
                "Dumbledore",
                OPTIONS_SELECTED_DICT['most']['options'],
                dict(),
                "",
                RUBRIC,
            )
        self.assertEqual(
            str(context_manager.exception),
            u"An error occurred while creating an assessment by the scorer with this ID: {}"
            .format("Dumbledore"))

    def test_fetch_next_submission(self):
        bob_sub, _ = self._create_student_and_submission("bob", "bob's answer")
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
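        # Tim asks for a submission to grade; Bob's submission is returned.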
        submission = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], tim['student_id'])
        self.assertIsNotNone(submission)
        self.assertEqual(bob_sub, submission)

    def test_fetch_same_submission(self):
        bob_sub, bob = self._create_student_and_submission(
            "bob", "bob's answer")
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer")
        tim_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], tim['student_id'])
        self.assertEqual(bob_sub, tim_to_grade)
        # Ensure that Bob doesn't pick up the submission that Tim is grading.
        bob_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], bob['student_id'])
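        # Tim fetches again and receives the same submission he already has in progress.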
        tim_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], tim['student_id'])
        self.assertEqual(bob_sub, tim_to_grade)
        self.assertEqual(tim_sub, bob_to_grade)

    def test_fetch_submission_delayed(self):
        bob_sub, bob = self._create_student_and_submission(
            "bob", "bob's answer")
        # Fetch the submission for Tim to grade
        tim_to_grade = staff_api.get_submission_to_assess(
            bob['course_id'], bob['item_id'], "Tim")
        self.assertEqual(bob_sub, tim_to_grade)

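        # Bob cannot fetch a submission while Tim still holds the grading lock on the only one.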
        bob_to_grade = staff_api.get_submission_to_assess(
            bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertIsNone(bob_to_grade)

        # Change the grading_started_at timestamp so that the 'lock' on the
        # submission is released.
        workflow = StaffWorkflow.objects.get(scorer_id="Tim")
        # pylint: disable=unicode-format-string
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))
                     ).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()

        bob_to_grade = staff_api.get_submission_to_assess(
            bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertEqual(tim_to_grade, bob_to_grade)

    def test_next_submission_error(self):
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
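        # Simulate the submissions API failing to look up the fetched submission.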
        with mock.patch(
                'openassessment.assessment.api.staff.submissions_api.get_submission'
        ) as patched_get_submission:
            patched_get_submission.side_effect = sub_api.SubmissionNotFoundError(
                'Failed')
            with self.assertRaises(staff_api.StaffAssessmentInternalError):
                staff_api.get_submission_to_assess(tim['course_id'],
                                                   tim['item_id'],
                                                   tim['student_id'])

    def test_no_available_submissions(self):
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
        # Use a non-existent course and non-existent item.
        submission = staff_api.get_submission_to_assess(
            'test_course_id', 'test_item_id', tim['student_id'])
        self.assertIsNone(submission)

    def test_cancel_staff_workflow(self):
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")
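        # Cancelling the learner's workflow should also cancel the staff grading workflow.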
        workflow_api.cancel_workflow(tim_sub['uuid'], "Test Cancel", "Bob", {})
        workflow = StaffWorkflow.objects.get(submission_uuid=tim_sub['uuid'])
        self.assertTrue(workflow.is_cancelled)

    def test_grading_statistics(self):
        _, bob = self._create_student_and_submission("bob", "bob's answer")
        course_id = bob['course_id']
        item_id = bob['item_id']
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Sue", "Sue's answer")
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})

        # Fetch a submission to grade so that there's one 'in-progress'
        tim_to_grade = staff_api.get_submission_to_assess(
            course_id, item_id, tim['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 2, 'in-progress': 1})

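        # Bob also fetches a submission, putting a second one 'in-progress'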
        bob_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], bob['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})

        # Grade one of the submissions
        staff_api.create_assessment(
            tim_to_grade["uuid"],
            tim['student_id'],
            OPTIONS_SELECTED_DICT["all"]["options"],
            dict(),
            "",
            RUBRIC,
        )
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 1})

        # When one of the 'locks' times out, verify that the submission is
        # counted as ungraded again rather than in progress.
        workflow = StaffWorkflow.objects.get(scorer_id=bob['student_id'])
        # pylint: disable=unicode-format-string
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))
                     ).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 2, 'in-progress': 0})

        workflow_api.cancel_workflow(bob_to_grade['uuid'], "Test Cancel",
                                     bob['student_id'], {})
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 0})

    @staticmethod
    def _create_student_and_submission(student,
                                       answer,
                                       date=None,
                                       problem_steps=None):
        """
        Helper method to create a student and submission for use in tests.
        """
        new_student_item = STUDENT_ITEM.copy()
        new_student_item["student_id"] = student
        submission = sub_api.create_submission(new_student_item, answer, date)
        steps = []
        init_params = {}
        if problem_steps:
            steps = problem_steps
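        # The peer step must be started explicitly before the workflow is created.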
        if 'peer' in steps:
            peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], steps, init_params)
        return submission, new_student_item