Example #1
 def test_fetch_same_submission(self):
     bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     tim_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertEqual(bob_sub, tim_to_grade)
     # Ensure that Bob doesn't pick up the submission that Tim is grading.
     bob_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], bob['student_id'])
     tim_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertEqual(bob_sub, tim_to_grade)
     self.assertEqual(tim_sub, bob_to_grade)
Example #2
 def test_fetch_same_submission(self):
     bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
     tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
     tim_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertEqual(bob_sub, tim_to_grade)
     # Ensure that Bob doesn't pick up the submission that Tim is grading.
     bob_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], bob['student_id'])
     tim_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertEqual(bob_sub, tim_to_grade)
     self.assertEqual(tim_sub, bob_to_grade)
Example #3
 def test_next_submission_error(self):
     _, tim = self._create_student_and_submission("Tim", "Tim's answer")
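     # Simulate the submissions API failing so get_submission_to_assess raises StaffAssessmentInternalError.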
     with mock.patch(
             'openassessment.assessment.api.staff.submissions_api.get_submission'
     ) as patched_get_submission:
         patched_get_submission.side_effect = sub_api.SubmissionNotFoundError(
             'Failed')
         with self.assertRaises(staff_api.StaffAssessmentInternalError):
             staff_api.get_submission_to_assess(tim['course_id'],
                                                tim['item_id'],
                                                tim['student_id'])
Example #4
    def test_grading_statistics(self):
        bob_sub, bob = self._create_student_and_submission(
            "bob", "bob's answer")
        course_id = bob['course_id']
        item_id = bob['item_id']
        tim_sub, tim = self._create_student_and_submission(
            "Tim", "Tim's answer")
        sue_sub, sue = self._create_student_and_submission(
            "Sue", "Sue's answer")
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})

        # Fetch a grade so that there's one 'in-progress'
        tim_to_grade = staff_api.get_submission_to_assess(
            course_id, item_id, tim['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 2, 'in-progress': 1})

        bob_to_grade = staff_api.get_submission_to_assess(
            tim['course_id'], tim['item_id'], bob['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})

        # Grade one of the submissions
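        # Tim assesses the submission he checked out; it moves from 'in-progress' to 'graded'.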
        staff_assessment = staff_api.create_assessment(
            tim_to_grade["uuid"],
            tim['student_id'],
            OPTIONS_SELECTED_DICT["all"]["options"],
            dict(),
            "",
            RUBRIC,
        )
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 1})

        # When one of the 'locks' times out, verify that it is no longer
        # considered ungraded.
        workflow = StaffWorkflow.objects.get(scorer_id=bob['student_id'])
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))
                     ).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 2, 'in-progress': 0})

        workflow_api.cancel_workflow(bob_to_grade['uuid'], "Test Cancel",
                                     bob['student_id'], {})
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 0})
Example #5
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, will return None.
            submission_to_assess = staff_api.get_submission_to_assess(course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(self, 'openassessmentblock.get_submission_for_staff_grading', {
                    'type': 'full-grade',
                    'requesting_staff_id': staff_id,
                    'item_id': item_id,
                    'submission_returned_uuid': submission_to_assess['uuid']
                })
                submission = submission_api.get_submission_and_student(submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item']['student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission
                    )
                    # Add team info to context
                    submission_context['teams_enabled'] = self.teams_enabled
                    if self.teams_enabled:
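                        # Map the anonymous student id back to a real user so their team can be looked up below.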
                        user = self.get_real_user(anonymous_student_id)

                        if not user:
                            logger.error(
                                '{}: User lookup for anonymous_user_id {} failed'.format(
                                    self.location,
                                    anonymous_student_id
                                )
                            )
                            raise ObjectDoesNotExist()

                        team = self.teams_service.get_team(user, self.course_id, self.selected_teamset_id)

                        submission_context['team_name'] = team.name
                        submission_context['team_usernames'] = list_to_conversational_format(
                            [user.username for user in team.users.all()]
                        )

                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                return self.render_error(self._(u"Error loading the checked out learner response."))
            return self.render_error(self._(u"No other learner responses are available for grading at this time."))
        except PeerAssessmentInternalError:
            return self.render_error(self._(u"Error getting staff grade information."))
Example #6
 def test_fetch_next_submission(self):
     bob_sub, _ = self._create_student_and_submission("bob", "bob's answer")
     _, tim = self._create_student_and_submission("Tim", "Tim's answer")
     submission = staff_api.get_submission_to_assess(
         tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertIsNotNone(submission)
     self.assertEqual(bob_sub, submission)
Example #7
 def test_no_available_submissions(self):
     tim_sub, tim = self._create_student_and_submission(
         "Tim", "Tim's answer")
     # Use a non-existent course and non-existent item.
     submission = staff_api.get_submission_to_assess(
         'test_course_id', 'test_item_id', tim['student_id'])
     self.assertIsNone(submission)
Example #8
    def test_fetch_submission_delayed(self):
        bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
        # Fetch the submission for Tim to grade
        tim_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], "Tim")
        self.assertEqual(bob_sub, tim_to_grade)

        bob_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertIsNone(bob_to_grade)

        # Change the grading_started_at timestamp so that the 'lock' on the
        # problem is released.
        workflow = StaffWorkflow.objects.get(scorer_id="Tim")
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()

        bob_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertEqual(tim_to_grade, bob_to_grade)
Example #9
    def test_fetch_submission_delayed(self):
        bob_sub, bob = self._create_student_and_submission("bob", "bob's answer")
        # Fetch the submission for Tim to grade
        tim_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], "Tim")
        self.assertEqual(bob_sub, tim_to_grade)

        bob_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertIsNone(bob_to_grade)

        # Change the grading_started_at timestamp so that the 'lock' on the
        # problem is released.
        workflow = StaffWorkflow.objects.get(scorer_id="Tim")
        # pylint: disable=unicode-format-string
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()

        bob_to_grade = staff_api.get_submission_to_assess(bob['course_id'], bob['item_id'], bob['student_id'])
        self.assertEqual(tim_to_grade, bob_to_grade)
Example #10
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, will return None.
            submission_to_assess = staff_api.get_submission_to_assess(
                course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(
                    self,
                    'openassessmentblock.get_submission_for_staff_grading', {
                        'type': 'full-grade',
                        'requesting_staff_id': staff_id,
                        'item_id': item_id,
                        'submission_returned_uuid': submission_to_assess['uuid']
                    })
                submission = submission_api.get_submission_and_student(
                    submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item']['student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission)
                    if self.is_team_assignment():
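                        # For team assignments, fold the team's submission details into the grading context.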
                        self.add_team_submission_context(
                            submission_context,
                            individual_submission_uuid=submission['uuid'],
                            transform_usernames=True)
                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                return self.render_error(
                    self._("Error loading the checked out learner response."))
            return self.render_error(
                self._("No other learner responses are available for grading at this time."))
        except PeerAssessmentInternalError:
            return self.render_error(
                self._("Error getting staff grade information."))
Example #11
    def test_grading_statistics(self):
        _, bob = self._create_student_and_submission("bob", "bob's answer")
        course_id = bob['course_id']
        item_id = bob['item_id']
        _, tim = self._create_student_and_submission("Tim", "Tim's answer")
        self._create_student_and_submission("Sue", "Sue's answer")
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 3, 'in-progress': 0})

        # Fetch a grade so that there's one 'in-progress'
        tim_to_grade = staff_api.get_submission_to_assess(course_id, item_id, tim['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 2, 'in-progress': 1})

        bob_to_grade = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], bob['student_id'])
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 0, 'ungraded': 1, 'in-progress': 2})

        # Grade one of the submissions
        staff_api.create_assessment(
            tim_to_grade["uuid"],
            tim['student_id'],
            OPTIONS_SELECTED_DICT["all"]["options"], dict(), "",
            RUBRIC,
        )
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 1})

        # When one of the 'locks' times out, verify that it is no longer
        # considered ungraded.
        workflow = StaffWorkflow.objects.get(scorer_id=bob['student_id'])
        timestamp = (now() - (workflow.TIME_LIMIT + timedelta(hours=1))).strftime("%Y-%m-%d %H:%M:%S")
        workflow.grading_started_at = timestamp
        workflow.save()
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 2, 'in-progress': 0})

        workflow_api.cancel_workflow(bob_to_grade['uuid'], "Test Cancel", bob['student_id'], {})
        stats = staff_api.get_staff_grading_statistics(course_id, item_id)
        self.assertEqual(stats, {'graded': 1, 'ungraded': 1, 'in-progress': 0})
Example #12
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, will return None.
            submission_to_assess = staff_api.get_submission_to_assess(course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(self, 'openassessmentblock.get_submission_for_staff_grading', {
                    'type': 'full-grade',
                    'requesting_staff_id': staff_id,
                    'item_id': item_id,
                    'submission_returned_uuid': submission_to_assess['uuid']
                })
                submission = submission_api.get_submission_and_student(submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item']['student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission
                    )
                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                else:
                    return self.render_error(self._(u"Error loading the checked out learner response."))
            else:
                return self.render_error(self._(u"No other learner responses are available for grading at this time."))

        except PeerAssessmentInternalError:
            return self.render_error(self._(u"Error getting staff grade information."))
Example #13
 def test_no_available_submissions(self):
     _, tim = self._create_student_and_submission("Tim", "Tim's answer")
     # Use a non-existent course and non-existent item.
     submission = staff_api.get_submission_to_assess('test_course_id', 'test_item_id', tim['student_id'])
     self.assertIsNone(submission)
Example #14
 def test_next_submission_error(self):
     _, tim = self._create_student_and_submission("Tim", "Tim's answer")
     with mock.patch('openassessment.assessment.api.staff.submissions_api.get_submission') as patched_get_submission:
         patched_get_submission.side_effect = sub_api.SubmissionNotFoundError('Failed')
         with self.assertRaises(staff_api.StaffAssessmentInternalError):
             staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
Example #15
 def test_fetch_next_submission(self):
     bob_sub, _ = self._create_student_and_submission("bob", "bob's answer")
     _, tim = self._create_student_and_submission("Tim", "Tim's answer")
     submission = staff_api.get_submission_to_assess(tim['course_id'], tim['item_id'], tim['student_id'])
     self.assertIsNotNone(submission)
     self.assertEqual(bob_sub, submission)