Example #1
    def test_staff_assess_handler_regrade(self, xblock):
        """ If different assess_types are included in one request, they should be reflected in trcking events. """
        test_submissions = self._create_test_submissions(xblock)

        alice_full_grade_assessment = copy.copy(STAFF_GOOD_ASSESSMENT)
        alice_full_grade_assessment[
            'overall_feedback'] = "full grade for Alice"

        derek_regrade_assessment = copy.copy(STAFF_GOOD_ASSESSMENT)
        derek_regrade_assessment['assess_type'] = 'regrade'
        derek_regrade_assessment['overall_feedback'] = "regrade for Derek"

        # Submit staff assessments
        self.submit_bulk_staff_assessment(
            xblock,
            (test_submissions['Alice'], alice_full_grade_assessment),
            (test_submissions['Derek'], derek_regrade_assessment),
        )

        alice_assessment = staff_api.get_latest_staff_assessment(
            test_submissions['Alice']['uuid'])
        self.assert_assessment_event_published(
            xblock,
            'openassessmentblock.staff_assess',
            alice_assessment,
            type='full-grade')

        derek_assessment = staff_api.get_latest_staff_assessment(
            test_submissions['Derek']['uuid'])
        self.assert_assessment_event_published(
            xblock,
            'openassessmentblock.staff_assess',
            derek_assessment,
            type='regrade')
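
The STAFF_GOOD_ASSESSMENT fixture referenced throughout these examples is not shown. Judging from the values asserted in Examples #10 and #12 (5 of 6 points, 'Fair' for 'Form', 'ﻉซƈﻉɭɭﻉกՇ' for '𝓒𝓸𝓷𝓬𝓲𝓼𝓮', feedback 'Staff: good job!'), it plausibly looks like the sketch below; the exact keys and defaults are assumptions:

# Hypothetical reconstruction of the shared fixture; the values are
# inferred from the assertions in Examples #10 and #12, not taken
# from the original source.
STAFF_GOOD_ASSESSMENT = {
    'options_selected': {
        '𝓒𝓸𝓷𝓬𝓲𝓼𝓮': 'ﻉซƈﻉɭɭﻉกՇ',  # assumed worth 3 points
        'Form': 'Fair',              # assumed worth 2 points
    },
    'criterion_feedback': {},
    'overall_feedback': 'Staff: good job!',
    'assess_type': 'full-grade',
}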
Example #2
    def test_submit_ora_test_data(self):
        """ Test for behavior of the submit step """
        self.cmd.submit_ora_test_data(COURSE_ID, CONFIG_1)

        # User 1 should have a submission, their workflow should be 'done', they should have a staff grade
        # and they should be locked.
        user_1_submission = sub_api.get_submissions(
            student_item(USERNAME_1, self.mock_block.location))[0]
        user_1_workflow = workflow_api.get_workflow_for_submission(
            user_1_submission['uuid'], None)
        assert user_1_workflow['status'] == 'done'
        user_1_assessment = staff_api.get_latest_staff_assessment(
            user_1_submission['uuid'])
        assert user_1_assessment['points_earned'] == 1
        assert user_1_assessment['scorer_id'] == anonymous_user_id(
            STAFF_USER_2)
        assert user_1_assessment['feedback'] == SUBMISSION_CONFIG_1[
            'gradeData']['overallFeedback']
        user_1_lock_owner = SubmissionGradingLock.get_submission_lock(
            user_1_submission['uuid']).owner_id
        assert user_1_lock_owner == anonymous_user_id(STAFF_USER_1)

        # User 2 should have a submission, their workflow should be 'waiting', they should not have a
        # staff grade and they should not be locked
        user_2_submission = sub_api.get_submissions(
            student_item(USERNAME_2, self.mock_block.location))[0]
        user_2_workflow = workflow_api.get_workflow_for_submission(
            user_2_submission['uuid'], None)
        assert user_2_workflow['status'] == 'waiting'
        user_2_assessment = staff_api.get_latest_staff_assessment(
            user_2_submission['uuid'])
        assert user_2_assessment is None
        assert SubmissionGradingLock.get_submission_lock(
            user_2_submission['uuid']) is None
Example #3
    def test_staff_bulk_assess(self, xblock, target_students):
        """ Test for normal behavior of staff bulk assess """
        test_submissions = self._create_test_submissions(xblock)
        submission_assessment_tuples = []
        # Create assessments for all specified learners, with a custom "overall feedback"
        for student_id in target_students:
            assessment = self._build_assessment_dict(student_id,
                                                     test_submissions)
            submission_assessment_tuples.append(
                (test_submissions[student_id], assessment))

        self.submit_bulk_staff_assessment(xblock,
                                          *submission_assessment_tuples)

        # Expect that a staff-assessment was created for each graded submission
        for student_id in target_students:
            submission = test_submissions[student_id]
            assessment = staff_api.get_latest_staff_assessment(
                submission['uuid'])
            self.assertIsNotNone(assessment)
            self._assert_assessment_data_values(xblock, submission, student_id,
                                                assessment)

        # Expect that submissions not included did not receive a grade
        for student_id, ungraded_submission in test_submissions.items():
            if student_id in target_students:
                continue
            assessment = staff_api.get_latest_staff_assessment(
                ungraded_submission['uuid'])
            self.assertIsNone(assessment)
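
The _build_assessment_dict helper used here (and in Examples #5 and #9) is not shown. Given the per-student feedback mentioned in the comment above and the missing-submission_uuid errors in Example #5, a minimal sketch might look as follows; the method name is real, but its body and feedback format are assumptions:

    # Minimal sketch, assuming the bulk payload ties each assessment to a
    # submission via a 'submission_uuid' key (see the errors in Example #5).
    def _build_assessment_dict(self, student_id, test_submissions):
        assessment = copy.deepcopy(STAFF_GOOD_ASSESSMENT)
        assessment['submission_uuid'] = test_submissions[student_id]['uuid']
        assessment['overall_feedback'] = 'Feedback for {}'.format(student_id)
        return assessment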
Example #4
    def test_database_filter_error_handling(self, mock_filter):
        # Create a submission
        mock_filter.return_value = Assessment.objects.none()
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Note that we have to define this side effect *after* creating the submission
        mock_filter.side_effect = DatabaseError("KABOOM!")

        # Try to get the latest staff assessment, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.get_latest_staff_assessment(tim_sub["uuid"])
        self.assertEqual(
            str(context_manager.exception),
            (
                "An error occurred while retrieving staff assessments for the submission with UUID {uuid}: {ex}"
            ).format(uuid=tim_sub["uuid"], ex="KABOOM!")
        )

        # Try to get staff assessment scores by criteria, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.get_assessment_scores_by_criteria(tim_sub["uuid"])
        self.assertEqual(
            str(context_manager.exception),
            "Error getting staff assessment scores for {}".format(tim_sub["uuid"])
        )
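
The decorator that supplies mock_filter sits outside this snippet. With the standard library's unittest.mock, patching the Assessment queryset's filter method would look roughly like this; the exact patch target is an assumption:

# Sketch of the decorator that likely supplies mock_filter; the patch
# target (the Assessment model's default manager) is an assumption.
# The test itself lives on a TestCase class, so mock_filter arrives
# after self.
from unittest import mock

    @mock.patch.object(Assessment.objects, 'filter')
    def test_database_filter_error_handling(self, mock_filter):
        ...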
Example #5
    def test_assessment_mixed_error_and_success(self, xblock):
        """ Test for error behavior when there are both failed and successful staff assessments """
        test_submissions = self._create_test_submissions(xblock)
        self.set_staff_access(xblock)

        assessment_no_submission_uuid = copy.deepcopy(STAFF_GOOD_ASSESSMENT)
        cindy_assessment = self._build_assessment_dict('Cindy',
                                                       test_submissions)
        derek_assessment = self._build_assessment_dict('Derek',
                                                       test_submissions)

        payload = [
            assessment_no_submission_uuid,
            cindy_assessment,
            assessment_no_submission_uuid,
            derek_assessment,
            assessment_no_submission_uuid,
            assessment_no_submission_uuid,
        ]
        resp = self.request(xblock,
                            'bulk_staff_assess',
                            json.dumps(payload),
                            response_format='json')
        self.assertFalse(resp['success'])
        self.assertEqual(
            resp['msg'],
            "There were one or more errors submitting the requested assessments"
        )
        self.assertDictEqual(
            resp['errors'], {
                '0':
                "The submission ID of the submission being assessed was not found.",
                '2':
                "The submission ID of the submission being assessed was not found.",
                '4':
                "The submission ID of the submission being assessed was not found.",
                '5':
                "The submission ID of the submission being assessed was not found.",
            })

        cindy_assessment = staff_api.get_latest_staff_assessment(
            test_submissions['Cindy']['uuid'])
        self._assert_assessment_data_values(xblock, test_submissions['Cindy'],
                                            'Cindy', cindy_assessment)

        derek_assessment = staff_api.get_latest_staff_assessment(
            test_submissions['Derek']['uuid'])
        self._assert_assessment_data_values(xblock, test_submissions['Derek'],
                                            'Derek', derek_assessment)
Example #6
    def staff_assessment_exists(self, submission_uuid):
        """
        Returns True if there exists a staff assessment for the given uuid. False otherwise.
        """

        return staff_api.get_latest_staff_assessment(
            submission_uuid) is not None
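
A caller might use this helper to filter out already-graded work, for example (the surrounding names are hypothetical):

        # Hypothetical usage: keep only submissions with no staff grade yet.
        ungraded = [
            sub for sub in submissions
            if not self.staff_assessment_exists(sub['uuid'])
        ]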
Example #7
    def test_staff_assess_handler_regrade(self, xblock):
        student_item = xblock.get_student_item_dict()

        # Create a submission for the student
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        assessment_copy = copy.copy(STAFF_GOOD_ASSESSMENT)
        assessment_copy['assess_type'] = 'regrade'
        # Submit a staff-assessment
        self.submit_staff_assessment(xblock, submission, assessment=assessment_copy)
        assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
        self.assert_assessment_event_published(xblock, 'openassessmentblock.staff_assess', assessment, type='regrade')
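
Note that copy.copy is a shallow copy: the nested 'options_selected' and 'criterion_feedback' dicts are still shared with STAFF_GOOD_ASSESSMENT. That is safe here because only a top-level key is reassigned, but mutating a nested dict would corrupt the fixture, which is presumably why Example #5 reaches for copy.deepcopy:

# Illustrative only: shallow vs. deep copies of the fixture.
shallow = copy.copy(STAFF_GOOD_ASSESSMENT)
shallow['options_selected']['Form'] = 'Poor'   # also mutates the fixture!

deep = copy.deepcopy(STAFF_GOOD_ASSESSMENT)
deep['options_selected']['Form'] = 'Poor'      # fixture stays intact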
Example #8
    def test_database_filter_error_handling(self, mock_filter):
        # Create a submission
        tim_sub, _ = self._create_student_and_submission("Tim", "Tim's answer")

        # Note that we have to define this side effect *after* creating the submission
        mock_filter.side_effect = DatabaseError("KABOOM!")

        # Try to get the latest staff assessment, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.get_latest_staff_assessment(tim_sub["uuid"])
        self.assertEqual(
            str(context_manager.exception),
            (
                u"An error occurred while retrieving staff assessments for the submission with UUID {uuid}: {ex}"
            ).format(uuid=tim_sub["uuid"], ex="KABOOM!")
        )

        # Try to get staff assessment scores by criteria, handle database errors
        with self.assertRaises(StaffAssessmentInternalError) as context_manager:
            staff_api.get_assessment_scores_by_criteria(tim_sub["uuid"])
        self.assertEqual(
            str(context_manager.exception),
            u"Error getting staff assessment scores for {}".format(tim_sub["uuid"])
        )
Example #9
    def test_invalid_assessment_parameters(self, xblock):
        """ Test for error behavior when inputs are invalid """
        test_submissions = self._create_test_submissions(xblock)
        self.set_staff_access(xblock)

        def build_assessment_with_missing_key(student_id, key_to_delete):
            assessment = self._build_assessment_dict(student_id,
                                                     test_submissions)
            del assessment[key_to_delete]
            return assessment

        alice_no_options_selected = build_assessment_with_missing_key(
            "Alice", "options_selected")
        billy_no_criterion_feedback = build_assessment_with_missing_key(
            "Billy", "criterion_feedback")
        cindy_no_overall_feedback = build_assessment_with_missing_key(
            "Cindy", "overall_feedback")

        derek_good_assessment = self._build_assessment_dict(
            'Derek', test_submissions)

        # Expect the response to fail and for the response to include error info
        payload = [
            alice_no_options_selected, billy_no_criterion_feedback,
            cindy_no_overall_feedback, derek_good_assessment
        ]
        resp = self.request(xblock,
                            'bulk_staff_assess',
                            json.dumps(payload),
                            response_format='json')
        self.assertFalse(resp['success'])
        self.assertEqual(
            resp['msg'],
            "One or more of the submitted assessments is missing required fields"
        )
        self.assertDictEqual(
            resp['errors'], {
                '0': "You must provide options selected in the assessment.",
                '1':
                "You must provide feedback for criteria in the assessment.",
                '2': "You must provide overall feedback in the assessment.",
            })
        # The valid assessment was not included in errors, but was not processed
        derek_assessment = staff_api.get_latest_staff_assessment(
            test_submissions['Derek']['uuid'])
        self.assertIsNone(derek_assessment)
Example #10
    def test_staff_assess_handler(self, xblock):
        student_item = xblock.get_student_item_dict()

        # Create a submission for the student
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a staff-assessment
        self.submit_staff_assessment(xblock,
                                     submission,
                                     assessment=STAFF_GOOD_ASSESSMENT)

        # Expect that a staff-assessment was created
        assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 5)
        self.assertEqual(assessment['points_possible'], 6)
        self.assertEqual(assessment['scorer_id'], 'Bob')
        self.assertEqual(assessment['score_type'], 'ST')
        self.assertEqual(assessment['feedback'], 'Staff: good job!')

        self.assert_assessment_event_published(
            xblock,
            'openassessmentblock.staff_assess',
            assessment,
            type='full-grade')

        parts = assessment['parts']
        parts.sort(key=lambda x: x['option']['name'])

        self.assertEqual(len(parts), 2)
        self.assertEqual(parts[0]['option']['criterion']['name'], 'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], '𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], 'ﻉซƈﻉɭɭﻉกՇ')

        # get the assessment scores by criteria
        assessment_by_crit = staff_api.get_assessment_scores_by_criteria(
            submission["uuid"])
        self.assertEqual(assessment_by_crit['𝓒𝓸𝓷𝓬𝓲𝓼𝓮'], 3)
        self.assertEqual(assessment_by_crit['Form'], 2)

        score = staff_api.get_score(submission["uuid"], None)
        self.assertEqual(assessment['points_earned'], score['points_earned'])
        self.assertEqual(assessment['points_possible'],
                         score['points_possible'])
Example #11
    def assert_submission_created(self, user, expected_graded_by,
                                  expected_locked_by):
        submission = sub_api.get_submissions(
            student_item(user, self.mock_block.location))[0]
        workflow = workflow_api.get_workflow_for_submission(
            submission['uuid'], None)
        assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
        lock = SubmissionGradingLock.get_submission_lock(submission['uuid'])

        if expected_graded_by:
            assert workflow['status'] == 'done'
            assert assessment['scorer_id'] == anonymous_user_id(
                expected_graded_by)
        else:
            assert workflow['status'] == 'waiting'
            assert assessment is None

        if expected_locked_by:
            assert lock is not None
            assert lock.owner_id == anonymous_user_id(expected_locked_by)
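
With this helper, the step-by-step checks in Example #2 collapse to calls like the following; the argument values mirror that example:

        # Hypothetical usage, mirroring the assertions in Example #2:
        self.assert_submission_created(USERNAME_1,
                                       expected_graded_by=STAFF_USER_2,
                                       expected_locked_by=STAFF_USER_1)
        self.assert_submission_created(USERNAME_2,
                                       expected_graded_by=None,
                                       expected_locked_by=None)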
Example #12
    def test_staff_assess_handler(self, xblock):
        student_item = xblock.get_student_item_dict()

        # Create a submission for the student
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a staff-assessment
        self.submit_staff_assessment(xblock, submission, assessment=STAFF_GOOD_ASSESSMENT)

        # Expect that a staff-assessment was created
        assessment = staff_api.get_latest_staff_assessment(submission['uuid'])
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 5)
        self.assertEqual(assessment['points_possible'], 6)
        self.assertEqual(assessment['scorer_id'], 'Bob')
        self.assertEqual(assessment['score_type'], 'ST')
        self.assertEqual(assessment['feedback'], u'Staff: good job!')

        parts = sorted(assessment['parts'],
                       key=lambda part: part['option']['name'])
        self.assertEqual(len(parts), 2)
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')

        # get the assessment scores by criteria
        assessment_by_crit = staff_api.get_assessment_scores_by_criteria(submission["uuid"])
        self.assertEqual(assessment_by_crit[u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮'], 3)
        self.assertEqual(assessment_by_crit[u'Form'], 2)

        score = staff_api.get_score(submission["uuid"], None)
        self.assertEqual(assessment['points_earned'], score['points_earned'])
        self.assertEqual(assessment['points_possible'], score['points_possible'])

        self.assert_assessment_event_published(
            xblock, 'openassessmentblock.staff_assess', assessment, type='full-grade'
        )
Example #13
    def staff_assessment_exists(self, submission_uuid):
        """
        Returns True if there exists a staff assessment for the given uuid. False otherwise.
        """

        return staff_api.get_latest_staff_assessment(submission_uuid) is not None
Example #14
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string) and context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
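
The template path comes first in the returned tuple, so a call site unpacks it like this (the caller shown is illustrative):

        # Illustrative call site: template path first, context second.
        template_path, context = self.render_grade_complete(workflow)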
Example #15
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string) and context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid,
                               self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment) for
                peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        raw_staff_assessment = staff_api.get_latest_staff_assessment(
            submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(
                raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission,
                                                         self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #16
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(
            submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(
                submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid,
                                   self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(
                    self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(
                    staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(
            workflow['submission_uuid'])

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #17
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #18
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen()
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #19
def staff_notification():
    """
    Send ORA statistics to the staff users of each course.
    """
    try:
        course_data = CourseOverview.objects.all()
        for cid in course_data:
            assessment_data = AssessmentWorkflow.objects.filter(
                course_id=cid.id)

            # Collect the items that still have submissions without a staff grade.
            item_data = []
            for sid in assessment_data:
                if not staff.get_latest_staff_assessment(sid.submission_uuid):
                    if sid.item_id not in item_data:
                        item_data.append(sid.item_id)
            # item_data = AssessmentWorkflow.objects.filter(
            #     course_id=cid.id).values_list('item_id', flat=True)
            # item_data = list(set(item_data))
            for iid in item_data:
                statistics = api.get_status_counts(
                    cid.id, iid, ["staff", "peer", "done", "waiting"])
                modified_statistics = dict()
                for stat in statistics:
                    modified_statistics[stat.get('status')] = stat.get('count')

                statistics = modified_statistics

                # Nothing is waiting for a grade on this item; skip it
                # (the original `return` here would have aborted the whole job).
                if (statistics['staff'] == 0 and statistics['peer'] == 0
                        and statistics['waiting'] == 0):
                    continue

                course_struct = None
                chapter_name = None

                try:
                    course_struct = CourseStructure.objects.get(course_id=cid.id)
                except Exception as e:
                    print("Unexpected error {0}".format(e))

                if course_struct:
                    block = json.loads(course_struct.structure_json)['blocks'][iid]
                    chapter_name = block['display_name']

                staff_users = CourseAccessRole.objects.filter(course_id=cid.id,
                                                              role='staff')
                try:
                    usage_key = UsageKey.from_string(iid).replace(course_key=cid.id)
                    (course_key, chapter, section, vertical_unused,
                     position, final_target_id) = path_to_location(modulestore(), usage_key)
                    current_site_domain = 'http://{0}'.format(settings.SITE_NAME)
                    courseware_url = '{0}/courses/{1}/courseware/{2}/{3}'.format(
                        current_site_domain, cid.id, chapter, section)
                    for u in staff_users:
                        html_message = render_to_string(
                            'peer_grading/ora_report.html',
                            {'status_counts': modified_statistics,
                             'course': cid.display_name,
                             'chapter_name': chapter_name,
                             'user': u.user,
                             'courseware_url': courseware_url})
                        email = EmailMessage(
                            "LYNX Online-Training: Neue Aufgaben zur Bewertung",
                            html_message,
                            to=[u.user.email])
                        email.send()
                        TASK_LOG.info("----------Email message sent to course admins----------")
                except Exception as e:
                    TASK_LOG.info("----------Inner Exception while sending staff notification----------")
                    import traceback
                    print(traceback.format_exc())
                    print(e, "Inner Exception<-------")
    except Exception as e:
        import traceback
        print(traceback.format_exc())
        print(e, "<--- Error")
Example #20
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]