Example #1
    def test_grade_essay_all_feedback_only_criteria(self):
        # Modify the rubric to include only feedback-only criteria
        rubric = copy.deepcopy(RUBRIC)
        for criterion in rubric['criteria']:
            criterion['options'] = []

        # Train classifiers for the rubric
        train_classifiers(rubric, {})

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid,
                       rubric=rubric,
                       algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that all assessment parts have feedback set to an empty string
        for part in assessment['parts']:
            self.assertEqual(part['feedback'], u"")

        # Check the scores by criterion dict
        # Since none of the criteria had options, the scores should all default to 0
        score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        self.assertItemsEqual(score_dict, {
            u"vøȼȺƀᵾłȺɍɏ": 0,
            u"ﻭɼค๓๓คɼ": 0,
        })
Example #2
    def test_grade_essay_feedback_only_criterion(self):
        # Modify the rubric to include a feedback-only criterion
        # (a criterion with no options, just written feedback)
        rubric = copy.deepcopy(RUBRIC)
        rubric['criteria'].append({
            'name': 'feedback only',
            'prompt': 'feedback',
            'options': []
        })

        # Train classifiers for the rubric
        train_classifiers(rubric, self.CLASSIFIER_SCORE_OVERRIDES)

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that the criteria with options were given scores
        # (from the score override used by our fake classifiers)
        self.assertEqual(assessment['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)
        self.assertEqual(assessment['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(assessment['parts'][1]['option']['points'], 2)

        # Verify that the criterion with no options (feedback only)
        # has no score and empty feedback
        self.assertEqual(assessment['parts'][2]['criterion']['name'], u"feedback only")
        self.assertIs(assessment['parts'][2]['option'], None)
        self.assertEqual(assessment['parts'][2]['feedback'], u"")

        # Check the scores by criterion dict
        score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        self.assertEqual(score_dict[u"vøȼȺƀᵾłȺɍɏ"], 1)
        self.assertEqual(score_dict[u"ﻭɼค๓๓คɼ"], 2)
        self.assertEqual(score_dict['feedback only'], 0)
Example #3
 def _ai_assess(sub):
     """
     Helper to fulfill ai assessment requirements.
     """
     # Note that CLASSIFIER_SCORE_OVERRIDES matches OPTIONS_SELECTED_DICT['most'] scores
     train_classifiers(RUBRIC, AIGradingTest.CLASSIFIER_SCORE_OVERRIDES)
     ai_api.on_init(sub, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)
     return ai_api.get_latest_assessment(sub)
Example #4
    def test_get_assessment_scores_by_criteria(self):
        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        assessment_score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name]['score_override']
            self.assertEqual(assessment_score_dict[criterion_name], expected_score)
Example #5
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #6
    def test_get_assessment_scores_by_criteria(self):
        ai_api.on_init(self.submission_uuid,
                       rubric=RUBRIC,
                       algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        assessment_score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name][
                'score_override']
            self.assertEqual(assessment_score_dict[criterion_name],
                             expected_score)
Example #7
    def test_grade_essay(self):
        # Schedule a grading task
        # Because Celery is configured in "always eager" mode, this will
        # be executed synchronously.
        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name]['score_override']
            self.assertEqual(part['option']['points'], expected_score)

        score = ai_api.get_score(self.submission_uuid, {})
        self.assertEqual(score["points_possible"], 4)
        self.assertEqual(score["points_earned"], 3)
Example #8
    def test_grade_essay(self):
        # Schedule a grading task
        # Because Celery is configured in "always eager" mode, this will
        # be executed synchronously.
        ai_api.on_init(self.submission_uuid,
                       rubric=RUBRIC,
                       algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name][
                'score_override']
            self.assertEqual(part['option']['points'], expected_score)

        score = ai_api.get_score(self.submission_uuid, {})
        self.assertEqual(score["points_possible"], 4)
        self.assertEqual(score["points_earned"], 3)
Example #9
    def test_grade_essay_feedback_only_criterion(self):
        # Modify the rubric to include a feedback-only criterion
        # (a criterion with no options, just written feedback)
        rubric = copy.deepcopy(RUBRIC)
        rubric['criteria'].append({
            'name': 'feedback only',
            'prompt': 'feedback',
            'options': []
        })

        # Train classifiers for the rubric
        train_classifiers(rubric, self.CLASSIFIER_SCORE_OVERRIDES)

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid,
                       rubric=rubric,
                       algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that the criteria with options were given scores
        # (from the score override used by our fake classifiers)
        self.assertEqual(assessment['parts'][0]['criterion']['name'],
                         u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)
        self.assertEqual(assessment['parts'][1]['criterion']['name'],
                         u"ﻭɼค๓๓คɼ")
        self.assertEqual(assessment['parts'][1]['option']['points'], 2)

        # Verify that the criterion with no options (feedback only)
        # has no score and empty feedback
        self.assertEqual(assessment['parts'][2]['criterion']['name'],
                         u"feedback only")
        self.assertIs(assessment['parts'][2]['option'], None)
        self.assertEqual(assessment['parts'][2]['feedback'], u"")

        # Check the scores by criterion dict
        score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        self.assertEqual(score_dict[u"vøȼȺƀᵾłȺɍɏ"], 1)
        self.assertEqual(score_dict[u"ﻭɼค๓๓คɼ"], 2)
        self.assertEqual(score_dict['feedback only'], 0)
Example #10
    def test_grade_essay_all_feedback_only_criteria(self):
        # Modify the rubric to include only feedback-only criteria
        rubric = copy.deepcopy(RUBRIC)
        for criterion in rubric['criteria']:
            criterion['options'] = []

        # Train classifiers for the rubric
        train_classifiers(rubric, {})

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that all assessment parts have feedback set to an empty string
        for part in assessment['parts']:
            self.assertEqual(part['feedback'], u"")

        # Check the scores by criterion dict
        # Since none of the criteria had options, the scores should all default to 0
        score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        self.assertItemsEqual(score_dict, {
            u"vøȼȺƀᵾłȺɍɏ": 0,
            u"ﻭɼค๓๓คɼ": 0,
        })
Example #11
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #12
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(
                            file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(
                submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(
                submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #13
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #14
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_username (unicode): The username of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        anonymous_user_id = None
        submissions = None
        student_item = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page.  "
                        u"The student username is '{student_username}', and the file key is {file_key}"
                    ).format(student_username=student_username, file_key=file_key)
                    logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
        if workflow_cancellation:
            workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
            'submission': submission,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #15
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        student_item = self.get_student_item_dict()
        scores = {}
        problem_closed = None

        if student_id:
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

            # Get the data we need for instructor override of the student's score
            rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            scores = peer_api.get_data_for_override_score(
                submission_uuid,
                student_item,
                rubric_dict,
            )
            problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'scores': scores,
            'problem_closed': problem_closed,
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #16
 def test_get_latest_assessment_database_error(self, mock_call):
     mock_call.side_effect = DatabaseError("KABOOM!")
     with self.assertRaises(AIGradingInternalError):
         ai_api.get_latest_assessment(self.submission_uuid)
Example #17
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #18
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid))

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #19
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                example_based_assessment=example_based_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
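Most of the test examples above exercise the same basic flow: train (stub) classifiers for a rubric, schedule grading with ai_api.on_init, then read the results back with ai_api.get_latest_assessment and ai_api.get_assessment_scores_by_criteria. Below is a minimal sketch of that flow, assuming the openassessment.assessment.api.ai import path and a train_classifiers test stub like the one used in the examples above; it is illustrative only, not part of the original sources.

# Condensed sketch of the grading flow exercised by the tests above.
# Assumptions: the import path matches edx-ora2's layout, and
# train_classifiers is the same test stub used in the examples.
from openassessment.assessment.api import ai as ai_api

def grade_submission(submission_uuid, rubric, algorithm_id, score_overrides):
    """Train stub classifiers, schedule AI grading, and return the results."""
    # Stub out classifier training so grading can run without a real ML backend.
    train_classifiers(rubric, score_overrides)

    # Schedule the grading task; with Celery in "always eager" mode this
    # executes synchronously, so results are available immediately.
    ai_api.on_init(submission_uuid, rubric=rubric, algorithm_id=algorithm_id)

    # Fetch the latest AI assessment and its per-criterion scores.
    assessment = ai_api.get_latest_assessment(submission_uuid)
    scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
    return assessment, scores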