Example #1
    def test_grade_essay_feedback_only_criterion(self):
        # Modify the rubric to include a feedback-only criterion
        # (a criterion with no options, just written feedback)
        rubric = copy.deepcopy(RUBRIC)
        rubric['criteria'].append({
            'name': 'feedback only',
            'prompt': 'feedback',
            'options': []
        })

        # Train classifiers for the rubric
        train_classifiers(rubric, self.CLASSIFIER_SCORE_OVERRIDES)

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that the criteria with options were given scores
        # (from the score override used by our fake classifiers)
        self.assertEqual(assessment['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)
        self.assertEqual(assessment['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(assessment['parts'][1]['option']['points'], 2)

        # Verify that the criterion with no options (feedback only)
        # has no score and empty feedback
        self.assertEqual(assessment['parts'][2]['criterion']['name'], u"feedback only")
        self.assertIs(assessment['parts'][2]['option'], None)
        self.assertEqual(assessment['parts'][2]['feedback'], u"")

        # Check the scores by criterion dict
        score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        self.assertEqual(score_dict[u"vøȼȺƀᵾłȺɍɏ"], 1)
        self.assertEqual(score_dict[u"ﻭɼค๓๓คɼ"], 2)
        self.assertEqual(score_dict['feedback only'], 0)
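
As a standalone illustration of the behaviour this test asserts, the sketch below shows how a feedback-only criterion (an empty 'options' list) ends up with a default score of 0. The simplified rubric and the aggregation helper here are hypothetical stand-ins, not part of the project API.

import copy

# Shape used by the tests: a criterion with an empty 'options' list collects
# written feedback only and has no points attached.
SAMPLE_RUBRIC = {
    'criteria': [
        {'name': 'vocabulary', 'prompt': 'How rich is the vocabulary?',
         'options': [{'name': 'good', 'points': 1}, {'name': 'excellent', 'points': 2}]},
    ]
}

rubric = copy.deepcopy(SAMPLE_RUBRIC)
rubric['criteria'].append({'name': 'feedback only', 'prompt': 'feedback', 'options': []})

def scores_by_criteria(rubric, selected_points):
    # Hypothetical helper mirroring what the tests expect from
    # get_assessment_scores_by_criteria: criteria without options default to 0.
    return {
        criterion['name']: selected_points.get(criterion['name'], 0)
        for criterion in rubric['criteria']
    }

print(scores_by_criteria(rubric, {'vocabulary': 1}))
# {'vocabulary': 1, 'feedback only': 0}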
Example #2
    def test_grade_essay_all_feedback_only_criteria(self):
        # Modify the rubric to include only feedback-only criteria
        rubric = copy.deepcopy(RUBRIC)
        for criterion in rubric['criteria']:
            criterion['options'] = []

        # Train classifiers for the rubric
        train_classifiers(rubric, {})

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid,
                       rubric=rubric,
                       algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that all assessment parts have feedback set to an empty string
        for part in assessment['parts']:
            self.assertEqual(part['feedback'], u"")

        # Check the scores by criterion dict
        # Since none of the criteria had options, the scores should all default to 0
        score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        self.assertItemsEqual(score_dict, {
            u"vøȼȺƀᵾłȺɍɏ": 0,
            u"ﻭɼค๓๓คɼ": 0,
        })
Example #3
    def test_get_assessment_scores_by_criteria(self):
        ai_api.on_init(self.submission_uuid, rubric=RUBRIC, algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        assessment_score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name]['score_override']
            self.assertEqual(assessment_score_dict[criterion_name], expected_score)
Example #4
    def test_get_assessment_scores_by_criteria(self):
        ai_api.on_init(self.submission_uuid,
                       rubric=RUBRIC,
                       algorithm_id=ALGORITHM_ID)

        # Verify that we got the scores we provided to the stub AI algorithm
        assessment = ai_api.get_latest_assessment(self.submission_uuid)
        assessment_score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        for part in assessment['parts']:
            criterion_name = part['option']['criterion']['name']
            expected_score = self.CLASSIFIER_SCORE_OVERRIDES[criterion_name][
                'score_override']
            self.assertEqual(assessment_score_dict[criterion_name],
                             expected_score)
Example #5
    def test_grade_essay_feedback_only_criterion(self):
        # Modify the rubric to include a feedback-only criterion
        # (a criterion with no options, just written feedback)
        rubric = copy.deepcopy(RUBRIC)
        rubric['criteria'].append({
            'name': 'feedback only',
            'prompt': 'feedback',
            'options': []
        })

        # Train classifiers for the rubric
        train_classifiers(rubric, self.CLASSIFIER_SCORE_OVERRIDES)

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid,
                       rubric=rubric,
                       algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that the criteria with options were given scores
        # (from the score override used by our fake classifiers)
        self.assertEqual(assessment['parts'][0]['criterion']['name'],
                         u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)
        self.assertEqual(assessment['parts'][1]['criterion']['name'],
                         u"ﻭɼค๓๓คɼ")
        self.assertEqual(assessment['parts'][1]['option']['points'], 2)

        # Verify that the criterion with no options (feedback only)
        # has no score and empty feedback
        self.assertEqual(assessment['parts'][2]['criterion']['name'],
                         u"feedback only")
        self.assertIs(assessment['parts'][2]['option'], None)
        self.assertEqual(assessment['parts'][2]['feedback'], u"")

        # Check the scores by criterion dict
        score_dict = ai_api.get_assessment_scores_by_criteria(
            self.submission_uuid)
        self.assertEqual(score_dict[u"vøȼȺƀᵾłȺɍɏ"], 1)
        self.assertEqual(score_dict[u"ﻭɼค๓๓คɼ"], 2)
        self.assertEqual(score_dict['feedback only'], 0)
Example #6
    def test_grade_essay_all_feedback_only_criteria(self):
        # Modify the rubric to include only feedback-only criteria
        rubric = copy.deepcopy(RUBRIC)
        for criterion in rubric['criteria']:
            criterion['options'] = []

        # Train classifiers for the rubric
        train_classifiers(rubric, {})

        # Schedule a grading task and retrieve the assessment
        ai_api.on_init(self.submission_uuid, rubric=rubric, algorithm_id=ALGORITHM_ID)
        assessment = ai_api.get_latest_assessment(self.submission_uuid)

        # Verify that all assessment parts have feedback set to an empty string
        for part in assessment['parts']:
            self.assertEqual(part['feedback'], u"")

        # Check the scores by criterion dict
        # Since none of the criteria had options, the scores should all default to 0
        score_dict = ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
        self.assertItemsEqual(score_dict, {
            u"vøȼȺƀᵾłȺɍɏ": 0,
            u"ﻭɼค๓๓คɼ": 0,
        })
Example #7
    def grade_details(
            self, submission_uuid, peer_assessments, self_assessment, example_based_assessment, staff_assessment,
            is_staff=False
    ):
        """
        Returns details about the grade assigned to the submission.

        Args:
            submission_uuid (str): The id of the submission being graded.
            peer_assessments (list of dict): Serialized assessment models from the peer API.
            self_assessment (dict): Serialized assessment model from the self API.
            example_based_assessment (dict): Serialized assessment model from the example-based API.
            staff_assessment (dict): Serialized assessment model from the staff API.
            is_staff (bool): True if the grade details are being displayed to staff, else False.
                Default value is False (meaning grade details are being shown to the learner).

        Returns:
            A dictionary with full details about the submission's grade.

        Example:
            {
                criteria: [{
                    'label': 'Test name',
                    'name': 'f78ac7d4ca1e4134b0ba4b40ca212e72',
                    'prompt': 'Test prompt',
                    'order_num': 2,
                    'options': [...],
                    'feedback': [
                        'Good job!',
                        'Excellent work!',
                    ]
                }],
                additional_feedback: [{
                }]
                ...
            }
        """
        criteria = copy.deepcopy(self.rubric_criteria_with_labels)

        def has_feedback(assessments):
            """
            Returns True if at least one assessment has feedback.

            Args:
                assessments: A list of assessments

            Returns:
                True if at least one assessment has feedback, False otherwise.
            """
            return any(
                assessment.get('feedback', None) or has_feedback(assessment.get('individual_assessments', []))
                for assessment in assessments
            )

        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        assessment_steps = self.assessment_steps
        if staff_assessment:
            median_scores = staff_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        for criterion in criteria:
            criterion_name = criterion['name']

            # Record assessment info for the current criterion
            criterion['assessments'] = self._graded_assessments(
                submission_uuid, criterion,
                assessment_steps,
                staff_assessment,
                peer_assessments,
                example_based_assessment,
                self_assessment,
                is_staff=is_staff,
            )

            # Record whether there is any feedback provided in the assessments
            criterion['has_feedback'] = has_feedback(criterion['assessments'])

            # Although we prevent course authors from modifying criteria post-release,
            # it's still possible for assessments created by course staff to
            # have criteria that differ from the current problem definition.
            # It's also possible to circumvent the post-release restriction
            # if course authors directly import a course into Studio.
            # If this happens, we simply leave the score blank so that the grade
            # section can render without error.
            criterion['median_score'] = median_scores.get(criterion_name, '')
            criterion['total_value'] = max_scores.get(criterion_name, '')

        return {
            'criteria': criteria,
            'additional_feedback': self._additional_feedback(
                staff_assessment=staff_assessment,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
            ),
        }
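
The chain of if/elif branches in grade_details picks a single source for the per-criterion scores: staff scores take precedence, then peer medians, then example-based (AI) scores, then self-assessment scores. Below is a self-contained sketch of that precedence with plain callables standing in for the *_api modules; the helper name and the 'fetch' mapping are illustrative only.

def pick_median_scores(assessment_steps, staff_assessment, fetch):
    # Mirrors the precedence in grade_details: staff beats peer beats
    # example-based beats self. 'fetch' maps a step name to a callable
    # standing in for the corresponding assessment API.
    if staff_assessment:
        return fetch['staff']()
    if 'peer-assessment' in assessment_steps:
        return fetch['peer']()
    if 'example-based-assessment' in assessment_steps:
        return fetch['example']()
    if 'self-assessment' in assessment_steps:
        return fetch['self']()
    return None

# A peer workflow with no staff assessment falls through to the peer medians.
scores = pick_median_scores(
    ['peer-assessment', 'self-assessment'],
    staff_assessment=None,
    fetch={
        'staff': lambda: {'vocabulary': 3},
        'peer': lambda: {'vocabulary': 2, 'grammar': 1},
        'example': lambda: {},
        'self': lambda: {'vocabulary': 1},
    },
)
print(scores)  # {'vocabulary': 2, 'grammar': 1}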
Example #8
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Gather the per-step assessment data (peer, self, and example-based)
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid))

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #9
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Gather the per-step assessment data (peer, self, and example-based)
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
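
The comment about the workflow score points at a case any display code has to tolerate: workflow['score'] can be None even when the workflow is "done", if every criterion is feedback-only. A minimal sketch of that guard follows; the 'points_earned'/'points_possible' key names are an assumption for illustration, not taken from the excerpt.

def points_summary(score):
    # The workflow score can legitimately be None when the rubric contains
    # only feedback-only criteria, so treat that as "no points", not an error.
    if score is None:
        return 'No points are associated with this response'
    # Key names are assumed for illustration.
    return '{earned} out of {possible} points'.format(
        earned=score['points_earned'], possible=score['points_possible']
    )

print(points_summary(None))
print(points_summary({'points_earned': 3, 'points_possible': 5}))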
Example #10
    def test_error_getting_assessment_scores(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Oh no!")
        ai_api.get_assessment_scores_by_criteria(self.submission_uuid)
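
This excerpt omits the decorator that injects mock_filter and any assertion on the resulting failure. Below is a self-contained sketch of the same pattern using generic stand-ins; the backend object, the helper under test, and the expected exception are hypothetical and do not reflect the project's actual API.

import unittest
from unittest import mock


def get_scores(backend, submission_uuid):
    # Toy stand-in for an API helper that queries assessment rows.
    return {row.criterion: row.points for row in backend.filter(uuid=submission_uuid)}


class ErrorPathTest(unittest.TestCase):
    def test_database_error_surfaces(self):
        backend = mock.Mock()
        # Simulate the database layer failing when the helper queries assessments.
        backend.filter.side_effect = RuntimeError('Oh no!')
        with self.assertRaises(RuntimeError):
            get_scores(backend, 'abcd1234')


if __name__ == '__main__':
    unittest.main()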
Example #11
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Gather the per-step assessment data (peer, self, and example-based)
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)