Example No. 1
 def test_missing_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data("data/rubric/missing_options.json"))
     self.assertEqual(
         cm.exception.errors,
         {"criteria": [{"options": [u"This field is required."]}, {}]},  # No errors in second criterion
     )
Example No. 2
 def test_empty_criteria(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('data/rubric/empty_criteria.json'))
     self.assertEqual(
         cm.exception.errors,
         {'criteria': [u'Must have at least one criterion']}
     )
Example No. 3
 def test_empty_criteria(self):
     with self.assertRaises(InvalidRubric) as criteria_exception_message:
         rubric_from_dict(json_data('data/rubric/empty_criteria.json'))
     self.assertEqual(
         criteria_exception_message.exception.errors,
         {'criteria': ['Must have at least one criterion']}
     )
Example No. 4
 def test_missing_criteria(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('data/rubric/missing_criteria.json'))
     self.assertEqual(
         cm.exception.errors,
         {'criteria': [u'This field is required.']}
     )
Example No. 5
 def test_missing_criteria(self):
     with self.assertRaises(InvalidRubric) as criteria_exception_message:
         rubric_from_dict(json_data('data/rubric/missing_criteria.json'))
     self.assertEqual(
         criteria_exception_message.exception.errors,
         {'criteria': ['This field is required.']}
     )
Example No. 6
def validate_rubric(rubric_dict, current_rubric, is_released):
    """
    Check that the rubric is semantically valid.

    Args:
        rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric.
        current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
        is_released (bool): True if and only if the problem has been released.

    Returns:
        tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the assessment is semantically valid
            and msg describes any validation errors found.
    """
    try:
        rubric_from_dict(rubric_dict)
    except InvalidRubric:
        return (False, u'This rubric definition is not valid.')

    # No duplicate criteria names
    duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])
    if len(duplicates) > 0:
        msg = u"Criteria duplicate name(s): {duplicates}".format(
            duplicates=", ".join(duplicates)
        )
        return (False, msg)

    # No duplicate option names within a criterion
    for criterion in rubric_dict['criteria']:
        duplicates = _duplicates([option['name'] for option in criterion['options']])
        if len(duplicates) > 0:
            msg = u"Options in '{criterion}' have duplicate name(s): {duplicates}".format(
                criterion=criterion['name'], duplicates=", ".join(duplicates)
            )
            return (False, msg)

    # After a problem is released, authors are allowed to change text,
    # but nothing that would change the point value of a rubric.
    if is_released:

        # Number of criteria must be the same
        if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
            return (False, u'The number of criteria cannot be changed after a problem is released.')

        # Number of options for each criterion must be the same
        for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
            if len(new_criterion['options']) != len(old_criterion['options']):
                return (False, u'The number of options cannot be changed after a problem is released.')

            else:
                for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
                    if new_option['points'] != old_option['points']:
                        return (False, u'Point values cannot be changed after a problem is released.')

    return (True, u'')
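
The helpers _duplicates and _match_by_order are not shown in this snippet. A minimal sketch of what they plausibly do, inferred only from how they are called above (the names exist in the project, but these bodies are assumptions, not the actual implementation):

from collections import Counter


def _duplicates(names):
    # Assumed behavior: return the set of names that occur more than once,
    # so the caller can test len(duplicates) and join them into a message.
    counts = Counter(names)
    return {name for name, count in counts.items() if count > 1}


def _match_by_order(new_items, old_items):
    # Assumed behavior: pair criteria (or options) from the updated and the
    # current rubric by their 'order_num' field, yielding (new, old) tuples.
    def order(item):
        return item.get('order_num', 0)
    return zip(sorted(new_items, key=order), sorted(old_items, key=order))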
Example No. 7
 def test_empty_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data("data/rubric/empty_options.json"))
     self.assertEqual(
         cm.exception.errors,
         {
             "criteria": [
                 {},  # There are no errors in the first criterion
                 {"options": [u"Criterion must have at least one option."]},
             ]
         },
     )
Example No. 8
 def test_missing_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('rubric_data/missing_options.json'))
     self.assertEqual(
         cm.exception.errors,
         {
             'criteria': [
                 {'options': [u'This field is required.']},
                 {}  # No errors in second criterion
             ]
         }
     )
Example No. 9
 def test_empty_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('rubric_data/empty_options.json'))
     self.assertEqual(
         cm.exception.errors,
         {
             'criteria': [
                 {},  # There are no errors in the first criterion
                 {'options': [u'Criterion must have at least one option.']}
             ]
         }
     )
Example No. 10
    def test_rubric_only_created_once(self):
        # Make sure sending the same Rubric data twice only creates one Rubric,
        # and returns a reference to it the next time.
        rubric_data = json_data('data/rubric/project_plan_rubric.json')

        r1 = rubric_from_dict(rubric_data)

        with self.assertNumQueries(1):
            # Just the select -- shouldn't need the create queries
            r2 = rubric_from_dict(rubric_data)

        self.assertEqual(r1.id, r2.id)
        r1.delete()
Example No. 11
 def test_missing_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('data/rubric/missing_options.json'))
     self.assertEqual(
         cm.exception.errors,
         {
             'criteria': [
                 {
                     'options': [u'This field is required.']
                 },
                 {}  # No errors in second criterion
             ]
         })
Example No. 12
    def test_rubric_only_created_once(self):
        # Make sure sending the same Rubric data twice only creates one Rubric,
        # and returns a reference to it the next time.
        rubric_data = json_data("data/rubric/project_plan_rubric.json")

        r1 = rubric_from_dict(rubric_data)

        with self.assertNumQueries(1):
            # Just the select -- shouldn't need the create queries
            r2 = rubric_from_dict(rubric_data)

        self.assertEqual(r1.id, r2.id)
        r1.delete()
Example No. 13
 def test_empty_options(self):
     with self.assertRaises(InvalidRubric) as cm:
         rubric_from_dict(json_data('data/rubric/empty_options.json'))
     self.assertEqual(
         cm.exception.errors,
         {
             'criteria': [
                 {},  # There are no errors in the first criterion
                 {
                     'options':
                     [u'Criterion must have at least one option.']
                 }
             ]
         })
Example No. 14
    def setUp(self):
        """
        Create a new grading workflow.
        """
        self.rubric = rubric_from_dict(RUBRIC)
        self.workflow = AIGradingWorkflow.objects.create(
            submission_uuid='test', essay_text='test',
            rubric=self.rubric, algorithm_id=self.ALGORITHM_ID,
            item_id=self.ITEM_ID, course_id=self.COURSE_ID
        )

        # Create a rubric with a similar structure, but different prompt
        similar_rubric_dict = copy.deepcopy(RUBRIC)
        similar_rubric_dict['prompt'] = 'Different prompt!'
        self.similar_rubric = rubric_from_dict(similar_rubric_dict)
Example No. 15
    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback only": u"enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criterion with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
Example No. 16
    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission-UUID", "PE")
        selected = {
            "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
            "ﻭɼค๓๓คɼ": "єχ¢єℓℓєηт",
        }
        feedback = {
            "feedback only": "enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criterion with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], "vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], "𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], "ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], "єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], "feedback only")
Example No. 17
def get_data_for_override_score(submission_uuid, student_item, rubric_dict):
    """
    Gets the data needed to render the score override section of the student data page.

    Args:
        submission_uuid (string): UUID of the student's submission
        student_item (dict): The dictionary representation of a student item.
        rubric_dict (dict): The dictionary representation of a rubric

    Returns:
        scores (dict): points_earned, points_possible, points_override
    """
    scores = {}
    score_data = sub_api.get_latest_score_for_submission(submission_uuid)
    if score_data:
        scores["points_earned"] = score_data.get("points_earned")
        scores["points_possible"] = score_data.get("points_possible")

    # If there is no Score, retrieve points_possible from the rubric as there may not be any assessments yet.
    if not scores.get("points_possible"):
        try:
            rubric = rubric_from_dict(rubric_dict)
        except InvalidRubric:
            points_possible = None
        else:
            points_possible = rubric.points_possible

        scores["points_possible"] = points_possible
    latest_override_score = sub_api.get_score_override(student_item)
    if latest_override_score:
        scores["points_override"] = latest_override_score["points_earned"]

    return scores
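
For reference, a hypothetical return value (illustration only): points_earned appears only when a latest score exists, points_possible falls back to the rubric's total (or None if the rubric is invalid), and points_override appears only when an override has been recorded.

# Hypothetical result, illustration only.
scores = {
    "points_earned": 8,       # from the latest score, if any
    "points_possible": 10,    # from the score or, failing that, the rubric
    "points_override": 9,     # only present when an override exists
}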
Example No. 18
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions/workflows in.
            item_id (unicode): The ID of the problem in the course.
            num_submissions (int): The number of submissions/workflows to create.
            algorithm_id (unicode): The ID of the ML algorithm to use ("fake" or "ease")

        Raises:
            CommandError

        """
        if len(args) < 4:
            raise CommandError(u"Usage: simulate_ai_grading_error {}".format(
                self.args))

        # Parse arguments
        course_id = args[0].decode('utf-8')
        item_id = args[1].decode('utf-8')
        num_submissions = int(args[2])
        algorithm_id = args[3].decode('utf-8')

        # Create the rubric model
        rubric = rubric_from_dict(self.RUBRIC)

        # Train classifiers
        print u"Training classifiers using {algorithm_id}...".format(
            algorithm_id=algorithm_id)
        algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
        classifier_data = {
            criterion_name: algorithm.train_classifier(example)
            for criterion_name, example in self.EXAMPLES.iteritems()
        }
        print u"Successfully trained classifiers."

        # Create the classifier set
        classifier_set = AIClassifierSet.create_classifier_set(
            classifier_data, rubric, algorithm_id, course_id, item_id)
        print u"Successfully created classifier set with id {}".format(
            classifier_set.pk)

        # Create submissions and grading workflows
        for num in range(num_submissions):
            student_item = {
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment',
                'student_id': "{base}_{num}".format(base=self.STUDENT_ID,
                                                    num=num)
            }
            submission = sub_api.create_submission(student_item, self.ANSWER)
            workflow = AIGradingWorkflow.start_workflow(
                submission['uuid'], self.RUBRIC, algorithm_id)
            workflow.classifier_set = classifier_set
            workflow.save()
            print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
                num=num, uuid=workflow.uuid)
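
Judging from the usage string and the documented arguments, the management command would presumably be invoked along these lines (placeholders, not real IDs):

# Hypothetical invocation, illustration only; per the docstring, algorithm_id
# is either "fake" or "ease".
#
#   python manage.py simulate_ai_grading_error <course_id> <item_id> <num_submissions> <algorithm_id>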
Example No. 19
 def _create_classifier(self):
     """
     Create and return an AIClassifier.
     """
     rubric = rubric_from_dict(RUBRIC)
     classifier_set = AIClassifierSet.create_classifier_set(
         CLASSIFIERS_DICT, rubric, "test_algorithm", COURSE_ID, ITEM_ID)
     return AIClassifier.objects.filter(classifier_set=classifier_set)[0]
Example No. 20
    def setUp(self):
        """
        Create a new grading workflow.
        """
        self.rubric = rubric_from_dict(RUBRIC)
        self.workflow = AIGradingWorkflow.objects.create(
            submission_uuid='test',
            essay_text='test',
            rubric=self.rubric,
            algorithm_id=self.ALGORITHM_ID,
            item_id=self.ITEM_ID,
            course_id=self.COURSE_ID)

        # Create a rubric with a similar structure, but different prompt
        similar_rubric_dict = copy.deepcopy(RUBRIC)
        similar_rubric_dict['prompt'] = 'Different prompt!'
        self.similar_rubric = rubric_from_dict(similar_rubric_dict)
Example No. 21
    def handle(self, *args, **options):
        """
        Execute the command.

        Args:
            course_id (unicode): The ID of the course to create submissions/workflows in.
            item_id (unicode): The ID of the problem in the course.
            num_submissions (int): The number of submissions/workflows to create.
            algorithm_id (unicode): The ID of the ML algorithm to use ("fake" or "ease")

        Raises:
            CommandError

        """
        if len(args) < 4:
            raise CommandError(u"Usage: simulate_ai_grading_error {}".format(self.args))

        # Parse arguments
        course_id = args[0].decode('utf-8')
        item_id = args[1].decode('utf-8')
        num_submissions = int(args[2])
        algorithm_id = args[3].decode('utf-8')

        # Create the rubric model
        rubric = rubric_from_dict(self.RUBRIC)

        # Train classifiers
        print u"Training classifiers using {algorithm_id}...".format(algorithm_id=algorithm_id)
        algorithm = AIAlgorithm.algorithm_for_id(algorithm_id)
        classifier_data = {
            criterion_name: algorithm.train_classifier(example)
            for criterion_name, example in self.EXAMPLES.iteritems()
        }
        print u"Successfully trained classifiers."

        # Create the classifier set
        classifier_set = AIClassifierSet.create_classifier_set(
            classifier_data, rubric, algorithm_id, course_id, item_id
        )
        print u"Successfully created classifier set with id {}".format(classifier_set.pk)

        # Create submissions and grading workflows
        for num in range(num_submissions):
            student_item = {
                'course_id': course_id,
                'item_id': item_id,
                'item_type': 'openassessment',
                'student_id': "{base}_{num}".format(base=self.STUDENT_ID, num=num)
            }
            submission = sub_api.create_submission(student_item, self.ANSWER)
            workflow = AIGradingWorkflow.start_workflow(
                submission['uuid'], self.RUBRIC, algorithm_id
            )
            workflow.classifier_set = classifier_set
            workflow.save()
            print u"{num}: Created incomplete grading workflow with UUID {uuid}".format(
                num=num, uuid=workflow.uuid
            )
Example No. 22
def _complete_assessment(
    rubric_dict,
    scorer_id,
    peer_submission_uuid,
    options_selected,
    criterion_feedback,
    scorer_workflow,
    overall_feedback,
    num_required_grades,
    scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric, scorer_id, peer_submission_uuid, PEER_TYPE, scored_at=scored_at, feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment
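
The dictionaries these assessment helpers consume can be pieced together from the test fixtures above. A hypothetical rubric_dict and selection, for illustration only (real rubrics may carry additional fields beyond the ones shown here):

# Hypothetical input data, illustration only; field names follow the test
# fixtures above (order_num, name, prompt, options, points).
rubric_dict = {
    'prompt': 'Assess the essay.',
    'criteria': [
        {
            'order_num': 0,
            'name': 'clarity',
            'prompt': 'How clear is the essay?',
            'options': [
                {'order_num': 0, 'name': 'Unclear', 'points': 0},
                {'order_num': 1, 'name': 'Very clear', 'points': 2},
            ],
        },
    ],
}

# Criterion names map to the selected option name / free-form feedback.
options_selected = {'clarity': 'Very clear'}
criterion_feedback = {'clarity': 'Well organised throughout.'}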
Example No. 23
def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being
            assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)
    return assessment
Example No. 24
 def _create_classifier(self):
     """
     Create and return an AIClassifier.
     """
     rubric = rubric_from_dict(RUBRIC)
     classifier_set = AIClassifierSet.create_classifier_set(
         CLASSIFIERS_DICT, rubric, "test_algorithm", COURSE_ID, ITEM_ID
     )
     return AIClassifier.objects.filter(classifier_set=classifier_set)[0]
Example No. 25
def _complete_assessment(rubric_dict, scorer_id, peer_submission_uuid,
                         options_selected, criterion_feedback, scorer_workflow,
                         overall_feedback, num_required_grades, scored_at):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(rubric,
                                   scorer_id,
                                   peer_submission_uuid,
                                   PEER_TYPE,
                                   scored_at=scored_at,
                                   feedback=overall_feedback)

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment,
                                            options_selected,
                                            feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment,
                                            num_required_grades)
    return assessment
Example No. 26
 def _rubric_with_one_feedback_only_criterion(self):
     """Create a rubric with one feedback-only criterion."""
     rubric_dict = copy.deepcopy(RUBRIC)
     rubric_dict['criteria'].append({
         "order_num": 2,
         "name": u"feedback",
         "prompt": u"only feedback, no points",
         "options": []
     })
     return rubric_from_dict(rubric_dict)
Example No. 27
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Retrieve the submission text
        # Submissions are arbitrary JSON-blobs, which *should*
        # contain a single key, "answer", containing the essay
        # submission text.  If not, though, assume we've been
        # given the essay text directly (convenient for testing).
        if isinstance(submission, dict):
            essay_text = submission.get('answer')
        else:
            essay_text = unicode(submission)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text,
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric
        )

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example No. 28
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Retrieve the submission text
        # Submissions are arbitrary JSON-blobs, which *should*
        # contain a single key, "answer", containing the essay
        # submission text.  If not, though, assume we've been
        # given the essay text directly (convenient for testing).
        if isinstance(submission, dict):
            essay_text = submission.get('answer')
        else:
            essay_text = unicode(submission)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text,
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric)

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example No. 29
def train_classifiers(rubric_dict, classifier_score_overrides):
    """
    Simple utility function to train classifiers.

    Args:
        rubric_dict (dict): The rubric to train the classifiers on.
        classifier_score_overrides (dict): A dictionary of classifier overrides
            to set the scores for the given submission.

    """
    rubric = rubric_from_dict(rubric_dict)
    AIClassifierSet.create_classifier_set(classifier_score_overrides, rubric,
                                          ALGORITHM_ID, COURSE_ID, ITEM_ID)
Example No. 30
def train_classifiers(rubric_dict, classifier_score_overrides):
    """
    Simple utility function to train classifiers.

    Args:
        rubric_dict (dict): The rubric to train the classifiers on.
        classifier_score_overrides (dict): A dictionary of classifier overrides
            to set the scores for the given submission.

    """
    rubric = rubric_from_dict(rubric_dict)
    AIClassifierSet.create_classifier_set(
        classifier_score_overrides, rubric, ALGORITHM_ID, COURSE_ID, ITEM_ID
    )
Example No. 31
File: self.py  Project: lxp20201/lxp
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Example No. 32
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Example No. 33
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text_from_submission(submission),
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric
        )

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
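
essay_text_from_submission is not shown in this snippet, but Examples No. 27 and 28 inline the same logic; a sketch of what it presumably does (Python 2 era code, hence unicode):

def essay_text_from_submission(submission):
    # Submissions are arbitrary JSON blobs that *should* contain a single
    # "answer" key holding the essay text; otherwise assume we were handed
    # the essay text directly (convenient for testing).
    if isinstance(submission, dict):
        return submission.get('answer')
    return unicode(submission)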
Example No. 34
    def setUp(self):
        """
        Create a submission and grading workflow.
        """
        # Create a submission
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        self.submission_uuid = submission['uuid']

        # Create a workflow for the submission
        workflow = AIGradingWorkflow.start_workflow(self.submission_uuid, RUBRIC, ALGORITHM_ID)
        self.workflow_uuid = workflow.uuid

        # Associate the workflow with classifiers
        rubric = rubric_from_dict(RUBRIC)
        classifier_set = AIClassifierSet.create_classifier_set(
            self.CLASSIFIERS, rubric, ALGORITHM_ID, STUDENT_ITEM.get('course_id'), STUDENT_ITEM.get('item_id')
        )
        workflow.classifier_set = classifier_set
        workflow.save()
Example No. 35
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text_from_submission(submission),
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric)

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example No. 36
    def setUp(self):
        """
        Create a submission and grading workflow.
        """
        # Create a submission
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        self.submission_uuid = submission['uuid']

        # Create a workflow for the submission
        workflow = AIGradingWorkflow.start_workflow(self.submission_uuid,
                                                    RUBRIC, ALGORITHM_ID)
        self.workflow_uuid = workflow.uuid

        # Associate the workflow with classifiers
        rubric = rubric_from_dict(RUBRIC)
        classifier_set = AIClassifierSet.create_classifier_set(
            self.CLASSIFIERS, rubric, ALGORITHM_ID,
            STUDENT_ITEM.get('course_id'), STUDENT_ITEM.get('item_id'))
        workflow.classifier_set = classifier_set
        workflow.save()
Example No. 37
def get_classifier_set_info(rubric_dict, algorithm_id, course_id, item_id):
    """
    Get information about the classifier available for a particular problem.
    This is the classifier that would be selected to grade essays for the problem.

    Args:
        rubric_dict (dict): The serialized rubric model.
        algorithm_id (unicode): The algorithm to use for classification.
        course_id (unicode): The course identifier for the current problem.
        item_id (unicode): The item identifier for the current problem.

    Returns:
        dict with keys 'created_at', 'algorithm_id', 'course_id', and 'item_id'
        Note that course ID and item ID might be different than the current problem
        if a classifier from a different problem with a similar rubric
        is the best available match.

    """
    try:
        rubric = rubric_from_dict(rubric_dict)
        classifier_set = AIClassifierSet.most_recent_classifier_set(
            rubric, algorithm_id, course_id, item_id
        )
        if classifier_set is not None:
            return {
                'created_at': classifier_set.created_at,
                'algorithm_id': classifier_set.algorithm_id,
                'course_id': classifier_set.course_id,
                'item_id': classifier_set.item_id
            }
        else:
            return None
    except InvalidRubric:
        msg = u"Could not retrieve classifier set info: the rubric definition was not valid."
        logger.exception(msg)
        raise AIGradingRequestError(msg)
    except DatabaseError as ex:
        msg = u"An unexpected error occurred while retrieving classifier set info: {ex}".format(ex=ex)
        logger.exception(msg)
        raise AIGradingInternalError(msg)
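
A hypothetical call, for illustration only; the rubric and IDs are placeholders, and the exceptions caught are the ones the function itself raises:

try:
    info = get_classifier_set_info(rubric_dict, 'fake', course_id, item_id)
except (AIGradingRequestError, AIGradingInternalError):
    info = None

if info is not None:
    created_at = info['created_at']      # when the classifier set was trained
    algorithm_id = info['algorithm_id']  # e.g. "fake" or "ease"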
Example No. 38
def validate_rubric(rubric_dict, current_rubric, is_released, _):
    """
    Check that the rubric is semantically valid.

    Args:
        rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric.
        current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
        is_released (bool): True if and only if the problem has been released.
        _ (function): The service function used to get the appropriate i18n text

    Returns:
        tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the assessment is semantically valid
            and msg describes any validation errors found.
    """
    try:
        rubric_from_dict(rubric_dict)
    except InvalidRubric:
        return False, _(u'This rubric definition is not valid.')

    for criterion in rubric_dict['criteria']:
        # No duplicate option names within a criterion
        duplicates = _duplicates(
            [option['name'] for option in criterion['options']])
        if len(duplicates) > 0:
            msg = _(
                u"Options in '{criterion}' have duplicate name(s): {duplicates}"
            ).format(criterion=criterion['name'],
                     duplicates=", ".join(duplicates))
            return False, msg

        # Some criteria may have no options, just written feedback.
        # In this case, written feedback must be required (not optional or disabled).
        if len(criterion['options']) == 0 and criterion.get(
                'feedback', 'disabled') != 'required':
            msg = _(u'Criteria with no options must require written feedback.')
            return False, msg

    # After a problem is released, authors are allowed to change text,
    # but nothing that would change the point value of a rubric.
    if is_released:

        # Number of prompts must be the same
        if len(rubric_dict['prompts']) != len(current_rubric['prompts']):
            return False, _(
                u'Prompts cannot be created or deleted after a problem is released.'
            )

        # Number of criteria must be the same
        if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
            return False, _(
                u'The number of criteria cannot be changed after a problem is released.'
            )

        # Criteria names must be the same
        # We use criteria names as unique identifiers (unfortunately)
        # throughout the system.  Changing them mid-flight can cause
        # the grade page, for example, to raise 500 errors.
        # When we implement non-XML authoring, we might be able to fix this
        # the right way by assigning unique identifiers for criteria;
        # but for now, this is the safest way to avoid breaking problems
        # post-release.
        current_criterion_names = set(
            criterion.get('name') for criterion in current_rubric['criteria'])
        new_criterion_names = set(
            criterion.get('name') for criterion in rubric_dict['criteria'])
        if current_criterion_names != new_criterion_names:
            return False, _(
                u'Criteria names cannot be changed after a problem is released'
            )

        # Number of options for each criterion must be the same
        for new_criterion, old_criterion in _match_by_order(
                rubric_dict['criteria'], current_rubric['criteria']):
            if len(new_criterion['options']) != len(old_criterion['options']):
                return False, _(
                    u'The number of options cannot be changed after a problem is released.'
                )

            else:
                for new_option, old_option in _match_by_order(
                        new_criterion['options'], old_criterion['options']):
                    if new_option['points'] != old_option['points']:
                        return False, _(
                            u'Point values cannot be changed after a problem is released.'
                        )

    return True, u''
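
The extra _ argument in this variant is documented as the service function that returns translated text. When exercising the validator outside the XBlock runtime (for example in a quick check), a pass-through stand-in works; this is an assumption about how the parameter is used, not project-provided code:

# Hypothetical stand-in for the i18n service function, illustration only.
def passthrough_gettext(text):
    return text

is_valid, msg = validate_rubric(rubric_dict, current_rubric, False, passthrough_gettext)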
Example No. 39
 def test_rubric_requires_positive_score(self):
     with self.assertRaises(InvalidRubric):
         rubric_from_dict(json_data("data/rubric/no_points.json"))
Example No. 40
def create_assessment(
    scorer_submission_uuid,
    scorer_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    num_required_grades,
    scored_at=None
):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)
        peer_submission_uuid = peer_workflow_item.author.submission_uuid

        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the peer assessment
        assessment = Assessment.create(
            rubric,
            scorer_id,
            peer_submission_uuid,
            PEER_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # Create assessment parts for each criterion in the rubric
        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"Rubric definition was not valid"
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options selected in the rubric"
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example No. 41
def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    # Ensure that this variable is declared so if an error occurs
    # we don't get an error when trying to log it!
    assessment_dict = None

    try:
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(options_selected)
        except InvalidOptionSelection:
            msg = "Selected options do not match the rubric"
            logger.warning(msg, exc_info=True)
            raise PeerAssessmentRequestError(msg)

        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)

        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": peer_submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": overall_feedback[0:Assessment.MAXSIZE],
        }

        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)

        if not peer_serializer.is_valid():
            msg = (
                u"An error occurred while serializing "
                u"the peer assessment associated with "
                u"the scorer's submission UUID {}."
            ).format(scorer_submission_uuid)
            raise PeerAssessmentRequestError(msg)

        assessment = peer_serializer.save()

        # We do this to do a run around django-rest-framework serializer
        # validation, which would otherwise require two DB queries per
        # option to do validation. We already validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, scorer_workflow)

        return assessment_dict
    except DatabaseError:
        error_message = (
            u"An error occurred while creating assessment {} by: {}"
        ).format(assessment_dict, scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.error(message)
        raise PeerAssessmentWorkflowError(message)
Example No. 42
def validate_rubric(rubric_dict, current_rubric, is_released):
    """
    Check that the rubric is semantically valid.

    Args:
        rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric.
        current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
        is_released (bool): True if and only if the problem has been released.

    Returns:
        tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the assessment is semantically valid
            and msg describes any validation errors found.
    """
    try:
        rubric_from_dict(rubric_dict)
    except InvalidRubric:
        return (False, u'This rubric definition is not valid.')

    # No duplicate criteria names
    duplicates = _duplicates([criterion['name'] for criterion in rubric_dict['criteria']])
    if len(duplicates) > 0:
        msg = u"Criteria duplicate name(s): {duplicates}".format(
            duplicates=", ".join(duplicates)
        )
        return (False, msg)

    # No duplicate option names within a criterion
    for criterion in rubric_dict['criteria']:
        duplicates = _duplicates([option['name'] for option in criterion['options']])
        if len(duplicates) > 0:
            msg = u"Options in '{criterion}' have duplicate name(s): {duplicates}".format(
                criterion=criterion['name'], duplicates=", ".join(duplicates)
            )
            return (False, msg)

    # After a problem is released, authors are allowed to change text,
    # but nothing that would change the point value of a rubric.
    if is_released:

        # Number of criteria must be the same
        if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
            return (False, u'The number of criteria cannot be changed after a problem is released.')

        # Criteria names must be the same
        # We use criteria names as unique identifiers (unfortunately)
        # throughout the system.  Changing them mid-flight can cause
        # the grade page, for example, to raise 500 errors.
        # When we implement non-XML authoring, we might be able to fix this
        # the right way by assigning unique identifiers for criteria;
        # but for now, this is the safest way to avoid breaking problems
        # post-release.
        current_criterion_names = set(criterion.get('name') for criterion in current_rubric['criteria'])
        new_criterion_names = set(criterion.get('name') for criterion in rubric_dict['criteria'])
        if current_criterion_names != new_criterion_names:
            return (False, u'Criteria names cannot be changed after a problem is released.')

        # Number of options for each criterion must be the same
        for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
            if len(new_criterion['options']) != len(old_criterion['options']):
                return (False, u'The number of options cannot be changed after a problem is released.')

            else:
                for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
                    if new_option['points'] != old_option['points']:
                        return (False, u'Point values cannot be changed after a problem is released.')

    return (True, u'')
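A short usage sketch of the validator above. The rubric payload is hypothetical and only includes the keys the function reads ('criteria', 'name', 'options', 'points'), so rubric_from_dict may also reject it for missing fields; either way the validator reports an invalid rubric.

# Hypothetical payload: two criteria that share a name.
new_rubric = {
    "criteria": [
        {"name": "clarity", "options": [{"name": "poor", "points": 0},
                                        {"name": "good", "points": 2}]},
        {"name": "clarity", "options": [{"name": "poor", "points": 0}]},
    ],
}

is_valid, msg = validate_rubric(new_rubric, current_rubric=new_rubric, is_released=False)
assert not is_valid  # rejected for duplicate criterion names (or for serializer errors)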
Exemplo n.º 43
0
def validate_rubric(rubric_dict, current_rubric, is_released, is_example_based, _):
    """
    Check that the rubric is semantically valid.

    Args:
        rubric_dict (dict): Serialized Rubric model representing the updated state of the rubric.
        current_rubric (dict): Serialized Rubric model representing the current state of the rubric.
        is_released (bool): True if and only if the problem has been released.
        is_example_based (bool): True if and only if this is an example-based assessment.
        _ (function): The service function used to get the appropriate i18n text

    Returns:
        tuple (is_valid, msg) where
            is_valid is a boolean indicating whether the rubric is semantically valid
            and msg describes any validation errors found.
    """
    try:
        rubric_from_dict(rubric_dict)
    except InvalidRubric:
        return False, _(u'This rubric definition is not valid.')

    for criterion in rubric_dict['criteria']:
        # No duplicate option names within a criterion
        duplicates = _duplicates([option['name'] for option in criterion['options']])
        if len(duplicates) > 0:
            msg = _(u"Options in '{criterion}' have duplicate name(s): {duplicates}").format(
                criterion=criterion['name'], duplicates=", ".join(duplicates)
            )
            return False, msg

        # Some criteria may have no options, just written feedback.
        # In this case, written feedback must be required (not optional or disabled).
        if len(criterion['options']) == 0 and criterion.get('feedback', 'disabled') != 'required':
            msg = _(u'Criteria with no options must require written feedback.')
            return False, msg

        # Example-based assessments impose the additional restriction
        # that the point values for options must be unique within
        # a particular rubric criterion.
        if is_example_based:
            duplicates = _duplicates([option['points'] for option in criterion['options']])
            if len(duplicates) > 0:
                msg = _(u"Example-based assessments cannot have duplicate point values.")
                return False, msg

    # After a problem is released, authors are allowed to change text,
    # but nothing that would change the point value of a rubric.
    if is_released:

        # Number of prompts must be the same
        if len(rubric_dict['prompts']) != len(current_rubric['prompts']):
            return False, _(u'Prompts cannot be created or deleted after a problem is released.')

        # Number of criteria must be the same
        if len(rubric_dict['criteria']) != len(current_rubric['criteria']):
            return False, _(u'The number of criteria cannot be changed after a problem is released.')

        # Criteria names must be the same
        # We use criteria names as unique identifiers (unfortunately)
        # throughout the system.  Changing them mid-flight can cause
        # the grade page, for example, to raise 500 errors.
        # When we implement non-XML authoring, we might be able to fix this
        # the right way by assigning unique identifiers for criteria;
        # but for now, this is the safest way to avoid breaking problems
        # post-release.
        current_criterion_names = set(criterion.get('name') for criterion in current_rubric['criteria'])
        new_criterion_names = set(criterion.get('name') for criterion in rubric_dict['criteria'])
        if current_criterion_names != new_criterion_names:
            return False, _(u'Criteria names cannot be changed after a problem is released.')

        # Number of options for each criterion must be the same
        for new_criterion, old_criterion in _match_by_order(rubric_dict['criteria'], current_rubric['criteria']):
            if len(new_criterion['options']) != len(old_criterion['options']):
                return False, _(u'The number of options cannot be changed after a problem is released.')

            else:
                for new_option, old_option in _match_by_order(new_criterion['options'], old_criterion['options']):
                    if new_option['points'] != old_option['points']:
                        return False, _(u'Point values cannot be changed after a problem is released.')

    return True, u''
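A sketch of how the translation hook and the feedback-only rule interact. The payload shape (including the 'prompts' entries) is an assumption, and whether the rubric ultimately validates also depends on fields rubric_from_dict requires beyond those shown here.

# Identity translation hook: adequate for tests or callers without i18n support.
def identity(text):
    return text

# Hypothetical rubric with one feedback-only criterion. Because it has no
# options, its written feedback must be "required" for validation to pass.
feedback_only_rubric = {
    "prompts": [{"description": "Reflect on your draft."}],
    "criteria": [
        {"name": "reflection", "options": [], "feedback": "optional"},
    ],
}

is_valid, msg = validate_rubric(
    feedback_only_rubric, feedback_only_rubric,
    is_released=False, is_example_based=False, _=identity,
)
assert not is_valid  # rejected by the feedback-only rule (or by rubric_from_dict)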
Exemplo n.º 44
0
def create_assessment(
    submission_uuid,
    user_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.  This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another student "
                u"(submission student ID {student_id} does not match your "
                u"student id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            "Could not submit a self-assessment because no submission "
            "exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the self assessment
        assessment = Assessment.create(
            rubric,
            user_id,
            submission_uuid,
            SELF_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
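A hypothetical call based on the docstring above. The criterion and option names, the submission UUID, and rubric_dict are assumptions and must match the rubric the problem actually defines; rubric_dict is assumed to be the serialized rubric for that problem.

# Hypothetical values -- these must correspond to the learner's own submission
# and to criterion/option names present in rubric_dict.
assessment = create_assessment(
    submission_uuid=my_submission_uuid,       # the learner's own submission
    user_id="student_1",                      # must match the submitter's ID
    options_selected={"clarity": "good"},     # criterion name -> option name
    criterion_feedback={"clarity": "I could tighten the intro."},
    overall_feedback="Happy with the structure overall.",
    rubric_dict=rubric_dict,
    # scored_at omitted: defaults to the current time
)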
Exemplo n.º 45
0
 def test_empty_options(self):
     rubric = rubric_from_dict(json_data('data/rubric/empty_options.json'))
     self.assertEqual(rubric.criteria.count(), 2)
Exemplo n.º 46
0
 def test_rubric_requires_positive_score(self):
     with self.assertRaises(InvalidRubric):
         rubric_from_dict(json_data('data/rubric/no_points.json'))
Exemplo n.º 47
0
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.  This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        rubric_dict (dict): Serialized Rubric model.

    Kwargs:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
    except SubmissionNotFoundError:
        raise SelfAssessmentRequestError(_("Could not retrieve the response."))

    # Get or create the rubric
    try:
        rubric = rubric_from_dict(rubric_dict)
        option_ids = rubric.options_ids(options_selected)
    except InvalidRubric as ex:
        msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
        raise SelfAssessmentRequestError(msg)
    except InvalidOptionSelection:
        msg = _("Selected options do not match the rubric")
        raise SelfAssessmentRequestError(msg)

    # Create the assessment
    # Since we have already retrieved the submission, we can assume that
    # the user who created the submission exists.
    self_assessment = {
        "rubric": rubric.id,
        "scorer_id": user_id,
        "submission_uuid": submission_uuid,
        "score_type": SELF_TYPE,
        "feedback": u"",
    }

    if scored_at is not None:
        self_assessment['scored_at'] = scored_at

    # Serialize the assessment
    serializer = AssessmentSerializer(data=self_assessment)
    if not serializer.is_valid():
        msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
        raise SelfAssessmentRequestError(msg)

    assessment = serializer.save()

    # We do this to do a run around django-rest-framework serializer
    # validation, which would otherwise require two DB queries per
    # option to do validation. We already validated these options above.
    AssessmentPart.add_to_assessment(assessment, option_ids)
    assessment_dict = full_assessment_dict(assessment)
    _log_assessment(assessment, submission)

    # Return the serialized assessment
    return assessment_dict
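A small sketch of the option lookup performed above: options_selected maps each criterion name to the chosen option name, and rubric.options_ids resolves those names to option IDs, raising InvalidOptionSelection when a name does not match the rubric. The criterion and option names here are assumptions, and rubric_dict is assumed to be the problem's serialized rubric.

# Hypothetical selection for a two-criterion rubric.
rubric = rubric_from_dict(rubric_dict)
options_selected = {"clarity": "good", "accuracy": "excellent"}

try:
    option_ids = rubric.options_ids(options_selected)
except InvalidOptionSelection:
    # A criterion or option name did not match the rubric definition.
    option_ids = None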
Exemplo n.º 48
0
 def _rubric_with_all_feedback_only_criteria(self):
     """Create a rubric with all feedback-only criteria."""
     rubric_dict = copy.deepcopy(RUBRIC)
     for criterion in rubric_dict['criteria']:
         criterion['options'] = []
     return rubric_from_dict(rubric_dict)
Exemplo n.º 49
0
 def setUp(self):
     super(AIClassifierSetTest, self).setUp()
     rubric = rubric_from_dict(RUBRIC)
     self.classifier_set = AIClassifierSet.create_classifier_set(
         CLASSIFIERS_DICT, rubric, "test_algorithm", COURSE_ID, ITEM_ID)