Example 1
def get_assessment_scores_by_criteria(team_submission_uuid):
    """Get the staff score for each rubric criterion

    Args:
        team_submission_uuid (str): The team submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        # Get most recently graded assessment for a team submission
        team_submission = team_submissions_api.get_team_submission(
            team_submission_uuid)
        assessments = list(
            Assessment.objects.filter(
                submission_uuid__in=team_submission['submission_uuids'],
                score_type=STAFF_TYPE,
            )[:1])

        scores = Assessment.scores_by_criterion(assessments)
        # Since only one assessment is used here, the median score is the same
        # as that single score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = "Error getting staff assessment scores for {}".format(
            team_submission_uuid)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message)
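A hedged usage sketch for the function above; the UUID value and the fallback behaviour are illustrative, not part of the original code:

try:
    criterion_scores = get_assessment_scores_by_criteria("example-team-submission-uuid")
    # e.g. {"clarity": 3, "precision": 2} -- the keys depend on the rubric's criterion names
except StaffAssessmentInternalError:
    # The API above logs the underlying DatabaseError before raising, so the
    # caller only needs to choose a fallback.
    criterion_scores = {}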
Example 2
def get_assessment_median_scores(submission_uuid):
    """Get the median score for each rubric criterion

    For a given assessment, collect the median score for each criterion on the
    rubric. This set can be used to determine the overall score, as well as each
    part of the individual rubric scores.

    If there is a true median score, it is returned. If there are two median
    values, the average of those two values is returned, rounded up to the
    greatest integer value.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with a median score of
            the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = _(u"Error getting assessment median scores {}".format(
            submission_uuid))
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
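A small worked illustration of the rounding rule described in the docstring; the criterion names and peer scores below are hypothetical:

# Four peers score "clarity" as 2, 3, 3, 4: the two middle values are 3 and 3,
# so the median is 3.  With scores 2, 3, 4, 5 the middle values are 3 and 4;
# their average is 3.5, which is rounded up to 4.
median_scores = get_assessment_median_scores("example-submission-uuid")
# e.g. {"clarity": 3, "concision": 4}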
Example 3
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the staff score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        # This will always create a list of length 1
        assessments = list(
            Assessment.objects.filter(
                score_type=STAFF_TYPE, submission_uuid=submission_uuid
            )[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        # Since only one assessment is used here, the median score is the same
        # as that single score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting staff assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message)
Example 4
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the example-based assessments.

    Raises:
        AIGradingInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=AI_ASSESSMENT_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = u"Error getting example-based assessment scores for {}".format(submission_uuid)
        logger.exception(error_message)
        raise AIGradingInternalError(error_message)
Example 5
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the median score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with the score of
            the self assessment.

    Raises:
        SelfAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                score_type=SELF_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = _(u"Error getting self assessment scores for {}").format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)
Example 6
def get_assessment_median_scores(submission_uuid):
    """Get the median score for each rubric criterion

    For a given assessment, collect the median score for each criterion on the
    rubric. This set can be used to determine the overall score, as well as each
    part of the individual rubric scores.

    If there is a true median score, it is returned. If there are two median
    values, the average of those two values is returned, rounded up to the
    greatest integer value.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        dict: A dictionary of rubric criterion names,
        with a median score of the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = (u"Error getting assessment median scores for submission {uuid}").format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example 7
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the staff score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessment used to score this submission.

    Returns:
        (dict): A dictionary of rubric criterion names, with a score of
            the staff assessments.

    Raises:
        StaffAssessmentInternalError: If any error occurs while retrieving
            information from the scores, an error is raised.
    """
    try:
        # This will always create a list of length 1
        assessments = list(
            Assessment.objects.filter(score_type=STAFF_TYPE,
                                      submission_uuid=submission_uuid)[:1])
        scores = Assessment.scores_by_criterion(assessments)
        # Since only one assessment is used here, the median score is the same
        # as that single score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError as ex:
        error_message = "Error getting staff assessment scores for {}".format(
            submission_uuid)
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message) from ex
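Unlike the earlier variants, this version chains the original exception with raise ... from ex. A minimal sketch of what that preserves for callers (the UUID is illustrative):

try:
    get_assessment_scores_by_criteria("example-submission-uuid")
except StaffAssessmentInternalError as err:
    underlying = err.__cause__  # the original DatabaseError, kept for logging and debugging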
Example 8
def get_assessment_scores_by_criteria(submission_uuid):
    """Get the median score for each rubric criterion

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with the score of
            the self assessment.

    Raises:
        SelfAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        # This will always create a list of length 1
        assessments = list(
            Assessment.objects.filter(
                score_type=SELF_TYPE,
                submission_uuid=submission_uuid).order_by('-scored_at')[:1])
        scores = Assessment.scores_by_criterion(assessments)
        # Since only one assessment is used here, the median score is the same
        # as that single score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = (
            u"Error getting self assessment scores for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)
Example 9
    def test_create_with_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # We can't select an option for the last criterion, but we do
        # provide written feedback.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
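The fixture _rubric_with_one_feedback_only_criterion is referenced but never shown. A hedged sketch of the shape these assertions imply; the option names marked as assumed, their point values, and the 'explanation' key are guesses, not taken from the original test class:

def _rubric_with_one_feedback_only_criterion_sketch():
    # Two option-bearing criteria worth 4 points in total (points_possible == 4);
    # the selected options "𝓰𝓸𝓸𝓭" and "єχ¢єℓℓєηт" add up to 3 (points_earned == 3).
    # The third criterion, named "feedback", has no options and so scores 0 points.
    # (Any top-level rubric fields such as an overall prompt are omitted here.)
    return rubric_from_dict({
        "criteria": [
            {"order_num": 0, "name": u"vøȼȺƀᵾłȺɍɏ", "prompt": "vocabulary",
             "options": [
                 {"order_num": 0, "name": "ok", "explanation": "", "points": 1},          # assumed
                 {"order_num": 1, "name": u"𝓰𝓸𝓸𝓭", "explanation": "", "points": 2},
             ]},
            {"order_num": 1, "name": u"ﻭɼค๓๓คɼ", "prompt": "grammar",
             "options": [
                 {"order_num": 0, "name": u"єχ¢єℓℓєηт", "explanation": "", "points": 1},
                 {"order_num": 1, "name": "perfect", "explanation": "", "points": 2},     # assumed
             ]},
            {"order_num": 2, "name": "feedback", "prompt": "feedback only", "options": []},
        ],
    })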
Example 10
    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {
            u"feedback only": u"enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criterion with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")
Example 11
    def test_full_assessment_dict_criteria_no_options(self):
        # Create a rubric with a criterion that has no options (just feedback)
        rubric_dict = copy.deepcopy(RUBRIC)
        rubric_dict['criteria'].append({
            'order_num': 2,
            'name': 'feedback only',
            'prompt': 'feedback only',
            'options': []
        })
        rubric = rubric_from_dict(rubric_dict)

        # Create an assessment for the rubric
        assessment = Assessment.create(rubric, "Bob", "submission-UUID", "PE")
        selected = {
            "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
            "ﻭɼค๓๓คɼ": "єχ¢єℓℓєηт",
        }
        feedback = {
            "feedback only": "enjoy the feedback!"
        }
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

        # Serialize the assessment
        serialized = full_assessment_dict(assessment)

        # Verify that the assessment dict correctly serialized the criterion with options.
        self.assertEqual(serialized['parts'][0]['criterion']['name'], "vøȼȺƀᵾłȺɍɏ")
        self.assertEqual(serialized['parts'][0]['option']['name'], "𝓰𝓸𝓸𝓭")
        self.assertEqual(serialized['parts'][1]['criterion']['name'], "ﻭɼค๓๓คɼ")
        self.assertEqual(serialized['parts'][1]['option']['name'], "єχ¢єℓℓєηт")

        # Verify that the assessment dict correctly serialized the criterion with no options.
        self.assertIs(serialized['parts'][2]['option'], None)
        self.assertEqual(serialized['parts'][2]['criterion']['name'], "feedback only")
Example 12
def _complete_assessment(
    rubric_dict,
    scorer_id,
    peer_submission_uuid,
    options_selected,
    criterion_feedback,
    scorer_workflow,
    overall_feedback,
    num_required_grades,
    scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric, scorer_id, peer_submission_uuid, PEER_TYPE, scored_at=scored_at, feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment
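The docstring promises creation "in a single transaction", but the transaction boundary itself is not visible in this excerpt. In Django that is usually expressed with transaction.atomic; a sketch under that assumption, not necessarily how the original module wires it up:

from django.db import transaction

@transaction.atomic
def _complete_assessment_atomic(*args, **kwargs):
    # If any step fails, the Assessment, its AssessmentParts, and the peer
    # workflow update are rolled back together.
    return _complete_assessment(*args, **kwargs)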
Example 13
def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being
            assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)
    return assessment
Example 14
def _complete_assessment(rubric_dict, scorer_id, peer_submission_uuid,
                         options_selected, criterion_feedback, scorer_workflow,
                         overall_feedback, num_required_grades, scored_at):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(rubric,
                                   scorer_id,
                                   peer_submission_uuid,
                                   PEER_TYPE,
                                   scored_at=scored_at,
                                   feedback=overall_feedback)

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment,
                                            options_selected,
                                            feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment,
                                            num_required_grades)
    return assessment
Example 15
    def test_choose_score(self):
        self.assertEqual(0, Assessment.get_median_score([]))
        self.assertEqual(5, Assessment.get_median_score([5]))
        # average of 5 and 6, rounded up.
        self.assertEqual(6, Assessment.get_median_score([5, 6]))
        self.assertEqual(14, Assessment.get_median_score([5, 6, 12, 16, 22, 53]))
        self.assertEqual(14, Assessment.get_median_score([6, 5, 12, 53, 16, 22]))
        self.assertEqual(16, Assessment.get_median_score([5, 6, 12, 16, 22, 53, 102]))
        self.assertEqual(16, Assessment.get_median_score([16, 6, 12, 102, 22, 53, 5]))
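For reference, a minimal implementation consistent with these expectations: an empty list yields 0, an odd count returns the middle value, and an even count averages the two middle values and rounds up. This is a sketch, not necessarily the body of Assessment.get_median_score:

import math

def median_score(scores):
    ordered = sorted(scores)
    if not ordered:
        return 0
    middle = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[middle]
    # Two middle values: average them and round up, e.g. [5, 6] -> 6.
    return int(math.ceil((ordered[middle - 1] + ordered[middle]) / 2.0))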
Example 16
    def test_create_from_option_points_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Since there are no criteria with options, and we're not
        # providing written feedback, pass in an empty selection.
        selected = {}
        AssessmentPart.create_from_option_points(assessment, selected)

        # Score should be zero, since none of the criteria have options
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Example 17
    def test_no_feedback_provided_for_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts
        # Do NOT provide feedback for the feedback-only criterion
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        feedback = {}

        # Expect an error when we try to create the assessment parts
        with self.assertRaises(InvalidRubricSelection):
            AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
Example 18
def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location, anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=True).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [scored_item.assessment for scored_item in scored_items]
        scored_scores = scores_by_criterion(scored_assessments)
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(author=submission.id, submission_uuid=submission.submission_uuid, assessment__isnull=False, scored=False).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [not_scored_item.assessment for not_scored_item in not_scored_items]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(submission.submission_uuid)
        except:
            median_scores = {}
        latest_score_output = PrettyTable(['Score ID'] + scored_scores.keys() + ['Points earned', 'Points possible', 'Created at'])
        latest_score_output.align = 'l'
        row = []
        row.append(latest_score.id)
        row.extend([median_scores[k] for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."
Example 19
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Example 20
File: self.py Project: lxp20201/lxp
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
Example 21
    def test_create_with_all_feedback_only_criteria(self):
        rubric = self._rubric_with_all_feedback_only_criteria()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts, each of which are feedback-only (no points)
        selected = {}
        feedback = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        AssessmentPart.create_from_option_names(
            assessment, selected, feedback=feedback
        )

        # Check the score (should be 0, since we haven't selected any points)
        self.assertEqual(assessment.points_earned, 0)
        self.assertEqual(assessment.points_possible, 0)
Example 22
    def test_create_from_option_points_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts by providing scores for options
        # but NO feedback.  This simulates how an example-based AI
        # assessment is created.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": 2,
            u"ﻭɼค๓๓คɼ": 1,
        }
        AssessmentPart.create_from_option_points(assessment, selected)

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text (should default to an empty string)
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"")
Example 23
    def test_default_feedback_for_feedback_only_criterion(self):
        rubric = self._rubric_with_one_feedback_only_criterion()
        assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

        # Create assessment parts, but do NOT provide any feedback
        # This simulates how non-peer assessments are created
        # Note that this is different from providing an empty feedback dict;
        # here, we're not providing the `feedback` kwarg at all.
        selected = {
            u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
            u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
        }
        AssessmentPart.create_from_option_names(assessment, selected)

        # Check the score (the feedback-only criterion should count for 0 points)
        self.assertEqual(assessment.points_earned, 3)
        self.assertEqual(assessment.points_possible, 4)

        # Check the feedback text, which should default to an empty string
        feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
        self.assertEqual(feedback_only.feedback, u"")
Example 24
def create_assessment(
    scorer_submission_uuid,
    scorer_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    num_required_grades,
    scored_at=None
):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)
        peer_submission_uuid = peer_workflow_item.author.submission_uuid

        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the peer assessment
        assessment = Assessment.create(
            rubric,
            scorer_id,
            peer_submission_uuid,
            PEER_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # Create assessment parts for each criterion in the rubric
        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"Rubric definition was not valid"
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options selected in the rubric"
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example 25
def print_summary(course_id, oa_item, anonymous_student_id):
    # Print submission
    submission = get_submission(course_id, oa_item.location,
                                anonymous_student_id)
    print "Submission status:"
    print_submission(submission, oa_item)

    # Print scored assessment(s)
    scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=True).order_by('assessment')
    print "Scored assessment(s):"
    if scored_items:
        scored_assessments = [
            scored_item.assessment for scored_item in scored_items
        ]
        scored_scores = scores_by_criterion(scored_assessments)
        median_score_dict = Assessment.get_median_score_dict(scored_scores)
        print_peerworkflowitem(scored_items, scored_scores)
    else:
        scored_scores = {}
        print "... No record was found."

    # Print not-scored assessment(s)
    not_scored_items = PeerWorkflowItem.objects.filter(
        author=submission.id,
        submission_uuid=submission.submission_uuid,
        assessment__isnull=False,
        scored=False).order_by('assessment')
    print "Not-scored assessment(s):"
    if not_scored_items:
        not_scored_assessments = [
            not_scored_item.assessment for not_scored_item in not_scored_items
        ]
        not_scored_scores = scores_by_criterion(not_scored_assessments)
        print_peerworkflowitem(not_scored_items, not_scored_scores)
    else:
        print "... No record was found."

    # Print latest score
    latest_score = get_latest_score(submission)
    print "Latest score:"
    if latest_score is not None:
        try:
            median_scores = peer_api.get_assessment_median_scores(
                submission.submission_uuid)
        except:
            median_scores = {}
        latest_score_output = PrettyTable(
            ['Score ID'] + scored_scores.keys() +
            ['Points earned', 'Points possible', 'Created at'])
        latest_score_output.align = 'l'
        row = []
        row.append(latest_score.id)
        row.extend([median_scores[k] for k in scored_scores.keys()])
        row.append(latest_score.points_earned)
        row.append(latest_score.points_possible)
        row.append(latest_score.created_at)
        latest_score_output.add_row(row)
        print latest_score_output
    else:
        print "... No record was found."
Example 26
def create_assessment(
    submission_uuid,
    user_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.  This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another student "
                u"(submission student ID {student_id} does not match your "
                u"student id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            "Could not submit a self-assessment because no submission "
            "exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the self assessment
        assessment = Assessment.create(
            rubric,
            user_id,
            submission_uuid,
            SELF_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
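A hedged usage sketch for the self-assessment API above; the argument values are illustrative and rubric_dict is assumed to be built by the caller:

try:
    assessment_dict = create_assessment(
        submission_uuid="example-submission-uuid",
        user_id="student-1",                      # must match the submission's author
        options_selected={"clarity": "Very clear"},
        criterion_feedback={},
        overall_feedback="",
        rubric_dict=rubric_dict,                  # serialized rubric, defined elsewhere
    )
except SelfAssessmentRequestError:
    # Raised for a duplicate self-assessment, a missing submission, or a
    # submission owned by a different student.
    pass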
Example 27
def main():
    import datetime
    import itertools

    from django.contrib.auth.models import User
    from django.db import transaction
    from opaque_keys.edx.keys import CourseKey
    from openassessment.assessment.models import Assessment, AssessmentPart, StaffWorkflow
    from openassessment.workflow.models import AssessmentWorkflow, AssessmentWorkflowStep
    from student.models import anonymous_id_for_user, user_by_anonymous_id
    from submissions.models import Score, ScoreSummary, ScoreAnnotation, Submission

    old_scores = Score.objects.filter(submission__isnull=True, reset=False).order_by('id')
    updated_count = 0
    for score in old_scores:
        try:
            with transaction.atomic():
                # ScoreSummary is updated on Score saves but for this script we don't want that.
                # Correct way is to disconnect post_save signal, but since the receiver function
                # is defined in the class, we can't reference it. Workaround here is to just
                # prefetch the score summary and resave it to maintain its original field values.
                score_summary = ScoreSummary.objects.get(student_item=score.student_item)

                # Update old override with submission from the score preceding it.
                # If none exists, look for it in the submissions table.
                preceding_score = Score.objects.filter(
                    student_item=score.student_item,
                    created_at__lte=score.created_at,
                    submission__isnull=False,
                ).order_by('-created_at')[:1]
                if preceding_score.count():
                    submission = preceding_score.get().submission
                else:
                    submission_qset = Submission.objects.filter(student_item=score.student_item)
                    if submission_qset.count() > 1:
                        raise Exception("MULTIPLE SUBMISSIONS FOR STUDENT_ITEM {}".format(score.student_item))
                    else:
                        submission = submission_qset[:1].get()
                score.submission = submission
                score.save()

                # Offset override reset by 1 second for convenience when sorting db
                override_date = score.created_at - datetime.timedelta(seconds=1)
                # Create reset score
                Score.objects.create(
                    student_item=score.student_item,
                    submission=None,
                    points_earned=0,
                    points_possible=0,
                    created_at=override_date,
                    reset=True,
                )

                # Restore original score summary values
                score_summary.save()

                # Fetch staff id from score course for ScoreAnnotation
                course_id = CourseKey.from_string(score.student_item.course_id)
                staff = User.objects.filter(
                    courseaccessrole__role__in=['instructor', 'staff'],
                    courseaccessrole__course_id=course_id,
                )[:1].get()
                staff_id = anonymous_id_for_user(staff, course_id, save=False)

                # Create ScoreAnnotation
                score_annotation = ScoreAnnotation(
                    score=score,
                    annotation_type="staff_defined",
                    creator=staff_id,
                    reason="A staff member has defined the score for this submission",
                )
                score_annotation.save()

                # ORA2 Table Updates...
                # Fetch rubric from an existing assessment
                assessment = Assessment.objects.filter(submission_uuid=submission.uuid)[:1].get()
                rubric = assessment.rubric

                staff_assessment = Assessment.create(
                    rubric=rubric,
                    scorer_id=staff_id,
                    submission_uuid=submission.uuid,
                    score_type="ST",
                    scored_at=override_date,
                )

                # Fake criterion selections
                rubric_index = rubric.index
                assessment_parts = []

                criteria_without_options = rubric_index.find_criteria_without_options()
                criteria_with_options = set(rubric_index._criteria_index.values()) - criteria_without_options
                ordered_criteria = sorted(criteria_with_options, key=lambda criterion: criterion.order_num)
                criteria_options = [c.options.all() for c in ordered_criteria]
                # Just take the first combination of options which add up to the override point score
                for selection in itertools.product(*criteria_options):
                    total = sum(option.points for option in selection)
                    if total == score.points_earned:
                        for option in selection:
                            assessment_parts.append({
                                'criterion': option.criterion,
                                'option': option
                            })
                        break

                # Default to first option for each criteria if no matching sum found
                if not assessment_parts:
                    print "NO CLEAN SUM FOR SUBMISSION " + submission.uuid
                    for options in criteria_options:
                        assessment_parts.append({
                            'criterion': options[0].criterion,
                            'option': options[0],
                        })

                # Add feedback-only criteria
                for criterion in criteria_without_options:
                    assessment_parts.append({
                        'criterion': criterion,
                        'option': None
                    })

                AssessmentPart.objects.bulk_create([
                    AssessmentPart(
                        assessment=staff_assessment,
                        criterion=assessment_part['criterion'],
                        option=assessment_part['option'],
                        feedback=u""
                    )
                    for assessment_part in assessment_parts
                ])

                try:
                    staff_workflow = StaffWorkflow.objects.get(submission_uuid=submission.uuid)
                    staff_workflow.assessment = staff_assessment.id
                    staff_workflow.grading_completed_at = override_date
                except StaffWorkflow.DoesNotExist:
                    staff_workflow = StaffWorkflow(
                        scorer_id=staff_id,
                        course_id=score.student_item.course_id,
                        item_id=score.student_item.item_id,
                        submission_uuid=submission.uuid,
                        created_at=override_date,
                        grading_completed_at=override_date,
                        assessment=staff_assessment.id,
                    )
                staff_workflow.save()

                workflow = AssessmentWorkflow.get_by_submission_uuid(submission.uuid)
                try:
                    staff_step = workflow.steps.get(name='staff')
                    staff_step.assessment_completed_at = score.created_at
                    staff_step.submitter_completed_at = score.created_at
                    staff_step.save()
                except AssessmentWorkflowStep.DoesNotExist:
                    for step in workflow.steps.all():
                        step.assessment_completed_at = score.created_at
                        step.submitter_completed_at = score.created_at
                        step.order_num += 1
                        step.save()
                    workflow.steps.add(
                        AssessmentWorkflowStep(
                            name='staff',
                            order_num=0,
                            assessment_completed_at=score.created_at,
                            submitter_completed_at=score.created_at,
                        )
                    )

                # Update workflow status to done if it wasn't subsequently cancelled
                if workflow.status != 'cancelled':
                    workflow.status = 'done'
                    workflow.save()

            updated_count += 1
            user = user_by_anonymous_id(score.student_item.student_id)
            print(
                "Successfully updated score {} for user {} with email {} in course {} for item: {}".format(
                    score.id,
                    user.username,
                    user.email,
                    score.student_item.course_id,
                    score.student_item.item_id,
                )
            )
        except Exception as err:
            print("An error occurred updating score {}: {}".format(score.id, err))
            print("Please update this score manually and retry script.")
            break

    print("Script finished, number of scores updated: {}.".format(updated_count))