def get_assessment_scores_by_criteria(submission_uuid):
    """Get the median score for each rubric criterion.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with the median
            score of the self-assessment.

    Raises:
        SelfAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.

    """
    try:
        # Take the most recent self-assessment, if one exists
        # (the list has length 0 or 1)
        assessments = list(
            Assessment.objects.filter(
                score_type=SELF_TYPE, submission_uuid=submission_uuid
            ).order_by('-scored_at')[:1]
        )
        scores = Assessment.scores_by_criterion(assessments)

        # Since there is at most one score per criterion, the median score
        # will be the same as the only score.
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = (
            u"Error getting self assessment scores for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)
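

# Illustrative sketch only: the median computation above is delegated to
# Assessment.get_median_score_dict, but the shape of the result can be shown
# with plain dicts. The helper name and the criterion names in the docstring
# example are hypothetical, and the real model may round even-length score
# lists differently (e.g. rounding up rather than averaging the middle pair).
def _median_score_sketch(scores_by_criterion):
    """Map each criterion name to the median of its recorded scores.

    A submission has at most one self-assessment, so each criterion carries
    a single score and the median is simply that score:

        _median_score_sketch({u"clarity": [3], u"accuracy": [2]})
        # => {u"clarity": 3, u"accuracy": 2}
    """
    medians = {}
    for name, points in scores_by_criterion.items():
        ordered = sorted(points)
        middle = len(ordered) // 2
        if len(ordered) % 2 == 1:
            medians[name] = ordered[middle]
        else:
            medians[name] = (ordered[middle - 1] + ordered[middle]) / 2.0
    return medians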
def create_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission
            being assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to
            the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the
            user is allowed to score.
        SelfAssessmentInternalError: An error occurred while creating the
            assessment.
    """
    # Check that a self-assessment does not already exist for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            u"because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another learner "
                u"(submission learner ID {student_id} does not match your "
                u"learner id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            u"Could not submit a self-assessment because no submission "
            u"exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        assessment = _complete_assessment(
            submission_uuid,
            user_id,
            options_selected,
            criterion_feedback,
            overall_feedback,
            rubric_dict,
            scored_at
        )
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"Error creating self assessment for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
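

# Hedged usage sketch, not part of the production API: one way a caller
# might invoke create_assessment. The submission UUID, learner ID, rubric
# fields, and option names below are all hypothetical, and the exact
# serialized Rubric schema (e.g. required ordering or label fields) may
# differ from this minimal shape.
def _example_create_assessment():
    rubric_dict = {
        "criteria": [
            {
                "name": "clarity",
                "prompt": "How clear is the response?",
                "options": [
                    {"name": "poor", "points": 0, "explanation": ""},
                    {"name": "good", "points": 2, "explanation": ""},
                ],
            },
        ],
    }
    try:
        return create_assessment(
            submission_uuid="hypothetical-submission-uuid",
            user_id="hypothetical-learner-id",
            options_selected={"clarity": "good"},
            criterion_feedback={"clarity": "Well organized response."},
            overall_feedback="Solid work overall.",
            rubric_dict=rubric_dict,
        )
    except SelfAssessmentRequestError:
        # Raised when the learner has already self-assessed, does not own
        # the submission, or selected options that do not match the rubric.
        raise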