def test_create_with_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts
    # We can't select an option for the last criterion, but we do
    # provide written feedback.
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    feedback = {
        u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
    }
    AssessmentPart.create_from_option_names(
        assessment, selected, feedback=feedback
    )

    # Check the score (the feedback-only criterion should count for 0 points)
    self.assertEqual(assessment.points_earned, 3)
    self.assertEqual(assessment.points_possible, 4)

    # Check the feedback text
    feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
    self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")

def test_full_assessment_dict_criteria_no_options(self):
    # Create a rubric with a criterion that has no options (just feedback)
    rubric_dict = copy.deepcopy(RUBRIC)
    rubric_dict['criteria'].append({
        'order_num': 2,
        'name': 'feedback only',
        'prompt': 'feedback only',
        'options': []
    })
    rubric = rubric_from_dict(rubric_dict)

    # Create an assessment for the rubric
    assessment = Assessment.create(rubric, "Bob", "submission-UUID", "PE")
    selected = {
        "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
        "ﻭɼค๓๓คɼ": "єχ¢єℓℓєηт",
    }
    feedback = {
        "feedback only": "enjoy the feedback!"
    }
    AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

    # Serialize the assessment
    serialized = full_assessment_dict(assessment)

    # Verify that the assessment dict correctly serialized the criteria with options.
    self.assertEqual(serialized['parts'][0]['criterion']['name'], "vøȼȺƀᵾłȺɍɏ")
    self.assertEqual(serialized['parts'][0]['option']['name'], "𝓰𝓸𝓸𝓭")
    self.assertEqual(serialized['parts'][1]['criterion']['name'], "ﻭɼค๓๓คɼ")
    self.assertEqual(serialized['parts'][1]['option']['name'], "єχ¢єℓℓєηт")

    # Verify that the assessment dict correctly serialized the criterion with no options.
    self.assertIs(serialized['parts'][2]['option'], None)
    self.assertEqual(serialized['parts'][2]['criterion']['name'], "feedback only")

def test_full_assessment_dict_criteria_no_options(self):
    # Create a rubric with a criterion that has no options (just feedback)
    rubric_dict = copy.deepcopy(RUBRIC)
    rubric_dict['criteria'].append({
        'order_num': 2,
        'name': 'feedback only',
        'prompt': 'feedback only',
        'options': []
    })
    rubric = rubric_from_dict(rubric_dict)

    # Create an assessment for the rubric
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    feedback = {
        u"feedback only": u"enjoy the feedback!"
    }
    AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

    # Serialize the assessment
    serialized = full_assessment_dict(assessment)

    # Verify that the assessment dict correctly serialized the criteria with options.
    self.assertEqual(serialized['parts'][0]['criterion']['name'], u"vøȼȺƀᵾłȺɍɏ")
    self.assertEqual(serialized['parts'][0]['option']['name'], u"𝓰𝓸𝓸𝓭")
    self.assertEqual(serialized['parts'][1]['criterion']['name'], u"ﻭɼค๓๓คɼ")
    self.assertEqual(serialized['parts'][1]['option']['name'], u"єχ¢єℓℓєηт")

    # Verify that the assessment dict correctly serialized the criterion with no options.
    self.assertIs(serialized['parts'][2]['option'], None)
    self.assertEqual(serialized['parts'][2]['criterion']['name'], u"feedback only")

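# Hedged sketch of the kind of fixture the two tests above rely on. RUBRIC itself is
# not shown in this snippet; only its 'criteria' layout can be inferred from the
# deepcopy-and-append above. The option lists, point values, and the top-level
# 'prompt' key below are assumptions for illustration, not the real fixture.
EXAMPLE_RUBRIC = {
    'prompt': u"Assess the response.",  # assumed top-level key
    'criteria': [
        {
            'order_num': 0,
            'name': u"vøȼȺƀᵾłȺɍɏ",
            'prompt': u"How strong is the vocabulary?",
            'options': [
                # Option names and points here are hypothetical
                {'order_num': 0, 'name': u"weak", 'points': 0, 'explanation': u""},
                {'order_num': 1, 'name': u"𝓰𝓸𝓸𝓭", 'points': 2, 'explanation': u""},
            ],
        },
        {
            'order_num': 1,
            'name': u"ﻭɼค๓๓คɼ",
            'prompt': u"How strong is the grammar?",
            'options': [
                {'order_num': 0, 'name': u"weak", 'points': 0, 'explanation': u""},
                {'order_num': 1, 'name': u"єχ¢єℓℓєηт", 'points': 1, 'explanation': u""},
            ],
        },
    ],
}
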
def _complete_assessment(
        rubric_dict,
        scorer_id,
        peer_submission_uuid,
        options_selected,
        criterion_feedback,
        scorer_workflow,
        overall_feedback,
        num_required_grades,
        scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        The Assessment model
    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        peer_submission_uuid,
        PEER_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment

def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        The Assessment model
    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)
    return assessment

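# Hedged usage sketch, not part of the API above: it only illustrates the argument
# shapes the docstring describes. The criterion/option names, IDs, and
# example_rubric_dict are hypothetical, and the rubric layout is assumed to follow
# whatever serialized-rubric format rubric_from_dict expects (a 'criteria' list as
# in the tests earlier in this section). Passing scorer_workflow=None exercises the
# `if scorer_workflow is not None` guard, so no workflow is closed.
example_rubric_dict = {
    'criteria': [
        {
            'order_num': 0,
            'name': 'clarity',
            'prompt': 'How clear is the response?',
            'options': [
                {'order_num': 0, 'name': 'Unclear', 'points': 0, 'explanation': ''},
                {'order_num': 1, 'name': 'Very clear', 'points': 2, 'explanation': ''},
            ],
        },
    ],
}

staff_assessment = _complete_assessment(
    submission_uuid='example-submission-uuid',           # hypothetical UUID
    scorer_id='staff-member-id',                         # hypothetical scorer
    options_selected={'clarity': 'Very clear'},          # criterion name -> option name
    criterion_feedback={'clarity': 'Well organized.'},   # optional; criteria may be omitted
    overall_feedback=u"Good work overall.",
    rubric_dict=example_rubric_dict,
    scored_at=None,                                      # assumed to fall back to "now"
    scorer_workflow=None,                                # or a staff workflow object to close
)
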
def _complete_assessment(rubric_dict, scorer_id, peer_submission_uuid, options_selected,
                         criterion_feedback, scorer_workflow, overall_feedback,
                         num_required_grades, scored_at):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        The Assessment model
    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(rubric, scorer_id, peer_submission_uuid, PEER_TYPE,
                                   scored_at=scored_at, feedback=overall_feedback)

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
    return assessment

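# Hedged sketch: both copies of this helper describe themselves as running "in a
# single transaction", but the snippet above does not show how that is enforced.
# In Django the usual mechanism is `transaction.atomic`; the decorator below is an
# assumed illustration of that pattern (with a hypothetical wrapper name), not the
# module's actual code.
from django.db import transaction


@transaction.atomic
def _complete_peer_assessment_atomically(*args, **kwargs):
    # If creating the assessment parts or closing the workflow item raises,
    # the Assessment row created inside the helper is rolled back with it.
    return _complete_assessment(*args, **kwargs)
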
def test_create_from_option_points_all_feedback_only_criteria(self):
    rubric = self._rubric_with_all_feedback_only_criteria()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Since there are no criteria with options, and we're not
    # providing written feedback, pass in an empty selection.
    selected = {}
    AssessmentPart.create_from_option_points(assessment, selected)

    # Score should be zero, since none of the criteria have options
    self.assertEqual(assessment.points_earned, 0)
    self.assertEqual(assessment.points_possible, 0)

def test_no_feedback_provided_for_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts
    # Do NOT provide feedback for the feedback-only criterion
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    feedback = {}

    # Expect an error when we try to create the assessment parts
    with self.assertRaises(InvalidRubricSelection):
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model
    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment

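# Hedged sketch of how a caller might surface the `InvalidRubricSelection` that the
# comment above says `AssessmentPart.create_from_option_names` can raise. All the
# argument values are placeholders, `rubric_dict` is assumed to be a serialized
# rubric, and the exception translation mirrors what the public self-assessment API
# later in this section does; this is an illustration, not the module's actual caller.
submission_uuid = "learner-submission-uuid"   # hypothetical UUID
user_id = "learner-id"
options_selected = {"clarity": "Very clear"}
criterion_feedback = {}                       # criterion feedback is optional
overall_feedback = u""
scored_at = None

try:
    assessment = _complete_assessment(
        submission_uuid, user_id, options_selected, criterion_feedback,
        overall_feedback, rubric_dict, scored_at,
    )
except InvalidRubricSelection as ex:
    msg = "Selected options do not match the rubric: " + str(ex)
    logger.warning(msg, exc_info=True)
    raise SelfAssessmentRequestError(msg)
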
def test_create_with_all_feedback_only_criteria(self):
    rubric = self._rubric_with_all_feedback_only_criteria()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts, each of which is feedback-only (no points)
    selected = {}
    feedback = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    AssessmentPart.create_from_option_names(
        assessment, selected, feedback=feedback
    )

    # Check the score (should be 0, since we haven't selected any points)
    self.assertEqual(assessment.points_earned, 0)
    self.assertEqual(assessment.points_possible, 0)

def test_create_from_option_points_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts by providing scores for options
    # but NO feedback. This simulates how an example-based AI
    # assessment is created.
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": 2,
        u"ﻭɼค๓๓คɼ": 1,
    }
    AssessmentPart.create_from_option_points(assessment, selected)

    # Check the score (the feedback-only criterion should count for 0 points)
    self.assertEqual(assessment.points_earned, 3)
    self.assertEqual(assessment.points_possible, 4)

    # Check the feedback text (should default to an empty string)
    feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
    self.assertEqual(feedback_only.feedback, u"")

def test_default_feedback_for_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts, but do NOT provide any feedback
    # This simulates how non-peer assessments are created
    # Note that this is different from providing an empty feedback dict;
    # here, we're not providing the `feedback` kwarg at all.
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    AssessmentPart.create_from_option_names(assessment, selected)

    # Check the score (the feedback-only criterion should count for 0 points)
    self.assertEqual(assessment.points_earned, 3)
    self.assertEqual(assessment.points_possible, 4)

    # Check the feedback text, which should default to an empty string
    feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
    self.assertEqual(feedback_only.feedback, u"")

def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None
):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict, num_required_grades)

    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid

        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the peer assessment
        assessment = Assessment.create(
            rubric,
            scorer_id,
            peer_submission_uuid,
            PEER_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # Create assessment parts for each criterion in the rubric
        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)

    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"Rubric definition was not valid"
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options selected in the rubric"
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)

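# Hedged caller sketch expanding on the docstring's example above. The UUIDs,
# num_required_grades value, and rubric payload are hypothetical, and the except
# clauses only cover exception types this function documents or raises; this is an
# illustration of calling the API, not code from the module itself.
options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
criterion_feedback = {"clarity": "I thought this essay was very clear."}
overall_feedback = "Your submission was thrilling."

try:
    assessment_data = create_assessment(
        "scorer-submission-uuid",     # hypothetical scorer workflow UUID
        "Tim",
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,                  # assumed to be a serialized rubric
        num_required_grades=2,        # hypothetical course setting
    )
except PeerAssessmentWorkflowError:
    # The scorer has no peer workflow, or no open workflow item.
    pass
except (PeerAssessmentRequestError, PeerAssessmentInternalError):
    # Bad rubric/options, or a database problem while saving the assessment.
    pass
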
def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict, num_required_grades)

    """
    # Ensure that this variable is declared so that if an error occurs
    # we don't get a second error when trying to log it!
    assessment_dict = None
    try:
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(options_selected)
        except InvalidOptionSelection:
            msg = "Selected options do not match the rubric"
            logger.warning(msg, exc_info=True)
            raise PeerAssessmentRequestError(msg)

        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": peer_submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": overall_feedback[0:Assessment.MAXSIZE],
        }
        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)
        if not peer_serializer.is_valid():
            msg = (
                u"An error occurred while serializing "
                u"the peer assessment associated with "
                u"the scorer's submission UUID {}."
            ).format(scorer_submission_uuid)
            raise PeerAssessmentRequestError(msg)
        assessment = peer_serializer.save()

        # We do this to do a run around django-rest-framework serializer
        # validation, which would otherwise require two DB queries per
        # option to do validation. We already validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, scorer_workflow)

        return assessment_dict
    except DatabaseError:
        error_message = (
            u"An error occurred while creating assessment {} by: {}"
        ).format(assessment_dict, scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.error(message)
        raise PeerAssessmentWorkflowError(message)

def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        rubric_dict (dict): Serialized Rubric model.

    Kwargs:
        scored_at (datetime): The timestamp of the assessment; defaults to the
            current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the
            user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
    except SubmissionNotFoundError:
        raise SelfAssessmentRequestError(_("Could not retrieve the response."))

    # Get or create the rubric
    try:
        rubric = rubric_from_dict(rubric_dict)
        option_ids = rubric.options_ids(options_selected)
    except InvalidRubric as ex:
        msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
        raise SelfAssessmentRequestError(msg)
    except InvalidOptionSelection:
        msg = _("Selected options do not match the rubric")
        raise SelfAssessmentRequestError(msg)

    # Create the assessment
    # Since we have already retrieved the submission, we can assume that
    # the user who created the submission exists.
    self_assessment = {
        "rubric": rubric.id,
        "scorer_id": user_id,
        "submission_uuid": submission_uuid,
        "score_type": SELF_TYPE,
        "feedback": u"",
    }

    if scored_at is not None:
        self_assessment['scored_at'] = scored_at

    # Serialize the assessment
    serializer = AssessmentSerializer(data=self_assessment)
    if not serializer.is_valid():
        msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
        raise SelfAssessmentRequestError(msg)

    assessment = serializer.save()

    # We do this to do a run around django-rest-framework serializer
    # validation, which would otherwise require two DB queries per
    # option to do validation. We already validated these options above.
    AssessmentPart.add_to_assessment(assessment, option_ids)
    assessment_dict = full_assessment_dict(assessment)
    _log_assessment(assessment, submission)

    # Return the serialized assessment
    return assessment_dict

def create_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the
            current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the
            user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another student "
                u"(submission student ID {student_id} does not match your "
                u"student id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            "Could not submit a self-assessment because no submission "
            "exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the self assessment
        assessment = Assessment.create(
            rubric,
            user_id,
            submission_uuid,
            SELF_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)

    # Return the serialized assessment
    return full_assessment_dict(assessment)

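# Hedged caller sketch for the self-assessment API above. The UUID and rubric
# payload are hypothetical (example_rubric_dict refers to the earlier illustrative
# rubric, not a real fixture). The points to note, both taken from the function
# body: user_id must match the submission's author, and a second call for the same
# submission raises because an existing SELF-type assessment is detected first.
try:
    self_assessment_data = create_assessment(
        submission_uuid="learner-submission-uuid",   # hypothetical UUID
        user_id="learner-id",                        # must match the submitter
        options_selected={"clarity": "Very clear"},
        criterion_feedback={"clarity": "Concise and readable."},
        overall_feedback=u"I think I covered everything.",
        rubric_dict=example_rubric_dict,             # assumed serialized rubric
    )
except SelfAssessmentRequestError:
    # Duplicate self-assessment, mismatched user, missing submission,
    # or a rubric/option mismatch.
    pass
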
def create_assessment(
        submission_uuid,
        scorer_id,
        assessment_dict,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        submission_uuid (str): The submission uuid this assessment is associated
            with. The submission uuid is required and must already exist in the
            Submission model.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        assessment_dict (dict): All related information for the assessment. An
            assessment contains points_earned, points_possible, and feedback.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> assessment_dict = dict(
        >>>     options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
        >>>     feedback="Your submission was thrilling.",
        >>> )
        >>> create_assessment("1", "Tim", assessment_dict, rubric_dict, num_required_grades)

    """
    try:
        submission = sub_api.get_submission_and_student(submission_uuid)
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(assessment_dict["options_selected"])
        except InvalidOptionSelection as ex:
            msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
            raise PeerAssessmentRequestError(msg)

        feedback = assessment_dict.get('feedback', u'')
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": feedback,
        }

        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)
        if not peer_serializer.is_valid():
            raise PeerAssessmentRequestError(peer_serializer.errors)
        assessment = peer_serializer.save()

        # We do this to do a run around django-rest-framework serializer
        # validation, which would otherwise require two DB queries per
        # option to do validation. We already validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids)

        student_item = submission['student_item']
        scorer_item = copy.deepcopy(student_item)
        scorer_item['student_id'] = scorer_id

        scorer_workflow = _get_latest_workflow(scorer_item)
        workflow = _get_latest_workflow(student_item)

        if not scorer_workflow:
            raise PeerAssessmentWorkflowError(_(
                "You must make a submission before assessing another student."))
        if not workflow:
            raise PeerAssessmentWorkflowError(_(
                "The submission you reviewed is not in the peer workflow. This "
                "assessment cannot be submitted unless the associated "
                "submission came from the peer workflow."))

        # Close the active assessment
        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)

        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, student_item, scorer_item)

        return assessment_dict
    except DatabaseError:
        error_message = _(
            u"An error occurred while creating assessment {} for submission: "
            u"{} by: {}"
            .format(assessment_dict, submission_uuid, scorer_id)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
