def test_create_with_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts
    # We can't select an option for the last criterion, but we do
    # provide written feedback.
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    feedback = {
        u"feedback": u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐."
    }
    AssessmentPart.create_from_option_names(
        assessment, selected, feedback=feedback
    )

    # Check the score (the feedback-only criterion should count for 0 points)
    self.assertEqual(assessment.points_earned, 3)
    self.assertEqual(assessment.points_possible, 4)

    # Check the feedback text
    feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
    self.assertEqual(feedback_only.feedback, u"𝕿𝖍𝖎𝖘 𝖎𝖘 𝖘𝖔𝖒𝖊 𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐.")
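# The fixture helper `_rubric_with_one_feedback_only_criterion` is referenced by
# several tests here but not shown in this excerpt. The sketch below is a
# hypothetical reconstruction, assuming the dict shape accepted by
# `rubric_from_dict` elsewhere in these tests; the point values are chosen only
# so the surrounding assertions (3 points earned out of 4 possible) hold, and
# the real fixture may differ.
def _rubric_with_one_feedback_only_criterion(self):
    return rubric_from_dict({
        # Top-level prompt key shape is an assumption; not shown in this excerpt.
        'prompts': [{'description': "Test prompt"}],
        'criteria': [
            {
                'order_num': 0, 'name': u"vøȼȺƀᵾłȺɍɏ", 'prompt': "Vocabulary",
                'options': [
                    {'order_num': 0, 'name': u"poor", 'points': 0, 'explanation': ""},
                    {'order_num': 1, 'name': u"𝓰𝓸𝓸𝓭", 'points': 1, 'explanation': ""},
                    {'order_num': 2, 'name': u"great", 'points': 2, 'explanation': ""},
                ],
            },
            {
                'order_num': 1, 'name': u"ﻭɼค๓๓คɼ", 'prompt': "Grammar",
                'options': [
                    {'order_num': 0, 'name': u"poor", 'points': 0, 'explanation': ""},
                    {'order_num': 1, 'name': u"єχ¢єℓℓєηт", 'points': 2, 'explanation': ""},
                ],
            },
            {
                # Feedback-only criterion: no options, written feedback only.
                'order_num': 2, 'name': u"feedback", 'prompt': "Feedback", 'options': [],
            },
        ],
    })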
def test_full_assessment_dict_criteria_no_options(self):
    # Create a rubric with a criterion that has no options (just feedback)
    rubric_dict = copy.deepcopy(RUBRIC)
    rubric_dict['criteria'].append({
        'order_num': 2,
        'name': 'feedback only',
        'prompt': 'feedback only',
        'options': []
    })
    rubric = rubric_from_dict(rubric_dict)

    # Create an assessment for the rubric
    assessment = Assessment.create(rubric, "Bob", "submission-UUID", "PE")
    selected = {
        "vøȼȺƀᵾłȺɍɏ": "𝓰𝓸𝓸𝓭",
        "ﻭɼค๓๓คɼ": "єχ¢єℓℓєηт",
    }
    feedback = {
        "feedback only": "enjoy the feedback!"
    }
    AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)

    # Serialize the assessment
    serialized = full_assessment_dict(assessment)

    # Verify that the assessment dict correctly serialized the criteria with options.
    self.assertEqual(serialized['parts'][0]['criterion']['name'], "vøȼȺƀᵾłȺɍɏ")
    self.assertEqual(serialized['parts'][0]['option']['name'], "𝓰𝓸𝓸𝓭")
    self.assertEqual(serialized['parts'][1]['criterion']['name'], "ﻭɼค๓๓คɼ")
    self.assertEqual(serialized['parts'][1]['option']['name'], "єχ¢єℓℓєηт")

    # Verify that the assessment dict correctly serialized the criterion with no options.
    self.assertIs(serialized['parts'][2]['option'], None)
    self.assertEqual(serialized['parts'][2]['criterion']['name'], "feedback only")
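# The assertions above pin down part of the shape `full_assessment_dict`
# produces for each entry of serialized['parts']. Illustrative sketch of the
# implied subset only (not the full schema, which is not shown in this excerpt):
# a part serializes its criterion, and 'option' is None for a feedback-only
# criterion.
feedback_only_part_shape = {
    'criterion': {'name': "feedback only"},  # other criterion fields omitted
    'option': None,                          # no option can be selected
}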
def _complete_assessment(
        rubric_dict,
        scorer_id,
        peer_submission_uuid,
        options_selected,
        criterion_feedback,
        scorer_workflow,
        overall_feedback,
        num_required_grades,
        scored_at,
):
    """
    Internal function for atomic assessment creation. Creates a peer assessment
    and closes the associated peer workflow item in a single transaction.

    Args:
        rubric_dict (dict): The rubric model associated with this assessment
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        peer_submission_uuid (str): The submission uuid for the submission being
            assessed.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        scorer_workflow (PeerWorkflow): The PeerWorkflow associated with the
            scorer. Updates the workflow item associated with this assessment.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the peer assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        peer_submission_uuid,
        PEER_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)

    return assessment
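# The docstring above promises a single transaction, but no transaction
# management is visible in this excerpt; presumably a decorator or the caller
# supplies it. Minimal sketch of such a wrapper, assuming Django's
# `transaction.atomic`; `create_peer_assessment_atomically` is a hypothetical
# name, not part of this codebase.
from django.db import transaction

def create_peer_assessment_atomically(**kwargs):
    # Run the internal helper inside one database transaction so the
    # Assessment, its parts, and the workflow update commit or roll back
    # together.
    with transaction.atomic():
        return _complete_assessment(**kwargs)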
def _complete_assessment(
        submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at,
        scorer_workflow
):
    """
    Internal function for atomic assessment creation. Creates a staff assessment
    in a single transaction.

    Args:
        submission_uuid (str): The submission uuid for the submission being
            assessed.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        rubric_dict (dict): The rubric model associated with this assessment
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.
        scorer_workflow: The scorer's staff workflow, if any. When provided,
            its active assessment is closed once this assessment is created.

    Returns:
        The Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the staff assessment
    assessment = Assessment.create(
        rubric,
        scorer_id,
        submission_uuid,
        STAFF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # Create assessment parts for each criterion in the rubric
    # This will raise an `InvalidRubricSelection` if the selected options do not
    # match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

    # Close the active assessment
    if scorer_workflow is not None:
        scorer_workflow.close_active_assessment(assessment, scorer_id)

    return assessment
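# Unlike the peer helper above, the staff helper tolerates a missing workflow
# (there is simply nothing to close). Hedged sketch of a direct call with
# `scorer_workflow=None`; the helper name, selections, and feedback values are
# illustrative assumptions, not fixtures from this codebase.
def _example_staff_assessment(submission_uuid, scorer_id, rubric_dict):
    return _complete_assessment(
        submission_uuid=submission_uuid,
        scorer_id=scorer_id,
        options_selected={"clarity": "Very clear"},            # criterion -> option name
        criterion_feedback={"clarity": "Clear and well argued."},
        overall_feedback=u"Nice work overall.",
        rubric_dict=rubric_dict,
        scored_at=None,        # per the docstring, defaults to "now"
        scorer_workflow=None,  # no staff workflow item to close
    )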
def test_no_feedback_provided_for_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts
    # Do NOT provide feedback for the feedback-only criterion
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    feedback = {}

    # Expect an error when we try to create the assessment parts
    with self.assertRaises(InvalidRubricSelection):
        AssessmentPart.create_from_option_names(assessment, selected, feedback=feedback)
def _complete_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at
):
    """
    Internal function for creating an assessment and its parts atomically.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        rubric_dict (dict): Serialized Rubric model.
        scored_at (datetime): The timestamp of the assessment.

    Returns:
        Assessment model

    """
    # Get or create the rubric
    rubric = rubric_from_dict(rubric_dict)

    # Create the self assessment
    assessment = Assessment.create(
        rubric,
        user_id,
        submission_uuid,
        SELF_TYPE,
        scored_at=scored_at,
        feedback=overall_feedback
    )

    # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
    AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
    return assessment
def test_create_with_all_feedback_only_criteria(self):
    rubric = self._rubric_with_all_feedback_only_criteria()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts, each of which is feedback-only (no points)
    selected = {}
    feedback = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    AssessmentPart.create_from_option_names(
        assessment, selected, feedback=feedback
    )

    # Check the score (should be 0, since we haven't selected any points)
    self.assertEqual(assessment.points_earned, 0)
    self.assertEqual(assessment.points_possible, 0)
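# Like `_rubric_with_one_feedback_only_criterion`, the fixture helper used in
# the test above is not shown in this excerpt. Hypothetical reconstruction:
# the criterion names mirror the feedback dict in the test, and every criterion
# has an empty options list, so points_possible is 0. The top-level prompt key
# shape is an assumption.
def _rubric_with_all_feedback_only_criteria(self):
    return rubric_from_dict({
        'prompts': [{'description': "Test prompt"}],
        'criteria': [
            {'order_num': 0, 'name': u"vøȼȺƀᵾłȺɍɏ", 'prompt': "Vocabulary", 'options': []},
            {'order_num': 1, 'name': u"ﻭɼค๓๓คɼ", 'prompt': "Grammar", 'options': []},
        ],
    })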
def test_default_feedback_for_feedback_only_criterion(self):
    rubric = self._rubric_with_one_feedback_only_criterion()
    assessment = Assessment.create(rubric, "Bob", "submission UUID", "PE")

    # Create assessment parts, but do NOT provide any feedback
    # This simulates how non-peer assessments are created
    # Note that this is different from providing an empty feedback dict;
    # here, we're not providing the `feedback` kwarg at all.
    selected = {
        u"vøȼȺƀᵾłȺɍɏ": u"𝓰𝓸𝓸𝓭",
        u"ﻭɼค๓๓คɼ": u"єχ¢єℓℓєηт",
    }
    AssessmentPart.create_from_option_names(assessment, selected)

    # Check the score (the feedback-only criterion should count for 0 points)
    self.assertEqual(assessment.points_earned, 3)
    self.assertEqual(assessment.points_possible, 4)

    # Check the feedback text, which should default to an empty string
    feedback_only = AssessmentPart.objects.get(criterion__name="feedback")
    self.assertEqual(feedback_only.feedback, u"")
def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None
):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission UUID is invalid,
            or the assessment dict does not contain the required values to
            create an assessment.
        PeerAssessmentWorkflowError: Raised when the scorer has no peer workflow
            or no open workflow item for the given submission UUID.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict, num_required_grades=3)

    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid

        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the peer assessment
        assessment = Assessment.create(
            rubric,
            scorer_id,
            peer_submission_uuid,
            PEER_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # Create assessment parts for each criterion in the rubric
        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"Rubric definition was not valid"
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options selected in the rubric"
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while retrieving the peer workflow item by scorer with ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
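# Hedged caller-side sketch for the peer API above, using only names defined in
# this excerpt; the function name and argument values are illustrative.
def _example_peer_assessment_call(scorer_submission_uuid, scorer_id, rubric_dict):
    options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
    criterion_feedback = {"clarity": "I thought this essay was very clear."}
    try:
        return create_assessment(
            scorer_submission_uuid, scorer_id,
            options_selected, criterion_feedback,
            "Your submission was thrilling.", rubric_dict,
            num_required_grades=3,
        )
    except PeerAssessmentWorkflowError:
        # The scorer has no open workflow item; they must fetch a new
        # submission to grade before assessing.
        return None
    except PeerAssessmentRequestError:
        # Bad rubric definition, or selections that don't match the rubric.
        raise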
def create_assessment(
        submission_uuid,
        user_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being
            assessed.
        user_id (str): The ID of the user creating the assessment. This must
            match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option
            values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion. Since
            criterion feedback is optional, some criteria may not appear in
            the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission
            overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the
            current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the
            user is allowed to score.

    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            u"because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another student "
                u"(submission student ID {student_id} does not match your "
                u"student id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            u"Could not submit a self-assessment because no submission "
            u"exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        # Get or create the rubric
        rubric = rubric_from_dict(rubric_dict)

        # Create the self assessment
        assessment = Assessment.create(
            rubric,
            user_id,
            submission_uuid,
            SELF_TYPE,
            scored_at=scored_at,
            feedback=overall_feedback
        )

        # This will raise an `InvalidRubricSelection` if the selected options do not match the rubric.
        AssessmentPart.create_from_option_names(assessment, options_selected, feedback=criterion_feedback)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
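# The self-assessment API above rejects duplicates up front: a second call for
# the same submission raises SelfAssessmentRequestError before anything is
# created. Hedged sketch of that behavior; the function name and argument
# values are illustrative.
def _example_self_assessment_twice(submission_uuid, user_id, rubric_dict):
    options_selected = {"clarity": "Very clear"}
    serialized = create_assessment(
        submission_uuid, user_id, options_selected,
        criterion_feedback={}, overall_feedback=u"", rubric_dict=rubric_dict,
    )
    try:
        create_assessment(
            submission_uuid, user_id, options_selected,
            criterion_feedback={}, overall_feedback=u"", rubric_dict=rubric_dict,
        )
    except SelfAssessmentRequestError:
        pass  # expected: a self-assessment already exists for this submission
    return serialized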