def test_create_feedback_on_an_assessment(self):
    """A submitter can attach feedback to a peer assessment they received, once scored."""
    tim_sub, tim = self._create_student_and_submission("Tim", "Tim's answer")
    bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")

    # Bob pulls Tim's submission from the queue and assesses it.
    peer_api.get_submission_to_assess(bob_sub['uuid'], 1)
    assessment = peer_api.create_assessment(
        bob_sub["uuid"],
        bob["student_id"],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        REQUIRED_GRADED_BY,
    )

    # Tim assesses Bob's submission so Tim's own grading requirement is met.
    peer_api.get_submission_to_assess(tim_sub['uuid'], 1)
    peer_api.create_assessment(
        tim_sub["uuid"],
        tim["student_id"],
        ASSESSMENT_DICT['options_selected'],
        ASSESSMENT_DICT['criterion_feedback'],
        ASSESSMENT_DICT['overall_feedback'],
        RUBRIC_DICT,
        REQUIRED_GRADED_BY,
    )

    # Requesting the score finalizes the peer step (side effect of get_score),
    # which is a precondition for leaving feedback on received assessments.
    peer_api.get_score(
        tim_sub["uuid"],
        {
            'must_grade': 1,
            'must_be_graded_by': 1
        }
    )

    # No feedback exists yet.
    feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
    self.assertIsNone(feedback)

    peer_api.set_assessment_feedback(
        {
            'submission_uuid': tim_sub['uuid'],
            'feedback_text': 'Bob is a jerk!',
            'options': [
                'I disliked this assessment',
                'I felt this assessment was unfair',
            ]
        }
    )

    # The saved feedback round-trips and is linked to Bob's assessment.
    # FIX: `assertEquals` is a deprecated alias -- use the canonical
    # `assertEqual`. NOTE(review): `assertItemsEqual` exists only on
    # Python 2; if this suite runs on Python 3, switch to
    # `assertCountEqual` -- left as-is since the runtime version is not
    # visible here.
    saved_feedback = peer_api.get_assessment_feedback(tim_sub['uuid'])
    self.assertIsNot(saved_feedback, None)
    self.assertEqual(saved_feedback['submission_uuid'], assessment['submission_uuid'])
    self.assertEqual(saved_feedback['feedback_text'], 'Bob is a jerk!')
    self.assertItemsEqual(saved_feedback['options'], [
        {'text': 'I disliked this assessment'},
        {'text': 'I felt this assessment was unfair'},
    ])
    self.assertEqual(saved_feedback["assessments"][0]["submission_uuid"], assessment["submission_uuid"])
def setUp(self): super(TestOraAggregateDataIntegration, self).setUp() # Create submissions and assessments self.submission = self._create_submission(STUDENT_ITEM) self.scorer_submission = self._create_submission(SCORER_ITEM) self.earned_points = 1 self.possible_points = 2 peer_api.get_submission_to_assess(self.scorer_submission['uuid'], 1) self.assessment = self._create_assessment(self.scorer_submission['uuid']) sub_api.set_score(self.submission['uuid'], self.earned_points, self.possible_points) self.score = sub_api.get_score(STUDENT_ITEM) peer_api.get_score(self.submission['uuid'], {'must_be_graded_by': 1, 'must_grade': 0}) self._create_assessment_feedback(self.submission['uuid'])
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api
    from submissions import api as sub_api

    # Peer specific stuff...
    steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']

    staff_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False

    if "peer-assessment" in steps:
        # get_score also finalizes the peer score as a side effect.
        peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = [
            self._assessment_grade_context(peer_assessment)
            for peer_assessment in peer_api.get_assessments(submission_uuid)
        ]
        has_submitted_feedback = feedback is not None

    if "self-assessment" in steps:
        self_assessment = self._assessment_grade_context(
            self_api.get_assessment(submission_uuid)
        )

    latest_staff = staff_api.get_latest_staff_assessment(submission_uuid)
    if latest_staff:
        staff_assessment = self._assessment_grade_context(latest_staff)

    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)

    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status
    # is "done" when all the criteria in the rubric are feedback-only
    # (no options).
    context = {
        'score': workflow['score'],
        'feedback_text': feedback_text,
        'has_submitted_feedback': has_submitted_feedback,
        'student_submission': create_submission_dict(student_submission, self.prompts),
        'peer_assessments': peer_assessments,
        'grade_details': self.grade_details(
            submission_uuid,
            peer_assessments=peer_assessments,
            self_assessment=self_assessment,
            staff_assessment=staff_assessment,
        ),
        'file_upload_type': self.file_upload_type,
        'allow_latex': self.allow_latex,
        'file_urls': self.get_download_urls_from_submission(student_submission),
        'xblock_id': self.get_xblock_id()
    }
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self asssessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    steps = self.assessment_steps

    example_based_assessment = None
    example_based_assessment_grade_context = None
    self_assessment = None
    self_assessment_grade_context = None
    peer_assessments = None
    peer_assessments_grade_context = []
    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None
    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    grade_exists = workflow.get('status') == "done"

    if "peer-assessment" in steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
        if grade_exists:
            # get_score finalizes the peer score as a side effect.
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(assessment)
                for assessment in peer_assessments
            ]

    if "self-assessment" in steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if grade_exists:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if "example-based-assessment" in steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
        if grade_exists:
            example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

    if grade_exists:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            example_based_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

    context.update({
        'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
    })

    # Only surface rubric maxima when at least one assessment exists.
    if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]
def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self asssessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api

    steps = self.assessment_steps

    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None
    self_assessment = None
    self_assessment_grade_context = None
    peer_assessments = None
    peer_assessments_grade_context = []
    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    graded = workflow.get('status') == "done"
    grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

    if "peer-assessment" in steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
        if graded:
            # get_score finalizes the peer score as a side effect.
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(assessment)
                for assessment in peer_assessments
            ]

    if "self-assessment" in steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if graded:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if graded:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

    context.update({
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
        'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None
    })

    # Only surface rubric maxima when at least one assessment exists.
    if peer_assessments or self_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]
def render_grade_complete(self, workflow):
    """
    Render the grade complete state.

    Args:
        workflow (dict): The serialized Workflow model.

    Returns:
        tuple of context (dict), template_path (string)
    """
    # Import is placed here to avoid model import at project startup.
    from submissions import api as sub_api
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api

    # Peer specific stuff...
    steps = self.assessment_steps
    submission_uuid = workflow['submission_uuid']

    staff_assessment = None
    self_assessment = None
    feedback = None
    peer_assessments = []
    has_submitted_feedback = False

    if "peer-assessment" in steps:
        # get_score also finalizes the peer score as a side effect.
        peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
        feedback = peer_api.get_assessment_feedback(submission_uuid)
        peer_assessments = [
            self._assessment_grade_context(peer_assessment)
            for peer_assessment in peer_api.get_assessments(submission_uuid)
        ]
        has_submitted_feedback = feedback is not None

    if "self-assessment" in steps:
        self_assessment = self._assessment_grade_context(
            self_api.get_assessment(submission_uuid)
        )

    latest_staff = staff_api.get_latest_staff_assessment(submission_uuid)
    if latest_staff:
        staff_assessment = self._assessment_grade_context(latest_staff)

    feedback_text = feedback.get('feedback', '') if feedback else ''
    student_submission = sub_api.get_submission(submission_uuid)

    # We retrieve the score from the workflow, which in turn retrieves
    # the score for our current submission UUID.
    # We look up the score by submission UUID instead of student item
    # to ensure that the score always matches the rubric.
    # It's possible for the score to be `None` even if the workflow status
    # is "done" when all the criteria in the rubric are feedback-only
    # (no options).
    context = {
        'score': workflow['score'],
        'score_explanation': self._get_score_explanation(workflow),
        'feedback_text': feedback_text,
        'has_submitted_feedback': has_submitted_feedback,
        'student_submission': create_submission_dict(student_submission, self.prompts),
        'peer_assessments': peer_assessments,
        'grade_details': self.grade_details(
            submission_uuid,
            peer_assessments=peer_assessments,
            self_assessment=self_assessment,
            staff_assessment=staff_assessment,
        ),
        'file_upload_type': self.file_upload_type,
        'allow_multiple_files': self.allow_multiple_files,
        'allow_latex': self.allow_latex,
        'prompts_type': self.prompts_type,
        'file_urls': self.get_download_urls_from_submission(student_submission),
        'xblock_id': self.get_xblock_id()
    }
    return ('openassessmentblock/grade/oa_grade_complete.html', context)
def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self asssessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api

    assessment_steps = self.assessment_steps
    self_assessment = None
    self_assessment_grade_context = None
    peer_assessments = None
    peer_assessments_grade_context = []
    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None
    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    grade_exists = workflow.get('status') == "done"
    # _services is XBlock-runtime internal; the service may be absent,
    # in which case .get() returns None.
    grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
        if grade_exists:
            # get_score finalizes the peer score as a side effect.
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_assessments
            ]

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if grade_exists:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if grade_exists:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

    context.update({
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
        # BUG FIX: guard against the 'grade_utils' service being unavailable.
        # Previously `grade_utils.are_grades_frozen()` raised AttributeError
        # when the service lookup returned None; the sibling implementation
        # of this method already guards the same way.
        'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None
    })

    if peer_assessments or self_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]
def update_from_assessments(self, assessment_requirements):
    """Query self and peer APIs and change our status if appropriate.

    If the status is done, we do nothing. Once something is done, we never
    move back to any other status.

    By default, an `AssessmentWorkflow` starts with status `peer`.

    If the peer API says that our submitter's requirements are met -- that
    the submitter of the submission we're tracking has assessed the
    required number of other submissions -- then the status will move to
    `self`.

    If the self API says that the person who created the submission we're
    tracking has assessed themselves, then we move to `waiting`.

    If we're in the `waiting` status, and the peer API says it can score
    this submission (meaning other students have created enough
    assessments of it), then we record the score in the submissions API
    and move our `status` to `done`.

    Args:
        assessment_requirements (dict): Dictionary that currently looks like:
            `{"peer": {"must_grade": <int>, "must_be_graded_by": <int>}}`
            `must_grade` is the number of assessments a student must
            complete. `must_be_graded_by` is the number of assessments a
            submission must receive to be scored. `must_grade` should be
            greater than `must_be_graded_by` to ensure that everyone will
            get scored. The intention is to eventually pass in more
            assessment sequence specific requirements in this dict.
    """
    # Local imports avoid a model-import cycle at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api

    # If we're done, we're done -- it doesn't matter if requirements have
    # changed because we've already written a score.
    if self.status == self.STATUS.done:
        return

    # Update our AssessmentWorkflowStep models with the latest from our APIs
    steps = self._get_steps()

    # Go through each step and update its status.
    for step in steps:
        step.update(self.submission_uuid, assessment_requirements)

    # Fetch name of the first step that the submitter hasn't yet completed.
    new_status = next(
        (step.name for step in steps if step.submitter_completed_at is None),
        self.STATUS.waiting  # if nothing's left to complete, we're waiting
    )

    # If the submitter has done all they need to do, let's check to see if
    # all steps have been fully assessed (i.e. we can score it).
    if (new_status == self.STATUS.waiting and
            all(step.assessment_completed_at for step in steps)):

        # At this point, we're trying to give a score. We currently have a
        # very simple rule for this -- if it has a peer step, use that for
        # scoring. If not, use the self step. Later on, we may put more
        # interesting rules here.
        step_names = [step.name for step in steps]
        score = None
        if self.STATUS.peer in step_names:
            # NOTE(review): get_score presumably returns None until enough
            # peers have graded this submission -- confirm against the
            # peer API; a falsy score leaves the workflow in `waiting`.
            score = peer_api.get_score(
                self.submission_uuid,
                assessment_requirements[self.STATUS.peer]
            )
        elif self.STATUS.self in step_names:
            score = self_api.get_score(self.submission_uuid, {})

        if score:
            self.set_score(score)
            new_status = self.STATUS.done

    # Finally save our changes if the status has changed
    if self.status != new_status:
        self.status = new_status
        self.save()