def test_get_submitted_assessments_error(self, mock_filter):
    self._create_student_and_submission("Tim", "Tim's answer")
    bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
    peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
    mock_filter.side_effect = DatabaseError("Oh no.")
    submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
    self.assertEqual(1, len(submitted_assessments))

def test_get_submitted_assessments(self):
    self._create_student_and_submission("Tim", "Tim's answer")
    bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
    peer_api.get_submission_to_assess(bob_sub['uuid'], REQUIRED_GRADED_BY)
    assessment = peer_api.create_assessment(
        bob_sub["uuid"],
        bob["student_id"],
        ASSESSMENT_DICT['options_selected'],
        dict(),
        "",
        RUBRIC_DICT,
        REQUIRED_GRADED_BY,
    )
    self.assertEqual(assessment["points_earned"], 6)
    self.assertEqual(assessment["points_possible"], 14)

    submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=True)
    self.assertEqual(0, len(submitted_assessments))

    submitted_assessments = peer_api.get_submitted_assessments(bob_sub["uuid"], scored_only=False)
    self.assertEqual(1, len(submitted_assessments))

def get_student_info_path_and_context(self, data):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.
    """
    student_id = data.params.get('student_id', '')
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps

    if student_id:
        student_item = self.get_student_item_dict()
        student_item['student_id'] = student_id

        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission = submissions[0]
            submission_uuid = submissions[0]['uuid']

    example_based_assessment = None
    self_assessment = None
    peer_assessments = []
    submitted_assessments = []

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)

    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

    context = {
        'submission': submission,
        'peer_assessments': peer_assessments,
        'submitted_assessments': submitted_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria),
    }

    if peer_assessments or self_assessment or example_based_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = 'openassessmentblock/staff_debug/student_info.html'
    return path, context

def get_student_info_path_and_context(self, data):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.
    """
    student_id = data.params.get('student_id', '')
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps

    if student_id:
        student_item = self.get_student_item_dict()
        student_item['student_id'] = student_id

        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission = submissions[0]
            submission_uuid = submissions[0]['uuid']

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
    else:
        peer_assessments = []
        submitted_assessments = []

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
    else:
        self_assessment = None

    context = {
        'submission': submission,
        'peer_assessments': peer_assessments,
        'submitted_assessments': submitted_assessments,
        'self_assessment': self_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria),
    }

    if peer_assessments or self_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = 'openassessmentblock/staff_debug/student_info.html'
    return path, context

def get_student_info_path_and_context(self, data):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.
    """
    student_id = data.params.get("student_id", "")
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps

    if student_id:
        student_item = self.get_student_item_dict()
        student_item["student_id"] = student_id

        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission = submissions[0]
            submission_uuid = submissions[0]["uuid"]

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
    else:
        peer_assessments = []
        submitted_assessments = []

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
    else:
        self_assessment = None

    context = {
        "submission": submission,
        "peer_assessments": peer_assessments,
        "submitted_assessments": submitted_assessments,
        "self_assessment": self_assessment,
        "rubric_criteria": copy.deepcopy(self.rubric_criteria),
    }

    if peer_assessments or self_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = "openassessmentblock/staff_debug/student_info.html"
    return path, context

def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self assessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api

    assessment_steps = self.assessment_steps

    self_assessment = None
    self_assessment_grade_context = None

    peer_assessments = None
    peer_assessments_grade_context = []

    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None

    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    grade_exists = workflow.get('status') == "done"
    grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)

        if grade_exists:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_assessments
            ]

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if grade_exists:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if grade_exists:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(workflow['submission_uuid'])

    context.update({
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
        'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None
    })

    if peer_assessments or self_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self assessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    assessment_steps = self.assessment_steps

    example_based_assessment = None
    example_based_assessment_grade_context = None

    self_assessment = None
    self_assessment_grade_context = None

    peer_assessments = None
    peer_assessments_grade_context = []

    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None

    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    grade_exists = workflow.get('status') == "done"

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)

        if grade_exists:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_assessments
            ]

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if grade_exists:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
        if grade_exists:
            example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

    if grade_exists:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            example_based_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

    context.update({
        'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
    })

    if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

def get_student_info_path_and_context(self, student_id):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.

    Args:
        student_id (unicode): The ID of the student to report.
    """
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps

    if student_id:
        student_item = self.get_student_item_dict()
        student_item['student_id'] = student_id

        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page. "
                        u"The student ID is '{student_id}', and the file key is {file_key}"
                    ).format(student_id=student_id, file_key=file_key)
                    logger.exception(msg)

    example_based_assessment = None
    self_assessment = None
    peer_assessments = []
    submitted_assessments = []

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)

    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

    context = {
        'submission': submission,
        'peer_assessments': peer_assessments,
        'submitted_assessments': submitted_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
    }

    if peer_assessments or self_assessment or example_based_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = 'openassessmentblock/staff_debug/student_info.html'
    return path, context

def get_student_info_path_and_context(self, student_username):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.

    Args:
        student_username (unicode): The username of the student to report.
    """
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps
    anonymous_user_id = None
    submissions = None
    student_item = None

    if student_username:
        anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
        student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

    if anonymous_user_id:
        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

    if submissions:
        submission_uuid = submissions[0]['uuid']
        submission = submissions[0]

        if 'file_key' in submission.get('answer', {}):
            file_key = submission['answer']['file_key']

            try:
                submission['image_url'] = file_api.get_download_url(file_key)
            except file_api.FileUploadError:
                # Log the error, but do not prevent the rest of the student info
                # from being displayed.
                msg = (
                    u"Could not retrieve image URL for staff debug page. "
                    u"The student username is '{student_username}', and the file key is {file_key}"
                ).format(student_username=student_username, file_key=file_key)
                logger.exception(msg)

    example_based_assessment = None
    self_assessment = None
    peer_assessments = []
    submitted_assessments = []

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)

    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

    workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
    if workflow_cancellation:
        workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

    context = {
        'submission': submission,
        'workflow_cancellation': workflow_cancellation,
        'peer_assessments': peer_assessments,
        'submitted_assessments': submitted_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
    }

    if peer_assessments or self_assessment or example_based_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = 'openassessmentblock/staff_debug/student_info.html'
    return path, context

def get_student_info_path_and_context(self, student_id):
    """
    Get the proper path and context for rendering the student info
    section of the staff debug panel.

    Args:
        student_id (unicode): The ID of the student to report.
    """
    submission_uuid = None
    submission = None
    assessment_steps = self.assessment_steps
    student_item = self.get_student_item_dict()
    scores = {}
    problem_closed = None

    if student_id:
        student_item['student_id'] = student_id

        # If there is a submission available for the requested student, present
        # it. If not, there will be no other information to collect.
        submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page. "
                        u"The student ID is '{student_id}', and the file key is {file_key}"
                    ).format(student_id=student_id, file_key=file_key)
                    logger.exception(msg)

    example_based_assessment = None
    self_assessment = None
    peer_assessments = []
    submitted_assessments = []

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        # Get the data we need for instructor override of the student's score
        rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
        scores = peer_api.get_data_for_override_score(
            submission_uuid,
            student_item,
            rubric_dict,
        )
        problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)

    if "example-based-assessment" in assessment_steps:
        example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

    context = {
        'submission': submission,
        'peer_assessments': peer_assessments,
        'submitted_assessments': submitted_assessments,
        'self_assessment': self_assessment,
        'example_based_assessment': example_based_assessment,
        'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        'scores': scores,
        'problem_closed': problem_closed,
    }

    if peer_assessments or self_assessment or example_based_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

    path = 'openassessmentblock/staff_debug/student_info.html'
    return path, context

def add_submission_context(self, submission_uuid, context):
    """
    Add the submission information (self assessment, peer assessments, final grade, etc.)
    to the supplied context for display in the "learner info" portion of staff tools.

    Args:
        submission_uuid (unicode): The uuid of the submission, should NOT be None.
        context: the context to update with additional information
    """
    # Import is placed here to avoid model import at project startup.
    from openassessment.assessment.api import peer as peer_api
    from openassessment.assessment.api import self as self_api
    from openassessment.assessment.api import staff as staff_api

    assessment_steps = self.assessment_steps

    self_assessment = None
    self_assessment_grade_context = None

    peer_assessments = None
    peer_assessments_grade_context = []

    staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
    staff_assessment_grade_context = None

    submitted_assessments = None
    grade_details = None

    workflow = self.get_workflow_info(submission_uuid=submission_uuid)
    grade_exists = workflow.get('status') == "done"
    grade_utils = self.runtime._services.get('grade_utils')

    if "peer-assessment" in assessment_steps:
        peer_assessments = peer_api.get_assessments(submission_uuid)
        submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)

        if grade_exists:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            peer_assessments_grade_context = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_assessments
            ]

    if "self-assessment" in assessment_steps:
        self_assessment = self_api.get_assessment(submission_uuid)
        if grade_exists:
            self_assessment_grade_context = self._assessment_grade_context(self_assessment)

    if grade_exists:
        if staff_assessment:
            staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)
        grade_details = self.grade_details(
            submission_uuid,
            peer_assessments_grade_context,
            self_assessment_grade_context,
            staff_assessment_grade_context,
            is_staff=True,
        )

    workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

    context.update({
        'self_assessment': [self_assessment] if self_assessment else None,
        'peer_assessments': peer_assessments,
        'staff_assessment': [staff_assessment] if staff_assessment else None,
        'submitted_assessments': submitted_assessments,
        'grade_details': grade_details,
        'score': workflow.get('score'),
        'workflow_status': workflow.get('status'),
        'workflow_cancellation': workflow_cancellation,
        'are_grades_frozen': grade_utils.are_grades_frozen()
    })

    if peer_assessments or self_assessment or staff_assessment:
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        for criterion in context["rubric_criteria"]:
            criterion["total_value"] = max_scores[criterion["name"]]

def test_get_submitted_assessments_with_bad_submission(self):
    submitted_assessments = peer_api.get_submitted_assessments("bad-uuid", scored_only=True)
    self.assertEqual(0, len(submitted_assessments))