def _write_submission_to_csv(self, submission_uuid):
    """
    Dump one submission row (and its latest score row, if any) to CSV output.

    Args:
        submission_uuid (unicode): The UUID of the submission to write.

    Returns:
        None
    """
    submission = sub_api.get_submission_and_student(submission_uuid, read_replica=True)
    student_item = submission['student_item']
    self._write_unicode('submission', [
        submission['uuid'],
        student_item['student_id'],
        student_item['item_id'],
        submission['submitted_at'],
        submission['created_at'],
        json.dumps(submission['answer']),
    ])

    latest_score = sub_api.get_latest_score_for_submission(submission_uuid, read_replica=True)
    # A submission may not have been scored yet; only emit a score row when one exists.
    if latest_score is not None:
        self._write_unicode('score', [
            latest_score['submission_uuid'],
            latest_score['points_earned'],
            latest_score['points_possible'],
            latest_score['created_at'],
        ])
def test_get_latest_score_for_submission(self):
    """The score read back through the read replica matches the fixture score."""
    retrieved = sub_api.get_latest_score_for_submission(
        self.submission['uuid'],
        read_replica=True,
    )
    for field in ('points_possible', 'points_earned'):
        self.assertEqual(retrieved[field], self.SCORE[field])
def score(self):
    """Latest score for the submission we're tracking.

    Note that we are not necessarily the ones who set this score; a course
    staff override, for example, may have recorded it instead.
    """
    uuid = self.submission_uuid
    return sub_api.get_latest_score_for_submission(uuid)
def get_data_for_override_score(submission_uuid, student_item, rubric_dict):
    """
    Collect the data needed to render the score override section of the
    student data page.

    Args:
        submission_uuid (string): UUID of the student's submission
        student_item (dict): The dictionary representation of a student item.
        rubric_dict (dict): The dictionary representation of a rubric

    Returns:
        scores (dict): points_earned, points_possible, points_override
    """
    scores = {}

    latest_score = sub_api.get_latest_score_for_submission(submission_uuid)
    if latest_score:
        scores["points_earned"] = latest_score.get("points_earned")
        scores["points_possible"] = latest_score.get("points_possible")

    # Without a recorded score (there may not be any assessments yet), fall
    # back to the rubric to work out how many points are possible.
    if not scores.get("points_possible"):
        try:
            rubric = rubric_from_dict(rubric_dict)
        except InvalidRubric:
            scores["points_possible"] = None
        else:
            scores["points_possible"] = rubric.points_possible

    override = sub_api.get_score_override(student_item)
    if override:
        scores["points_override"] = override["points_earned"]

    return scores
def test_create_score(self):
    """A score set on a fresh submission is retrievable as the latest score."""
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    student_item = self._get_student_item(STUDENT_ITEM)
    self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)

    api.set_score(submission["uuid"], 11, 12)
    latest = api.get_latest_score_for_submission(submission["uuid"])
    self._assert_score(latest, 11, 12)
def test_get_latest_score_for_submission(self):
    """Score lookup via the (mocked) read replica returns the fixture score."""
    with mock.patch('submissions.api._use_read_replica', _mock_use_read_replica):
        score = sub_api.get_latest_score_for_submission(
            self.submission['uuid'],
            read_replica=True,
        )
        self.assertEqual(score['points_possible'], self.SCORE['points_possible'])
        self.assertEqual(score['points_earned'], self.SCORE['points_earned'])
def test_get_score_for_submission_hidden_score(self):
    """A "hidden" score (points possible == 0, by convention) reads back as None."""
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    api.set_score(submission["uuid"], 0, 0)

    retrieved = api.get_latest_score_for_submission(submission['uuid'])
    self.assertIs(retrieved, None)
def test_create_score(self):
    """Setting a plain score records it and creates no ScoreAnnotation."""
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    student_item = self._get_student_item(STUDENT_ITEM)
    self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)

    api.set_score(submission["uuid"], 11, 12)
    latest = api.get_latest_score_for_submission(submission["uuid"])
    self._assert_score(latest, 11, 12)
    # set_score without annotation arguments must not create annotations.
    self.assertFalse(ScoreAnnotation.objects.all().exists())
def score(self):
    """Latest score for the submission we're tracking.

    Returns:
        score (dict): The latest score for this workflow, or None if the
            workflow is incomplete.
    """
    # Incomplete workflows have no score to report.
    if self.status != self.STATUS.done:
        return None
    return sub_api.get_latest_score_for_submission(self.submission_uuid)
def test_reset_then_get_score_for_submission(self):
    """Resetting a student item's scores does not hide submission-level scores."""
    # Score a fresh submission for the student.
    submission = sub_api.create_submission(self.STUDENT_ITEM, "test answer")
    sub_api.set_score(submission["uuid"], 1, 2)

    # Wipe the scores for the student item.
    sub_api.reset_score(
        self.STUDENT_ITEM["student_id"],
        self.STUDENT_ITEM["course_id"],
        self.STUDENT_ITEM["item_id"]
    )

    # If we're retrieving the score for a particular submission,
    # instead of a student item, then we should STILL get a score.
    score = sub_api.get_latest_score_for_submission(submission["uuid"])
    self.assertIsNot(score, None)
def test_reset_then_get_score_for_submission(self):
    """A reset on the student item leaves the submission's own score intact."""
    # Create a scored submission for the student.
    submission = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
    sub_api.set_score(submission['uuid'], 1, 2)

    # Reset all scores for the student item.
    sub_api.reset_score(
        self.STUDENT_ITEM['student_id'],
        self.STUDENT_ITEM['course_id'],
        self.STUDENT_ITEM['item_id'],
    )

    # Lookups keyed by submission UUID (rather than student item)
    # should STILL return a score after the reset.
    score = sub_api.get_latest_score_for_submission(submission['uuid'])
    self.assertIsNot(score, None)
def test_override_doesnt_overwrite_submission_score(self):
    """A staff score override leaves the original submission score untouched."""
    # Create and score a submission for the student.
    submission = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
    sub_api.set_score(submission['uuid'], 1, 10)

    sub_api.score_override(
        self.STUDENT_ITEM,
        8,
        10,
    )

    # The submission keeps its original score...
    original = sub_api.get_latest_score_for_submission(submission['uuid'])
    self.assertEqual(original['points_earned'], 1)
    self.assertEqual(original['points_possible'], 10)

    # ...while the override is stored separately on the student item.
    overridden = sub_api.get_score_override(self.STUDENT_ITEM)
    self.assertEqual(overridden['points_earned'], 8)
    self.assertEqual(overridden['points_possible'], 10)
def test_set_score_with_annotation(self, reason):
    """Scoring with annotation arguments records both the score and an annotation."""
    submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
    creator = "Bob"
    kind = "staff_override"
    api.set_score(submission["uuid"], 11, 12, creator, kind, reason)

    latest = api.get_latest_score_for_submission(submission["uuid"])
    self._assert_score(latest, 11, 12)

    # There is no API endpoint for reading annotations (and exposing them
    # wouldn't make sense -- they exist purely for auditing), so inspect
    # the model directly to verify one was created for this score.
    all_annotations = ScoreAnnotation.objects.all()
    self.assertGreater(len(all_annotations), 0)
    first = all_annotations[0]
    self.assertEqual(first.score.points_earned, 11)
    self.assertEqual(first.score.points_possible, 12)
    self.assertEqual(first.annotation_type, kind)
    self.assertEqual(first.creator, creator)
    self.assertEqual(first.reason, reason)
def test_override_doesnt_overwrite_submission_score(self):
    """Overriding a score must not clobber the submission's recorded score."""
    # Create and score a submission for the student.
    submission = sub_api.create_submission(self.STUDENT_ITEM, 'test answer')
    sub_api.set_score(submission['uuid'], 1, 10)

    sub_api.score_override(
        self.STUDENT_ITEM,
        8,
        10,
    )

    # Submission-level score is unchanged by the override.
    recorded = sub_api.get_latest_score_for_submission(submission['uuid'])
    self.assertEqual(recorded['points_earned'], 1)
    self.assertEqual(recorded['points_possible'], 10)

    # The override lives on the student item instead.
    overridden = sub_api.get_score_override(self.STUDENT_ITEM)
    self.assertEqual(overridden['points_earned'], 8)
    self.assertEqual(overridden['points_possible'], 10)
def update_from_assessments(self, override_submitter_requirements=False):  # pylint: disable=arguments-differ
    """
    Update the workflow with potential new scores from assessments.

    Args:
        override_submitter_requirements (bool): When True, a new team staff
            score also stamps the submitter's own step as complete.

    Returns:
        None
    """
    # Cancelled workflows are terminal -- never change their state again.
    if self.status == self.STATUS.cancelled:
        return

    team_staff_step = self._team_staff_step
    team_staff_api = team_staff_step.api()
    new_score = team_staff_api.get_score(self.team_submission_uuid, self.REQUIREMENTS)
    if new_score:
        # new_score is just the most recent team score, it may already be recorded in sub_api
        old_score = sub_api.get_latest_score_for_submission(
            self.submission_uuid)
        if (
                # Does a prior score exist? Do the points earned match?
                not old_score or self.STAFF_ANNOTATION_TYPE not in [
                    annotation['annotation_type'] for annotation in old_score['annotations']
                ] or old_score['points_earned'] != new_score['points_earned']):
            # Set the team staff score using team submissions api, and log that fact
            self._set_team_staff_score(new_score)
            self.save()
            logger.info(
                "Team Workflow for team submission UUID %s has updated score using team staff assessment.",
                self.team_submission_uuid)
        # NOTE(review): reconstructed nesting -- the remainder appears to run
        # whenever a team staff score exists (not only when it changed); confirm
        # against the original formatting.
        common_now = now()
        team_staff_step.assessment_completed_at = common_now
        team_staff_step.save()
        if override_submitter_requirements:
            team_staff_step.submitter_completed_at = common_now
        team_staff_step.update(self.team_submission_uuid, self.REQUIREMENTS)
        # A team staff score is authoritative, so the workflow is done.
        self.status = self.STATUS.done
        self.save()
def update_from_assessments(self, assessment_requirements, override_submitter_requirements=False):
    """Query assessment APIs and change our status if appropriate.

    If the status is done, we do nothing. Once something is done, we never
    move back to any other status.

    If an assessment API says that our submitter's requirements are met,
    or if current assessment step can be skipped, then move to the next
    assessment. For example, in student training, if the submitter we're
    tracking has completed the training, they're allowed to continue.
    Whereas in peer assessment, it is allowed to skip that step so we mark
    it as started and move to the next assessment. So all skippable steps
    are in progress until completed. But user can complete next steps
    before those skippable ones.

    For every possible assessments, we find out all skippable assessments
    and mark them as skipped and consider that step already started
    (calling `on_start` for that assessmet api). Then choose the next
    un-skippable step as current step.

    If the submitter has finished all the assessments, then we change
    their status to `waiting`.

    If we're in the `waiting` status, and an assessment API says it can score
    this submission, then we record the score in the submissions API and move our
    `status` to `done`.

    By convention, if `assessment_requirements` is `None`, then assessment
    modules that need requirements should automatically say that they're
    incomplete. This allows us to update the workflow even when we don't
    know the current state of the problem. For example, if we're updating
    the workflow at the completion of an asynchronous call, we won't
    necessarily know the current state of the problem, but we would still
    want to update assessments that don't have any requirements.

    Args:
        assessment_requirements (dict): Dictionary passed to the assessment API.
            This defines the requirements for each assessment step; the APIs
            can refer to this to decide whether the requirements have been met.
            Note that the requirements could change if the author updates the
            problem definition.
        override_submitter_requirements (bool): If True, the presence of a new
            staff score will cause all of the submitter's requirements to be
            fulfilled, moving the workflow to DONE and exposing their grade.

    """
    # Cancelled workflows are terminal -- never change their state again.
    if self.status == self.STATUS.cancelled:
        return

    # Update our AssessmentWorkflowStep models with the latest from our APIs
    steps = self._get_steps()
    step_for_name = {step.name: step for step in steps}

    new_staff_score = self.get_score(assessment_requirements, {
        self.STAFF_STEP_NAME: step_for_name.get(self.STAFF_STEP_NAME, None)
    })
    if new_staff_score:
        # new_staff_score is just the most recent staff score, it may already be recorded in sub_api
        old_score = sub_api.get_latest_score_for_submission(
            self.submission_uuid)
        if (
                # Does a prior score exist? Is it a staff score? Do the points earned match?
                not old_score or self.STAFF_ANNOTATION_TYPE not in [
                    annotation['annotation_type'] for annotation in old_score['annotations']
                ] or old_score['points_earned'] != new_staff_score['points_earned']):
            # Set the staff score using submissions api, and log that fact
            self.set_staff_score(new_staff_score)
            self.save()
            logger.info(
                "Workflow for submission UUID %s has updated score using %s assessment.",
                self.submission_uuid,
                self.STAFF_STEP_NAME)

            # Update the assessment_completed_at field for all steps
            # All steps are considered "assessment complete", as the staff score will override all
            for step in steps:
                common_now = now()
                step.assessment_completed_at = common_now
                if override_submitter_requirements:
                    step.submitter_completed_at = common_now
                step.save()

    if self.status == self.STATUS.done:
        return

    # Go through each step and update its status.
    for step in steps:
        step.update(self.submission_uuid, assessment_requirements)

    possible_statuses = []
    skipped_statuses = []
    all_statuses = []
    # find which are the next unskippable steps and steps that can be skipped
    for step in steps:
        all_statuses.append(step.name)
        if step.submitter_completed_at is None:
            if step.can_skip(self.submission_uuid, assessment_requirements):
                skipped_statuses.append(step.name)
            else:
                possible_statuses.append(step.name)

    # if there is no unskippable steps and only skippable steps left
    # then consider 1st skippable step as unskippable
    if len(possible_statuses) == 0 and len(skipped_statuses) > 0:
        unskip_step = skipped_statuses.pop()
        possible_statuses.append(unskip_step)
        if step_for_name.get(unskip_step):
            step_for_name[unskip_step].unskip()

    # mark skippable step as skipped only if current it's the current step
    # this prevent skipping a step too early
    for step_name in skipped_statuses:
        skip_step = step_for_name.get(step_name)
        if skip_step:
            # skip when its the current status or were before than current status
            if self.status in all_statuses and all_statuses.index(
                    self.status) >= all_statuses.index(step_name):
                skip_step.skip()
                # skiping an assessment step should also start it
                skip_step.start(self.submission_uuid)

    new_status = next(
        iter(possible_statuses),
        self.STATUS.waiting  # if nothing's left to complete, we're waiting
    )

    # If the submitter is beginning the next assessment, notify the
    # appropriate assessment API.
    new_step = step_for_name.get(new_status)
    if new_step is not None:
        new_step.start(self.submission_uuid)

    # If the submitter has done all they need to do, let's check to see if
    # all steps have been fully assessed (i.e. we can score it).
    if new_status == self.STATUS.waiting and all(
            step.assessment_completed_at for step in steps):
        score = self.get_score(assessment_requirements, step_for_name)
        # If we found a score, then we're done
        if score is not None:
            # Only set the score if it's not a staff score, in which case it will have already been set above
            if score.get("staff_id") is None:
                self.set_score(score)
            new_status = self.STATUS.done

    # Finally save our changes if the status has changed
    if self.status != new_status:
        self.status = new_status
        self.save()
        logger.info(
            "Workflow for submission UUID %s has updated status to %s",
            self.submission_uuid, new_status)
def test_get_latest_score_for_submission(self):
    """Read-replica lookups return the same score that was recorded."""
    fetched = sub_api.get_latest_score_for_submission(
        self.submission['uuid'],
        read_replica=True,
    )
    self.assertEqual(fetched['points_possible'], self.SCORE['points_possible'])
    self.assertEqual(fetched['points_earned'], self.SCORE['points_earned'])
def update_from_assessments(self, assessment_requirements, override_submitter_requirements=False):
    """Query assessment APIs and change our status if appropriate.

    If the status is done, we do nothing. Once something is done, we never
    move back to any other status.

    If an assessment API says that our submitter's requirements are met,
    then move to the next assessment. For example, in peer assessment,
    if the submitter we're tracking has assessed the required number
    of submissions, they're allowed to continue.

    If the submitter has finished all the assessments, then we change
    their status to `waiting`.

    If we're in the `waiting` status, and an assessment API says it can score
    this submission, then we record the score in the submissions API and move our
    `status` to `done`.

    By convention, if `assessment_requirements` is `None`, then assessment
    modules that need requirements should automatically say that they're
    incomplete. This allows us to update the workflow even when we don't
    know the current state of the problem. For example, if we're updating
    the workflow at the completion of an asynchronous call, we won't
    necessarily know the current state of the problem, but we would still
    want to update assessments that don't have any requirements.

    Args:
        assessment_requirements (dict): Dictionary passed to the assessment API.
            This defines the requirements for each assessment step; the APIs
            can refer to this to decide whether the requirements have been met.
            Note that the requirements could change if the author updates the
            problem definition.
        override_submitter_requirements (bool): If True, the presence of a new
            staff score will cause all of the submitter's requirements to be
            fulfilled, moving the workflow to DONE and exposing their grade.

    """
    # Cancelled workflows are terminal -- never change their state again.
    if self.status == self.STATUS.cancelled:
        return

    # Update our AssessmentWorkflowStep models with the latest from our APIs
    steps = self._get_steps()
    step_for_name = {step.name: step for step in steps}

    new_staff_score = self.get_score(assessment_requirements, {'staff': step_for_name.get('staff', None)})
    if new_staff_score:
        # new_staff_score is just the most recent staff score, it may already be recorded in sub_api
        old_score = sub_api.get_latest_score_for_submission(self.submission_uuid)
        if (
                # Does a prior score exist? Is it a staff score? Do the points earned match?
                not old_score or
                # Fix: PEP 8 membership test (`X not in Y`, not `not X in Y`).
                self.STAFF_ANNOTATION_TYPE not in [
                    annotation['annotation_type'] for annotation in old_score['annotations']
                ] or
                old_score['points_earned'] != new_staff_score['points_earned']
        ):
            # Set the staff score using submissions api, and log that fact
            self.set_staff_score(new_staff_score)
            self.save()
            # Fix: lazy %-style logging args instead of eager str.format().
            logger.info(
                u"Workflow for submission UUID %s has updated score using staff assessment.",
                self.submission_uuid
            )

            # Update the assessment_completed_at field for all steps
            # All steps are considered "assessment complete", as the staff score will override all
            for step in steps:
                common_now = now()
                step.assessment_completed_at = common_now
                if override_submitter_requirements:
                    step.submitter_completed_at = common_now
                step.save()

    if self.status == self.STATUS.done:
        return

    # Go through each step and update its status.
    for step in steps:
        step.update(self.submission_uuid, assessment_requirements)

    # Fetch name of the first step that the submitter hasn't yet completed.
    new_status = next(
        (step.name for step in steps if step.submitter_completed_at is None),
        self.STATUS.waiting  # if nothing's left to complete, we're waiting
    )

    # If the submitter is beginning the next assessment, notify the
    # appropriate assessment API.
    new_step = step_for_name.get(new_status)
    if new_step is not None:
        on_start_func = getattr(new_step.api(), 'on_start', None)
        if on_start_func is not None:
            on_start_func(self.submission_uuid)

    # If the submitter has done all they need to do, let's check to see if
    # all steps have been fully assessed (i.e. we can score it).
    if (
            new_status == self.STATUS.waiting and
            all(step.assessment_completed_at for step in steps)
    ):
        score = self.get_score(assessment_requirements, step_for_name)
        # If we found a score, then we're done
        if score is not None:
            # Only set the score if it's not a staff score, in which case it will have already been set above
            if score.get("staff_id") is None:
                self.set_score(score)
            new_status = self.STATUS.done

    # Finally save our changes if the status has changed
    if self.status != new_status:
        self.status = new_status
        self.save()
        logger.info(
            u"Workflow for submission UUID %s has updated status to %s",
            self.submission_uuid, new_status
        )