Example #1
    def test_get_submission_and_student(self):
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)

        # Retrieve the submission by its uuid
        retrieved = api.get_submission_and_student(submission['uuid'])
        self.assertItemsEqual(submission, retrieved)

        # Should raise an exception if the student item does not exist
        with self.assertRaises(api.SubmissionNotFoundError):
            api.get_submission_and_student(u'no such uuid')
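For orientation, the dict that get_submission_and_student() returns combines the submission fields with an expanded student_item dict. The sketch below is inferred only from the keys these examples actually read (uuid, student_item, answer, submitted_at, created_at); the values are placeholders and any other keys are not guaranteed.

# Sketch of the return value of get_submission_and_student(), with placeholder
# values; only the keys read by the examples on this page are shown.
submission = {
    'uuid': 'e65ca0e9-...',                    # submission UUID (placeholder)
    'student_item': {                          # expanded student item info
        'student_id': 'anonymous-learner-id',
        'course_id': 'course-v1:Org+Course+Run',
        'item_id': 'block-id',
    },
    'answer': {'text': 'learner response'},    # arbitrary JSON-serializable answer
    'submitted_at': None,                      # datetime in practice
    'created_at': None,                        # datetime in practice
}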
Example #2
    def test_load_non_json_answer(self):
        # This should never happen, if folks are using the public API.
        # Create a submission with a raw answer that is NOT valid JSON
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_model = Submission.objects.get(uuid=submission['uuid'])
        sub_model.raw_answer = ''
        sub_model.save()

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission(sub_model.uuid)

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission_and_student(sub_model.uuid)
Example #3
    def test_load_non_json_answer(self):
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_model = Submission.objects.get(uuid=submission['uuid'])

        # This should never happen, if folks are using the public API.
        # Create a submission with a raw answer that is NOT valid JSON
        query = "UPDATE submissions_submission SET raw_answer = '}' WHERE id = %s"
        connection.cursor().execute(query, [str(sub_model.id)])
        transaction.commit_unless_managed()

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission(sub_model.uuid)

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission_and_student(sub_model.uuid)
Example #4
    def create_workflow(cls, submission_uuid):
        """
        Create a student training workflow.

        Args:
            submission_uuid (str): The UUID of the submission from the student
                being trained.

        Returns:
            StudentTrainingWorkflow

        Raises:
            SubmissionError: There was an error retrieving the submission.

        """
        # Retrieve the student item info
        submission = sub_api.get_submission_and_student(submission_uuid)
        student_item = submission['student_item']

        # Create the workflow
        try:
            workflow, __ = cls.objects.get_or_create(
                submission_uuid=submission_uuid,
                student_id=student_item['student_id'],
                item_id=student_item['item_id'],
                course_id=student_item['course_id']
            )
            return workflow
        # If we get an integrity error, it means we've violated a uniqueness constraint
        # (someone has created this object after we checked if it existed)
        # We can therefore assume that the object exists and do nothing.
        except IntegrityError:
            pass
Example #5
    def _write_submission_to_csv(self, submission_uuid):
        """
        Write submission data to CSV.

        Args:
            submission_uuid (unicode): The UUID of the submission to write.

        Returns:
            None

        """
        submission = sub_api.get_submission_and_student(submission_uuid, read_replica=True)
        self._write_unicode('submission', [
            submission['uuid'],
            submission['student_item']['student_id'],
            submission['student_item']['item_id'],
            submission['submitted_at'],
            submission['created_at'],
            json.dumps(submission['answer'])
        ])

        score = sub_api.get_latest_score_for_submission(submission_uuid, read_replica=True)
        if score is not None:
            self._write_unicode('score', [
                score['submission_uuid'],
                score['points_earned'],
                score['points_possible'],
                score['created_at']
            ])
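Examples #5 and #28 pass read_replica=True, which routes the lookup to a read replica for reporting-style reads. Below is a minimal, hypothetical helper (the function name is mine) wrapping that call in the kind of error handling Example #10 uses; treating SubmissionError as the catch-all base class is an assumption based on that example.

from submissions import api as sub_api

def fetch_submission_for_report(submission_uuid):
    # Hypothetical helper: read from the replica for reporting purposes and
    # return None instead of raising if the submission cannot be retrieved.
    try:
        return sub_api.get_submission_and_student(submission_uuid, read_replica=True)
    except sub_api.SubmissionError:
        return None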
Example #6
    def set_staff_score(self, score, reason=None):
        """
        Set a staff score for the workflow.

        Allows for staff scores to be set on a submission, with annotations to provide an audit trail if needed.
        This method can be used for both required staff grading, and staff overrides.

        Args:
            score (dict): A dict containing 'points_earned', 'points_possible', and 'staff_id'.
            reason (string): An optional parameter specifying the reason for the staff grade. A default value
                will be used in the event that this parameter is not provided.

        """
        if reason is None:
            reason = "A staff member has defined the score for this submission"
        sub_dict = sub_api.get_submission_and_student(self.submission_uuid)
        sub_api.reset_score(
            sub_dict['student_item']['student_id'],
            self.course_id,
            self.item_id,
            emit_signal=False
        )
        sub_api.set_score(
            self.submission_uuid,
            score["points_earned"],
            score["points_possible"],
            annotation_creator=score["staff_id"],
            annotation_type=self.STAFF_ANNOTATION_TYPE,
            annotation_reason=reason
        )
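For context, set_staff_score() expects a score dict with the keys named in its docstring. A hypothetical caller is sketched below; the helper and its argument names are illustrative only and not part of the original code.

def apply_staff_override(workflow, staff_id, points_earned, points_possible, reason=None):
    # Hypothetical wrapper around Example #6: build the score dict that
    # set_staff_score() expects and apply it to the given workflow instance.
    score = {
        'points_earned': points_earned,
        'points_possible': points_possible,
        'staff_id': staff_id,
    }
    workflow.set_staff_score(score, reason=reason)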
Example #7
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, it will return None.
            submission_to_assess = staff_api.get_submission_to_assess(
                course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(
                    self,
                    'openassessmentblock.get_submission_for_staff_grading', {
                        'type': 'full-grade',
                        'requesting_staff_id': staff_id,
                        'item_id': item_id,
                        'submission_returned_uuid':
                        submission_to_assess['uuid']
                    })
                submission = submission_api.get_submission_and_student(
                    submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item'][
                        'student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission)
                    if self.is_team_assignment():
                        self.add_team_submission_context(
                            submission_context,
                            individual_submission_uuid=submission['uuid'],
                            transform_usernames=True)
                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                return self.render_error(
                    self._("Error loading the checked out learner response."))
            return self.render_error(
                self._("No other learner responses are available for grading at this time.")
            )
        except PeerAssessmentInternalError:
            return self.render_error(
                self._("Error getting staff grade information."))
Example #8
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Retrieve the submission text
        # Submissions are arbitrary JSON-blobs, which *should*
        # contain a single key, "answer", containing the essay
        # submission text.  If not, though, assume we've been
        # given the essay text directly (convenient for testing).
        if isinstance(submission, dict):
            essay_text = submission.get('answer')
        else:
            essay_text = unicode(submission)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text,
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric
        )

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example #9
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Retrieve the submission text
        # Submissions are arbitrary JSON-blobs, which *should*
        # contain a single key, "answer", containing the essay
        # submission text.  If not, though, assume we've been
        # given the essay text directly (convenient for testing).
        if isinstance(submission, dict):
            essay_text = submission.get('answer')
        else:
            essay_text = unicode(submission)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text,
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric)

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example #10
    def override_score(self):
        """
        Latest override score.
        
        Note an override score has no submission associated with it.
        """

        try:
            submission_dict = sub_api.get_submission_and_student(self.submission_uuid)
        except sub_api.SubmissionError:
            return None
        student_item = submission_dict["student_item"]
        return sub_api.get_score_override(student_item)
Example #11
    def test_grade_score_override(self, xblock):
        # Graded peers, but haven't completed self assessment
        self._create_submission_and_assessments(
            xblock, self.SUBMISSION, [self.PEERS[0]], [self.ASSESSMENTS[0]], None
        )

        # Create an override score for the submission
        submission_dict = sub_api.get_submission_and_student(xblock.submission_uuid)
        student_item = submission_dict['student_item']
        sub_api.score_override(student_item, '14', '15')

        # Verify that we're on the grade override template
        resp = self.request(xblock, 'render_grade', json.dumps(dict()))
        self.assertIn(u'<span class="grade__value__earned">14</span> out of <span class="grade__value__potential">15</span>, set by the instructor.', resp.decode('utf-8').lower())
Example #12
    def staff_assess(self, data, suffix=''):
        """
        Create a staff assessment from a staff submission.
        """
        if 'submission_uuid' not in data:
            return {
                'success': False, 'msg': self._(u"The submission ID of the submission being assessed was not found.")
            }
        try:
            assessment = staff_api.create_assessment(
                data['submission_uuid'],
                self.get_student_item_dict()["student_id"],
                data['options_selected'],
                clean_criterion_feedback(self.rubric_criteria, data['criterion_feedback']),
                data['overall_feedback'],
                create_rubric_dict(self.prompts, self.rubric_criteria_with_labels)
            )
            assess_type = data.get('assess_type', 'regrade')
            self.publish_assessment_event("openassessmentblock.staff_assess", assessment, type=assess_type)
            workflow_api.update_from_assessments(assessment["submission_uuid"], None)
            
            student_item = sub_api.get_submission_and_student(data['submission_uuid']).get('student_item', None)

            if student_item:
                student_id = student_item.get('student_id', None)
                if student_id:
                    student_email = self.get_user_email(student_id)
                    send_notification_for_assessment.delay(student_email, 'staff', "{0}".format(self.course_id), "{0}".format(self.scope_ids.usage_id))

        except StaffAssessmentRequestError:
            logger.warning(
                u"An error occurred while submitting a staff assessment "
                u"for the submission {}".format(data['submission_uuid']),
                exc_info=True
            )
            msg = self._(u"Your staff assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        except StaffAssessmentInternalError:
            logger.exception(
                u"An error occurred while submitting a staff assessment "
                u"for the submission {}".format(data['submission_uuid']),
            )
            msg = self._(u"Your staff assessment could not be submitted.")
            return {'success': False, 'msg': msg}
        else:
            return {'success': True, 'msg': u""}
Example #13
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text_from_submission(submission),
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric
        )

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example #14
    def start_workflow(cls, submission_uuid, rubric_dict, algorithm_id):
        """
        Start a grading workflow.

        Args:
            submission_uuid (str): The UUID of the submission to grade.
            rubric_dict (dict): The serialized rubric model.
            algorithm_id (unicode): The ID of the algorithm to use for grading.

        Returns:
            AIGradingWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            InvalidRubric
            DatabaseError

        """
        # Retrieve info about the submission
        submission = sub_api.get_submission_and_student(submission_uuid)

        # Get or create the rubric
        from openassessment.assessment.serializers import rubric_from_dict
        rubric = rubric_from_dict(rubric_dict)

        # Create the workflow
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            essay_text=essay_text_from_submission(submission),
            algorithm_id=algorithm_id,
            student_id=submission['student_item']['student_id'],
            item_id=submission['student_item']['item_id'],
            course_id=submission['student_item']['course_id'],
            rubric=rubric)

        # Retrieve and assign classifier set candidates
        workflow.assign_most_recent_classifier_set()

        workflow._log_start_workflow()

        return workflow
Example #15
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, it will return None.
            submission_to_assess = staff_api.get_submission_to_assess(course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(self, 'openassessmentblock.get_submission_for_staff_grading', {
                    'type': 'full-grade',
                    'requesting_staff_id': staff_id,
                    'item_id': item_id,
                    'submission_returned_uuid': submission_to_assess['uuid']
                })
                submission = submission_api.get_submission_and_student(submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item']['student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission
                    )
                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                else:
                    return self.render_error(self._(u"Error loading the checked out learner response."))
            else:
                return self.render_error(self._(u"No other learner responses are available for grading at this time."))

        except PeerAssessmentInternalError:
            return self.render_error(self._(u"Error getting staff grade information."))
Example #16
def create_peer_workflow(submission_uuid):
    """Create a new peer workflow for a student item and submission.

    Creates a unique peer workflow for a student item, associated with a
    submission.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        None

    Raises:
        SubmissionError: There was an error retrieving the submission.
        PeerAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    Examples:
        >>> create_peer_workflow("1")

    """
    try:
        with transaction.atomic():
            submission = sub_api.get_submission_and_student(submission_uuid)
            workflow, __ = PeerWorkflow.objects.get_or_create(
                student_id=submission['student_item']['student_id'],
                course_id=submission['student_item']['course_id'],
                item_id=submission['student_item']['item_id'],
                submission_uuid=submission_uuid
            )
            workflow.save()
    except IntegrityError:
        # If we get an integrity error, it means someone else has already
        # created a workflow for this submission, so we don't need to do anything.
        pass
    except DatabaseError as ex:
        error_message = (
            "An internal error occurred while creating a new peer "
            "workflow for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex
Example #17
def create_peer_workflow(submission_uuid):
    """Create a new peer workflow for a student item and submission.

    Creates a unique peer workflow for a student item, associated with a
    submission.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        None

    Raises:
        SubmissionError: There was an error retrieving the submission.
        PeerAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    Examples:
        >>> create_peer_workflow("1")

    """
    try:
        with transaction.atomic():
            submission = sub_api.get_submission_and_student(submission_uuid)
            workflow, __ = PeerWorkflow.objects.get_or_create(
                student_id=submission['student_item']['student_id'],
                course_id=submission['student_item']['course_id'],
                item_id=submission['student_item']['item_id'],
                submission_uuid=submission_uuid
            )
            workflow.save()
    except IntegrityError:
        # If we get an integrity error, it means someone else has already
        # created a workflow for this submission, so we don't need to do anything.
        pass
    except DatabaseError:
        error_message = (
            u"An internal error occurred while creating a new peer "
            u"workflow for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #18
def create_peer_workflow(submission_uuid):
    """Create a new peer workflow for a student item and submission.

    Creates a unique peer workflow for a student item, associated with a
    submission.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        Workflow (PeerWorkflow): A PeerWorkflow item created based on the given
            student item and submission.

    Raises:
        SubmissionError: There was an error retrieving the submission.
        PeerAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    Examples:
        >>> create_peer_workflow("1")

    """
    try:
        submission = sub_api.get_submission_and_student(submission_uuid)
        workflow, __ = PeerWorkflow.objects.get_or_create(
            student_id=submission['student_item']['student_id'],
            course_id=submission['student_item']['course_id'],
            item_id=submission['student_item']['item_id'],
            submission_uuid=submission_uuid
        )
        return workflow
    except DatabaseError:
        error_message = _(
            u"An internal error occurred while creating a new peer "
            u"workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #19
    def get_or_create_workflow(cls, submission_uuid):
        """
        Get an existing student training workflow, or create a new one.

        Args:
            submission_uuid (str): The UUID of the submission from the student being trained.

        Returns:
            StudentTrainingWorkflow

        Raises:
            SubmissionError: There was an error retrieving the submission.

        """
        # Try to retrieve an existing workflow
        # If we find one, return it immediately
        try:
            return cls.objects.get(submission_uuid=submission_uuid)   # pylint:disable=E1101
        except cls.DoesNotExist:
            pass

        # Retrieve the student item info
        submission = sub_api.get_submission_and_student(submission_uuid)
        student_item = submission['student_item']

        # Create the workflow
        try:
            return cls.objects.create(
                submission_uuid=submission_uuid,
                student_id=student_item['student_id'],
                item_id=student_item['item_id'],
                course_id=student_item['course_id']
            )
        # If we get an integrity error, it means we've violated a uniqueness constraint
        # (someone has created this object after we checked if it existed)
        # We can therefore assume that the object exists and we can retrieve it.
        except IntegrityError:
            return cls.objects.get(submission_uuid=submission_uuid)
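Unlike Example #4, which swallows the IntegrityError and implicitly returns None, this version closes the create/get race by re-fetching the existing row. The same pattern, condensed into a standalone sketch (assuming a Django model with a unique submission_uuid field):

from django.db import IntegrityError

def get_or_create_by_submission_uuid(model_cls, submission_uuid, **fields):
    # Condensed sketch of the pattern in Example #19: try to create the row,
    # and if a concurrent request already created it (IntegrityError on the
    # unique submission_uuid), return that existing row instead of failing.
    try:
        return model_cls.objects.create(submission_uuid=submission_uuid, **fields)
    except IntegrityError:
        return model_cls.objects.get(submission_uuid=submission_uuid)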
Example #20
def create_peer_workflow_item(scorer, submission_uuid):
    """
    Begin peer-assessing a particular submission.
    Note that this does NOT pick the submission from the prioritized list of available submissions.
    Mainly useful for testing.

    Args:
        scorer (str): The ID of the scoring student.
        submission_uuid (str): The unique identifier of the submission being scored

    Returns:
        None

    Raises:
        PeerAssessmentWorkflowError: Could not find the workflow for the student.
        PeerAssessmentInternalError: Could not create the peer workflow item.
        SubmissionError: An error occurred while retrieving the submission.
    """
    submission = get_submission_and_student(submission_uuid)
    student_item_dict = copy.copy(submission['student_item'])
    student_item_dict['student_id'] = scorer
    workflow = _get_latest_workflow(student_item_dict)
    _create_peer_workflow_item(workflow, submission_uuid)
Example #21
def on_init(submission_uuid):
    """
    Create a new staff workflow for a student item and submission.

    Creates a unique staff workflow for a student item, associated with a
    submission.

    Note that the staff workflow begins things in on_init() instead of
    on_start(), because staff should be able to access the submission
    regardless of which state the workflow is currently in.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        None

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    """
    try:
        submission = submissions_api.get_submission_and_student(submission_uuid)
        workflow, __ = StaffWorkflow.objects.get_or_create(
            course_id=submission['student_item']['course_id'],
            item_id=submission['student_item']['item_id'],
            submission_uuid=submission_uuid
        )
    except DatabaseError:
        error_message = (
            u"An internal error occurred while creating a new staff "
            u"workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise StaffAssessmentInternalError(error_message)
Example #22
def create_peer_workflow(submission_uuid):
    """Create a new peer workflow for a student item and submission.

    Creates a unique peer workflow for a student item, associated with a
    submission.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        Workflow (PeerWorkflow): A PeerWorkflow item created based on the given
            student item and submission.

    Raises:
        SubmissionError: There was an error retrieving the submission.
        PeerAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    Examples:
        >>> create_peer_workflow("1")

    """
    try:
        submission = sub_api.get_submission_and_student(submission_uuid)
        workflow, __ = PeerWorkflow.objects.get_or_create(
            student_id=submission['student_item']['student_id'],
            course_id=submission['student_item']['course_id'],
            item_id=submission['student_item']['item_id'],
            submission_uuid=submission_uuid)
        return workflow
    except DatabaseError:
        error_message = _(
            u"An internal error occurred while creating a new peer "
            u"workflow for submission {}".format(submission_uuid))
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #23
    def get_or_create_workflow(cls, submission_uuid):
        """
        Get an existing student training workflow, or create a new one.

        Args:
            submission_uuid (str): The UUID of the submission from the student being trained.

        Returns:
            StudentTrainingWorkflow

        Raises:
            SubmissionError: There was an error retrieving the submission.

        """
        # Try to retrieve an existing workflow
        # If we find one, return it immediately
        try:
            return cls.objects.get(submission_uuid=submission_uuid)  # pylint:disable=E1101
        except cls.DoesNotExist:
            pass

        # Retrieve the student item info
        submission = sub_api.get_submission_and_student(submission_uuid)
        student_item = submission['student_item']

        # Create the workflow
        try:
            return cls.objects.create(submission_uuid=submission_uuid,
                                      student_id=student_item['student_id'],
                                      item_id=student_item['item_id'],
                                      course_id=student_item['course_id'])
        # If we get an integrity error, it means we've violated a uniqueness constraint
        # (someone has created this object after we checked if it existed)
        # We can therefore assume that the object exists and we can retrieve it.
        except IntegrityError:
            return cls.objects.get(submission_uuid=submission_uuid)
Example #24
def create_assessment(
        submission_uuid,
        scorer_id,
        assessment_dict,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        submission_uuid (str): The submission uuid this assessment is associated
            with. The submission uuid is required and must already exist in the
            Submission model.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        assessment_dict (dict): All related information for the assessment. An
            assessment contains points_earned, points_possible, and feedback.
        rubric_dict (dict): The serialized rubric model used for this assessment.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> assessment_dict = dict(
        ...    options_selected={"clarity": "Very clear", "precision": "Somewhat precise"},
        ...    feedback="Your submission was thrilling.",
        ... )
        >>> create_assessment("1", "Tim", assessment_dict, rubric_dict, num_required_grades=3)
    """
    try:
        submission = sub_api.get_submission_and_student(submission_uuid)
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(assessment_dict["options_selected"])
        except InvalidOptionSelection as ex:
            msg = _("Selected options do not match the rubric: {error}").format(error=ex.message)
            raise PeerAssessmentRequestError(msg)

        feedback = assessment_dict.get('feedback', u'')
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": feedback,
        }

        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)

        if not peer_serializer.is_valid():
            raise PeerAssessmentRequestError(peer_serializer.errors)

        assessment = peer_serializer.save()

        # We do this to do a run around django-rest-framework serializer
        # validation, which would otherwise require two DB queries per
        # option to do validation. We already validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids)

        student_item = submission['student_item']
        scorer_item = copy.deepcopy(student_item)
        scorer_item['student_id'] = scorer_id

        scorer_workflow = _get_latest_workflow(scorer_item)
        workflow = _get_latest_workflow(student_item)

        if not scorer_workflow:
            raise PeerAssessmentWorkflowError(_(
                "You must make a submission before assessing another student."))
        if not workflow:
            raise PeerAssessmentWorkflowError(_(
                "The submission you reviewed is not in the peer workflow. This "
                "assessment cannot be submitted unless the associated "
                "submission came from the peer workflow."))
        # Close the active assessment
        _close_active_assessment(scorer_workflow, submission_uuid, assessment, num_required_grades)
        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, student_item, scorer_item)

        return assessment_dict
    except DatabaseError:
        error_message = _(
            u"An error occurred while creating assessment {} for submission: "
            u"{} by: {}"
            .format(assessment_dict, submission_uuid, scorer_id)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #25
def create_assessment(submission_uuid, user_id, options_selected, rubric_dict, scored_at=None):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.  This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        rubric_dict (dict): Serialized Rubric model.

    Kwargs:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        raise SelfAssessmentRequestError(_("You've already completed your self assessment for this response."))

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            raise SelfAssessmentRequestError(_("You can only complete a self assessment on your own response."))
    except SubmissionNotFoundError:
        raise SelfAssessmentRequestError(_("Could not retrieve the response."))

    # Get or create the rubric
    try:
        rubric = rubric_from_dict(rubric_dict)
        option_ids = rubric.options_ids(options_selected)
    except InvalidRubric as ex:
        msg = _("Invalid rubric definition: {errors}").format(errors=ex.errors)
        raise SelfAssessmentRequestError(msg)
    except InvalidOptionSelection:
        msg = _("Selected options do not match the rubric")
        raise SelfAssessmentRequestError(msg)

    # Create the assessment
    # Since we have already retrieved the submission, we can assume that
    # the user who created the submission exists.
    self_assessment = {
        "rubric": rubric.id,
        "scorer_id": user_id,
        "submission_uuid": submission_uuid,
        "score_type": SELF_TYPE,
        "feedback": u"",
    }

    if scored_at is not None:
        self_assessment['scored_at'] = scored_at

    # Serialize the assessment
    serializer = AssessmentSerializer(data=self_assessment)
    if not serializer.is_valid():
        msg = _("Could not create self assessment: {errors}").format(errors=serializer.errors)
        raise SelfAssessmentRequestError(msg)

    assessment = serializer.save()

    # We do this to do a run around django-rest-framework serializer
    # validation, which would otherwise require two DB queries per
    # option to do validation. We already validated these options above.
    AssessmentPart.add_to_assessment(assessment, option_ids)
    assessment_dict = full_assessment_dict(assessment)
    _log_assessment(assessment, submission)

    # Return the serialized assessment
    return assessment_dict
Example #26
def create_assessment(
    submission_uuid,
    user_id,
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,
    scored_at=None
):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.
            This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid, score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another learner "
                u"(submission learner ID {student_id} does not match your "
                u"learner id {other_id})"
            ).format(
                uuid=submission_uuid,
                student_id=submission['student_item']['student_id'],
                other_id=user_id
            )
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = (
            "Could not submit a self-assessment because no submission "
            "exists with UUID {uuid}"
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        assessment = _complete_assessment(
            submission_uuid,
            user_id,
            options_selected,
            criterion_feedback,
            overall_feedback,
            rubric_dict,
            scored_at
        )
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"Error creating self assessment for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
Example #27
    def start_workflow(cls, submission_uuid, step_names, on_init_params):
        """
        Start a new workflow.

        Args:
            submission_uuid (str): The UUID of the submission associated with this workflow.
            step_names (list): The names of the assessment steps in the workflow.
            on_init_params (dict): The parameters to pass to each assessment module
                on init.  Keys are the assessment step names.

        Returns:
            AssessmentWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            DatabaseError
            Assessment-module specific errors
        """
        submission_dict = sub_api.get_submission_and_student(submission_uuid)

        # Create the workflow and step models in the database
        # For now, set the status to waiting; we'll modify it later
        # based on the first step in the workflow.
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            status=AssessmentWorkflow.STATUS.waiting,
            course_id=submission_dict["student_item"]["course_id"],
            item_id=submission_dict["student_item"]["item_id"],
        )
        workflow_steps = [
            AssessmentWorkflowStep(workflow=workflow, name=step, order_num=i) for i, step in enumerate(step_names)
        ]
        workflow.steps.add(*workflow_steps)

        # Initialize the assessment APIs
        has_started_first_step = False
        for step in workflow_steps:
            api = step.api()

            if api is not None:
                # Initialize the assessment module
                # We do this for every assessment module
                on_init_func = getattr(api, "on_init", lambda submission_uuid, **params: None)
                on_init_func(submission_uuid, **on_init_params.get(step.name, {}))

                # For the first valid step, update the workflow status
                # and notify the assessment module that it's being started
                if not has_started_first_step:
                    # Update the workflow
                    workflow.status = step.name
                    workflow.save()

                    # Notify the assessment module that it's being started
                    on_start_func = getattr(api, "on_start", lambda submission_uuid: None)
                    on_start_func(submission_uuid)

                    # Remember that we've already started the first step
                    has_started_first_step = True

        # Update the workflow (in case some of the assessment modules are automatically complete)
        # We do NOT pass in requirements, on the assumption that any assessment module
        # that accepts requirements would NOT automatically complete.
        workflow.update_from_assessments(None)

        # Return the newly created workflow
        return workflow
Example #28
    def test_get_submission_and_student(self):
        retrieved = sub_api.get_submission_and_student(self.submission['uuid'], read_replica=True)
        expected = copy.deepcopy(self.submission)
        expected['student_item'] = copy.deepcopy(self.STUDENT_ITEM)
        self.assertEqual(retrieved, expected)
Example #29
def create_assessment(submission_uuid,
                      user_id,
                      options_selected,
                      criterion_feedback,
                      overall_feedback,
                      rubric_dict,
                      scored_at=None):
    """
    Create a self-assessment for a submission.

    Args:
        submission_uuid (str): The unique identifier for the submission being assessed.
        user_id (str): The ID of the user creating the assessment.
            This must match the ID of the user who made the submission.
        options_selected (dict): Mapping of rubric criterion names to option values selected.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): Serialized Rubric model.

    Keyword Arguments:
        scored_at (datetime): The timestamp of the assessment; defaults to the current time.

    Returns:
        dict: serialized Assessment model

    Raises:
        SelfAssessmentRequestError: Could not retrieve a submission that the user is allowed to score.
    """
    # Check that there are not any assessments for this submission
    if Assessment.objects.filter(submission_uuid=submission_uuid,
                                 score_type=SELF_TYPE).exists():
        msg = (
            u"Cannot submit a self-assessment for the submission {uuid} "
            "because another self-assessment already exists for that submission."
        ).format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    # Check that the student is allowed to assess this submission
    try:
        submission = get_submission_and_student(submission_uuid)
        if submission['student_item']['student_id'] != user_id:
            msg = (
                u"Cannot submit a self-assessment for the submission {uuid} "
                u"because it was created by another learner "
                u"(submission learner ID {student_id} does not match your "
                u"learner id {other_id})").format(
                    uuid=submission_uuid,
                    student_id=submission['student_item']['student_id'],
                    other_id=user_id)
            raise SelfAssessmentRequestError(msg)
    except SubmissionNotFoundError:
        msg = ("Could not submit a self-assessment because no submission "
               "exists with UUID {uuid}").format(uuid=submission_uuid)
        raise SelfAssessmentRequestError(msg)

    try:
        assessment = _complete_assessment(submission_uuid, user_id,
                                          options_selected, criterion_feedback,
                                          overall_feedback, rubric_dict,
                                          scored_at)
        _log_assessment(assessment, submission)
    except InvalidRubric as ex:
        msg = "Invalid rubric definition: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except InvalidRubricSelection as ex:
        msg = "Selected options do not match the rubric: " + str(ex)
        logger.warning(msg, exc_info=True)
        raise SelfAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (u"Error creating self assessment for submission {}"
                         ).format(submission_uuid)
        logger.exception(error_message)
        raise SelfAssessmentInternalError(error_message)

    # Return the serialized assessment
    return full_assessment_dict(assessment)
Example #30
def create_workflow(submission_uuid):
    """Begins a new assessment workflow.

    Create a new workflow that other assessments will record themselves against.

    Args:
        submission_uuid (str): The UUID for the submission that all our
            assessments will be evaluating.

    Returns:
        dict: Assessment workflow information with the following fields:
            `uuid` = UUID of this `AssessmentWorkflow`
            `submission_uuid` = UUID of submission this workflow tracks
            `status` = Active step, always "peer" when created.
            `created` = created datetime
            `modified` = modified datetime (same as `created` for this method)
            `score` = should be None in the usual case, but could be a dict
                with keys `points_earned` and `points_possible` and int values.
                The latter will only happen on workflow creation if something
                else has already written the score for this submission (such as
                a professor manually entering it). There is no support for such
                a feature at present, but it may be added later.

    Raises:
        AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
            not exist or is of an invalid type.
        AssessmentWorkflowInternalError: Unexpected internal error, such as the
            submissions app not being available or a database configuration
            problem.

    """
    def sub_err_msg(specific_err_msg):
        return (
            u"Could not create assessment workflow: "
            u"retrieving submission {} failed: {}"
            .format(submission_uuid, specific_err_msg)
        )

    try:
        submission_dict = sub_api.get_submission_and_student(submission_uuid)
    except sub_api.SubmissionNotFoundError as err:
        err_msg = sub_err_msg("submission not found")
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionRequestError as err:
        err_msg = sub_err_msg(err)
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionInternalError as err:
        err_msg = sub_err_msg(err)
        logger.error(err_msg)
        raise AssessmentWorkflowInternalError(
            u"retrieving submission {} failed with unknown error: {}"
            .format(submission_uuid, err)
        )

    # We're not using a serializer to deserialize this because the only variable
    # we're getting from the outside is the submission_uuid, which is already
    # validated by this point.
    try:
        peer_api.create_peer_workflow(submission_uuid)
        workflow = AssessmentWorkflow.objects.create(
            submission_uuid=submission_uuid,
            status=AssessmentWorkflow.STATUS.peer,
            course_id=submission_dict['student_item']['course_id'],
            item_id=submission_dict['student_item']['item_id'],
        )
    except (
        DatabaseError,
        peer_api.PeerAssessmentError,
        sub_api.SubmissionError
    ) as err:
        err_msg = u"Could not create assessment workflow: {}".format(err)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg)

    return AssessmentWorkflowSerializer(workflow).data
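The docstring above spells out the shape of the returned workflow data. As a quick reference, here is a sketch of that dict with placeholder values; the field names follow the docstring, and anything beyond them is not guaranteed.

# Sketch of the dict returned by create_workflow() in Example #30, per its
# docstring; the values below are placeholders.
workflow_info = {
    'uuid': 'workflow-uuid',
    'submission_uuid': 'e65ca0e9-...',
    'status': 'peer',    # active step; always "peer" when created
    'created': None,     # datetime
    'modified': None,    # datetime; same as 'created' for this method
    'score': None,       # usually None; may be {'points_earned': int, 'points_possible': int}
}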
Example #31
    def start_workflow(cls, submission_uuid, step_names, on_init_params):
        """
        Start a new workflow.

        Args:
            submission_uuid (str): The UUID of the submission associated with this workflow.
            step_names (list): The names of the assessment steps in the workflow.
            on_init_params (dict): The parameters to pass to each assessment module
                on init.  Keys are the assessment step names.

        Returns:
            AssessmentWorkflow

        Raises:
            SubmissionNotFoundError
            SubmissionRequestError
            SubmissionInternalError
            DatabaseError
            Assessment-module specific errors
        """
        submission_dict = sub_api.get_submission_and_student(submission_uuid)

        staff_auto_added = False
        if 'staff' not in step_names:
            staff_auto_added = True
            new_list = ['staff']
            new_list.extend(step_names)
            step_names = new_list

        # Create the workflow and step models in the database
        # For now, set the status to waiting; we'll modify it later
        # based on the first step in the workflow.
        workflow = cls.objects.create(
            submission_uuid=submission_uuid,
            status=AssessmentWorkflow.STATUS.waiting,
            course_id=submission_dict['student_item']['course_id'],
            item_id=submission_dict['student_item']['item_id']
        )
        workflow_steps = [
            AssessmentWorkflowStep.objects.create(
                workflow=workflow, name=step, order_num=i
            )
            for i, step in enumerate(step_names)
        ]
        workflow.steps.add(*workflow_steps)

        # Initialize the assessment APIs
        has_started_first_step = False
        for step in workflow_steps:
            api = step.api()

            if api is not None:
                # Initialize the assessment module
                # We do this for every assessment module
                on_init_func = getattr(api, 'on_init', lambda submission_uuid, **params: None)
                on_init_func(submission_uuid, **on_init_params.get(step.name, {}))

                # If we auto-added a staff step, it is optional and should be marked complete immediately
                if step.name == "staff" and staff_auto_added:
                    step.assessment_completed_at = now()
                    step.save()

                # For the first valid step, update the workflow status
                # and notify the assessment module that it's being started
                if not has_started_first_step:
                    # Update the workflow
                    workflow.status = step.name
                    workflow.save()

                    # Notify the assessment module that it's being started
                    on_start_func = getattr(api, 'on_start', lambda submission_uuid: None)
                    on_start_func(submission_uuid)

                    # Remember that we've already started the first step
                    has_started_first_step = True

        # Update the workflow (in case some of the assessment modules are automatically complete)
        # We do NOT pass in requirements, on the assumption that any assessment module
        # that accepts requirements would NOT automatically complete.
        workflow.update_from_assessments(None)

        # Return the newly created workflow
        return workflow
Example #32
    def render_staff_grade_form(self, data, suffix=''):  # pylint: disable=W0613
        """
        Renders a form to staff-grade the next available learner submission.

        Must be course staff to render this view.
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as submission_api
        try:
            student_item_dict = self.get_student_item_dict()
            course_id = student_item_dict.get('course_id')
            item_id = student_item_dict.get('item_id')
            staff_id = student_item_dict['student_id']

            # Note that this will check out a submission for grading by the specified staff member.
            # If no submissions are available for grading, it will return None.
            submission_to_assess = staff_api.get_submission_to_assess(
                course_id, item_id, staff_id)

            if submission_to_assess is not None:
                # This is posting a tracking event to the runtime.
                self.runtime.publish(
                    self,
                    'openassessmentblock.get_submission_for_staff_grading', {
                        'type': 'full-grade',
                        'requesting_staff_id': staff_id,
                        'item_id': item_id,
                        'submission_returned_uuid':
                        submission_to_assess['uuid']
                    })
                submission = submission_api.get_submission_and_student(
                    submission_to_assess['uuid'])
                if submission:
                    anonymous_student_id = submission['student_item'][
                        'student_id']
                    submission_context = self.get_student_submission_context(
                        self.get_username(anonymous_student_id), submission)
                    # Add team info to context
                    submission_context['teams_enabled'] = self.teams_enabled
                    if self.teams_enabled:
                        user = self.get_real_user(anonymous_student_id)

                        if not user:
                            logger.error(
                                '{}: User lookup for anonymous_user_id {} failed'
                                .format(self.location, anonymous_student_id))
                            raise ObjectDoesNotExist()

                        team = self.teams_service.get_team(
                            user, self.course_id, self.selected_teamset_id)

                        submission_context['team_name'] = team.name
                        submission_context['team_usernames'] = list_to_conversational_format(
                            [user.username for user in team.users.all()]
                        )

                    path = 'openassessmentblock/staff_area/oa_staff_grade_learners_assessment.html'
                    return self.render_assessment(path, submission_context)
                return self.render_error(
                    self._(u"Error loading the checked out learner response."))
            return self.render_error(
                self._(u"No other learner responses are available for grading at this time.")
            )
        except PeerAssessmentInternalError:
            return self.render_error(
                self._(u"Error getting staff grade information."))