Example #1
    def close_active_assessment(self, submission_uuid, assessment,
                                num_required_grades):
        """
        Updates a workflow item on the student's workflow with the associated
        assessment. When a workflow item has an assessment, it is considered
        finished.

        Args:
            submission_uuid (str): The submission the scorer is grading.
            assessment (PeerAssessment): The associated assessment for this action.
            num_required_grades (int): The number of completed assessments the
                author's workflow requires before it is considered fully graded.

        Returns:
            None

        """
        try:
            item_query = self.graded.filter(
                submission_uuid=submission_uuid).order_by(
                    "-started_at", "-id")
            items = list(item_query[:1])
            if not items:
                msg = (
                    u"No open assessment was found for learner {} while assessing "
                    u"submission UUID {}.").format(self.student_id,
                                                   submission_uuid)
                raise PeerAssessmentWorkflowError(msg)
            item = items[0]
            item.assessment = assessment
            item.save()

            if not item.author.grading_completed_at:
                if item.author.graded_by.filter(
                        assessment__isnull=False).count(
                        ) >= num_required_grades:
                    item.author.grading_completed_at = now()
                    item.author.save()

        except (DatabaseError, PeerWorkflowItem.DoesNotExist):
            error_message = (
                u"An internal error occurred while retrieving a workflow item for "
                u"learner {}. Workflow Items are created when submissions are "
                u"pulled for assessment.").format(self.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
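
A minimal usage sketch for close_active_assessment, assuming the caller already holds the scorer's PeerWorkflow and the freshly saved Assessment; the variable names and the three-grade requirement below are illustrative, not part of the API above:

# Sketch: after saving a scorer's assessment, close the matching open
# workflow item. `scorer_workflow`, `peer_submission_uuid`, and
# `assessment` are assumed to come from the surrounding grading flow.
scorer_workflow.close_active_assessment(
    peer_submission_uuid,   # the submission that was just graded
    assessment,             # the Assessment record that closes the item
    num_required_grades=3,  # e.g. three peer grades complete the author's workflow
)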
Example #2
    def get_by_submission_uuid(cls, submission_uuid):
        """
        Retrieve the Peer Workflow associated with the given submission UUID.

        Args:
            submission_uuid (str): The string representation of the UUID belonging
                to the associated Peer Workflow.

        Returns:
            workflow (PeerWorkflow): The peer workflow associated with this
                submission UUID, or None if no workflow exists.

        Raises:
            PeerAssessmentWorkflowError: Thrown when a database error prevents
                the workflow from being retrieved. A workflow should always
                exist before a student is allowed to request submissions for
                peer assessment.

        Examples:
            >>> PeerWorkflow.get_by_submission_uuid("abc123")
            {
                'student_id': u'Bob',
                'item_id': u'type_one',
                'course_id': u'course_1',
                'submission_uuid': u'1',
                'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>)
            }

        """
        try:
            return cls.objects.get(submission_uuid=submission_uuid)
        except cls.DoesNotExist:
            return None
        except DatabaseError:
            error_message = (
                u"Error finding workflow for submission UUID {}. Workflow must be "
                u"created for submission before beginning peer assessment."
            ).format(submission_uuid)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
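
Because get_by_submission_uuid returns None rather than raising when no workflow exists, callers are expected to check the result themselves; this is the pattern get_submission_to_assess uses in Example #3 (the message below is illustrative, and PeerAssessmentWorkflowError comes from the same module):

workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
if workflow is None:
    # The submission has not entered the peer assessment step yet.
    raise PeerAssessmentWorkflowError(
        u"No peer workflow exists for submission UUID {}".format(submission_uuid)
    )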
Example #3
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over-graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains the
            'student_item', 'attempt_number', 'submitted_at', 'created_at',
            and 'answer' fields to be used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )
    peer_submission_uuid = workflow.find_active_assessments()
    # If there is an active assessment for this user, get that submission;
    # otherwise, get the next submission requiring review; failing that,
    # get the first submission available for over-grading.
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()
    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
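
A sketch of how a grading view might drive get_submission_to_assess, assuming the learner's own submission UUID and a three-peer requirement; the branches are placeholders for the caller's rendering logic:

# `my_submission_uuid` is assumed to identify the requesting learner's
# own submission, which anchors their peer workflow.
submission = get_submission_to_assess(my_submission_uuid, 3)
if submission is None:
    # Nothing is available to grade right now; show a waiting message.
    pass
else:
    # Present the peer's answer to the scorer for assessment.
    answer = submission['answer']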
Example #4
def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission UUID is invalid,
            or the assessment data does not contain the required values to
            create an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict, 3)
    """
    # Ensure that this variable is declared so that, if an error occurs,
    # logging it does not raise a NameError.
    assessment_dict = None

    try:
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options match the rubric
        # and raise an error if they do not.
        try:
            option_ids = rubric.options_ids(options_selected)
        except InvalidOptionSelection:
            msg = "Selected options do not match the rubric"
            logger.warning(msg, exc_info=True)
            raise PeerAssessmentRequestError(msg)

        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)

        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": peer_submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": overall_feedback[0:Assessment.MAXSIZE],
        }

        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)

        if not peer_serializer.is_valid():
            msg = (
                u"An error occurred while serializing "
                u"the peer assessment associated with "
                u"the scorer's submission UUID {}."
            ).format(scorer_submission_uuid)
            raise PeerAssessmentRequestError(msg)

        assessment = peer_serializer.save()

        # We do this to sidestep django-rest-framework serializer validation,
        # which would otherwise require two DB queries per option. We already
        # validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, scorer_workflow)

        return assessment_dict
    except DatabaseError:
        error_message = (
            u"An error occurred while creating assessment {} by: {}"
        ).format(assessment_dict, scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.error(message)
        raise PeerAssessmentWorkflowError(message)
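
Expanding the docstring example into a full call, with the required num_required_grades argument spelled out; rubric_dict is assumed to be built elsewhere from the problem definition:

options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
criterion_feedback = {"clarity": "I thought this essay was very clear."}
overall_feedback = u"Your submission was thrilling."
assessment_dict = create_assessment(
    "1",              # scorer's submission UUID
    "Tim",            # scorer_id
    options_selected,
    criterion_feedback,
    overall_feedback,
    rubric_dict,      # assumed: serialized rubric for this problem
    3,                # num_required_grades
)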
Example #5
def create_assessment(scorer_submission_uuid,
                      scorer_id,
                      options_selected,
                      criterion_feedback,
                      overall_feedback,
                      rubric_dict,
                      num_required_grades,
                      scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Keyword Args:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission UUID is invalid,
            or the assessment data does not contain the required values to
            create an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict, 3)
    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(
            submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.find_active_assessments()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}.").format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)
        peer_submission_uuid = peer_workflow_item.submission_uuid

        assessment = _complete_assessment(rubric_dict, scorer_id,
                                          peer_submission_uuid,
                                          options_selected, criterion_feedback,
                                          scorer_workflow, overall_feedback,
                                          num_required_grades, scored_at)

        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (u"There is no Peer Workflow associated with the given "
                   u"submission UUID {}.").format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"The rubric definition is not valid."
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options were selected in the rubric."
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while creating an assessment by the scorer with this ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
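
Example #5 delegates the serialization work to _complete_assessment, whose body is not shown here. A plausible sketch, reconstructed from the inline steps in Example #4; the Assessment.create and AssessmentPart.create_from_option_names helpers are assumptions about the refactored model API, not confirmed by the source:

def _complete_assessment(rubric_dict, scorer_id, peer_submission_uuid,
                         options_selected, criterion_feedback, scorer_workflow,
                         overall_feedback, num_required_grades, scored_at):
    # Sketch only: mirrors Example #4's inline logic behind one call.
    rubric = rubric_from_dict(rubric_dict)  # may raise InvalidRubric

    # Create the assessment record (assumed model helper).
    assessment = Assessment.create(
        rubric, scorer_id, peer_submission_uuid, PEER_TYPE,
        scored_at=scored_at, feedback=overall_feedback)

    # Create one part per criterion; may raise InvalidRubricSelection if the
    # selected options do not match the rubric (assumed model helper).
    AssessmentPart.create_from_option_names(
        assessment, options_selected, feedback=criterion_feedback)

    # Close the scorer's open workflow item, as in Example #1.
    scorer_workflow.close_active_assessment(
        peer_submission_uuid, assessment, num_required_grades)
    return assessment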