Example #1
0
def on_cancel(submission_uuid):
    """Cancel the peer workflow for submission.

    Sets the cancelled_at field in peer workflow.

    Args:
        submission_uuid (str): The submission UUID associated with this workflow.

    Returns:
        None

    """
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        if workflow:
            workflow.cancelled_at = timezone.now()
            workflow.save()
    except (PeerAssessmentWorkflowError, DatabaseError) as ex:
        error_message = (
            "An internal error occurred while cancelling the peer"
            "workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex
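A minimal caller sketch for the helper above; the UUID value and the error-handling policy are placeholders, not part of the original API.

# Hypothetical caller: on_cancel has already logged the failure, so the
# caller only decides how to surface it.
submission_uuid = "example-submission-uuid"  # placeholder value
try:
    on_cancel(submission_uuid)
except PeerAssessmentInternalError:
    pass  # e.g. show a generic error message to the user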
Example #2
0
def get_assessment_feedback(submission_uuid):
    """
    Retrieve a feedback on an assessment.

    Args:
        submission_uuid: The submission we want to retrieve assessment feedback for.

    Returns:
        dict or None

    Raises:
        PeerAssessmentInternalError: Error occurred while retrieving the feedback.
    """
    try:
        feedback = AssessmentFeedback.objects.get(
            submission_uuid=submission_uuid
        )
        return AssessmentFeedbackSerializer(feedback).data
    except AssessmentFeedback.DoesNotExist:
        return None
    except DatabaseError:
        error_message = (
            u"An error occurred retrieving assessment feedback for {}."
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
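A usage sketch for the lookup above; the UUID is a placeholder and the field name is assumed from the AssessmentFeedback model used elsewhere in these examples.

# Hypothetical caller: None means no feedback has been recorded yet.
feedback = get_assessment_feedback("example-submission-uuid")  # placeholder UUID
if feedback is not None:
    print(feedback.get("feedback_text"))  # field name assumed from AssessmentFeedback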
Example #3
0
def get_rubric_max_scores(submission_uuid):
    """Gets the maximum possible value for each criterion option

    Iterates over the rubric used to grade the given submission, and creates a
    dictionary of maximum possible values.

    Args:
        submission_uuid: The submission to get the associated rubric max scores.
    Returns:
        A dictionary of max scores for this rubric's criteria options. Returns
            None if no assessments are found for this submission.
    Raises:
        PeerAssessmentInternalError: Raised when there is an error retrieving
            the submission, or its associated rubric.
    """
    try:
        assessments = list(
            Assessment.objects.filter(
                submission_uuid=submission_uuid).order_by(
                    "-scored_at", "-id").select_related("rubric")[:1])
        if not assessments:
            return None

        assessment = assessments[0]
        rubric_dict = RubricSerializer.serialized_from_cache(assessment.rubric)
        return {
            criterion["name"]: criterion["points_possible"]
            for criterion in rubric_dict["criteria"]
        }
    except DatabaseError:
        error_message = (
            u"Error getting rubric options max scores for submission uuid {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
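A short sketch of how the returned mapping might be used; the criterion names in the comment are invented for illustration.

# Hypothetical caller: the result maps criterion names to points_possible,
# e.g. {"clarity": 4, "precision": 3}, or is None if nothing was assessed.
max_scores = get_rubric_max_scores("example-submission-uuid")  # placeholder UUID
if max_scores is not None:
    total_points_possible = sum(max_scores.values())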
Example #4
0
def get_assessments(submission_uuid, scored_only=True, limit=None):
    """Retrieve the assessments for a submission.

    Retrieves all the assessments for a submission. This API returns related
    feedback without making any assumptions about grading. Any outstanding
    assessments associated with this submission will not be returned.

    Args:
        submission_uuid (str): The submission all the requested assessments are
            associated with. Required.

    Kwargs:
        scored_only (bool): Only retrieve the assessments used to generate a score
            for this submission.
        limit (int): Limit the returned assessments. If None, returns all.

    Returns:
        list(dict): A list of dictionaries, where each dictionary represents a
            separate assessment. Each assessment contains points earned, points
            possible, time scored, scorer id, score type, and feedback.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid.
        PeerAssessmentInternalError: Raised when there is an internal error
            while retrieving the assessments associated with this submission.

    Examples:
        >>> get_assessments("1", scored_only=True, limit=2)
        [
            {
                'points_earned': 6,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Your submission was thrilling.'
            },
            {
                'points_earned': 11,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
                'scorer': u"Bob",
                'feedback': u'Great submission.'
            }
        ]

    """
    try:
        if scored_only:
            assessments = PeerWorkflowItem.get_scored_assessments(
                submission_uuid)[:limit]
        else:
            assessments = Assessment.objects.filter(
                submission_uuid=submission_uuid, score_type=PEER_TYPE)[:limit]
        return serialize_assessments(assessments)
    except DatabaseError:
        error_message = _(
            u"Error getting assessments for submission {}".format(
                submission_uuid))
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #5
0
def get_assessment_median_scores(submission_uuid):
    """Get the median score for each rubric criterion

    For a given assessment, collect the median score for each criterion on the
    rubric. This set can be used to determine the overall score, as well as each
    part of the individual rubric scores.

    If there is a true median score, it is returned. If there are two middle
    values, their average is returned, rounded up to the next integer.

    Args:
        submission_uuid (str): The submission uuid is used to get the
            assessments used to score this submission, and generate the
            appropriate median score.

    Returns:
        (dict): A dictionary of rubric criterion names, with a median score of
            the peer assessments.

    Raises:
        PeerAssessmentInternalError: If any error occurs while retrieving
            information to form the median scores, an error is raised.
    """
    try:
        workflow = PeerWorkflow.objects.get(submission_uuid=submission_uuid)
        items = workflow.graded_by.filter(scored=True)
        assessments = [item.assessment for item in items]
        scores = Assessment.scores_by_criterion(assessments)
        return Assessment.get_median_score_dict(scores)
    except DatabaseError:
        error_message = _(u"Error getting assessment median scores {}".format(
            submission_uuid))
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
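The median rule described in the docstring can be shown with a small standalone sketch; this illustrates the documented rule only, not the library's implementation in Assessment.get_median_score_dict.

import math

def median_rounded_up(scores):
    # Sketch of the documented rule: the true median when the count is odd,
    # otherwise the average of the two middle values, rounded up.
    ordered = sorted(scores)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 1:
        return ordered[mid]
    return int(math.ceil((ordered[mid - 1] + ordered[mid]) / 2.0))

# median_rounded_up([2, 3, 4, 5]) == 4  (3 and 4 average to 3.5, rounded up)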
Example #6
0
def set_assessment_feedback(feedback_dict):
    """
    Create or update the feedback object for an assessment.

    Sets or updates the assessment feedback with the values in the given dict.

    Args:
        feedback_dict (dict): A dictionary of all the values to update or create
            a new assessment feedback.

    Returns:
        None

    Raises:
        PeerAssessmentRequestError
        PeerAssessmentInternalError
    """
    submission_uuid = feedback_dict.get('submission_uuid')
    feedback_text = feedback_dict.get('feedback_text')
    selected_options = feedback_dict.get('options', list())

    if feedback_text and len(feedback_text) > AssessmentFeedback.MAXSIZE:
        error_message = u"Assessment feedback too large."
        raise PeerAssessmentRequestError(error_message)

    try:
        # Get or create the assessment model for this submission
        # If we receive an integrity error, assume that someone else is trying to create
        # another feedback model for this submission, and raise an exception.
        if submission_uuid:
            feedback, created = AssessmentFeedback.objects.get_or_create(
                submission_uuid=submission_uuid)
        else:
            error_message = u"An error occurred creating assessment feedback: bad or missing submission_uuid."
            logger.error(error_message)
            raise PeerAssessmentRequestError(error_message)

        # Update the feedback text
        if feedback_text is not None:
            feedback.feedback_text = feedback_text

        # Save the feedback model.  We need to do this before setting m2m relations.
        if created or feedback_text is not None:
            feedback.save()

        # Associate the feedback with selected options
        feedback.add_options(selected_options)

        # Associate the feedback with scored assessments
        assessments = PeerWorkflowItem.get_scored_assessments(submission_uuid)
        feedback.assessments.add(*assessments)
    except DatabaseError:
        msg = u"Error occurred while creating or updating feedback on assessment: {}".format(
            feedback_dict)
        logger.exception(msg)
        raise PeerAssessmentInternalError(msg)
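A hypothetical payload for the function above, built only from the keys it reads; the text and options are placeholder values.

# Hypothetical payload; keys mirror those read by set_assessment_feedback.
feedback_dict = {
    "submission_uuid": "example-submission-uuid",  # placeholder UUID
    "feedback_text": "Thanks, the peer comments were helpful.",
    "options": [],  # placeholder; any selected feedback options
}
set_assessment_feedback(feedback_dict)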
Example #7
0
def get_assessment_workflow_cancellation(submission_uuid):
    """
    Get cancellation information for an assessment workflow.

    Args:
        submission_uuid (str): The UUID of the submission.
    """
    try:
        workflow_cancellation = AssessmentWorkflowCancellation.get_latest_workflow_cancellation(submission_uuid)
        return AssessmentWorkflowCancellationSerializer(workflow_cancellation).data if workflow_cancellation else None
    except DatabaseError as ex:
        error_message = "Error finding assessment workflow cancellation for submission UUID {}."\
            .format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex
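A usage sketch; the field name read from the serialized cancellation is an assumption about the serializer's output.

# Hypothetical caller: None means the workflow was never cancelled.
cancellation = get_assessment_workflow_cancellation("example-submission-uuid")
if cancellation is not None:
    print(cancellation.get("comments"))  # "comments" is an assumed field name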
Example #8
0
    def get_submission_for_over_grading(self):
        """
        Retrieve the next submission uuid for over grading in peer assessment.
        """
        # The following query behaves as the Peer Assessment Over Grading Queue. This
        # will find a random submission (via PeerWorkflow) in this course / question
        # that:
        #  1) Does not belong to you
        #  2) Is not something you have already scored
        #  3) Has not been cancelled.
        try:
            #query = list(PeerWorkflow.objects.raw(
            #    "select pw.id, pw.submission_uuid "
            #    "from assessment_peerworkflow pw "
            #    "where course_id=%s "
            #    "and item_id=%s "
            #    "and student_id<>%s "
            #    "and pw.cancelled_at is NULL "
            #    "and pw.id not in ( "
            #        "select pwi.author_id "
            #        "from assessment_peerworkflowitem pwi "
            #        "where pwi.scorer_id=%s); ",
            #    [self.course_id, self.item_id, self.student_id, self.id]
            #))
            # Note: RawQuerySet has no count() method, so the raw query above
            # was replaced with an equivalent ORM query.
            query = PeerWorkflow.objects.filter(
                course_id=self.course_id,
                item_id=self.item_id,
                cancelled_at=None).exclude(student_id=self.student_id).exclude(
                    graded_by__scorer_id=self.id).order_by()
            #workflow_count = len(query)
            workflow_count = query.count()
            if workflow_count < 1:
                return None

            random_int = random.randint(0, workflow_count - 1)
            random_workflow = query[random_int]

            return random_workflow.submission_uuid
        except DatabaseError:
            error_message = (
                u"An internal error occurred while retrieving a peer submission "
                u"for student {}").format(self)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
Example #9
0
def create_peer_workflow(submission_uuid):
    """Create a new peer workflow for a student item and submission.

    Creates a unique peer workflow for a student item, associated with a
    submission.

    Args:
        submission_uuid (str): The submission associated with this workflow.

    Returns:
        None

    Raises:
        SubmissionError: There was an error retrieving the submission.
        PeerAssessmentInternalError: Raised when there is an internal error
            creating the Workflow.

    Examples:
        >>> create_peer_workflow("1")

    """
    try:
        with transaction.atomic():
            submission = sub_api.get_submission_and_student(submission_uuid)
            workflow, __ = PeerWorkflow.objects.get_or_create(
                student_id=submission['student_item']['student_id'],
                course_id=submission['student_item']['course_id'],
                item_id=submission['student_item']['item_id'],
                submission_uuid=submission_uuid
            )
            workflow.save()
    except IntegrityError:
        # If we get an integrity error, it means someone else has already
        # created a workflow for this submission, so we don't need to do anything.
        pass
    except DatabaseError as ex:
        error_message = (
            "An internal error occurred while creating a new peer "
            "workflow for submission {}"
        ).format(submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex
Example #10
0
    def get_submission_for_over_grading(self):
        """
        Retrieve the next submission uuid for over grading in peer assessment.
        """
        # The following query behaves as the Peer Assessment Over Grading Queue. This
        # will find a random submission (via PeerWorkflow) in this course / question
        # that:
        #  1) Does not belong to you
        #  2) Is not something you have already scored
        #  3) Has not been cancelled.
        try:
            query = list(PeerWorkflow.objects.raw(
                "select pw.id, pw.submission_uuid "
                "from assessment_peerworkflow pw "
                "where course_id=%s "
                "and item_id=%s "
                "and student_id<>%s "
                "and pw.cancelled_at is NULL "
                "and pw.id not in ( "
                "select pwi.author_id "
                "from assessment_peerworkflowitem pwi "
                "where pwi.scorer_id=%s"
                "); ",
                [self.course_id, self.item_id, self.student_id, self.id]
            ))
            workflow_count = len(query)
            if workflow_count < 1:
                return None

            random_int = random.randint(0, workflow_count - 1)
            random_workflow = query[random_int]

            return random_workflow.submission_uuid
        except DatabaseError:
            error_message = (
                u"An internal error occurred while retrieving a peer submission "
                u"for learner {}"
            ).format(self)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
Example #11
0
    def create_item(cls, scorer_workflow, submission_uuid):
        """
        Create a new peer workflow item for a scorer and a submission.

        Args:
            scorer_workflow (PeerWorkflow): The peer workflow associated with the scorer.
            submission_uuid (str): The submission associated with this workflow.

        Raises:
            PeerAssessmentInternalError: Raised when there is an internal error
                creating the Workflow.
        """
        peer_workflow = cls.get_by_submission_uuid(submission_uuid)

        try:
            workflow_items = PeerWorkflowItem.objects.filter(
                scorer=scorer_workflow,
                author=peer_workflow,
                submission_uuid=submission_uuid
            )

            if len(workflow_items) > 0:
                item = workflow_items[0]
            else:
                item = PeerWorkflowItem.objects.create(
                    scorer=scorer_workflow,
                    author=peer_workflow,
                    submission_uuid=submission_uuid
                )
            item.started_at = now()
            item.save()
            return item
        except DatabaseError:
            error_message = (
                u"An internal error occurred while creating a new peer workflow "
                u"item for workflow {}"
            ).format(scorer_workflow)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
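A usage sketch for the classmethod above, assuming it is defined on PeerWorkflow (as the cls parameter suggests); both UUIDs are placeholders.

# Hypothetical usage: record that this scorer has started assessing a submission.
scorer_workflow = PeerWorkflow.get_by_submission_uuid("scorer-submission-uuid")  # placeholder
item = PeerWorkflow.create_item(scorer_workflow, "author-submission-uuid")  # placeholder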
Example #12
0
def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
    """Retrieve the assessments created by the given submission's author.

    Retrieves all the assessments created by the given submission's author. This
    API returns related feedback without making any assumptions about grading.
    Any outstanding assessments associated with this submission will not be
    returned.

    Args:
        submission_uuid (str): The submission of the student whose assessments
            we are requesting. Required.

    Kwargs:
        scored_only (bool): Only retrieve the assessments used to generate a score
            for this submission.
        limit (int): Limit the returned assessments. If None, returns all.

    Returns:
        list(dict): A list of dictionaries, where each dictionary represents a
            separate assessment. Each assessment contains points earned, points
            possible, time scored, scorer id, score type, and feedback. If no
            workflow is found associated with the given submission_uuid, returns
            an empty list.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid.
        PeerAssessmentInternalError: Raised when there is an internal error
            while retrieving the assessments associated with this submission.

    Examples:
        >>> get_submitted_assessments("1", scored_only=True, limit=2)
        [
            {
                'points_earned': 6,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Your submission was thrilling.'
            },
            {
                'points_earned': 11,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Great submission.'
            }
        ]

    """
    try:
        # If no workflow is found associated with the uuid, this returns None,
        # and an empty set of assessments will be returned.
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        items = PeerWorkflowItem.objects.filter(
            scorer=workflow,
            assessment__isnull=False
        )
        if scored_only:
            items = items.exclude(scored=False)
        assessments = Assessment.objects.filter(
            pk__in=[item.assessment.pk for item in items])[:limit]
        return serialize_assessments(assessments)
    except DatabaseError:
        error_message = (
            u"Couldn't retrieve the assessments completed by the "
            " student with submission {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #13
0
def create_assessment(
        scorer_submission_uuid,
        scorer_id,
        options_selected,
        criterion_feedback,
        overall_feedback,
        rubric_dict,
        num_required_grades,
        scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The serialized rubric model used to validate the
            selected options and create the assessment.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Kwargs:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    # Ensure that this variable is declared so that if an error occurs
    # we don't get a second error when trying to log it.
    assessment_dict = None

    try:
        rubric = rubric_from_dict(rubric_dict)

        # Validate that the selected options matched the rubric
        # and raise an error if this is not the case
        try:
            option_ids = rubric.options_ids(options_selected)
        except InvalidOptionSelection:
            msg = "Selected options do not match the rubric"
            logger.warning(msg, exc_info=True)
            raise PeerAssessmentRequestError(msg)

        scorer_workflow = PeerWorkflow.objects.get(submission_uuid=scorer_submission_uuid)

        peer_workflow_item = scorer_workflow.get_latest_open_workflow_item()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}."
            ).format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)

        peer_submission_uuid = peer_workflow_item.author.submission_uuid
        peer_assessment = {
            "rubric": rubric.id,
            "scorer_id": scorer_id,
            "submission_uuid": peer_submission_uuid,
            "score_type": PEER_TYPE,
            "feedback": overall_feedback[0:Assessment.MAXSIZE],
        }

        if scored_at is not None:
            peer_assessment["scored_at"] = scored_at

        peer_serializer = AssessmentSerializer(data=peer_assessment)

        if not peer_serializer.is_valid():
            msg = (
                u"An error occurred while serializing "
                u"the peer assessment associated with "
                u"the scorer's submission UUID {}."
            ).format(scorer_submission_uuid)
            raise PeerAssessmentRequestError(msg)

        assessment = peer_serializer.save()

        # We do this to work around django-rest-framework serializer
        # validation, which would otherwise require two DB queries per
        # option. We already validated these options above.
        AssessmentPart.add_to_assessment(assessment, option_ids, criterion_feedback=criterion_feedback)

        # Close the active assessment
        scorer_workflow.close_active_assessment(peer_submission_uuid, assessment, num_required_grades)
        assessment_dict = full_assessment_dict(assessment)
        _log_assessment(assessment, scorer_workflow)

        return assessment_dict
    except DatabaseError:
        error_message = (
            u"An error occurred while creating assessment {} by: {}"
        ).format(assessment_dict, scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
    except PeerWorkflow.DoesNotExist:
        message = (
            u"There is no Peer Workflow associated with the given "
            u"submission UUID {}."
        ).format(scorer_submission_uuid)
        logger.error(message)
        raise PeerAssessmentWorkflowError(message)
Example #14
0
def create_assessment(scorer_submission_uuid,
                      scorer_id,
                      options_selected,
                      criterion_feedback,
                      overall_feedback,
                      rubric_dict,
                      num_required_grades,
                      scored_at=None):
    """Creates an assessment on the given submission.

    Assessments are created based on feedback associated with a particular
    rubric.

    Args:
        scorer_submission_uuid (str): The submission uuid for the Scorer's
            workflow. The submission being assessed can be determined via the
            peer workflow of the grading student.
        scorer_id (str): The user ID for the user giving this assessment. This
            is required to create an assessment on a submission.
        options_selected (dict): Dictionary mapping criterion names to the
            option names the user selected for that criterion.
        criterion_feedback (dict): Dictionary mapping criterion names to the
            free-form text feedback the user gave for the criterion.
            Since criterion feedback is optional, some criteria may not appear
            in the dictionary.
        overall_feedback (unicode): Free-form text feedback on the submission overall.
        rubric_dict (dict): The serialized rubric model used to validate the
            selected options and create the assessment.
        num_required_grades (int): The required number of assessments a
            submission requires before it is completed. If this number of
            assessments is reached, the grading_completed_at timestamp is set
            for the Workflow.

    Keyword Args:
        scored_at (datetime): Optional argument to override the time in which
            the assessment took place. If not specified, scored_at is set to
            now.

    Returns:
        dict: the Assessment model, serialized as a dict.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid, or
            the assessment_dict does not contain the required values to create
            an assessment.
        PeerAssessmentInternalError: Raised when there is an internal error
            while creating a new assessment.

    Examples:
        >>> options_selected = {"clarity": "Very clear", "precision": "Somewhat precise"}
        >>> criterion_feedback = {"clarity": "I thought this essay was very clear."}
        >>> feedback = "Your submission was thrilling."
        >>> create_assessment("1", "Tim", options_selected, criterion_feedback, feedback, rubric_dict)
    """
    try:
        # Retrieve workflow information
        scorer_workflow = PeerWorkflow.objects.get(
            submission_uuid=scorer_submission_uuid)
        peer_workflow_item = scorer_workflow.find_active_assessments()
        if peer_workflow_item is None:
            message = (
                u"There are no open assessments associated with the scorer's "
                u"submission UUID {}.").format(scorer_submission_uuid)
            logger.warning(message)
            raise PeerAssessmentWorkflowError(message)
        peer_submission_uuid = peer_workflow_item.submission_uuid

        assessment = _complete_assessment(rubric_dict, scorer_id,
                                          peer_submission_uuid,
                                          options_selected, criterion_feedback,
                                          scorer_workflow, overall_feedback,
                                          num_required_grades, scored_at)

        _log_assessment(assessment, scorer_workflow)
        return full_assessment_dict(assessment)
    except PeerWorkflow.DoesNotExist:
        message = (u"There is no Peer Workflow associated with the given "
                   u"submission UUID {}.").format(scorer_submission_uuid)
        logger.exception(message)
        raise PeerAssessmentWorkflowError(message)
    except InvalidRubric:
        msg = u"The rubric definition is not valid."
        logger.exception(msg)
        raise PeerAssessmentRequestError(msg)
    except InvalidRubricSelection:
        msg = u"Invalid options were selected in the rubric."
        logger.warning(msg, exc_info=True)
        raise PeerAssessmentRequestError(msg)
    except DatabaseError:
        error_message = (
            u"An error occurred while creating an assessment by the scorer with this ID: {}"
        ).format(scorer_id)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #15
0
    def get_submission_for_review(self, graded_by):
        """
        Find a submission for peer assessment. This function will find the next
        submission that requires assessment, excluding any submission that has been
        completely graded, or is actively being reviewed by other students.

        Args:
            graded_by (int): The number of assessments a submission must receive
                before it is considered fully graded.

        Returns:
            submission_uuid (str): The submission_uuid for the submission to review.

        Raises:
            PeerAssessmentInternalError: Raised when there is an error retrieving
                the workflows or workflow items for this request.

        """
        timeout = (now() - self.TIME_LIMIT).strftime("%Y-%m-%d %H:%M:%S")
        # The following query behaves as the Peer Assessment Queue. This will
        # find the next submission (via PeerWorkflow) in this course / question
        # that:
        #  1) Does not belong to you
        #  2) Does not have enough completed assessments
        #  3) Is not something you have already scored.
        #  4) Does not have a combination of completed assessments or open
        #     assessments equal to or more than the requirement.
        #  5) Has not been cancelled.
        try:
            peer_workflows = list(
                PeerWorkflow.objects.raw(
                    "select pw.id, pw.submission_uuid "
                    "from assessment_peerworkflow pw "
                    "where pw.item_id=%s "
                    "and pw.course_id=%s "
                    "and pw.student_id<>%s "
                    "and pw.grading_completed_at is NULL "
                    "and pw.cancelled_at is NULL "
                    "and pw.id not in ("
                    "   select pwi.author_id "
                    "   from assessment_peerworkflowitem pwi "
                    "   where pwi.scorer_id=%s "
                    "   and pwi.assessment_id is not NULL "
                    ") "
                    "and ("
                    "   select count(pwi.id) as c "
                    "   from assessment_peerworkflowitem pwi "
                    "   where pwi.author_id=pw.id "
                    "   and (pwi.assessment_id is not NULL or pwi.started_at > %s) "
                    ") < %s "
                    "order by pw.created_at, pw.id "
                    "limit 1; ", [
                        self.item_id, self.course_id, self.student_id, self.id,
                        timeout, graded_by
                    ]))
            if not peer_workflows:
                return None

            return peer_workflows[0].submission_uuid
        except DatabaseError:
            error_message = (
                u"An internal error occurred while retrieving a peer submission "
                u"for learner {}").format(self)
            logger.exception(error_message)
            raise PeerAssessmentInternalError(error_message)
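A caller sketch for the method above; `workflow` stands for the reviewing learner's own PeerWorkflow, and `must_grade` is an assumed name for the required number of peer assessments.

# Hypothetical caller: graded_by caps the count of completed or recently
# started assessments, per the SQL above.
must_grade = 3  # placeholder value
submission_uuid = workflow.get_submission_for_review(graded_by=must_grade)
if submission_uuid is None:
    pass  # nothing currently needs review from this learner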