Example #1
    def test_create_item_multiple_available(self):
        # Bugfix TIM-572
        submitter_sub = sub_api.create_submission(self.STUDENT_ITEM,
                                                  'test answer')
        submitter_workflow = PeerWorkflow.objects.create(
            student_id=self.STUDENT_ITEM['student_id'],
            item_id=self.STUDENT_ITEM['item_id'],
            course_id=self.STUDENT_ITEM['course_id'],
            submission_uuid=submitter_sub['uuid'])
        scorer_sub = sub_api.create_submission(self.OTHER_STUDENT,
                                               'test answer 2')
        scorer_workflow = PeerWorkflow.objects.create(
            student_id=self.OTHER_STUDENT['student_id'],
            item_id=self.OTHER_STUDENT['item_id'],
            course_id=self.OTHER_STUDENT['course_id'],
            submission_uuid=scorer_sub['uuid'])

        for _ in range(2):
            PeerWorkflowItem.objects.create(
                scorer=scorer_workflow,
                author=submitter_workflow,
                submission_uuid=submitter_sub['uuid'])

        # This used to cause an error when `get_or_create` returned multiple workflow items
        PeerWorkflow.create_item(scorer_workflow, submitter_sub['uuid'])
Example #2
def on_cancel(submission_uuid):
    """Cancel the peer workflow for submission.

    Sets the cancelled_at field in peer workflow.

    Args:
        submission_uuid (str): The submission UUID associated with this workflow.

    Returns:
        None

    """
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        if workflow:
            workflow.cancelled_at = timezone.now()
            workflow.save()
    except (PeerAssessmentWorkflowError, DatabaseError):
        error_message = (
            u"An internal error occurred while cancelling the peer"
            u"workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
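
A brief usage sketch for on_cancel, assuming it is imported from the peer assessment API shown above; the submission UUID is a placeholder, not a real value:

# Hypothetical caller: cancel the peer workflow for one submission.
# The UUID below is a placeholder.
try:
    on_cancel("aaaaaaaa-0000-0000-0000-000000000000")
except PeerAssessmentInternalError:
    # on_cancel already logs the failure; the caller only decides how to surface it.
    pass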
Example #3
def get_score(submission_uuid, peer_requirements):
    """
    Retrieve a score for a submission if requirements have been satisfied.

    Args:
        submission_uuid (str): The UUID of the submission.
        peer_requirements (dict): Dictionary with the key "must_be_graded_by"
            indicating the required number of assessments the student
            must receive to get a score.

    Returns:
        A dictionary with the points earned, points possible, and
        contributing_assessments information, along with a None staff_id.

    """

    if peer_requirements is None:
        return None

    # User hasn't completed their own submission yet
    if not submitter_is_finished(submission_uuid, peer_requirements):
        return None

    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)

    if workflow is None:
        return None

    # Retrieve the assessments in ascending order by score date,
    # because we want to use the *first* one(s) for the score.
    items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE
    ).order_by('-assessment')

    # Check if enough peers have graded this submission
    if items.count() < required_peer_grades(submission_uuid, peer_requirements):
        return None

    # Unfortunately, we cannot use update() after taking a slice,
    # so we need to update and save the items individually.
    # One might be tempted to first query for the first n assessments,
    # then select items that have those assessments.
    # However, this generates a SQL query with a LIMIT in a subquery,
    # which is not supported by some versions of MySQL.
    # Although this approach generates more database queries, the number is likely to
    # be relatively small (at least 1 and very likely less than 5).
    for scored_item in items[:peer_requirements["must_be_graded_by"]]:
        scored_item.scored = True
        scored_item.save()
    assessments = [item.assessment for item in items]

    return {
        "points_earned": sum(
            get_assessment_median_scores(submission_uuid).values()
        ),
        "points_possible": assessments[0].points_possible,
        "contributing_assessments": [assessment.id for assessment in assessments],
        "staff_id": None,
    }
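
For illustration, a hedged sketch of calling get_score; the requirements dict and submission UUID below are placeholders rather than values from the source:

# Hypothetical call: require three peer assessments before producing a score.
peer_requirements = {"must_be_graded_by": 3}
score = get_score("bbbbbbbb-0000-0000-0000-000000000000", peer_requirements)
if score is None:
    print("Requirements not yet satisfied, or no workflow exists")
else:
    print(score["points_earned"], "of", score["points_possible"])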
Example #4
def on_cancel(submission_uuid):
    """Cancel the peer workflow for submission.

    Sets the cancelled_at field in peer workflow.

    Args:
        submission_uuid (str): The submission UUID associated with this workflow.

    Returns:
        None

    """
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        if workflow:
            workflow.cancelled_at = timezone.now()
            workflow.save()
    except (PeerAssessmentWorkflowError, DatabaseError) as ex:
        error_message = (
            "An internal error occurred while cancelling the peer"
            "workflow for submission {}"
            .format(submission_uuid)
        )
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message) from ex
Example #5
    def test_close_active_assessment(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

        # Create a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])

        # Get a workflow item opened up.
        submission = peer_api.get_submission_to_assess(buffy_answer['uuid'], 3)

        self.assertEqual(xander_answer["uuid"], submission["uuid"])

        assessment_dict = peer_api.create_assessment(
            buffy_answer["uuid"], "Buffy",
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessment = Assessment.objects.filter(
            scorer_id=assessment_dict["scorer_id"],
            scored_at=assessment_dict["scored_at"])[0]
        buffy_workflow.close_active_assessment(xander_answer["uuid"], assessment, REQUIRED_GRADED_BY)

        item = PeerWorkflowItem.objects.get(submission_uuid=xander_answer['uuid'])
        self.assertEqual(xander_answer["uuid"], submission["uuid"])
        self.assertIsNotNone(item.assessment)
Example #6
def assessment_is_finished(submission_uuid, peer_requirements):
    """
    Check whether the submitter has received enough assessments
    to get a score.

    If the requirements dict is None (because we're being updated
    asynchronously or when the workflow is first created),
    then automatically return False.

    Args:
        submission_uuid (str): The UUID of the submission being tracked.
        peer_requirements (dict): Dictionary with the key "must_be_graded_by"
            indicating the required number of assessments the student
            must receive to get a score.

    Returns:
        bool
    """
    if not peer_requirements:
        return False

    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if workflow is None:
        return False

    scored_items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE)
    return scored_items.count() >= peer_requirements["must_be_graded_by"]
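
A short, hypothetical check built on assessment_is_finished; the UUID and requirements dict are placeholders:

# Hypothetical check: has this submission received enough peer assessments?
requirements = {"must_be_graded_by": 2}  # placeholder requirements
if assessment_is_finished("cccccccc-0000-0000-0000-000000000000", requirements):
    print("Enough peers have graded this submission")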
Example #7
def assessment_is_finished(submission_uuid, requirements):
    """
    Check whether the submitter has received enough assessments
    to get a score.

    If the requirements dict is None (because we're being updated
    asynchronously or when the workflow is first created),
    then automatically return False.

    Args:
        submission_uuid (str): The UUID of the submission being tracked.
        requirements (dict): Dictionary with the key "must_be_graded_by"
            indicating the required number of assessments the student
            must receive to get a score.

    Returns:
        bool
    """
    if requirements is None:
        return False

    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if workflow is None:
        return False

    scored_items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE
    )
    return scored_items.count() >= requirements["must_be_graded_by"]
Example #8
def get_waiting_step_details(
    course_id,
    item_id,
    submission_uuids,
    must_be_graded_by
):
    """
    Proxy method to `get_waiting_step_details` model method.
    Retrieves information about users in the waiting step (waiting for peer reviews).

    Args:
        course_id (str): The course that this problem belongs to.
        item_id (str): The student_item (problem) that we want to know statistics about.
        submission_uuids (list): A list of submission UUIDs to filter the results for;
                                    if None is given, this will return all students for
                                    whom the peer step is not complete.
        must_be_graded_by (int): number of required peer reviews for this problem.

    Returns:
        dict: a dictionary that contains information about students in the waiting step.
              The dictionary includes the following information: `student_id`, `created_at` (
              timestamp of when the step was created), `graded` (how many peers the student
              graded) and `graded_by` (how many peers graded this student).
    """
    return PeerWorkflow.get_waiting_step_details(
        course_id,
        item_id,
        submission_uuids,
        must_be_graded_by
    )
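
As a rough usage sketch, with made-up course and item identifiers for illustration:

# Hypothetical call: list everyone still waiting on peer reviews for a problem.
waiting_students = get_waiting_step_details(
    course_id="course-v1:Example+Demo+2024",  # placeholder course
    item_id="block-v1:Example+Demo+2024+type@openassessment+block@demo",  # placeholder item
    submission_uuids=None,  # None returns all students whose peer step is incomplete
    must_be_graded_by=3,
)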
Example #9
def get_score(submission_uuid, peer_requirements):
    """
    Retrieve a score for a submission if requirements have been satisfied.

    Args:
        submission_uuid (str): The UUID of the submission.
        peer_requirements (dict): Dictionary with the key "must_be_graded_by"
            indicating the required number of assessments the student
            must receive to get a score.

    Returns:
        A dictionary with the points earned, points possible, and
        contributing_assessments information, along with a None staff_id.

    """
    if peer_requirements is None:
        return None

    # User hasn't completed their own submission yet
    if not submitter_is_finished(submission_uuid, peer_requirements):
        return None

    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)

    if workflow is None:
        return None

    # Retrieve the assessments in ascending order by score date,
    # because we want to use the *first* one(s) for the score.
    items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE
    ).order_by('-assessment')

    submission_finished = items.count() >= peer_requirements["must_be_graded_by"]
    if not submission_finished:
        return None

    # Unfortunately, we cannot use update() after taking a slice,
    # so we need to update and save the items individually.
    # One might be tempted to first query for the first n assessments,
    # then select items that have those assessments.
    # However, this generates a SQL query with a LIMIT in a subquery,
    # which is not supported by some versions of MySQL.
    # Although this approach generates more database queries, the number is likely to
    # be relatively small (at least 1 and very likely less than 5).
    for scored_item in items[:peer_requirements["must_be_graded_by"]]:
        scored_item.scored = True
        scored_item.save()
    assessments = [item.assessment for item in items]

    return {
        "points_earned": sum(
            get_assessment_median_scores(submission_uuid).values()
        ),
        "points_possible": assessments[0].points_possible,
        "contributing_assessments": [assessment.id for assessment in assessments],
        "staff_id": None,
    }
Example #10
    def test_get_workflow_by_uuid(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        self._create_student_and_submission("Xander", "Xander's answer")
        self._create_student_and_submission("Willow", "Willow's answer")
        buffy_answer_two, _ = self._create_student_and_submission("Buffy", "Buffy's answer")

        workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer_two['uuid'])
        self.assertNotEqual(buffy_answer["uuid"], workflow.submission_uuid)
        self.assertEqual(buffy_answer_two["uuid"], workflow.submission_uuid)
Example #11
def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
    """
    Begin peer-assessing a particular submission.
    Note that this does NOT pick the submission from the prioritized list of available submissions.
    Mainly useful for testing.

    Args:
        scorer_submission_uuid (str): The submission UUID of the scoring student.
        submission_uuid (str): The unique identifier of the submission being scored

    Returns:
        None

    Raises:
        PeerAssessmentWorkflowError: Could not find the workflow for the student.
        PeerAssessmentInternalError: Could not create the peer workflow item.
    """
    workflow = PeerWorkflow.get_by_submission_uuid(scorer_submission_uuid)
    PeerWorkflow.create_item(workflow, submission_uuid)
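
A hedged example of the test-only helper above; both UUIDs are placeholders:

# Hypothetical test setup: make the scorer start assessing a specific peer submission.
create_peer_workflow_item(
    scorer_submission_uuid="dddddddd-0000-0000-0000-000000000000",
    submission_uuid="eeeeeeee-0000-0000-0000-000000000000",
)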
Example #12
    def test_get_submission_for_review(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
        self._create_student_and_submission("Willow", "Willow's answer")

        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])

        # Get the next submission for review
        submission_uuid = buffy_workflow.get_submission_for_review(3)
        self.assertEqual(xander_answer["uuid"], submission_uuid)
Example #13
def create_peer_workflow_item(scorer_submission_uuid, submission_uuid):
    """
    Begin peer-assessing a particular submission.
    Note that this does NOT pick the submission from the prioritized list of available submissions.
    Mainly useful for testing.

    Args:
        scorer_submission_uuid (str): The submission UUID of the scoring student.
        submission_uuid (str): The unique identifier of the submission being scored

    Returns:
        None

    Raises:
        PeerAssessmentWorkflowError: Could not find the workflow for the student.
        PeerAssessmentInternalError: Could not create the peer workflow item.
    """
    workflow = PeerWorkflow.get_by_submission_uuid(scorer_submission_uuid)
    PeerWorkflow.create_item(workflow, submission_uuid)
Example #14
    def test_find_active_assessments(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")

        # Check for a workflow for Buffy.
        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])
        self.assertIsNotNone(buffy_workflow)

        # Check to see if Buffy is actively reviewing Xander's submission.
        # She isn't so we should get back no uuid.
        submission_uuid = buffy_workflow.find_active_assessments()
        self.assertIsNone(submission_uuid)

        # Buffy is going to review Xander's submission, so create a workflow
        # item for Buffy.
        PeerWorkflow.create_item(buffy_workflow, xander_answer["uuid"])

        # Check to see if Buffy is still actively reviewing Xander's submission.
        submission_uuid = buffy_workflow.find_active_assessments()
        self.assertEqual(xander_answer["uuid"], submission_uuid)
Example #15
def get_score(submission_uuid, requirements):
    """
    Retrieve a score for a submission if requirements have been satisfied.

    Args:
        submission_uuid (str): The UUID of the submission.
        requirements (dict): Dictionary with the key "must_be_graded_by"
            indicating the required number of assessments the student
            must receive to get a score.

    Returns:
        dict with keys "points_earned" and "points_possible".

    """

    # User hasn't completed their own submission yet
    if not submitter_is_finished(submission_uuid, requirements):
        return None

    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)

    if workflow is None:
        return None

    # This query will use the ordering defined by the assessment model
    # (descending scored_at, then descending id)
    items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE
    ).order_by('assessment')

    submission_finished = items.count() >= requirements["must_be_graded_by"]
    if not submission_finished:
        return None

    # Unfortunately, we cannot use update() after taking a slice,
    # so we need to update and save the items individually.
    # One might be tempted to first query for the first n assessments,
    # then select items that have those assessments.
    # However, this generates a SQL query with a LIMIT in a subquery,
    # which is not supported by some versions of MySQL.
    # Although this approach generates more database queries, the number is likely to
    # be relatively small (at least 1 and very likely less than 5).
    for scored_item in items[:requirements["must_be_graded_by"]]:
        scored_item.scored = True
        scored_item.save()

    return {
        "points_earned": sum(
            get_assessment_median_scores(submission_uuid).values()
        ),
        "points_possible": items[0].assessment.points_possible,
    }
Example #16
def get_graded_by_count(submission_uuid):
    """
    Retrieve the number of peer assessments the submitter has received.
    Returns None if no submission with this ID.
    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if workflow is None:
        return None

    scored_items = workflow.graded_by.filter(
        assessment__submission_uuid=submission_uuid,
        assessment__score_type=PEER_TYPE)
    return scored_items.count()
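
A small sketch distinguishing the two outcomes of get_graded_by_count; the UUID is a placeholder:

# Hypothetical usage: None means no matching submission; an int is the assessment count.
count = get_graded_by_count("ffffffff-0000-0000-0000-000000000000")
if count is None:
    print("No peer workflow found for that submission")
else:
    print("Graded by", count, "peer(s)")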
Example #17
    def test_create_item_multiple_available(self):
        # Bugfix TIM-572
        submitter_sub = sub_api.create_submission(self.STUDENT_ITEM, "test answer")
        submitter_workflow = PeerWorkflow.objects.create(
            student_id=self.STUDENT_ITEM["student_id"],
            item_id=self.STUDENT_ITEM["item_id"],
            course_id=self.STUDENT_ITEM["course_id"],
            submission_uuid=submitter_sub["uuid"],
        )
        scorer_sub = sub_api.create_submission(self.OTHER_STUDENT, "test answer 2")
        scorer_workflow = PeerWorkflow.objects.create(
            student_id=self.OTHER_STUDENT["student_id"],
            item_id=self.OTHER_STUDENT["item_id"],
            course_id=self.OTHER_STUDENT["course_id"],
            submission_uuid=scorer_sub["uuid"],
        )

        for _ in range(2):
            PeerWorkflowItem.objects.create(
                scorer=scorer_workflow, author=submitter_workflow, submission_uuid=submitter_sub["uuid"]
            )

        # This used to cause an error when `get_or_create` returned multiple workflow items
        PeerWorkflow.create_item(scorer_workflow, submitter_sub["uuid"])
Example #18
def is_workflow_cancelled(submission_uuid):
    """
    Check whether the workflow for this submission has been cancelled.

    Args:
        submission_uuid (str): The UUID of the workflow's submission.

    Returns:
        True/False
    """
    if submission_uuid is None:
        return False
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        return workflow.is_cancelled if workflow else False
    except PeerAssessmentWorkflowError:
        return False
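
For illustration, a hypothetical guard built on is_workflow_cancelled; the UUID is a placeholder:

# Hypothetical guard: skip handing out more peer work if the workflow was cancelled.
if is_workflow_cancelled("12121212-0000-0000-0000-000000000000"):
    print("Peer workflow was cancelled; no further peer grading needed")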
Example #19
def is_workflow_cancelled(submission_uuid):
    """
    Check whether the workflow for this submission has been cancelled.

    Args:
        submission_uuid (str): The UUID of the workflow's submission.

    Returns:
        True/False
    """
    if submission_uuid is None:
        return False
    try:
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        return workflow.is_cancelled if workflow else False
    except PeerAssessmentWorkflowError:
        return False
Example #20
def has_finished_required_evaluating(submission_uuid, required_assessments):
    """Check if a student still needs to evaluate more submissions

    Per the contract of the peer assessment workflow, a student must evaluate a
    number of peers before receiving feedback on their submission.

    Args:
        submission_uuid (str): The submission UUID is required to determine if
            the associated student has completed enough assessments. This
            argument is required.
        required_assessments (int): The number of assessments a student has to
            submit before receiving the feedback on their submission. This is a
            required argument.

    Returns:
        tuple: True if the student has evaluated enough peer submissions to move
            through the peer assessment workflow. False if the student needs to
            evaluate more peer submissions. The second value is the count of
            assessments completed.

    Raises:
        PeerAssessmentRequestError: Raised when the submission UUID is invalid,
            or the required_assessments is not a positive integer.
        PeerAssessmentInternalError: Raised when there is an internal error
            while evaluating this workflow rule.

    Examples:
        >>> has_finished_required_evaluating("abc123", 3)
        (True, 3)

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    done = False
    peers_graded = 0
    if workflow:
        peers_graded = workflow.num_peers_graded()
        done = (peers_graded >= required_assessments)
    return done, peers_graded
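
A usage sketch mirroring the docstring example; the UUID is a placeholder:

# Hypothetical check: has this learner assessed enough peers yet?
done, peers_graded = has_finished_required_evaluating(
    "34343434-0000-0000-0000-000000000000", 3
)
if not done:
    print("Still need to assess", 3 - peers_graded, "more peer submission(s)")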
Example #21
def has_finished_required_evaluating(submission_uuid, required_assessments):
    """Check if a student still needs to evaluate more submissions

    Per the contract of the peer assessment workflow, a student must evaluate a
    number of peers before receiving feedback on their submission.

    Args:
        submission_uuid (str): The submission UUID is required to determine if
            the associated student has completed enough assessments. This
            argument is required.
        required_assessments (int): The number of assessments a student has to
            submit before receiving the feedback on their submission. This is a
            required argument.

    Returns:
        tuple: True if the student has evaluated enough peer submissions to move
            through the peer assessment workflow. False if the student needs to
            evaluate more peer submissions. The second value is the count of
            assessments completed.

    Raises:
        PeerAssessmentRequestError: Raised when the submission UUID is invalid,
            or the required_assessments is not a positive integer.
        PeerAssessmentInternalError: Raised when there is an internal error
            while evaluating this workflow rule.

    Examples:
        >>> has_finished_required_evaluating("abc123", 3)
        (True, 3)

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    done = False
    peers_graded = 0
    if workflow:
        peers_graded = workflow.num_peers_graded()
        done = (peers_graded >= required_assessments)
    return done, peers_graded
Example #22
    def test_get_submission_for_over_grading(self):
        buffy_answer, _ = self._create_student_and_submission("Buffy", "Buffy's answer")
        xander_answer, _ = self._create_student_and_submission("Xander", "Xander's answer")
        willow_answer, _ = self._create_student_and_submission("Willow", "Willow's answer")

        buffy_workflow = PeerWorkflow.get_by_submission_uuid(buffy_answer['uuid'])
        xander_workflow = PeerWorkflow.get_by_submission_uuid(xander_answer['uuid'])
        willow_workflow = PeerWorkflow.get_by_submission_uuid(willow_answer['uuid'])

        # Get a bunch of workflow items opened up.
        PeerWorkflow.create_item(buffy_workflow, xander_answer["uuid"])
        PeerWorkflow.create_item(willow_workflow, xander_answer["uuid"])
        PeerWorkflow.create_item(xander_workflow, xander_answer["uuid"])
        PeerWorkflow.create_item(buffy_workflow, willow_answer["uuid"])
        PeerWorkflow.create_item(xander_workflow, willow_answer["uuid"])

        # Get the next submission for review
        submission_uuid = xander_workflow.get_submission_for_over_grading()

        if not (buffy_answer["uuid"] == submission_uuid or willow_answer["uuid"] == submission_uuid):
            self.fail("Submission was not Buffy or Willow's.")
Example #23
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )
    peer_submission_uuid = workflow.find_active_assessments()
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise,
    # get the first submission available for over grading ("over-grading").
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()
    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
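
A hedged sketch of fetching work for a learner with get_submission_to_assess; the UUID is a placeholder and the graded_by value mirrors the problem's "must be graded by" setting:

# Hypothetical usage: get the next peer submission for this learner to assess.
peer_submission = get_submission_to_assess("56565656-0000-0000-0000-000000000000", 3)
if peer_submission is None:
    print("Nothing available to assess right now")
else:
    print("Assess this answer:", peer_submission["answer"])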
Example #24
def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
    """Retrieve the assessments created by the given submission's author.

    Retrieves all the assessments created by the given submission's author. This
    API returns related feedback without making any assumptions about grading.
    Any outstanding assessments associated with this submission will not be
    returned.

    Args:
        submission_uuid (str): The submission UUID of the student whose assessments
            we are requesting. Required.

    Kwargs:
        scored_only (boolean): Only retrieve the assessments used to generate a score
            for this submission.
        limit (int): Limit the returned assessments. If None, returns all.

    Returns:
        list(dict): A list of dictionaries, where each dictionary represents a
            separate assessment. Each assessment contains points earned, points
            possible, time scored, scorer id, score type, and feedback. If no
            workflow is found associated with the given submission_uuid, returns
            an empty list.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid.
        PeerAssessmentInternalError: Raised when there is an internal error
            while retrieving the assessments associated with this submission.

    Examples:
        >>> get_submitted_assessments("1", scored_only=True, limit=2)
        [
            {
                'points_earned': 6,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Your submission was thrilling.'
            },
            {
                'points_earned': 11,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Great submission.'
            }
        ]

    """
    try:
        # If no workflow is found associated with the uuid, this returns None,
        # and an empty set of assessments will be returned.
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        items = PeerWorkflowItem.objects.filter(
            scorer=workflow,
            assessment__isnull=False
        )
        if scored_only:
            items = items.exclude(scored=False)
        assessments = Assessment.objects.filter(
            pk__in=[item.assessment.pk for item in items])[:limit]
        return serialize_assessments(assessments)
    except DatabaseError:
        error_message = (
            u"Couldn't retrieve the assessments completed by the "
            " student with submission {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
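
For illustration, a hypothetical call that lists the scored assessments an author has written; the submission UUID is a placeholder:

# Hypothetical usage: show up to two assessments that counted toward peers' scores.
for assessment in get_submitted_assessments(
        "78787878-0000-0000-0000-000000000000", scored_only=True, limit=2):
    print(assessment["points_earned"], "of", assessment["points_possible"])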
Example #25
    def test_failure_to_get_latest_workflow(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Oh no.")
        tim_answer, _ = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
        PeerWorkflow.get_by_submission_uuid(tim_answer['uuid'])
Example #26
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )
    peer_submission_uuid = workflow.find_active_assessments()
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise,
    # get the first submission available for over grading ("over-grading").
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()
    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
Example #27
def get_submitted_assessments(submission_uuid, scored_only=True, limit=None):
    """Retrieve the assessments created by the given submission's author.

    Retrieves all the assessments created by the given submission's author. This
    API returns related feedback without making any assumptions about grading.
    Any outstanding assessments associated with this submission will not be
    returned.

    Args:
        submission_uuid (str): The submission UUID of the student whose assessments
            we are requesting. Required.

    Kwargs:
        scored_only (boolean): Only retrieve the assessments used to generate a score
            for this submission.
        limit (int): Limit the returned assessments. If None, returns all.

    Returns:
        list(dict): A list of dictionaries, where each dictionary represents a
            separate assessment. Each assessment contains points earned, points
            possible, time scored, scorer id, score type, and feedback. If no
            workflow is found associated with the given submission_uuid, returns
            an empty list.

    Raises:
        PeerAssessmentRequestError: Raised when the submission_id is invalid.
        PeerAssessmentInternalError: Raised when there is an internal error
            while retrieving the assessments associated with this submission.

    Examples:
        >>> get_submitted_assessments("1", scored_only=True, limit=2)
        [
            {
                'points_earned': 6,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 649284, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Your submission was thrilling.'
            },
            {
                'points_earned': 11,
                'points_possible': 12,
                'scored_at': datetime.datetime(2014, 1, 31, 14, 10, 17, 544214, tzinfo=<UTC>),
                'scorer': u"Tim",
                'feedback': u'Great submission.'
            }
        ]

    """
    try:
        # If no workflow is found associated with the uuid, this returns None,
        # and an empty set of assessments will be returned.
        workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
        items = PeerWorkflowItem.objects.filter(
            scorer=workflow,
            assessment__isnull=False
        )
        if scored_only:
            items = items.exclude(scored=False)
        assessments = Assessment.objects.filter(
            pk__in=[item.assessment.pk for item in items])[:limit]
        return serialize_assessments(assessments)
    except DatabaseError:
        error_message = (
            u"Couldn't retrieve the assessments completed by the "
            " student with submission {uuid}"
        ).format(uuid=submission_uuid)
        logger.exception(error_message)
        raise PeerAssessmentInternalError(error_message)
Example #28
    def test_create_workflow_item_error(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Oh no.")
        tim_answer, tim = self._create_student_and_submission("Tim", "Tim's answer", MONDAY)
        PeerWorkflow.create_item(tim, tim_answer['uuid'])