Code example #1
File: test_staff_info.py  Project: YoshidaKS/edx-ora2
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
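        # NOTE: namedtuple() returns a new class; assigning request.params below
        # sets a class attribute, which is enough to fake a request object here.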
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Code example #2
File: test_staff_info.py  Project: skim-ks/edx-ora2
    def test_staff_debug_student_info_self_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        path, context = xblock.get_student_info_path_and_context(request)
        self.assertEquals("Bob Answer",
                          context['submission']['answer']['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html",
                          path)
Code example #3
File: test_peer.py  Project: gradyward/edx-ora2
    def _create_student_and_submission(student, answer, date=None):
        new_student_item = STUDENT_ITEM.copy()
        new_student_item["student_id"] = student
        submission = sub_api.create_submission(new_student_item, answer, date)
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], STEPS)
        return submission, new_student_item
Code example #4
File: test_peer.py  Project: gradyward/edx-ora2
    def test_error_on_assessment_creation(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        peer_api.create_peer_workflow(submission["uuid"])
        peer_api.create_assessment(
            submission["uuid"], STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
            MONDAY,
        )
Code example #5
File: test_staff_info.py  Project: skim-ks/edx-ora2
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob")

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item,
                                               {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.create_peer_workflow(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
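        # get_submission_to_assess assigns Bob a peer submission to grade;
        # create_assessment then records his assessment for that assignment.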
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            dict(),
            "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Code example #6
File: test_staff_info.py  Project: YoshidaKS/edx-ora2
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.create_peer_workflow(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.create_peer_workflow(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Code example #7
    def update_from_assessments(self, assessment_requirements):
        """Query self and peer APIs and change our status if appropriate.

        If the status is done, we do nothing. Once something is done, we never
        move back to any other status.

        By default, an `AssessmentWorkflow` starts with status `peer`.

        If the peer API says that our submitter's requirements are met -- that
        the submitter of the submission we're tracking has assessed the required
        number of other submissions -- then the status will move to `self`.

        If the self API says that the person who created the submission we're
        tracking has assessed themselves, then we move to `waiting`.

        If we're in the `waiting` status, and the peer API says it can score
        this submission (meaning other students have created enough assessments
        of it), then we record the score in the submissions API and move our
        `status` to `done`.

        Args:
            assessment_requirements (dict): Dictionary that currently looks like:
                `{"peer": {"must_grade": <int>, "must_be_graded_by": <int>}}`
                `must_grade` is the number of assessments a student must complete.
                `must_be_graded_by` is the number of assessments a submission must
                receive to be scored. `must_grade` should be greater than
                `must_be_graded_by` to ensure that everyone will get scored.
                The intention is to eventually pass in more
                assessment-sequence-specific requirements in this dict.

        """
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api

        # If we're done, we're done -- it doesn't matter if requirements have
        # changed because we've already written a score.
        if self.status == self.STATUS.done:
            return

        # Update our AssessmentWorkflowStep models with the latest from our APIs
        steps = self._get_steps()

        # Go through each step and update its status.
        for step in steps:
            step.update(self.submission_uuid, assessment_requirements)

        # Fetch name of the first step that the submitter hasn't yet completed.
        new_status = next(
            (step.name
             for step in steps if step.submitter_completed_at is None),
            self.STATUS.waiting  # if nothing's left to complete, we're waiting
        )

        # If the submitter is beginning peer assessment, add them to the queue
        # by creating a new peer workflow
        if new_status == "peer":
            peer_api.create_peer_workflow(self.submission_uuid)

        # If the submitter has done all they need to do, let's check to see if
        # all steps have been fully assessed (i.e. we can score it).
        if (new_status == self.STATUS.waiting
                and all(step.assessment_completed_at for step in steps)):

            # At this point, we're trying to give a score. We currently have a
            # very simple rule for this -- if it has a peer step, use that for
            # scoring. If not, use the self step. Later on, we may put more
            # interesting rules here.
            step_names = [step.name for step in steps]
            score = None
            if self.STATUS.peer in step_names:
                score = peer_api.get_score(
                    self.submission_uuid,
                    assessment_requirements[self.STATUS.peer])
            elif self.STATUS.self in step_names:
                score = self_api.get_score(self.submission_uuid, {})

            if score:
                self.set_score(score)
                new_status = self.STATUS.done

        # Finally save our changes if the status has changed
        if self.status != new_status:
            self.status = new_status
            self.save()
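A minimal sketch of driving this state machine, given an `AssessmentWorkflow` instance `workflow` (the requirement numbers here are hypothetical):

    # Each submitter must grade 2 peers; each submission needs 3 peer assessments.
    requirements = {"peer": {"must_grade": 2, "must_be_graded_by": 3}}
    workflow.update_from_assessments(requirements)
    # workflow.status is now the first step the submitter hasn't completed,
    # or "waiting"/"done" once the submitter's side is finished.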
Code example #8
def create_workflow(submission_uuid, steps):
    """Begins a new assessment workflow.

    Create a new workflow that other assessments will record themselves against.

    Args:
        submission_uuid (str): The UUID for the submission that all our
            assessments will be evaluating.
        steps (list): List of steps that are part of the workflow, in the order
            that the user must complete them. Example: `["peer", "self"]`

    Returns:
        dict: Assessment workflow information with the following keys:
            `uuid` = UUID of this `AssessmentWorkflow`
            `submission_uuid` = UUID of submission this workflow tracks
            `status` = Active step; set from the first entry in `steps`
                when the workflow is created.
            `created` = created datetime
            `modified` = modified datetime (same as `created` for this method)
            `score` = should be None in the usual case, but could be a dict
                with keys "points_earned" and "points_possible" and int values.
                The latter will only happen on workflow creation if something
                else has already written the score for this submission (such as
                a professor manually entering it). There is no support for such
                a feature at present, but it may be added later.

    Raises:
        AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
            not exist or is of an invalid type.
        AssessmentWorkflowInternalError: Unexpected internal error, such as the
            submissions app not being available or a database configuration
            problem.

    """
    def sub_err_msg(specific_err_msg):
        return (u"Could not create assessment workflow: "
                u"retrieving submission {} failed: {}".format(
                    submission_uuid, specific_err_msg))

    try:
        submission_dict = sub_api.get_submission_and_student(submission_uuid)
    except sub_api.SubmissionNotFoundError:
        err_msg = sub_err_msg("submission not found")
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionRequestError as err:
        err_msg = sub_err_msg(err)
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionInternalError as err:
        logger.error(err)
        raise AssessmentWorkflowInternalError(
            u"retrieving submission {} failed with unknown error: {}".format(
                submission_uuid, err))

    # Raise an error if they specify a step we don't recognize...
    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
    if invalid_steps:
        raise AssessmentWorkflowRequestError(
            u"The following steps were not recognized: {}; Must be one of {}".
            format(invalid_steps, AssessmentWorkflow.STEPS))

    # We're not using a serializer to deserialize this because the only variable
    # we're getting from the outside is the submission_uuid, which is already
    # validated by this point.
    status = AssessmentWorkflow.STATUS.peer
    if steps[0] == "peer":
        try:
            peer_api.create_peer_workflow(submission_uuid)
        except PeerAssessmentError as err:
            err_msg = u"Could not create assessment workflow: {}".format(err)
            logger.exception(err_msg)
            raise AssessmentWorkflowInternalError(err_msg)
    elif steps[0] == "self":
        status = AssessmentWorkflow.STATUS.self
    elif steps[0] == "training":
        status = AssessmentWorkflow.STATUS.training

    try:
        workflow = AssessmentWorkflow.objects.create(
            submission_uuid=submission_uuid,
            status=status,
            course_id=submission_dict['student_item']['course_id'],
            item_id=submission_dict['student_item']['item_id'],
        )
        workflow_steps = [
            AssessmentWorkflowStep(workflow=workflow, name=step, order_num=i)
            for i, step in enumerate(steps)
        ]
        workflow.steps.add(*workflow_steps)
    except (DatabaseError, sub_api.SubmissionError) as err:
        err_msg = u"Could not create assessment workflow: {}".format(err)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg)

    return AssessmentWorkflowSerializer(workflow).data
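A hedged usage sketch of `create_workflow`; the submission setup mirrors the test examples above, and the inspected keys come from the docstring:

    submission = sub_api.create_submission(STUDENT_ITEM, {'text': "An answer"})
    workflow = workflow_api.create_workflow(submission["uuid"], ["peer", "self"])
    # `status` starts at the first step ("peer" here); `score` stays None
    # until the workflow reaches "done".
    assert workflow["status"] == "peer"
    assert workflow["score"] is None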
Code example #9
File: test_peer.py  Project: YoshidaKS/edx-ora2
    def test_peer_workflow_integrity_error(self):
        tim_sub, __ = self._create_student_and_submission("Tim", "Tim's answer")
        with patch.object(PeerWorkflow.objects, "get_or_create") as mock_peer:
            mock_peer.side_effect = IntegrityError("Oh no!")
            # This should not raise an exception
            peer_api.create_peer_workflow(tim_sub["uuid"])
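The test above depends on `create_peer_workflow` treating an `IntegrityError` from `get_or_create` as benign: a concurrent request has already created the row, so the call is effectively idempotent. A simplified sketch of the pattern (field names are illustrative, not the exact edx-ora2 implementation):

    try:
        PeerWorkflow.objects.get_or_create(
            student_id=student_id,
            item_id=item_id,
            submission_uuid=submission_uuid,
        )
    except IntegrityError:
        # Another request created this workflow between our SELECT and INSERT;
        # it exists now, which is all we needed.
        pass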
Code example #10
File: api.py  Project: gradyward/edx-ora2
def create_workflow(submission_uuid, steps):
    """Begins a new assessment workflow.

    Create a new workflow that other assessments will record themselves against.

    Args:
        submission_uuid (str): The UUID for the submission that all our
            assessments will be evaluating.
        steps (list): List of steps that are part of the workflow, in the order
            that the user must complete them. Example: `["peer", "self"]`

    Returns:
        dict: Assessment workflow information with the following keys:
            `uuid` = UUID of this `AssessmentWorkflow`
            `submission_uuid` = UUID of submission this workflow tracks
            `status` = Active step; set from the first entry in `steps`
                when the workflow is created.
            `created` = created datetime
            `modified` = modified datetime (same as `created` for this method)
            `score` = should be None in the usual case, but could be a dict
                with keys "points_earned" and "points_possible" and int values.
                The latter will only happen on workflow creation if something
                else has already written the score for this submission (such as
                a professor manually entering it). There is no support for such
                a feature at present, but it may be added later.

    Raises:
        AssessmentWorkflowRequestError: If the `submission_uuid` passed in does
            not exist or is of an invalid type.
        AssessmentWorkflowInternalError: Unexpected internal error, such as the
            submissions app not being available or a database configuration
            problem.

    """
    def sub_err_msg(specific_err_msg):
        return (
            u"Could not create assessment workflow: "
            u"retrieving submission {} failed: {}"
            .format(submission_uuid, specific_err_msg)
        )

    try:
        submission_dict = sub_api.get_submission_and_student(submission_uuid)
    except sub_api.SubmissionNotFoundError:
        err_msg = sub_err_msg("submission not found")
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionRequestError as err:
        err_msg = sub_err_msg(err)
        logger.error(err_msg)
        raise AssessmentWorkflowRequestError(err_msg)
    except sub_api.SubmissionInternalError as err:
        logger.error(err)
        raise AssessmentWorkflowInternalError(
            u"retrieving submission {} failed with unknown error: {}"
            .format(submission_uuid, err)
        )

    # Raise an error if they specify a step we don't recognize...
    invalid_steps = set(steps) - set(AssessmentWorkflow.STEPS)
    if invalid_steps:
        raise AssessmentWorkflowRequestError(
            u"The following steps were not recognized: {}; Must be one of {}".format(
                invalid_steps, AssessmentWorkflow.STEPS
            )
        )

    # We're not using a serializer to deserialize this because the only variable
    # we're getting from the outside is the submission_uuid, which is already
    # validated by this point.
    status = AssessmentWorkflow.STATUS.peer
    if steps[0] == "peer":
        try:
            peer_api.create_peer_workflow(submission_uuid)
        except PeerAssessmentError as err:
            err_msg = u"Could not create assessment workflow: {}".format(err)
            logger.exception(err_msg)
            raise AssessmentWorkflowInternalError(err_msg)
    elif steps[0] == "self":
        status = AssessmentWorkflow.STATUS.self

    try:
        workflow = AssessmentWorkflow.objects.create(
            submission_uuid=submission_uuid,
            status=status,
            course_id=submission_dict['student_item']['course_id'],
            item_id=submission_dict['student_item']['item_id'],
        )
        workflow_steps = [
            AssessmentWorkflowStep(
                workflow=workflow, name=step, order_num=i
            )
            for i, step in enumerate(steps)
        ]
        workflow.steps.add(*workflow_steps)
    except (
        DatabaseError,
        sub_api.SubmissionError
    ) as err:
        err_msg = u"Could not create assessment workflow: {}".format(err)
        logger.exception(err_msg)
        raise AssessmentWorkflowInternalError(err_msg)

    return AssessmentWorkflowSerializer(workflow).data
Code example #11
File: models.py  Project: YoshidaKS/edx-ora2
    def update_from_assessments(self, assessment_requirements):
        """Query self and peer APIs and change our status if appropriate.

        If the status is done, we do nothing. Once something is done, we never
        move back to any other status.

        By default, an `AssessmentWorkflow` starts with status `peer`.

        If the peer API says that our submitter's requirements are met -- that
        the submitter of the submission we're tracking has assessed the required
        number of other submissions -- then the status will move to `self`.

        If the self API says that the person who created the submission we're
        tracking has assessed themselves, then we move to `waiting`.

        If we're in the `waiting` status, and the peer API says it can score
        this submission (meaning other students have created enough assessments
        of it), then we record the score in the submissions API and move our
        `status` to `done`.

        Args:
            assessment_requirements (dict): Dictionary that currently looks like:
                `{"peer": {"must_grade": <int>, "must_be_graded_by": <int>}}`
                `must_grade` is the number of assessments a student must complete.
                `must_be_graded_by` is the number of assessments a submission must
                receive to be scored. `must_grade` should be greater than
                `must_be_graded_by` to ensure that everyone will get scored.
                The intention is to eventually pass in more
                assessment-sequence-specific requirements in this dict.

        """
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api

        # If we're done, we're done -- it doesn't matter if requirements have
        # changed because we've already written a score.
        if self.status == self.STATUS.done:
            return

        # Update our AssessmentWorkflowStep models with the latest from our APIs
        steps = self._get_steps()

        # Go through each step and update its status.
        for step in steps:
            step.update(self.submission_uuid, assessment_requirements)

        # Fetch name of the first step that the submitter hasn't yet completed.
        new_status = next(
            (step.name for step in steps if step.submitter_completed_at is None),
            self.STATUS.waiting  # if nothing's left to complete, we're waiting
        )

        # If the submitter is beginning peer assessment, add them to the queue
        # by creating a new peer workflow
        if new_status == "peer":
            peer_api.create_peer_workflow(self.submission_uuid)

        # If the submitter has done all they need to do, let's check to see if
        # all steps have been fully assessed (i.e. we can score it).
        if (new_status == self.STATUS.waiting and
            all(step.assessment_completed_at for step in steps)):

            # At this point, we're trying to give a score. We currently have a
            # very simple rule for this -- if it has a peer step, use that for
            # scoring. If not, use the self step. Later on, we may put more
            # interesting rules here.
            step_names = [step.name for step in steps]
            score = None
            if self.STATUS.peer in step_names:
                score = peer_api.get_score(
                    self.submission_uuid,
                    assessment_requirements[self.STATUS.peer]
                )
            elif self.STATUS.self in step_names:
                score = self_api.get_score(self.submission_uuid, {})

            if score:
                self.set_score(score)
                new_status = self.STATUS.done

        # Finally save our changes if the status has changed
        if self.status != new_status:
            self.status = new_status
            self.save()