Example #1
    def test_error_on_get_assessment(self, mock_filter):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 3)
        peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            ASSESSMENT_DICT,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
            MONDAY,
        )
        mock_filter.side_effect = DatabaseError("Bad things happened")
        peer_api.get_assessments(sub["uuid"])
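
The mock_filter argument above is injected by a patch decorator that sits outside the excerpt. A minimal sketch of the assumed wiring (the patch target is a guess; the source does not show it):

    from django.db import DatabaseError
    from mock import patch

    @patch.object(Assessment.objects, 'filter')  # assumed target behind mock_filter
    def test_error_on_get_assessment(self, mock_filter):
        ...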
Example #2
    def _submit_peer_assessment(self, xblock, student_id, scorer_id, assessment, expect_failure=False):
        """
        Create submissions for a student and scorer, then create a peer assessment
        from the scorer.

        Args:
            xblock (OpenAssessmentBlock)
            student_id (unicode): The ID of the student being assessed.
            scorer_id (unicode): The ID of the student creating the assessment.
            assessment (dict): Serialized assessment model.

        Kwargs:
            expect_failure (bool): If true, expect a failure response and return None

        Returns:
            tuple: The submission UUID and the peer assessment retrieved
                from the API, or None if a failure response was expected.

        """
        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = student_id
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = scorer_id
        another_submission = xblock.create_submission(another_student, self.SUBMISSION)

        # Pull the submission to assess
        peer_api.get_submission_to_assess(another_submission['uuid'], 3)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(assessment)
        resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')

        if expect_failure:
            self.assertFalse(resp['success'])
            return None
        else:
            self.assertTrue(resp['success'])

            # Retrieve the peer assessment
            retrieved_assessment = peer_api.get_assessments(submission['uuid'], scored_only=False)[0]
            return submission['uuid'], retrieved_assessment
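
A usage sketch for the helper above, assuming an ASSESSMENT fixture like the one referenced in Example #7 (names outside the excerpt are assumptions):

    # 'Sally' is assessed; 'Bob' submits his own response and then scores hers.
    submission_uuid, assessment = self._submit_peer_assessment(
        xblock, 'Sally', 'Bob', self.ASSESSMENT
    )
    self.assertEqual(assessment['scorer_id'], 'Bob')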
Example #3
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
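
A sketch of a hypothetical caller for the method above; render_assessment is an assumed rendering helper, not shown in the excerpt:

    # Render the grade-complete template with the context built above.
    path, context = self.render_grade_complete(workflow)
    return self.render_assessment(path, context)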
Example #4
    def test_get_assessments(self, assessment_dict):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob, 3)
        peer_api.create_assessment(
            sub["uuid"],
            bob["student_id"],
            assessment_dict,
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
        self.assertEqual(1, len(assessments))
Example #5
    def test_get_assessments(self, assessment_dict):
        self._create_student_and_submission("Tim", "Tim's answer")
        bob_sub, bob = self._create_student_and_submission("Bob", "Bob's answer")
        sub = peer_api.get_submission_to_assess(bob_sub['uuid'], 3)
        peer_api.create_assessment(
            bob_sub["uuid"],
            bob["student_id"],
            assessment_dict['options_selected'],
            assessment_dict['criterion_feedback'],
            assessment_dict['overall_feedback'],
            RUBRIC_DICT,
            REQUIRED_GRADED_BY,
        )
        assessments = peer_api.get_assessments(sub["uuid"], scored_only=False)
        self.assertEqual(1, len(assessments))
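
Examples #4 and #5 appear to target two revisions of the same create_assessment API. A side-by-side sketch of the two call shapes (the parameter names are inferred labels, not taken from the library):

    # Older form (Example #4): a single serialized assessment dict.
    peer_api.create_assessment(submission_uuid, scorer_id,
                               assessment_dict, rubric_dict, required_graded_by)

    # Newer form (Example #5): the dict is split into explicit arguments, and
    # the first argument is the scorer's own submission UUID.
    peer_api.create_assessment(scorer_submission_uuid, scorer_id,
                               options_selected, criterion_feedback,
                               overall_feedback, rubric_dict, required_graded_by)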
Example #6
File: views.py Project: mulby/edx-ora2
def get_evaluations_for_student_item(request, course_id, student_id, item_id):
    """Retrieve all evaluations associated with the given student item.

    Developer utility for accessing all the evaluations associated with a
    student item. The student item is specified by the unique combination of
    course, student, and item.

    Args:
        request (dict): The request.
        course_id (str): The course id for this student item.
        student_id (str): The student id for this student item.
        item_id (str): The item id for this student item.

    Returns:
        HttpResponse: The response object for this request. Renders a simple
            development page with all the evaluations related to the specified
            student item.

    """
    student_item_dict = dict(
        course_id=course_id,
        student_id=student_id,
        item_id=item_id,
    )
    context = dict(**student_item_dict)
    try:
        submissions = get_submissions(student_item_dict)
        evaluations = []
        for submission in submissions:
            submission_evaluations = get_assessments(submission["uuid"])
            for evaluation in submission_evaluations:
                evaluation["submission_uuid"] = submission["uuid"]
                evaluations.append(evaluation)

        context["evaluations"] = evaluations

    except SubmissionRequestError:
        context["error"] = "The specified student item was not found."

    return render_to_response('evaluations.html', context)
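
A sketch of how this development view might be routed (Django URLconf; the pattern and module layout are assumptions, not from the source):

    # urls.py -- hypothetical route exposing the debug view above
    from django.conf.urls import url
    from . import views

    urlpatterns = [
        url(
            r'^evaluations/(?P<course_id>[^/]+)/(?P<student_id>[^/]+)/(?P<item_id>[^/]+)/$',
            views.get_evaluations_for_student_item,
        ),
    ]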
Example #7
    def test_peer_assess_handler(self, xblock):

        # Create a submission for this problem from another user
        student_item = xblock.get_student_item_dict()
        student_item['student_id'] = 'Sally'

        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Create a submission for the scorer (required before assessing another student)
        another_student = copy.deepcopy(student_item)
        another_student['student_id'] = "Bob"
        xblock.create_submission(another_student, self.SUBMISSION)
        peer_api.get_submission_to_assess(another_student, 3)

        # Submit an assessment and expect a successful response
        assessment = copy.deepcopy(self.ASSESSMENT)
        assessment['submission_uuid'] = submission['uuid']
        resp = self.request(xblock, 'peer_assess', json.dumps(assessment), response_format='json')
        self.assertTrue(resp['success'])

        # Retrieve the assessment and check that it matches what we sent
        actual = peer_api.get_assessments(submission['uuid'], scored_only=False)
        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]['submission_uuid'], assessment['submission_uuid'])
        self.assertEqual(actual[0]['points_earned'], 5)
        self.assertEqual(actual[0]['points_possible'], 6)
        self.assertEqual(actual[0]['scorer_id'], 'Bob')
        self.assertEqual(actual[0]['score_type'], 'PE')

        self.assertEqual(len(actual[0]['parts']), 2)
        parts = sorted(actual[0]['parts'])
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')

        self.assertEqual(actual[0]['feedback'], assessment['feedback'])
Example #8
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        context = {
            'score': workflow['score'],
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #9
    def test_create_submissions(self):

        # Create some submissions
        cmd = create_oa_submissions.Command()
        cmd.handle("test_course", "test_item", "5")

        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)

            answer_dict = submissions[0]['answer']
            self.assertIsInstance(answer_dict['text'], basestring)
            self.assertGreater(len(answer_dict['text']), 0)

            # Check that peer assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'],
                                                   scored_only=False)

            # Verify that the assessments exist and have content
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            assessment = self_api.get_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
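
The handle() call above is the in-process equivalent of running the management command; the assumed command-line form (module name taken from the excerpt, invocation itself not shown in the source):

    # python manage.py create_oa_submissions test_course test_item 5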
Example #10
    def test_get_assessments_db_error(self, mock_filter):
        mock_filter.side_effect = DatabaseError("Bad things happened")
        tim, _ = self._create_student_and_submission("Tim", "Tim's answer")
        peer_api.get_assessments(tim["uuid"])
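
The excerpt does not show how the expected failure is asserted (presumably a decorator outside the excerpt); a hedged equivalent using an explicit assertion, where the exception type is an assumption:

    with self.assertRaises(peer_api.PeerAssessmentInternalError):
        peer_api.get_assessments(tim["uuid"])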