Example 1
    def test_get_top_submissions_from_cache(self):
        student_1 = api.create_submission(STUDENT_ITEM, "Hello World")
        student_2 = api.create_submission(STUDENT_ITEM, "Hello World")
        student_3 = api.create_submission(STUDENT_ITEM, "Hello World")

        api.set_score(student_1['uuid'], 8, 10)
        api.set_score(student_2['uuid'], 4, 10)
        api.set_score(student_3['uuid'], 2, 10)

        # The first call should hit the database
        with self.assertNumQueries(1):
            scores = api.get_top_submissions(
                STUDENT_ITEM["course_id"],
                STUDENT_ITEM["item_id"],
                STUDENT_ITEM["item_type"], 2,
                use_cache=True,
                read_replica=False
            )
            self.assertEqual(scores, [
                { "content": "Hello World", "score": 8 },
                { "content": "Hello World", "score": 4 },
            ])

        # The second call should use the cache
        with self.assertNumQueries(0):
            cached_scores = api.get_top_submissions(
                STUDENT_ITEM["course_id"],
                STUDENT_ITEM["item_id"],
                STUDENT_ITEM["item_type"], 2,
                use_cache=True,
                read_replica=False
            )
            self.assertEqual(cached_scores, scores)
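
Query-count assertions like these are sensitive to cache state left over from other tests. A minimal sketch (assuming Django's default cache backend; the suite's actual configuration is not shown here) of resetting the cache before each test:

    from django.core.cache import cache

    def setUp(self):
        # Hypothetical setUp for this test case: start from an empty cache so
        # the first get_top_submissions() call above really hits the database.
        super(TestTopSubmissionsCache, self).setUp()  # class name is hypothetical
        cache.clear()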
Example 2
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        criterion_feedback = {
            "Ideas": "Dear diary: Lots of creativity from my dream journal last night at 2 AM,",
            "Content": "Not as insightful as I had thought in the wee hours of the morning!"
        }

        overall_feedback = "I think I should tell more people about how important worms are for the ecosystem."

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            criterion_feedback,
            overall_feedback,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example 3
 def upload_assignment(self, request, suffix=''):
     # pylint: disable=unused-argument
     """
     Save a student's submission file.
     """
     require(self.upload_allowed())
     upload = request.params['assignment']
     sha1 = _get_sha1(upload.file)
     upload.file.name = upload.file.name.replace(',', '_')
     answer = {
         "sha1": sha1,
         "filename": upload.file.name,
         "mimetype": mimetypes.guess_type(upload.file.name)[0],
     }
     student_id = self.student_submission_id()
     submissions_api.create_submission(student_id, answer)
     path = self._file_storage_path(sha1, upload.file.name)
     if not default_storage.exists(path):
         default_storage.save(path, File(upload.file))
         
     # If the student already has a score, set need_recheck to True
     if self.score is not None:
         self.need_recheck = True
     
     return Response(json_body=self.student_state())
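
The `_get_sha1` helper is referenced above but not shown; a plausible sketch (an assumption, not the project's actual implementation) hashes the upload in chunks and rewinds the file so it can still be saved afterwards:

    import hashlib

    def _get_sha1(file_obj):
        # Hypothetical helper: hash the uploaded file in chunks.
        sha1 = hashlib.sha1()
        for chunk in iter(lambda: file_obj.read(8192), b''):
            sha1.update(chunk)
        file_obj.seek(0)  # rewind so default_storage.save() reads the full file
        return sha1.hexdigest()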
Example 4
    def test_staff_debug_student_info_peer_only(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        self.assertEquals("Bob Answer", context['submission']['answer']['text'])
        self.assertIsNone(context['self_assessment'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example 5
 def upload_assignment(self, request, suffix=''):
     # pylint: disable=unused-argument, protected-access
     """
     Save a student's submission file.
     """
     require(self.upload_allowed())
     user = self.get_real_user()
     require(user)
     upload = request.params['assignment']
     sha1 = get_sha1(upload.file)
     if self.file_size_over_limit(upload.file):
         raise JsonHandlerError(
             413, 'Unable to upload file. Max size limit is {size}'.format(
                 size=self.student_upload_max_size()
             )
         )
     # Uploading an assignment represents a change of state with this user in this block,
     # so we need to ensure that the user has a StudentModule record, which represents that state.
     self.get_or_create_student_module(user)
     answer = {
         "sha1": sha1,
         "filename": upload.file.name,
         "mimetype": mimetypes.guess_type(upload.file.name)[0],
         "finalized": False
     }
     student_item_dict = self.get_student_item_dict()
     submissions_api.create_submission(student_item_dict, answer)
     path = self.file_storage_path(sha1, upload.file.name)
     log.info("Saving file: %s at path: %s for user: %s", upload.file.name, path, user.username)
     if default_storage.exists(path):
         # save latest submission
         default_storage.delete(path)
     default_storage.save(path, File(upload.file))
     return Response(json_body=self.student_state())
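
`get_or_create_student_module` is called above but not defined in the snippet; a sketch of what it plausibly does (field names assumed from edx-platform's StudentModule model, not confirmed by this source):

    def get_or_create_student_module(self, user):
        # Hypothetical sketch: make sure a StudentModule row exists to hold
        # this user's state for this block.
        student_module, created = StudentModule.objects.get_or_create(
            student=user,
            course_id=self.course_id,
            module_state_key=self.location,
            defaults={'state': json.dumps({}), 'module_type': self.category},
        )
        return student_module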
Example 6
    def test_get_scores(self):
        student_item = copy.deepcopy(STUDENT_ITEM)
        student_item["course_id"] = "get_scores_course"

        student_item["item_id"] = "i4x://a/b/c/s1"
        s1 = api.create_submission(student_item, "Hello World")

        student_item["item_id"] = "i4x://a/b/c/s2"
        s2 = api.create_submission(student_item, "Hello World")

        student_item["item_id"] = "i4x://a/b/c/s3"
        s3 = api.create_submission(student_item, "Hello World")

        api.set_score(s1['uuid'], 3, 5)
        api.set_score(s1['uuid'], 4, 5)
        api.set_score(s1['uuid'], 2, 5)  # Should overwrite the previous scores

        api.set_score(s2['uuid'], 0, 10)
        api.set_score(s3['uuid'], 4, 4)

        # Getting the scores for a user should never take more than one query
        with self.assertNumQueries(1):
            scores = api.get_scores(
                student_item["course_id"], student_item["student_id"]
            )
            self.assertEqual(
                scores,
                {
                    u"i4x://a/b/c/s1": (2, 5),
                    u"i4x://a/b/c/s2": (0, 10),
                    u"i4x://a/b/c/s3": (4, 4),
                }
            )
Example 7
    def test_reset_different_student_item(self, changed):
        # Create submissions for two students
        submission = sub_api.create_submission(self.STUDENT_ITEM, "test answer")
        sub_api.set_score(submission["uuid"], 1, 2)

        other_student = copy.copy(self.STUDENT_ITEM)
        other_student.update(changed)
        submission = sub_api.create_submission(other_student, "other test answer")
        sub_api.set_score(submission["uuid"], 3, 4)

        # Reset the score for the first student
        sub_api.reset_score(
            self.STUDENT_ITEM["student_id"], self.STUDENT_ITEM["course_id"], self.STUDENT_ITEM["item_id"]
        )

        # The first student's scores should be reset
        self.assertIs(sub_api.get_score(self.STUDENT_ITEM), None)
        scores = sub_api.get_scores(self.STUDENT_ITEM["course_id"], self.STUDENT_ITEM["student_id"])
        self.assertNotIn(self.STUDENT_ITEM["item_id"], scores)

        # But the second student should still have a score
        score = sub_api.get_score(other_student)
        self.assertEqual(score["points_earned"], 3)
        self.assertEqual(score["points_possible"], 4)
        scores = sub_api.get_scores(other_student["course_id"], other_student["student_id"])
        self.assertIn(other_student["item_id"], scores)
Example 8
    def test_get_submissions(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
        submissions = api.get_submissions(STUDENT_ITEM)

        self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
        self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
Example 9
    def test_get_top_submissions(self):
        student_1 = sub_api.create_submission(self.STUDENT_ITEM, "Hello World")
        student_2 = sub_api.create_submission(self.STUDENT_ITEM, "Hello World")
        student_3 = sub_api.create_submission(self.STUDENT_ITEM, "Hello World")

        sub_api.set_score(student_1['uuid'], 8, 10)
        sub_api.set_score(student_2['uuid'], 4, 10)
        sub_api.set_score(student_3['uuid'], 2, 10)

        # Use the read-replica
        with self.assertNumQueries(0):
            top_scores = sub_api.get_top_submissions(
                self.STUDENT_ITEM['course_id'],
                self.STUDENT_ITEM['item_id'],
                self.STUDENT_ITEM['item_type'], 2,
                read_replica=True
            )
            self.assertEqual(
                top_scores,
                [
                    {
                        'content': "Hello World",
                        'score': 8
                    },
                    {
                        'content': "Hello World",
                        'score': 4
                    },
                ]
            )
Example 10
    def test_staff_debug_student_info_image_submission(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create an image submission for Bob
        sub_api.create_submission(bob_item, {
            'text': "Bob Answer",
            'file_key': "test_key"
        })

        # Mock the file upload API to avoid hitting S3
        with patch("openassessment.xblock.staff_info_mixin.file_api") as file_api:
            file_api.get_download_url.return_value = "http://www.example.com/image.jpeg"
            __, context = xblock.get_student_info_path_and_context("Bob")

            # Check that the right file key was passed to generate the download url
            file_api.get_download_url.assert_called_with("test_key")

            # Check the context passed to the template
            self.assertEquals('http://www.example.com/image.jpeg', context['submission']['image_url'])

            # Check the fully rendered template
            payload = urllib.urlencode({"student_username": "******"})
            resp = self.request(xblock, "render_student_info", payload)
            self.assertIn("http://www.example.com/image.jpeg", resp)
Example 11
    def test_staff_debug_student_info_file_download_url_error(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create an image submission for Bob
        sub_api.create_submission(bob_item, {
            'text': "Bob Answer",
            'file_key': "test_key"
        })

        # Mock the file upload API to simulate an error
        with patch("openassessment.xblock.staff_info_mixin.file_api.get_download_url") as file_api_call:
            file_api_call.side_effect = FileUploadInternalError("Error!")
            __, context = xblock.get_student_info_path_and_context("Bob")

            # Expect that the page still renders, but without the image url
            self.assertIn('submission', context)
            self.assertNotIn('image_url', context['submission'])

            # Check the fully rendered template
            payload = urllib.urlencode({"student_username": "******"})
            resp = self.request(xblock, "render_student_info", payload)
            self.assertIn("Bob Answer", resp)
Example 12
    def test_get_submissions(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
        submissions = api.get_submissions(STUDENT_ITEM)

        student_item = self._get_student_item(STUDENT_ITEM)
        self._assert_submission(submissions[1], ANSWER_ONE, student_item.pk, 1)
        self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 2)
Example 13
    def test_two_students(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)

        submissions = api.get_submissions(STUDENT_ITEM)
        self.assertEqual(1, len(submissions))
        self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)

        submissions = api.get_submissions(SECOND_STUDENT_ITEM)
        self.assertEqual(1, len(submissions))
        self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)
Example 14
    def test_get_scores(self):
        student_item = copy.deepcopy(STUDENT_ITEM)
        student_item["course_id"] = "get_scores_course"

        student_item["item_id"] = "i4x://a/b/c/s1"
        s1 = api.create_submission(student_item, "Hello World")

        student_item["item_id"] = "i4x://a/b/c/s2"
        s2 = api.create_submission(student_item, "Hello World")

        student_item["item_id"] = "i4x://a/b/c/s3"
        s3 = api.create_submission(student_item, "Hello World")

        api.set_score(s1['uuid'], 3, 5)
        api.set_score(s1['uuid'], 4, 5)
        api.set_score(s1['uuid'], 2, 5)  # Should overwrite the previous scores

        api.set_score(s2['uuid'], 0, 10)
        api.set_score(s3['uuid'], 4, 4)

        # Getting the scores for a user should never take more than one query
        with self.assertNumQueries(1):
            scores = api.get_scores(
                student_item["course_id"], student_item["student_id"]
            )
        self.assertEqual(
            scores,
            {
                u'i4x://a/b/c/s1': {
                    'created_at': now(),
                    'points_earned': 2,
                    'points_possible': 5,
                    'student_item': 1,
                    'submission': 1,
                    'submission_uuid': s1['uuid'],
                },
                u'i4x://a/b/c/s2': {
                    'created_at': now(),
                    'points_earned': 0,
                    'points_possible': 10,
                    'student_item': 2,
                    'submission': 2,
                    'submission_uuid': s2['uuid'],
                },
                u'i4x://a/b/c/s3': {
                    'created_at': now(),
                    'points_earned': 4,
                    'points_possible': 4,
                    'student_item': 3,
                    'submission': 3,
                    'submission_uuid': s3['uuid'],
                },
            }
        )
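
The expected values above compare `created_at` against `now()` at assertion time, which only holds if the clock is frozen for the whole test. A minimal sketch using the freezegun library (an assumption; the suite may pin time differently), reusing the dict-returning `get_scores` shown above:

    from freezegun import freeze_time

    @freeze_time("2014-04-01")
    def test_get_scores_deterministic(self):
        # Hypothetical companion test: with time frozen, the created_at stamp
        # recorded when the score is saved equals now() at assertion time.
        submission = api.create_submission(STUDENT_ITEM, "Hello World")
        api.set_score(submission['uuid'], 2, 5)
        scores = api.get_scores(STUDENT_ITEM["course_id"], STUDENT_ITEM["student_id"])
        self.assertEqual(scores[STUDENT_ITEM["item_id"]]['created_at'], now())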
Example 15
    def test_get_latest_submission(self):
        past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
        more_recent_date = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
        api.create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)

        # Test a limit on the submissions
        submissions = api.get_submissions(STUDENT_ITEM, 1)
        self.assertEqual(1, len(submissions))
        self.assertEqual(ANSWER_ONE, submissions[0]["answer"])
        self.assertEqual(more_recent_date.year,
                         submissions[0]["submitted_at"].year)
Example 16
 def handle_answer(self, data, suffix=''):
     the_post_student_answer = data.get('student_answer')
     self.student_answer = the_post_student_answer
     # Save the student's submission
     student_id = self.student_submission_id()
     answer = {
         "sha1": "None",
         "filename": "None",
         "mimetype": "json",
         }
     submissions_api.create_submission(student_id, answer)
     return {"student_answer":self.student_answer}
Example 17
    def test_staff_debug_student_info_full_workflow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        # Commonly chosen options for assessments
        options_selected = {
            "Ideas": "Good",
            "Content": "Poor",
        }

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id

        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer', 'self'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        tim_sub = sub_api.create_submission(tim_item, "Tim Answer")
        peer_api.on_start(tim_sub["uuid"])
        workflow_api.create_workflow(tim_sub["uuid"], ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            options_selected, dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            options_selected,
            {'criteria': xblock.rubric_criteria},
        )

        # Now Bob should be fully populated in the student info view.
        request = namedtuple('Request', 'params')
        request.params = {"student_id": "Bob"}
        # Verify that we can render without error
        resp = xblock.render_student_info(request)
        self.assertIn("bob answer", resp.body.lower())
Example 18
 def upload_assignment(self, request, suffix=''):
     require(self.upload_allowed())
     upload = request.params['assignment']
     sha1 = _get_sha1(upload.file)
     answer = {
         "sha1": sha1,
         "filename": upload.file.name,
         "mimetype": mimetypes.guess_type(upload.file.name)[0],
     }
     student_id = self.student_submission_id()
     submissions_api.create_submission(student_id, answer)
     path = self._file_storage_path(sha1, upload.file.name)
     if not default_storage.exists(path):
         default_storage.save(path, File(upload.file))
     return Response(json_body=self.student_state())
Example 19
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
            self.OPTIONS_SELECTED, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertItemsEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'')
        self.assertEqual(assessment['score_type'], u'SE')
Example 20
    def test_cancel_submission_full_flow(self, xblock):
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        incorrect_submission_uuid = 'abc'
        params = {"submission_uuid": incorrect_submission_uuid, "comments": "Inappropriate language."}
        # An unknown submission UUID should raise a workflow-not-found error.
        resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
        self.assertIn("Error finding workflow", resp['msg'])
        self.assertEqual(False, resp['success'])

        # Verify that we can render without error
        params = {"submission_uuid": submission["uuid"], "comments": "Inappropriate language."}
        resp = self.request(xblock, 'cancel_submission', json.dumps(params), response_format='json')
        self.assertIn("The student submission has been removed from peer", resp['msg'])
        self.assertEqual(True, resp['success'])
Example 21
    def test_submissions_api_overrides_scores(self):
        """
        Check that a score set through the submissions API overrides the grade read from StudentModule.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_course_grade().letter_grade, 'B')

        # But now, set the score with the submissions API and watch
        # as it overrides the score read from StudentModule and our
        # student gets an A instead.
        self._stop_signal_patch()
        student_item = {
            'student_id': anonymous_id_for_user(self.student_user, self.course.id),
            'course_id': unicode(self.course.id),
            'item_id': unicode(self.problem_location('p3')),
            'item_type': 'problem'
        }
        submission = submissions_api.create_submission(student_item, 'any answer')
        submissions_api.set_score(submission['uuid'], 1, 1)
        self.check_grade_percent(1.0)
        self.assertEqual(self.get_course_grade().letter_grade, 'A')
Example 22
    def _create_submissions_and_scores(
        self, xblock, submissions_and_scores,
        submission_key="text", points_possible=10
    ):
        """
        Create submissions and scores that should be displayed by the leaderboard.

        Args:
            xblock (OpenAssessmentBlock)
            submissions_and_scores (list): List of `(submission, score)` tuples, where
                `submission` is the essay text (string) and `score` is the integer
                number of points earned.

        Keyword Args:
            points_possible (int): The total number of points possible for this problem
            submission_key (string): The key to use in the submission dict.  If None, use
                the submission value itself instead of embedding it in a dictionary.
        """
        for num, (submission, points_earned) in enumerate(submissions_and_scores):
            # Assign a unique student ID
            # These aren't displayed by the leaderboard, so we can set them
            # to anything without affecting the test.
            student_item = xblock.get_student_item_dict()
            # Append a random number to the student_id to make it unique.
            student_item['student_id'] = "student {num} {num2}".format(num=num, num2=randint(2, 1000))
            if submission_key is not None:
                answer = {submission_key: submission}
            else:
                answer = submission

            # Create a submission
            sub = sub_api.create_submission(student_item, answer)

            # Create a score for the submission
            sub_api.set_score(sub['uuid'], points_earned, points_possible)
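
A hypothetical call from a leaderboard test, pairing essay texts with earned points:

        # Creates three scored submissions out of 10 possible points each;
        # a leaderboard configured for the top two should show 9 and 7.
        self._create_submissions_and_scores(
            xblock,
            [("first essay", 9), ("second essay", 7), ("third essay", 2)],
        )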
Example 23
    def test_cancelled_submission_peer_assessment_render_path(self, xblock):
        # Test that the peer assessment path is oa_peer_cancelled.html for a cancelled submission.
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(bob_item, {'text': "Bob Answer"})
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        workflow_api.cancel_workflow(
            submission_uuid=submission['uuid'],
            comments="Inappropriate language",
            cancelled_by_id=bob_item['student_id'],
            assessment_requirements=requirements
        )

        xblock.submission_uuid = submission["uuid"]
        path, context = xblock.peer_path_and_context(False)
        self.assertEquals("openassessmentblock/peer/oa_peer_cancelled.html", path)
Example 24
    def test_create_workflow(self, data):
        first_step = data["steps"][0] if data["steps"] else "peer"
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], first_step)

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], data["requirements"]
        )
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)

        # Test that the PeerWorkflow is created only when peer
        # is a step in the workflow.
        if "peer" == first_step:
            peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
            self.assertIsNotNone(peer_workflow)
        else:
            peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
            self.assertFalse(peer_workflows)
Example 25
    def test_delete_student_state_resets_scores(self):
        item_id = 'i4x://MITx/999/openassessment/b3dce2586c9c4876b73e7f390e42ef8f'

        # Create a student module for the user
        StudentModule.objects.create(
            student=self.student, course_id=self.course.id, module_state_key=item_id, state=json.dumps({})
        )

        # Create a submission and score for the student using the submissions API
        student_item = {
            'student_id': anonymous_id_for_user(self.student, self.course.id),
            'course_id': self.course.id,
            'item_id': item_id,
            'item_type': 'openassessment'
        }
        submission = sub_api.create_submission(student_item, 'test answer')
        sub_api.set_score(submission['uuid'], 1, 2)

        # Delete student state using the instructor dash
        url = reverse('instructor_dashboard_legacy', kwargs={'course_id': self.course.id})
        response = self.client.post(url, {
            'action': 'Delete student state for module',
            'unique_student_identifier': self.student.email,
            'problem_for_student': 'openassessment/b3dce2586c9c4876b73e7f390e42ef8f',
        })

        self.assertEqual(response.status_code, 200)

        # Verify that the student's scores have been reset in the submissions API
        score = sub_api.get_score(student_item)
        self.assertIs(score, None)
Example 26
    def _create_workflow_with_status(self, student_id, course_id, item_id, status, answer="answer"):
        """
        Create a submission and workflow with a given status.

        Args:
            student_id (unicode): Student ID for the submission.
            course_id (unicode): Course ID for the submission.
            item_id (unicode): Item ID for the submission
            status (unicode): One of acceptable status values (e.g. "peer", "self", "waiting", "done")

        Kwargs:
            answer (unicode): Submission answer.

        Returns:
            None
        """
        submission = sub_api.create_submission({
            "student_id": student_id,
            "course_id": course_id,
            "item_id": item_id,
            "item_type": "openassessment",
        }, answer)

        workflow = workflow_api.create_workflow(submission['uuid'])
        workflow_model = AssessmentWorkflow.objects.get(uuid=workflow['uuid'])
        workflow_model.status = status
        workflow_model.save()
Example 27
    def create_submission(self, student_item_dict, student_sub):

        # Store the student's response text in a JSON-encodable dict
        # so that later we can add additional response fields.
        student_sub_dict = {'text': student_sub}

        if self.allow_file_upload:
            student_sub_dict['file_key'] = self._get_student_item_key()
        submission = api.create_submission(student_item_dict, student_sub_dict)
        self.create_workflow(submission["uuid"])
        self.submission_uuid = submission["uuid"]

        # Emit analytics event...
        self.runtime.publish(
            self,
            "openassessmentblock.create_submission",
            {
                "submission_uuid": submission["uuid"],
                "attempt_number": submission["attempt_number"],
                "created_at": submission["created_at"],
                "submitted_at": submission["submitted_at"],
                "answer": submission["answer"],
            }
        )

        return submission
Example 28
 def _create_student_and_submission(student, answer, date=None):
     new_student_item = STUDENT_ITEM.copy()
     new_student_item["student_id"] = student
     submission = sub_api.create_submission(new_student_item, answer, date)
     peer_api.create_peer_workflow(submission["uuid"])
     workflow_api.create_workflow(submission["uuid"])
     return submission, new_student_item
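
Hypothetical usage, seeding several students for a peer-assessment scenario:

     # Each call returns the submission dict and the student item used to create it.
     for name in ["Alice", "Bob", "Carol"]:
         submission, item = _create_student_and_submission(name, "Answer from " + name)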
Example 29
    def test_delete_submission_scores(self):
        user = UserFactory()
        problem_location = self.course_key.make_usage_key('dummy', 'module')

        # Create a student module for the user
        StudentModule.objects.create(
            student=user,
            course_id=self.course_key,
            module_state_key=problem_location,
            state=json.dumps({})
        )

        # Create a submission and score for the student using the submissions API
        student_item = {
            'student_id': anonymous_id_for_user(user, self.course_key),
            'course_id': self.course_key.to_deprecated_string(),
            'item_id': problem_location.to_deprecated_string(),
            'item_type': 'openassessment'
        }
        submission = sub_api.create_submission(student_item, 'test answer')
        sub_api.set_score(submission['uuid'], 1, 2)

        # Delete student state using the instructor dash
        reset_student_attempts(
            self.course_key, user, problem_location,
            delete_module=True
        )

        # Verify that the student's scores have been reset in the submissions API
        score = sub_api.get_score(student_item)
        self.assertIs(score, None)
Example 30
    def test_cancel_the_assessment_workflow_does_not_exist(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check if workflow is cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Cancelling an unknown submission UUID raises AssessmentWorkflowError.
        with self.assertRaises(workflow_api.AssessmentWorkflowError):
            workflow_api.cancel_workflow(
                submission_uuid="1234567098789",
                comments="Inappropriate language",
                cancelled_by_id=ITEM_2['student_id'],
                assessment_requirements=requirements
            )

        # Status for workflow should not be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertNotEqual(workflow.status, 'cancelled')
Example 31
    def test_automatic_grade_error(self):
        # Create some submissions which will not succeed. No classifiers yet exist.
        for _ in range(0, 10):
            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
            ai_api.on_init(submission['uuid'],
                           rubric=RUBRIC,
                           algorithm_id=ALGORITHM_ID)

        # Check that there are unresolved grading workflows
        self._assert_complete(training_done=True, grading_done=False)

        patched_method = 'openassessment.assessment.worker.training.reschedule_grading_tasks.apply_async'
        with mock.patch(patched_method) as mocked_reschedule_grading:
            mocked_reschedule_grading.side_effect = AIGradingInternalError(
                "Kablewey.")
            with self.assertRaises(AIGradingInternalError):
                ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID,
                                         ALGORITHM_ID)
Example 32
    def test_create_assessment_invalid_option(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Mutate the selected option so the value does not match an available option
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        options['clarity'] = 'invalid option'

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(submission['uuid'],
                              u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
                              options,
                              self.CRITERION_FEEDBACK,
                              self.OVERALL_FEEDBACK,
                              self.RUBRIC,
                              scored_at=datetime.datetime(
                                  2014, 4, 1).replace(tzinfo=pytz.utc))
Example 33
    def test_create_assessment_missing_criterion(self):
        # Create a submission
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Delete one of the criteria present in the rubric
        options = copy.deepcopy(self.OPTIONS_SELECTED)
        del options['clarity']

        # Attempt to create a self-assessment with options that do not match the rubric
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(submission['uuid'],
                              u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
                              options,
                              self.CRITERION_FEEDBACK,
                              self.OVERALL_FEEDBACK,
                              self.RUBRIC,
                              scored_at=datetime.datetime(
                                  2014, 4, 1).replace(tzinfo=pytz.utc))
Example 34
    def test_reset_with_one_score(self):
        # Create a submission for the student and score it
        submission = sub_api.create_submission(self.STUDENT_ITEM,
                                               'test answer')
        sub_api.set_score(submission['uuid'], 1, 2)

        # Reset scores
        sub_api.reset_score(
            self.STUDENT_ITEM['student_id'],
            self.STUDENT_ITEM['course_id'],
            self.STUDENT_ITEM['item_id'],
        )

        # Expect that no scores are available for the student
        self.assertIs(sub_api.get_score(self.STUDENT_ITEM), None)
        scores = sub_api.get_scores(self.STUDENT_ITEM['course_id'],
                                    self.STUDENT_ITEM['student_id'])
        self.assertEqual(len(scores), 0)
Example 35
    def test_create_workflow_integrity_error(self, mock_create, mock_get):
        # Simulate a race condition in which someone creates a workflow
        # after we check if it exists.  This will violate the database uniqueness
        # constraints, so we need to handle this case gracefully.
        mock_create.side_effect = IntegrityError

        # The first time we check, we should see that no workflow exists.
        # The second time, we should get the workflow created by someone else
        mock_workflow = mock.MagicMock(StudentTrainingWorkflow)
        mock_get.side_effect = [
            StudentTrainingWorkflow.DoesNotExist,
            mock_workflow,
        ]

        # Expect that we retry and retrieve the workflow that someone else created
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        StudentTrainingWorkflow.create_workflow(submission['uuid'])
        workflow = StudentTrainingWorkflow.get_workflow(submission['uuid'])
        self.assertEqual(workflow, mock_workflow)
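
For context, the pattern this test exercises looks roughly like the sketch below (an assumption, not the library's actual model code): an existence check followed by a create can race with another process, so the IntegrityError from the uniqueness constraint is caught and the row the other process created is fetched instead.

    from django.db import IntegrityError

    def get_or_create_training_workflow(submission_uuid):
        # Hypothetical sketch of the check-create-retry pattern under test.
        try:
            return StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)
        except StudentTrainingWorkflow.DoesNotExist:
            try:
                return StudentTrainingWorkflow.objects.create(submission_uuid=submission_uuid)
            except IntegrityError:
                # Another process created the workflow between our check and
                # the insert; fetch the row it created instead.
                return StudentTrainingWorkflow.objects.get(submission_uuid=submission_uuid)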
Example 36
 def _create_student_and_submission(student,
                                    answer,
                                    date=None,
                                    problem_steps=None):
     """
     Helper method to create a student and submission for use in tests.
     """
     new_student_item = STUDENT_ITEM.copy()
     new_student_item["student_id"] = student
     submission = sub_api.create_submission(new_student_item, answer, date)
     steps = []
     init_params = {}
     if problem_steps:
         steps = problem_steps
     if 'peer' in steps:
         peer_api.on_start(submission["uuid"])
     workflow_api.create_workflow(submission["uuid"], steps, init_params)
     return submission, new_student_item
Example 37
    def test_upload(self):
        # Create an S3 bucket using the fake S3 implementation
        conn = boto.connect_s3()
        conn.create_bucket(self.BUCKET_NAME)

        # Create some submissions to ensure that we cover
        # the progress indicator code.
        for index in range(50):
            student_item = {
                'student_id': "test_user_{}".format(index),
                'course_id': self.COURSE_ID,
                'item_id': 'test_item',
                'item_type': 'openassessment',
            }
            submission_text = "test submission {}".format(index)
            submission = sub_api.create_submission(student_item,
                                                   submission_text)
            workflow_api.create_workflow(submission['uuid'], ['peer', 'self'])

        # Create and upload the archive of CSV files
        # This should generate the files even though
        # we don't have any data available.
        cmd = upload_oa_data.Command()
        cmd.handle(self.COURSE_ID.encode('utf-8'), self.BUCKET_NAME)

        # Retrieve the uploaded file from the fake S3 implementation
        self.assertEqual(len(cmd.history), 1)
        bucket = conn.get_all_buckets()[0]
        key = bucket.get_key(cmd.history[0]['key'])
        contents = StringIO(key.get_contents_as_string())

        # Expect that the contents contain all the expected CSV files
        with tarfile.open(mode="r:gz", fileobj=contents) as tar:
            file_sizes = {
                member.name: member.size
                for member in tar.getmembers()
            }
            for csv_name in self.CSV_NAMES:
                self.assertIn(csv_name, file_sizes)
                self.assertGreater(file_sizes[csv_name], 0)

        # Expect that we generated a URL for the bucket
        url = cmd.history[0]['url']
        self.assertIn("https://{}".format(self.BUCKET_NAME), url)
Example 38
    def test_create_workflow(self):
        submission = sub_api.create_submission(ITEM_1, "Shoot Hot Rod")
        workflow = workflow_api.create_workflow(submission["uuid"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'uuid', 'status', 'created', 'modified', 'score'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], "peer")

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], REQUIREMENTS
        )
        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)
Example 39
    def test_reset_score_signal(self, send_mock):
        # Create a submission for the student and score it
        submission = sub_api.create_submission(self.STUDENT_ITEM,
                                               'test answer')
        sub_api.set_score(submission['uuid'], 1, 2)

        # Reset scores
        sub_api.reset_score(
            self.STUDENT_ITEM['student_id'],
            self.STUDENT_ITEM['course_id'],
            self.STUDENT_ITEM['item_id'],
        )

        # Verify that the send method was properly called
        send_mock.assert_called_with(
            sender=None,
            anonymous_user_id=self.STUDENT_ITEM['student_id'],
            course_id=self.STUDENT_ITEM['course_id'],
            item_id=self.STUDENT_ITEM['item_id'])
Example 40
    def test_get_all_submissions(self):
        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_ONE)
        with self.assertNumQueries(1):
            submissions = list(api.get_all_submissions(
                STUDENT_ITEM['course_id'],
                STUDENT_ITEM['item_id'],
                STUDENT_ITEM['item_type'],
                read_replica=False,
            ))

        student_item = self._get_student_item(STUDENT_ITEM)
        second_student_item = self._get_student_item(SECOND_STUDENT_ITEM)
        # The result is assumed to be sorted by student_id, which is not part of the specification
        # of get_all_submissions(), but it is what it currently does.
        self._assert_submission(submissions[0], ANSWER_ONE, second_student_item.pk, 2)
        self.assertEqual(submissions[0]['student_id'], SECOND_STUDENT_ITEM['student_id'])
        self._assert_submission(submissions[1], ANSWER_TWO, student_item.pk, 2)
        self.assertEqual(submissions[1]['student_id'], STUDENT_ITEM['student_id'])
Example 41
    def _warm_cache(self, rubric, examples):
        """
        Create a submission and complete student training.
        This will populate the cache with training examples and rubrics,
        which are immutable and shared for all students training on a particular problem.

        Args:
            rubric (dict): Serialized rubric model.
            examples (list of dict): Serialized training examples

        Returns:
            None

        """
        pre_submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        training_api.on_start(pre_submission['uuid'])
        for example in examples:
            training_api.get_training_example(pre_submission['uuid'], rubric, examples)
            training_api.assess_training_example(pre_submission['uuid'], example['options_selected'])
Example 42
    def test_update_peer_workflow(self):
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"],
                                                ["training", "peer"])
        StudentTrainingWorkflow.create_workflow(
            submission_uuid=submission["uuid"])
        requirements = {
            "training": {
                "num_required": 2
            },
            "peer": {
                "must_grade": 5,
                "must_be_graded_by": 3
            }
        }
        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys, {
                'submission_uuid', 'status', 'created', 'modified', 'score',
                'assessment_score_priority'
            })
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], "training")

        peer_workflows = list(
            PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
        self.assertFalse(peer_workflows)

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], requirements)

        del workflow_from_get['status_details']
        self.assertEqual(workflow, workflow_from_get)

        requirements["training"]["num_required"] = 0
        workflow = workflow_api.update_from_assessments(
            submission["uuid"], requirements)

        # New step is Peer, and a Workflow has been created.
        self.assertEqual(workflow["status"], "peer")
        peer_workflow = PeerWorkflow.objects.get(
            submission_uuid=submission["uuid"])
        self.assertIsNotNone(peer_workflow)
Example 43
    def test_create_workflow(self, data):
        first_step = data["steps"][0] if data["steps"] else "peer"
        if "ai" in data["steps"]:
            first_step = data["steps"][1] if len(data["steps"]) > 1 else "waiting"
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], data["steps"])

        workflow_keys = set(workflow.keys())
        self.assertEqual(
            workflow_keys,
            {
                'submission_uuid', 'status', 'created', 'modified', 'score', 'assessment_score_priority'
            }
        )
        self.assertEqual(workflow["submission_uuid"], submission["uuid"])
        self.assertEqual(workflow["status"], first_step)

        workflow_from_get = workflow_api.get_workflow_for_submission(
            submission["uuid"], data["requirements"]
        )
        del workflow_from_get['status_details']

        # Because the peer step is skippable, the returned status may already have advanced past peer.
        if first_step == 'peer' and data["steps"].index('peer') < len(data["steps"]) - 1:
            workflow = dict(workflow)
            workflow['status'] = data["steps"][data["steps"].index('peer') + 1]
            workflow_from_get = dict(workflow_from_get)

            # the change in `workflow` variable causes modified field to get changed.
            del workflow['modified']
            del workflow_from_get['modified']

        self.assertEqual(workflow, workflow_from_get)

        # Test that the Peer Workflow is, or is not created, based on when peer
        # is a step in the workflow.
        if first_step == "peer":
            peer_workflow = PeerWorkflow.objects.get(submission_uuid=submission["uuid"])
            self.assertIsNotNone(peer_workflow)
        else:
            peer_workflows = list(PeerWorkflow.objects.filter(submission_uuid=submission["uuid"]))
            self.assertFalse(peer_workflows)
Example 44
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'], u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
            self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
                self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        self.assertCountEqual(assessment, retrieved)
Example 45
    def test_clear_state(self):
        # Create a submission, give it a score, and verify that score exists
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.set_score(submission["uuid"], 11, 12)
        score = api.get_score(STUDENT_ITEM)
        self._assert_score(score, 11, 12)
        self.assertEqual(score['submission_uuid'], submission['uuid'])

        # Reset the score with clear_state=True
        # This should set the submission's score to None, and make it unavailable to get_submissions
        api.reset_score(
            STUDENT_ITEM["student_id"],
            STUDENT_ITEM["course_id"],
            STUDENT_ITEM["item_id"],
            clear_state=True,
        )
        score = api.get_score(STUDENT_ITEM)
        self.assertIsNone(score)
        subs = api.get_submissions(STUDENT_ITEM)
        self.assertEqual(subs, [])
Example 46
    def _create_dummy_submission(self, student_item):
        """
        Create a dummy submission for a student.

        Args:
            student_item (dict): Serialized StudentItem model.

        Returns:
            str: submission UUID
        """
        answer = {'text': "  ".join(loremipsum.get_paragraphs(5))}
        submission = sub_api.create_submission(student_item, answer)
        workflow_api.create_workflow(submission['uuid'], STEPS)
        workflow_api.update_from_assessments(
            submission['uuid'],
            {'peer': {
                'must_grade': 1,
                'must_be_graded_by': 1
            }})
        return submission['uuid']
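
Hypothetical usage from a test that needs a gradable submission on record:

        submission_uuid = self._create_dummy_submission({
            'student_id': 'student_1',       # hypothetical values
            'course_id': 'test_course',
            'item_id': 'test_item',
            'item_type': 'openassessment',
        })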
Example 47
    def setUp(self):
        """
        Create a submission and grading workflow.
        """
        # Create a submission
        submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
        self.submission_uuid = submission['uuid']

        # Create a workflow for the submission
        workflow = AIGradingWorkflow.start_workflow(self.submission_uuid,
                                                    RUBRIC, ALGORITHM_ID)
        self.workflow_uuid = workflow.uuid

        # Associate the workflow with classifiers
        rubric = rubric_from_dict(RUBRIC)
        classifier_set = AIClassifierSet.create_classifier_set(
            self.CLASSIFIERS, rubric, ALGORITHM_ID,
            STUDENT_ITEM.get('course_id'), STUDENT_ITEM.get('item_id'))
        workflow.classifier_set = classifier_set
        workflow.save()
Example 48
    def test_set_score_with_annotation(self, reason):
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        creator_uuid = "Bob"
        annotation_type = "staff_override"
        api.set_score(submission["uuid"], 11, 12, creator_uuid, annotation_type, reason)
        score = api.get_latest_score_for_submission(submission["uuid"])
        self._assert_score(score, 11, 12)

        # We need to do this to verify that one score annotation exists and was
        # created for this score. We do not have an api point for retrieving
        # annotations, and it doesn't make sense to expose them, since they're
        # for auditing purposes.
        annotations = ScoreAnnotation.objects.all()
        self.assertGreater(len(annotations), 0)
        annotation = annotations[0]
        self.assertEqual(annotation.score.points_earned, 11)
        self.assertEqual(annotation.score.points_possible, 12)
        self.assertEqual(annotation.annotation_type, annotation_type)
        self.assertEqual(annotation.creator, creator_uuid)
        self.assertEqual(annotation.reason, reason)
Example 49
    def test_override_doesnt_overwrite_submission_score(self):
        # Create a submission for the student and score it
        submission = sub_api.create_submission(self.STUDENT_ITEM,
                                               'test answer')
        sub_api.set_score(submission['uuid'], 1, 10)

        sub_api.score_override(
            self.STUDENT_ITEM,
            8,
            10,
        )

        submission_score = sub_api.get_latest_score_for_submission(
            submission['uuid'])
        self.assertEqual(submission_score['points_earned'], 1)
        self.assertEqual(submission_score['points_possible'], 10)

        override_score = sub_api.get_score_override(self.STUDENT_ITEM)
        self.assertEqual(override_score['points_earned'], 8)
        self.assertEqual(override_score['points_possible'], 10)
Example 50
    def handle(self, *args, **options):
        if not args:
            raise CommandError('Please specify the course id.')
        if len(args) > 1:
            raise CommandError('Too many arguments.')
        course_id = args[0]
        course_key = CourseKey.from_string(course_id)
        course = get_course_by_id(course_key)

        student_modules = StudentModule.objects.filter(
            course_id=course.id
        ).filter(
            module_state_key__contains='edx_fga'
        )

        blocks = {}
        for student_module in student_modules:
            block_id = student_module.module_state_key
            if block_id.block_type != 'edx_fga':
                continue
            block = blocks.get(block_id)
            if not block:
                blocks[block_id] = block = modulestore().get_item(block_id)
            state = json.loads(student_module.state)
            sha1 = state.get('uploaded_sha1')
            if not sha1:
                continue
            student = student_module.student
            submission_id = block.student_submission_id(
                anonymous_id_for_user(student, course.id))
            answer = {
                "sha1": sha1,
                "filename": state.get('uploaded_filename'),
                "mimetype": state.get('uploaded_mimetype'),
            }
            submission = submissions_api.create_submission(
                submission_id, answer)
            score = state.get('score')  # float
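            # Note: a falsy score (None or 0.0) fails the check below, so
            # zero scores are never migrated.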
            if score:
                submissions_api.set_score(
                    submission['uuid'], int(score), block.max_score())
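If this handler is wired up as a Django management command (the command name is not shown in this excerpt; import_fga_submissions below is a hypothetical placeholder), it could be driven from a script or test via Django's call_command:

    from django.core.management import call_command

    # Hypothetical command name; the real name comes from the file that
    # defines this Command class.
    call_command('import_fga_submissions', 'course-v1:edX+DemoX+Demo_Course')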
Example no. 51
    def _create_workflow_with_status(self,
                                     student_id,
                                     course_id,
                                     item_id,
                                     status,
                                     answer="answer",
                                     steps=None):
        """
        Create a submission and workflow with a given status.

        Args:
            student_id (unicode): Student ID for the submission.
            course_id (unicode): Course ID for the submission.
            item_id (unicode): Item ID for the submission
            status (unicode): One of the acceptable status values (e.g. "peer", "self", "waiting", "done")

        Keyword Arguments:
            answer (unicode): Submission answer.
            steps (list): A list of steps to create the workflow with. If not
                specified, the default steps are "peer" and "self".

        Returns:
            workflow, submission
        """
        if not steps:
            steps = ["peer", "self"]

        submission = sub_api.create_submission(
            {
                "student_id": student_id,
                "course_id": course_id,
                "item_id": item_id,
                "item_type": "openassessment",
            }, answer)

        workflow = workflow_api.create_workflow(submission['uuid'], steps)
        workflow_model = AssessmentWorkflow.objects.get(
            submission_uuid=workflow['submission_uuid'])
        workflow_model.status = status
        workflow_model.save()
        return workflow, submission
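A minimal usage sketch, assuming this helper is defined on a test case with sub_api, workflow_api, and AssessmentWorkflow imported as above (the IDs are illustrative):

        # Put a workflow straight into the "done" state for one student.
        workflow, submission = self._create_workflow_with_status(
            "student_1", "test/1/1", "peer-assessment", "done"
        )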
Example no. 52
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'], u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
            self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
Example no. 53
    def test_check_all_criteria_assessed(self, data):
        student_item = {
            'student_id': 'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
            'item_id': 'test_item',
            'course_id': 'test_course',
            'item_type': 'test_type'
        }
        submission = create_submission(student_item, "Test answer")

        rubric, options_selected, criterion_feedback = self._create_data_structures_with_criterion_properties(
            has_option_selected=data['has_option_selected'],
            has_zero_options=data['has_zero_options'],
            has_feedback=data['has_feedback'])
        error = False
        try:
            create_assessment(submission['uuid'], student_item['student_id'],
                              options_selected, criterion_feedback,
                              "overall feedback", rubric)
        except SelfAssessmentRequestError:
            error = True
        self.assertEqual(data['expected_error'], error)
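The shape of the ddt-style data parameter is not shown in this excerpt; from the keys the test reads, one plausible case looks like this (hypothetical values):

        data = {
            'has_option_selected': False,  # no option chosen for the criterion
            'has_zero_options': True,      # the criterion has no options (feedback-only)
            'has_feedback': True,          # criterion feedback was provided
            'expected_error': False,       # a feedback-only criterion with feedback is valid
        }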
Example no. 54
    def test_unable_to_load_api(self):
        submission = sub_api.create_submission(
            {
                "student_id": "test student",
                "course_id": "test course",
                "item_id": "test item",
                "item_type": "openassessment",
            }, "test answer")

        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow_api.create_workflow(submission['uuid'], ['self'],
                                         ON_INIT_PARAMS)

        AssessmentWorkflow.objects.create(
            submission_uuid=submission['uuid'],
            status=AssessmentWorkflow.STATUS.waiting,
            course_id=ITEM_1['course_id'],
            item_id=ITEM_1['item_id'])

        with self.assertRaises(AssessmentWorkflowInternalError):
            workflow_api.update_from_assessments(submission['uuid'], {})
Example no. 55
    def create_submission(self, student_item_dict, student_sub):

        # Store the student's response text in a JSON-encodable dict
        # so that later we can add additional response fields.
        student_sub_dict = {'text': student_sub}

        submission = api.create_submission(student_item_dict, student_sub_dict)
        self.create_workflow(submission["uuid"])
        self.submission_uuid = submission["uuid"]

        # Emit analytics event...
        self.runtime.publish(
            self, "openassessmentblock.create_submission", {
                "submission_uuid": submission["uuid"],
                "attempt_number": submission["attempt_number"],
                "created_at": submission["created_at"],
                "submitted_at": submission["submitted_at"],
                "answer": submission["answer"],
            })

        return submission
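The dict wrapper is what makes the answer format extensible: a later revision could add more response fields to the same JSON-encodable dict without changing the submissions API call. A hypothetical extension of the same pattern:

        # Hypothetical extension: extra response fields ride along in the
        # same dict (the 'file_key' field is illustrative, not a real API).
        student_sub_dict = {'text': student_sub, 'file_key': file_key}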
Example no. 56
    def test_submissions_api_overrides_scores(self):
        """
        Check that answering incorrectly is graded properly.
        """
        self.basic_setup()
        self.submit_question_answer('p1', {'2_1': 'Correct'})
        self.submit_question_answer('p2', {'2_1': 'Correct'})
        self.submit_question_answer('p3', {'2_1': 'Incorrect'})
        self.check_grade_percent(0.67)
        self.assertEqual(self.get_course_grade().letter_grade, 'B')

        student_item = {
            'student_id': anonymous_id_for_user(self.student_user, self.course.id),
            'course_id': unicode(self.course.id),
            'item_id': unicode(self.problem_location('p3')),
            'item_type': 'problem'
        }
        submission = submissions_api.create_submission(student_item, 'any answer')
        submissions_api.set_score(submission['uuid'], 1, 1)
        self.check_grade_percent(1.0)
        self.assertEqual(self.get_course_grade().letter_grade, 'A')
Example no. 57
    def test_create_assessment_all_criteria_have_zero_options(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Use a rubric with only criteria with no options (only written feedback)
        rubric = copy.deepcopy(self.RUBRIC)
        for criterion in rubric["criteria"]:
            criterion["options"] = []

        # Create a self-assessment for the submission
        # We don't select any options, since none of the criteria have options
        options_selected = {}

        # However, because they don't have options, they need to have criterion feedback.
        criterion_feedback = {
            'clarity': 'I thought it was about as accurate as Scrubs is to the medical profession.',
            'accuracy': 'I thought it was about as accurate as Scrubs is to the medical profession.',
        }

        overall_feedback = ""

        assessment = create_assessment(
            submission['uuid'],
            u'π–™π–Šπ–˜π–™ π–šπ–˜π–Šπ–—',
            options_selected,
            criterion_feedback,
            overall_feedback,
            rubric,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc))

        # Since none of the criteria have options, each assessment part should
        # have no option selected and should carry the criterion feedback
        # we provided.
        for part in assessment["parts"]:
            self.assertEqual(part["option"], None)
            self.assertEqual(
                part["feedback"],
                u'I thought it was about as accurate as Scrubs is to the medical profession.'
            )
Example no. 58
    def setUp(self):
        """
        Sets up each test so that it will have unfinished tasks of both types
        """
        # 1) Schedule grading; the scheduling succeeds, but the grading fails because no classifiers exist
        for _ in range(0, 10):
            submission = sub_api.create_submission(STUDENT_ITEM, ANSWER)
            self.submission_uuid = submission['uuid']
            ai_api.on_init(self.submission_uuid,
                           rubric=RUBRIC,
                           algorithm_id=ALGORITHM_ID)

        # 2) Schedule training and have it INTENTIONALLY fail. Now we are at a point where both parts need to be rescheduled
        patched_method = 'openassessment.assessment.api.ai.training_tasks.train_classifiers.apply_async'
        with mock.patch(patched_method) as mock_train_classifiers:
            mock_train_classifiers.side_effect = AITrainingInternalError(
                'Training Classifiers Failed for some Reason.')
            with self.assertRaises(AITrainingInternalError):
                ai_api.train_classifiers(RUBRIC, EXAMPLES, COURSE_ID, ITEM_ID,
                                         ALGORITHM_ID)

        self._assert_complete(training_done=False, grading_done=False)
Example no. 59
    def test_cancel_the_assessment_workflow(self):
        # Create the submission and assessment workflow.
        submission = sub_api.create_submission(ITEM_1, ANSWER_1)
        workflow = workflow_api.create_workflow(submission["uuid"], ["peer"])

        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            }
        }

        # Check the workflow is not cancelled.
        self.assertFalse(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Check the status is not cancelled.
        self.assertNotEqual(workflow.get('status'), 'cancelled')

        # Check that points_earned is not 0.
        self.assertNotEqual(workflow['score'], 0)

        # Cancel the workflow for submission.
        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=ITEM_2['student_id'],
            assessment_requirements=requirements
        )

        # Check workflow is cancelled.
        self.assertTrue(workflow_api.is_workflow_cancelled(submission["uuid"]))

        # Status for workflow should be cancelled.
        workflow = AssessmentWorkflow.get_by_submission_uuid(submission["uuid"])
        self.assertEqual(workflow.status, 'cancelled')

        # The score should be None: a cancelled workflow earns 0 points,
        # and a score with 0 earned points is represented as None.
        self.assertEqual(workflow.score, None)
Example no. 60
    def _create_submissions_and_scores(self,
                                       xblock,
                                       submissions_and_scores,
                                       submission_key="text",
                                       points_possible=10):
        """
        Create submissions and scores that should be displayed by the leaderboard.

        Args:
            xblock (OpenAssessmentBlock)
            submissions_and_scores (list): List of `(submission, score)` tuples, where
                `submission` is the essay text (string) and `score` is the integer
                number of points earned.

        Keyword Args:
            points_possible (int): The total number of points possible for this problem
            submission_key (string): The key to use in the submission dict.  If None, use
                the submission value itself instead of embedding it in a dictionary.
        """
        for num, (submission, points_earned) in enumerate(submissions_and_scores):
            # Assign a unique student ID
            # These aren't displayed by the leaderboard, so we can set them
            # to anything without affecting the test.
            student_item = xblock.get_student_item_dict()
            # Append a random number to the student_id to make it unique.
            student_item['student_id'] = "student {num} {num2}".format(
                num=num, num2=randint(2, 1000))
            if submission_key is not None:
                answer = {submission_key: submission}
            else:
                answer = submission

            # Create a submission
            sub = sub_api.create_submission(student_item, answer)

            # Create a score for the submission
            sub_api.set_score(sub['uuid'], points_earned, points_possible)
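A minimal usage sketch, assuming an xblock fixture like the ones used elsewhere in these tests (essay texts and scores are illustrative):

        # Two essays, scored 7/10 and 9/10, each wrapped under the "text" key.
        self._create_submissions_and_scores(
            xblock, [("First essay", 7), ("Second essay", 9)]
        )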