Example #1
0
    def test_open_saved_response(self, xblock):
        """A saved (but unsubmitted) response is reflected in the rendered context."""
        response_text = ('A man must have a code', 'A man must have an umbrella too.')
        save_payload = json.dumps({'submission': response_text})
        save_resp = self.request(xblock, 'save_submission', save_payload, response_format='json')
        self.assertTrue(save_resp['success'])

        # The context should echo back the saved answer and flag it as saved.
        expected_saved = create_submission_dict(
            {'answer': prepare_submission_for_serialization(response_text)},
            xblock.prompts,
        )
        expected_context = {
            'text_response': 'required',
            'file_upload_response': None,
            'file_upload_type': None,
            'saved_response': expected_saved,
            'save_status': 'This response has been saved but not submitted.',
            'submit_enabled': True,
            'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
            'allow_latex': False,
            'user_timezone': None,
            'user_language': None,
            'prompts_type': 'text',
        }
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html', expected_context
        )
Example #2
0
    def test_staff_area_student_info_staff_only(self, xblock):
        """Staff view of a learner shows only the staff assessment when the
        workflow contains just a 'staff' step."""
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['staff']
        )

        # Bob assesses himself.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        # Only the staff assessment should appear; peer/self stay empty.
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNotNone(context['staff_assessment'])
        self.assertEquals("openassessmentblock/staff_area/oa_student_info.html", path)

        grade_details = context['grade_details']
        # The single staff assessment supplies the grade for each criterion.
        self.assertEquals(1, len(grade_details['criteria'][0]['assessments']))
        self.assertEquals('Staff Grade', grade_details['criteria'][0]['assessments'][0]['title'])
Example #3
0
    def test_image_and_text_submission_multiple_files(self, xblock):
        """
        Tests that leaderboard works as expected when multiple files are uploaded
        """
        file_keys = ['foo', 'bar']
        file_descriptions = ['{}-description'.format(file_key) for file_key in file_keys]

        # Upload each file to the mocked S3 bucket.
        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        for file_key in file_keys:
            key = Key(bucket, 'submissions_attachments/{}'.format(file_key))
            key.set_contents_from_string("How d'ya do?")

        # Build the expected (download URL, description) pairs once, after all
        # uploads finish.  The original code rebuilt this list on every loop
        # iteration, which also shadowed the loop variable `file_key`.
        files_url_and_description = [
            (api.get_download_url(file_key), file_descriptions[idx])
            for idx, file_key in enumerate(file_keys)
        ]

        # Create a image and text submission
        submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_keys'] = file_keys
        submission[u'files_descriptions'] = file_descriptions

        self._create_submissions_and_scores(xblock, [
            (submission, 1)
        ])

        self.maxDiff = None
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [
            {'score': 1, 'files': files_url_and_description, 'submission': create_submission_dict(
                {'answer': submission},
                xblock.prompts
            )}
        ])
Example #4
0
 def test_open_no_deadline(self, xblock):
     """With no deadline configured, an empty, unsaved response form renders."""
     blank_answer = prepare_submission_for_serialization(("", ""))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'text_response': 'required',
             'file_upload_response': None,
             'file_upload_type': None,
             'saved_response': create_submission_dict({'answer': blank_answer}, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'allow_latex': False,
             'user_timezone': None,
             'user_language': None,
             'prompts_type': 'text',
         }
     )
    def test_overwrite_saved_response(self, xblock):
        """Saving a second response replaces the previously saved one."""
        # Pre-populate the XBlock with an existing saved response.
        original_parts = [
            u"THAT'ꙅ likɘ A 40-bɘgᴙɘɘ bAY.",
            u"Aiᴎ'T ᴎodobY goT ᴎoTHiᴎg To ꙅAY AdoUT A 40-bɘgᴙɘɘ bAY."
        ]
        xblock.saved_response = prepare_submission_for_serialization(original_parts)

        # Save a different response over it.
        new_parts = [u"ГЂіи lіиэ ъэтшээи", u"Ђэаvэи аиↁ Ђэѓэ."]
        result = self.request(
            xblock, 'save_submission',
            json.dumps({'submission': new_parts}),
            response_format="json"
        )
        self.assertTrue(result['success'])

        # The stored value should now be the serialized new submission.
        self.assertEqual(
            xblock.saved_response,
            json.dumps(prepare_submission_for_serialization(new_parts))
        )
Example #6
0
    def test_overwrite_saved_response(self, xblock):
        """Saving a second response replaces the previously saved one."""

        # XBlock has a saved response already
        xblock.saved_response = prepare_submission_for_serialization([
            u"THAT'ꙅ likɘ A 40-bɘgᴙɘɘ bAY.",
            u"Aiᴎ'T ᴎodobY goT ᴎoTHiᴎg To ꙅAY AdoUT A 40-bɘgᴙɘɘ bAY."
        ])

        # Save another response
        submission = [u"ГЂіи lіиэ ъэтшээи", u"Ђэаvэи аиↁ Ђэѓэ."]
        payload = json.dumps({'submission': submission})
        resp = self.request(xblock, 'save_submission', payload, response_format="json")
        self.assertTrue(resp['success'])

        # Verify that the saved response was overwritten
        self.assertEqual(xblock.saved_response, json.dumps(prepare_submission_for_serialization(submission)))
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Upload a file to the mocked S3 bucket and record its download URL.
        s3_connection = boto.connect_s3()
        s3_bucket = s3_connection.create_bucket('mybucket')
        s3_key = Key(s3_bucket, 'submissions_attachments/foo')
        s3_key.set_contents_from_string("How d'ya do?")
        file_download_url = [(api.get_download_url('foo'), '')]

        # The submission carries both the text parts and the uploaded file key.
        submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [(submission, 1)])

        self.maxDiff = None
        # The leaderboard entry should expose both the text and the file URL.
        self._assert_scores(xblock, [
            {
                'score': 1,
                'files': file_download_url,
                'submission': create_submission_dict({'answer': submission}, xblock.prompts),
            }
        ])
Example #8
0
    def test_staff_area_student_info_staff_only_no_options(self, xblock):
        """Staff criterion feedback is shown in the rendered student info even
        when the rubric criteria have no options (feedback-only criteria)."""
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")), ['staff'])

        # Bob assesses himself as staff.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            {},  # no options available
            {"vocabulary": "Good use of vocabulary!"},
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        # The rendered student-info page should include the criterion feedback.
        _, _ = xblock.get_student_info_path_and_context("Bob")
        self.assertIn(
            "Good use of vocabulary!",
            self.request(
                xblock, "render_student_info",
                six.moves.urllib.parse.urlencode({"student_username":
                                                  "******"})).decode('utf-8'))
Example #9
0
    def test_open_saved_response_old_format(self, xblock):
        """A pre-existing plain-string saved response renders as a one-part answer."""
        xblock.prompts = [{'description': 'One prompt.'}]
        xblock.saved_response = "An old format response."
        xblock.has_saved = True

        expected_saved = create_submission_dict(
            {'answer': prepare_submission_for_serialization(('An old format response.',))},
            xblock.prompts,
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'saved_response': expected_saved,
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text',
            }
        )
Example #10
0
    def test_staff_debug_student_info_self_only(self, xblock):
        """Staff debug view renders a learner's info when only a self
        assessment exists."""
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
        )
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        # Answer text is shown; there are no peer assessments for this workflow.
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example #11
0
 def test_open_unanswered(self, xblock):
     """An open, unanswered block renders an empty response form."""
     blank_answer = prepare_submission_for_serialization(("", ""))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'text_response': 'required',
             'file_upload_response': None,
             'file_upload_type': None,
             'saved_response': create_submission_dict({'answer': blank_answer}, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
             'allow_latex': False,
             'user_timezone': None,
             'user_language': None,
             'prompts_type': 'text',
             'enable_delete_files': True,
         }
     )
Example #12
0
    def test_staff_debug_student_info_with_cancelled_submission(self, xblock):
        """Staff debug view exposes workflow-cancellation info for a learner
        whose submission was cancelled."""
        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2"))
        )
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        # Cancel the workflow as if staff removed the submission.
        workflow_api.cancel_workflow(
            submission_uuid=submission["uuid"],
            comments="Inappropriate language",
            cancelled_by_id=bob_item['student_id'],
            assessment_requirements=requirements
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        # The answer is still shown and the cancellation is surfaced.
        self.assertEquals("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['workflow_cancellation'])
        self.assertEquals("openassessmentblock/staff_debug/student_info.html", path)
Example #13
0
    def test_open_saved_response_old_format(self, xblock):
        """A pre-existing plain-string saved response renders as a one-part answer."""
        xblock.prompts = [{'description': 'One prompt.'}]
        xblock.saved_response = "An old format response."
        xblock.has_saved = True

        expected_saved = create_submission_dict(
            {'answer': prepare_submission_for_serialization(('An old format response.', ))},
            xblock.prompts,
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'file_upload_type': None,
                'saved_response': expected_saved,
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'has_peer': True,
                'has_self': True,
                'allow_latex': False,
            }
        )
Example #14
0
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Upload a file to the mocked S3 bucket and record its download URL.
        s3_conn = boto.connect_s3()
        s3_bucket = s3_conn.create_bucket('mybucket')
        s3_key = Key(s3_bucket, 'submissions_attachments/foo')
        s3_key.set_contents_from_string("How d'ya do?")
        file_download_url = [(api.get_download_url('foo'), '')]

        # The submission carries both the text parts and the uploaded file key.
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [(submission, 1)])

        self.maxDiff = None
        # Expect both the text and the download URL in the leaderboard entry.
        expected_entry = {
            'score': 1,
            'files': file_download_url,
            'submission': create_submission_dict({'answer': submission}, xblock.prompts),
        }
        self._assert_scores(xblock, [expected_entry])
Example #15
0
    def test_staff_area_student_info_staff_only_no_options(self, xblock):
        """Staff criterion feedback appears in the rendered student info when
        the rubric criteria have no options (feedback-only criteria)."""
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['staff']
        )

        # Bob assesses himself as staff.
        staff_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            {},  # no options available
            {"vocabulary": "Good use of vocabulary!"},
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        _, context = xblock.get_student_info_path_and_context("Bob")
        # NOTE(review): urllib.urlencode exists only on Python 2; Python 3
        # moved it to urllib.parse.urlencode -- confirm the intended runtime.
        self.assertIn(
            "Good use of vocabulary!",
            self.request(
                xblock,
                "render_student_info",
                urllib.urlencode({"student_username": "******"})
            )
        )
Example #16
0
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Upload a file to the mocked S3 bucket.
        s3_client = boto3.client("s3")
        s3_client.create_bucket(Bucket='mybucket')
        s3_client.put_object(
            Bucket="mybucket",
            Key="submissions_attachments/foo",
            Body=b"How d'ya do?",
        )

        # Expected file metadata for the leaderboard entry.
        expected_files = [{
            'download_url': api.get_download_url('foo'),
            'description': '',
            'name': '',
            'show_delete_button': False
        }]

        # The submission carries both the text parts and the uploaded file key.
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [(submission, 1)])

        # Expect both the text and the download URL in the leaderboard entry.
        self._assert_scores(xblock, [{
            'score': 1,
            'files': expected_files,
            'submission': create_submission_dict({'answer': submission}, xblock.prompts),
        }])
Example #17
0
    def test_staff_area_student_info_self_only(self, xblock):
        """Staff area renders a learner's info when only a self assessment
        exists."""
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()
        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")))
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['self'])

        # Bob assesses himself.
        self_api.create_assessment(
            submission['uuid'],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            ASSESSMENT_DICT['criterion_feedback'],
            ASSESSMENT_DICT['overall_feedback'],
            {'criteria': xblock.rubric_criteria},
        )

        path, context = xblock.get_student_info_path_and_context("Bob")
        # Answer text is shown; there are no peer assessments for this workflow.
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertEquals([], context['peer_assessments'])
        self.assertEquals("openassessmentblock/staff_area/student_info.html",
                          path)
Example #18
0
    def test_staff_area_student_info_with_cancelled_submission(self, xblock):
        """Staff area exposes workflow-cancellation info for a learner whose
        submission was cancelled."""
        requirements = {
            "peer": {
                "must_grade": 1,
                "must_be_graded_by": 1
            },
        }

        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = sub_api.create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")))
        peer_api.on_start(submission["uuid"])
        workflow_api.create_workflow(submission["uuid"], ['peer'])

        # Cancel the workflow as if staff removed the submission.
        workflow_api.cancel_workflow(submission_uuid=submission["uuid"],
                                     comments="Inappropriate language",
                                     cancelled_by_id=bob_item['student_id'],
                                     assessment_requirements=requirements)

        path, context = xblock.get_student_info_path_and_context("Bob")
        # The answer is still shown and the cancellation is surfaced.
        self.assertEquals("Bob Answer 1",
                          context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['workflow_cancellation'])
        self.assertEquals("openassessmentblock/staff_area/student_info.html",
                          path)
Example #19
0
    def test_show_submissions_that_have_greater_than_0_score(self, xblock):
        """The leaderboard lists only submissions with a score above zero,
        highest first."""
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 0 part 1", "test answer 0 part 2")), 0),
            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1)
        ])
        # Only the score-1 submission should appear; score 0 is excluded.
        self._assert_scores(xblock, [
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )},
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 10),
            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0)
        ])
        # Zero-score submissions are still excluded; results sort descending.
        self._assert_scores(xblock, [
            {"score": 10, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )},
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #20
0
    def test_show_submissions_that_have_greater_than_0_score(self, xblock):
        """The leaderboard lists only submissions with a score above zero,
        highest first (entries also carry an empty ``files`` list)."""
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 0 part 1', 'test answer 0 part 2')), 0),
            (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1)
        ])
        # Only the score-1 submission should appear; score 0 is excluded.
        self._assert_scores(xblock, [
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )},
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 10),
            (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0)
        ])
        # Zero-score submissions are still excluded; results sort descending.
        self._assert_scores(xblock, [
            {'score': 10, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )},
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #21
0
    def test_show_submissions(self, xblock):
        """The leaderboard shows the top-scoring submissions in descending
        order, truncated to the configured maximum."""
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1),
            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 2)
        ])
        self._assert_scores(xblock, [
            {"score": 2, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )},
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0),
            (prepare_submission_for_serialization(("test answer 4 part 1", "test answer 4 part 2")), 10),
            (prepare_submission_for_serialization(("test answer 5 part 1", "test answer 5 part 2")), 3),
        ])
        # Only the top three scores remain, sorted descending.
        self._assert_scores(xblock, [
            {"score": 10, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 4 part 1", u"test answer 4 part 2"))},
                xblock.prompts
            )},
            {"score": 3, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 5 part 1", u"test answer 5 part 2"))},
                xblock.prompts
            )},
            {"score": 2, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
    def test_show_submissions(self, xblock):
        """The leaderboard shows the top-scoring submissions in descending
        order, truncated to the configured maximum (entries also carry an
        empty ``files`` list)."""
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1),
            (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 2)
        ])
        self._assert_scores(xblock, [
            {'score': 2, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )},
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0),
            (prepare_submission_for_serialization(('test answer 4 part 1', 'test answer 4 part 2')), 10),
            (prepare_submission_for_serialization(('test answer 5 part 1', 'test answer 5 part 2')), 3),
        ])
        # Only the top three scores remain, sorted descending.
        self._assert_scores(xblock, [
            {'score': 10, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 4 part 1', u'test answer 4 part 2'))},
                xblock.prompts
            )},
            {'score': 3, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 5 part 1', u'test answer 5 part 2'))},
                xblock.prompts
            )},
            {'score': 2, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #23
0
 def test_open_no_deadline(self, xblock):
     """With no deadline configured, an empty, unsaved response form renders."""
     blank_answer = prepare_submission_for_serialization(("", ""))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'file_upload_type': None,
             'saved_response': create_submission_dict({'answer': blank_answer}, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'allow_latex': False,
         }
     )
Example #24
0
 def test_open_unanswered(self, xblock):
     """An open, unanswered block renders an empty response form."""
     blank_answer = prepare_submission_for_serialization(("", ""))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'file_upload_type': None,
             'saved_response': create_submission_dict({'answer': blank_answer}, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
             'allow_latex': False,
         }
     )
Example #25
0
    def test_image_and_text_submission_multiple_files(self, xblock):
        """
        Tests that leaderboard works as expected when multiple files are uploaded
        """
        file_keys = ['foo', 'bar']
        file_descriptions = [
            '{}-description'.format(file_key) for file_key in file_keys
        ]
        files_names = [
            '{}-file_name'.format(file_key) for file_key in file_keys
        ]
        # Upload each file to the mocked S3 bucket.
        conn = boto3.client("s3")
        conn.create_bucket(Bucket="mybucket")
        for file_key in file_keys:
            conn.put_object(
                Bucket="mybucket",
                Key="submissions_attachments/{}".format(file_key),
                Body=b"How d'ya do?",
            )

        # Build the expected file metadata once, after all uploads finish.
        # The original rebuilt this list on every loop iteration, which also
        # shadowed the loop variable `file_key`.
        files_url_and_description = [{
            'download_url': api.get_download_url(file_key),
            'description': file_descriptions[idx],
            'name': files_names[idx],
            'show_delete_button': False
        } for idx, file_key in enumerate(file_keys)]

        # Create a image and text submission
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission['file_keys'] = file_keys
        submission['files_descriptions'] = file_descriptions
        submission['files_names'] = files_names
        submission['files_sizes'] = []
        self._create_submissions_and_scores(xblock, [(submission, 1)])
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [{
            'score': 1,
            'files': files_url_and_description,
            'submission': create_submission_dict({'answer': submission}, xblock.prompts)
        }])
Example #26
0
    def test_staff_area_student_info_peer_only(self, xblock):
        """
        Staff view of a learner in a peer-only assessment shows the learner's
        submission and peer assessments, but no self/staff assessment and no
        grade details until the learner finishes assessing others.
        """
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob")
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item,
            prepare_submission_for_serialization(
                ("Bob Answer 1", "Bob Answer 2")), ['peer'])

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        self._create_submission(tim_item, "Tim Answer", ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'],
            dict(),
            "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual("Bob Answer 1",
                         context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNone(context['staff_assessment'])
        self.assertEqual(
            "openassessmentblock/staff_area/oa_student_info.html", path)

        # Bob still needs to assess other learners
        self.assertIsNone(context['grade_details'])
Example #27
0
 def test_open_no_deadline(self, xblock):
     """A peer assessment with no deadline renders an empty, unsaved response form."""
     empty_saved_response = create_submission_dict(
         {'answer': prepare_submission_for_serialization(("", ""))},
         xblock.prompts,
     )
     self._assert_path_and_context(
         xblock,
         'openassessmentblock/response/oa_response.html',
         {
             'allow_file_upload': False,
             'saved_response': empty_saved_response,
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'has_peer': True,
             'has_self': False,
             'allow_latex': False,
         },
     )
Example #28
0
    def test_staff_area_student_info_peer_only(self, xblock):
        """
        Staff view of a learner in a peer-only assessment shows the learner's
        submission and peer assessments, but no self/staff assessment and no
        grade details until the learner finishes assessing others.
        """
        # Simulate that we are course staff
        xblock.xmodule_runtime = self._create_mock_runtime(
            xblock.scope_ids.usage_id, True, False, "Bob"
        )
        xblock.runtime._services['user'] = NullUserService()  # pylint: disable=protected-access

        bob_item = STUDENT_ITEM.copy()
        bob_item["item_id"] = xblock.scope_ids.usage_id
        # Create a submission for Bob, and corresponding workflow.
        submission = self._create_submission(
            bob_item, prepare_submission_for_serialization(("Bob Answer 1", "Bob Answer 2")), ['peer']
        )

        # Create a submission for Tim, and corresponding workflow.
        tim_item = bob_item.copy()
        tim_item["student_id"] = "Tim"
        self._create_submission(tim_item, "Tim Answer", ['peer', 'self'])

        # Bob assesses Tim.
        peer_api.get_submission_to_assess(submission['uuid'], 1)
        peer_api.create_assessment(
            submission["uuid"],
            STUDENT_ITEM["student_id"],
            ASSESSMENT_DICT['options_selected'], dict(), "",
            {'criteria': xblock.rubric_criteria},
            1,
        )

        # Now Bob should be fully populated in the student info view.
        path, context = xblock.get_student_info_path_and_context("Bob")
        # assertEqual: assertEquals is a deprecated alias (removed in Python 3.12).
        self.assertEqual("Bob Answer 1", context['submission']['answer']['parts'][0]['text'])
        self.assertIsNotNone(context['peer_assessments'])
        self.assertIsNone(context['self_assessment'])
        self.assertIsNone(context['staff_assessment'])
        self.assertEqual("openassessmentblock/staff_area/oa_student_info.html", path)

        # Bob still needs to assess other learners
        self.assertIsNone(context['grade_details'])
Example #29
0
 def test_image_and_text_submission(self, xblock):
     """A submission with an attached file reports both its text and the file's download URL."""
     # Create a file and get the download URL
     conn = boto.connect_s3()
     bucket = conn.create_bucket('mybucket')
     key = Key(bucket)
     key.key = "submissions_attachments/foo"
     key.set_contents_from_string("How d'ya do?")
     download_url = api.get_download_url("foo")

     # Create a image and text submission
     submission = prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2"))
     submission[u"file_key"] = "foo"
     self._create_submissions_and_scores(xblock, [(submission, 1)])

     self.maxDiff = None
     # Expect that we retrieve both the text and the download URL for the file
     expected_row = {
         "file": download_url,
         "score": 1,
         "submission": create_submission_dict({"answer": submission}, xblock.prompts),
     }
     self._assert_scores(xblock, [expected_row])
 def test_prepare_submission_for_serialization(self, input, output):
     """The serialized form of *input* should equal the expected *output*."""
     serialized = prepare_submission_for_serialization(input)
     self.assertEqual(serialized, output)
Example #31
0
 def test_prepare_submission_for_serialization(self, input, output):
     """Serializing the given answer should produce exactly the expected structure."""
     self.assertEqual(output, prepare_submission_for_serialization(input))
Example #32
0
    def test_open_saved_response(self, xblock):
        """
        A saved (but unsubmitted) response renders with the saved text,
        and previously appended file uploads survive the render intact.
        """
        file_uploads = [
            {'description': 'file-1', 'name': 'file-1.pdf', 'size': 200},
            {'description': 'file-2', 'name': 'file-2.pdf', 'size': 400},
        ]
        xblock.file_manager.append_uploads(*file_uploads)

        # Save a response
        payload = json.dumps({
            'submission': ('A man must have a code', 'A man must have an umbrella too.')
        })
        resp = self.request(xblock, 'save_submission', payload, response_format='json')
        self.assertTrue(resp['success'])

        saved_answer = prepare_submission_for_serialization(
            ('A man must have a code', 'A man must have an umbrella too.')
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'saved_response': create_submission_dict({'answer': saved_answer}, xblock.prompts),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text',
                'enable_delete_files': True,
            }
        )

        # pylint: disable=protected-access
        actual_file_uploads = [
            upload._to_dict() for upload in xblock.file_manager.get_uploads()
        ]
        # One expected FileUpload per fixture entry, sharing the same student/course.
        expected_file_uploads = [
            api.FileUpload(
                description=upload['description'],
                name=upload['name'],
                size=upload['size'],
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict()
            for upload in file_uploads
        ]

        # We can't consistently determine the values of an XBlock's item_id
        for expected, actual in zip(expected_file_uploads, actual_file_uploads):
            expected.pop('item_id')
            actual.pop('item_id')

        self.assertEqual(expected_file_uploads, actual_file_uploads)
Example #33
0
    def test_open_saved_response_misaligned_file_data(self, xblock):
        """
        Test the case where the XBlock user state contains a different number of
        file descriptions from file sizes and names.  After rendering the block,
        the list of file names and sizes should be coerced to lists that are of the
        same length as the file descriptions.
        """
        xblock.saved_files_descriptions = json.dumps(["file-1", "file-2"])
        xblock.saved_files_names = json.dumps([])
        xblock.saved_files_sizes = json.dumps([200])

        xblock.file_upload_type = 'pdf-and-image'
        xblock.file_upload_response = 'optional'

        # Save a response
        payload = json.dumps({
            'submission': ('A man must have a code', 'A man must have an umbrella too.')
        })
        resp = self.request(xblock, 'save_submission', payload, response_format='json')
        self.assertTrue(resp['success'])

        saved_answer = prepare_submission_for_serialization(
            ('A man must have a code', 'A man must have an umbrella too.')
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': 'optional',
                'file_upload_type': 'pdf-and-image',
                'file_urls': [('', 'file-1', None), ('', 'file-2', None)],
                'saved_response': create_submission_dict({'answer': saved_answer}, xblock.prompts),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text',
                'enable_delete_files': True,
            }
        )

        # pylint: disable=protected-access
        actual_file_uploads = [
            upload._to_dict() for upload in xblock.file_manager.get_uploads()
        ]
        # When file names/sizes are of different cardinality of file descriptions,
        # they are coerced to lists of nulls of the same cardinality of the descriptions,
        # hence, name and size attributes below are null.
        expected_file_uploads = [
            api.FileUpload(
                description=description,
                name=None,
                size=None,
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict()
            for description in ('file-1', 'file-2')
        ]

        # We can't consistently determine the values of an XBlock's item_id
        for expected, actual in zip(expected_file_uploads, actual_file_uploads):
            expected.pop('item_id')
            actual.pop('item_id')

        self.assertEqual(expected_file_uploads, actual_file_uploads)