Example #1
    def test_show_submissions_that_have_greater_than_0_score(self, xblock):
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 0 part 1', 'test answer 0 part 2')), 0),
            (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1)
        ])
        self._assert_scores(xblock, [
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )},
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 10),
            (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0)
        ])
        self._assert_scores(xblock, [
            {'score': 10, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )},
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
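For orientation, a rough sketch of the data shapes these leaderboard tests exercise, inferred from how the helpers are called in the examples (the exact field layout is an assumption, not quoted from the library):

    # Hedged sketch: approximate shapes assumed from the calls above, not copied from the source.
    answer = prepare_submission_for_serialization(('part 1 text', 'part 2 text'))
    # answer is roughly {'parts': [{'text': 'part 1 text'}, {'text': 'part 2 text'}]}
    submission = create_submission_dict({'answer': answer}, xblock.prompts)
    # create_submission_dict pairs each part with its prompt, so that
    # submission['answer']['parts'][0] is roughly {'prompt': xblock.prompts[0], 'text': 'part 1 text'}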
Example #2
    def test_show_submissions_that_have_greater_than_0_score(self, xblock):
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 0 part 1", "test answer 0 part 2")), 0),
            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1)
        ])
        self._assert_scores(xblock, [
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )},
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 10),
            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0)
        ])
        self._assert_scores(xblock, [
            {"score": 10, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )},
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #3
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Create a file and get the download URL
        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        key = Key(bucket, 'submissions_attachments/foo')
        key.set_contents_from_string("How d'ya do?")

        file_download_url = [(api.get_download_url('foo'), '')]
        # Create an image and text submission
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [(submission, 1)])
        self.maxDiff = None
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [{
            'score':
            1,
            'files':
            file_download_url,
            'submission':
            create_submission_dict({'answer': submission}, xblock.prompts)
        }])
Example #4
    def test_closed_graded(self, xblock):
        # Create a submission
        submission = xblock.create_submission(
            xblock.get_student_item_dict(),
            ('A man must have a code', 'A man must have an umbrella too.')
        )

        # Simulate the user receiving a grade
        xblock.get_workflow_info = Mock(return_value={
            'status': 'done',
            'submission_uuid': submission['uuid']
        })

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response_graded.html',
            {
                'submission_due': dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
                'student_submission': create_submission_dict(submission, xblock.prompts),
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text'
            }
        )
Example #5
    def test_peer_assessment_available(self, xblock):
        # Make a submission, so we get to peer assessment
        xblock.create_submission(
            xblock.get_student_item_dict(),
            (u"𝒀?", "?𝒔. 𝑴𝒂𝒌𝒆 𝒕𝒉𝒆𝒔𝒆 𝒚𝒐𝒖𝒓 𝒑𝒓𝒊𝒎𝒂𝒓𝒚 𝒂𝒄𝒕𝒊𝒐𝒏 𝒊𝒕𝒆𝒎𝒔."),
        )

        # Create a submission from another user so we have something to assess
        other_student = copy.deepcopy(xblock.get_student_item_dict())
        other_student['student_id'] = 'Tyler'
        submission = xblock.create_submission(
            other_student,
            (u"ησω, αη¢ιєηт ρєσρℓє ƒσυη∂ тнєιя ¢ℓσтнєѕ ﻭσт ¢ℓєαηєя",
             u" ιƒ тнєу ωαѕнє∂ тнєм αт α ¢єятαιη ѕρσт ιη тнє яινєя."))

        # We should pull the other student's submission
        expected_context = {
            'graded': 0,
            'rubric_criteria': xblock.rubric_criteria,
            'must_grade': 5,
            'review_num': 1,
            'peer_submission': create_submission_dict(submission,
                                                      xblock.prompts),
            'file_upload_type': None,
            'peer_file_url': '',
            'submit_button_text':
            'submit your assessment & move to response #2',
            'allow_latex': False,
        }
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/peer/oa_peer_assessment.html',
            expected_context,
            workflow_status='peer',
        )
Example #6
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Create a file and get the download URL
        conn = boto3.client("s3")
        conn.create_bucket(Bucket='mybucket')
        conn.put_object(
            Bucket="mybucket",
            Key="submissions_attachments/foo",
            Body=b"How d'ya do?",
        )

        file_download_url = [{
            'download_url': api.get_download_url('foo'),
            'description': '',
            'name': '',
            'show_delete_button': False
        }]
        # Create an image and text submission
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [(submission, 1)])
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [{
            'score':
            1,
            'files':
            file_download_url,
            'submission':
            create_submission_dict({'answer': submission}, xblock.prompts)
        }])
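Tests like the two above presumably run against a mocked S3 backend rather than a real bucket; the fixture is not visible in these excerpts, but a typical setup (an assumption, using moto) looks like:

    # Illustrative only: the mock_s3 fixture is an assumption, not shown in the excerpts above.
    import boto3
    from moto import mock_s3

    @mock_s3
    def make_upload_fixture():
        # With moto active, boto3's S3 calls hit an in-memory fake instead of AWS.
        client = boto3.client('s3')
        client.create_bucket(Bucket='mybucket')
        client.put_object(Bucket='mybucket', Key='submissions_attachments/foo', Body=b"How d'ya do?")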
Example #7
 def test_unavailable_submitted(self, xblock):
     # If the instructor changes the start date after the problem
     # has opened, it's possible for a student to have made a submission
     # even though the problem is unavailable.
     # In this case, we should continue showing that the student completed
     # the submission.
     submission = xblock.create_submission(
         xblock.get_student_item_dict(),
         ('A man must have a code', 'A man must have an umbrella too.'))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response_submitted.html',
         {
             'student_submission':
             create_submission_dict(submission, xblock.prompts),
             'text_response':
             'required',
             'file_upload_response':
             None,
             'file_upload_type':
             None,
             'peer_incomplete':
             True,
             'self_incomplete':
             True,
             'allow_latex':
             False,
             'user_timezone':
             None,
             'user_language':
             None,
             'prompts_type':
             'text',
             'enable_delete_files':
             False,
         })
Example #8
    def test_image_and_text_submission_multiple_files(self, xblock):
        """
        Tests that the leaderboard works as expected when multiple files are uploaded
        """
        file_keys = ['foo', 'bar']
        file_descriptions = ['{}-description'.format(file_key) for file_key in file_keys]

        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        for file_key in file_keys:
            key = Key(bucket, 'submissions_attachments/{}'.format(file_key))
            key.set_contents_from_string("How d'ya do?")

        files_url_and_description = [
            (api.get_download_url(file_key), file_descriptions[idx])
            for idx, file_key in enumerate(file_keys)
        ]

        # Create an image and text submission
        submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_keys'] = file_keys
        submission[u'files_descriptions'] = file_descriptions

        self._create_submissions_and_scores(xblock, [
            (submission, 1)
        ])

        self.maxDiff = None
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [
            {'score': 1, 'files': files_url_and_description, 'submission': create_submission_dict(
                {'answer': submission},
                xblock.prompts
            )}
        ])
Example #9
    def test_image_and_text_submission(self, xblock):
        """
        Tests that text and image submission works as expected
        """
        # Create a file and get the download URL
        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        key = Key(bucket, 'submissions_attachments/foo')
        key.set_contents_from_string("How d'ya do?")

        file_download_url = [(api.get_download_url('foo'), '')]
        # Create an image and text submission
        submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_key'] = 'foo'
        self._create_submissions_and_scores(xblock, [
            (submission, 1)
        ])
        self.maxDiff = None
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [
            {'score': 1, 'files': file_download_url, 'submission': create_submission_dict(
                {'answer': submission},
                xblock.prompts
            )}
        ])
Example #10
 def test_open_unanswered(self, xblock):
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html', {
             'text_response':
             'required',
             'file_upload_response':
             None,
             'file_upload_type':
             None,
             'saved_response':
             create_submission_dict(
                 {'answer': prepare_submission_for_serialization(
                     ("", ""))}, xblock.prompts),
             'save_status':
             'This response has not been saved.',
             'submit_enabled':
             False,
             'submission_due':
             dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
             'allow_latex':
             False,
             'user_timezone':
             None,
             'user_language':
             None,
             'prompts_type':
             'text'
         })
Example #11
    def test_open_graded(self, xblock):
        # Create a submission
        submission = xblock.create_submission(
            xblock.get_student_item_dict(),
            ('A man must have a code', 'A man must have an umbrella too.'))

        # Simulate the user receiving a grade
        xblock.get_workflow_info = Mock(return_value={
            'status': 'done',
            'submission_uuid': submission['uuid']
        })

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response_graded.html', {
                'submission_due':
                dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'student_submission':
                create_submission_dict(submission, xblock.prompts),
                'text_response':
                'required',
                'file_upload_response':
                None,
                'file_upload_type':
                None,
                'allow_latex':
                False,
                'user_timezone':
                None,
                'user_language':
                None,
                'prompts_type':
                'text',
                'enable_delete_files':
                False,
            })
Example #12
 def test_unavailable_submitted(self, xblock):
     # If the instructor changes the start date after the problem
     # has opened, it's possible for a student to have made a submission
     # even though the problem is unavailable.
     # In this case, we should continue showing that the student completed
     # the submission.
     submission = xblock.create_submission(
         xblock.get_student_item_dict(),
         ('A man must have a code', 'A man must have an umbrella too.')
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response_submitted.html',
         {
             'student_submission': create_submission_dict(submission, xblock.prompts),
             'text_response': 'required',
             'file_upload_response': None,
             'file_upload_type': None,
             'peer_incomplete': True,
             'self_incomplete': True,
             'allow_latex': False,
             'user_timezone': None,
             'user_language': None,
             'prompts_type': 'text'
         }
     )
Example #13
    def test_open_saved_response(self, xblock):
        # Save a response
        payload = json.dumps({'submission': ('A man must have a code', 'A man must have an umbrella too.')})
        resp = self.request(xblock, 'save_submission', payload, response_format='json')
        self.assertTrue(resp['success'])

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'saved_response': create_submission_dict({
                    'answer': prepare_submission_for_serialization(
                        ('A man must have a code', 'A man must have an umbrella too.')
                    )
                }, xblock.prompts),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text'
            }
        )
Example #14
 def test_closed_submitted(self, xblock):
     submission = xblock.create_submission(
         xblock.get_student_item_dict(),
         ('A man must have a code', 'A man must have an umbrella too.'))
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response_submitted.html',
         {
             'submission_due':
             dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
             'student_submission':
             create_submission_dict(submission, xblock.prompts),
             'text_response':
             'required',
             'file_upload_response':
             None,
             'file_upload_type':
             None,
             'peer_incomplete':
             False,
             'self_incomplete':
             True,
             'allow_latex':
             False,
             'user_timezone':
             None,
             'user_language':
             None,
             'prompts_type':
             'text',
             'enable_delete_files':
             False,
         })
Example #15
 def test_open_no_deadline(self, xblock):
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html', {
             'text_response':
             'required',
             'file_upload_response':
             None,
             'file_upload_type':
             None,
             'saved_response':
             create_submission_dict(
                 {'answer': prepare_submission_for_serialization(
                     ("", ""))}, xblock.prompts),
             'save_status':
             'This response has not been saved.',
             'submit_enabled':
             False,
             'allow_latex':
             False,
             'user_timezone':
             None,
             'user_language':
             None,
             'prompts_type':
             'text',
             'enable_delete_files':
             True,
         })
Example #16
    def test_open_saved_response_old_format(self, xblock):
        # Save a response
        xblock.prompts = [{'description': 'One prompt.'}]
        xblock.saved_response = "An old format response."
        xblock.has_saved = True

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html', {
                'file_upload_type':
                None,
                'saved_response':
                create_submission_dict(
                    {
                        'answer':
                        prepare_submission_for_serialization(
                            ('An old format response.', ))
                    }, xblock.prompts),
                'save_status':
                'This response has been saved but not submitted.',
                'submit_enabled':
                True,
                'submission_due':
                dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'has_peer':
                True,
                'has_self':
                True,
                'allow_latex':
                False,
            })
Example #17
    def test_open_saved_response_old_format(self, xblock):
        # Save a response
        xblock.prompts = [{'description': 'One prompt.'}]
        xblock.saved_response = "An old format response."
        xblock.has_saved = True

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'saved_response': create_submission_dict({
                    'answer': prepare_submission_for_serialization(
                        ('An old format response.',)
                    )
                }, xblock.prompts),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text'
            }
        )
Example #18
    def test_closed_graded(self, xblock):
        # Create a submission
        submission = xblock.create_submission(
            xblock.get_student_item_dict(),
            ('A man must have a code', 'A man must have an umbrella too.'))

        # Simulate the user receiving a grade
        xblock.get_workflow_info = Mock(return_value={
            'status': 'done',
            'submission_uuid': submission['uuid']
        })

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response_graded.html', {
                'submission_due':
                dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
                'student_submission':
                create_submission_dict(submission, xblock.prompts),
                'file_upload_type':
                None,
                'has_peer':
                False,
                'has_self':
                True,
                'allow_latex':
                False,
            })
Example #19
    def get_student_submission_context(self, student_username, submission):
        """
        Get a context dict for rendering a student submission and associated rubric (for staff grading).
        Includes submission (populating submitted file information if relevant), rubric_criteria,
        and student_username.

        Args:
            student_username (unicode): The username of the student to report.
            submission (object): A submission, as returned by the submission_api.

        Returns:
            A context dict for rendering a student submission and associated rubric (for staff grading).
        """
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))  # localize for staff user

        context = {
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'student_username': student_username,
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language']
        }

        if submission:
            context["file_upload_type"] = self.file_upload_type
            context["staff_file_urls"] = self.get_download_urls_from_submission(submission)

        if self.rubric_feedback_prompt is not None:
            context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        if self.rubric_feedback_default_text is not None:
            context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

        context['xblock_id'] = self.get_xblock_id()
        return context
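A hedged sketch of how a staff-grading handler might use this context builder; the caller shown here is an assumption, not part of the excerpt:

    # Hypothetical caller (assumed names): look up a learner's submission, build the context
    # above, and hand it to the block's template renderer.
    from submissions import api as submission_api

    submission = submission_api.get_submission(submission_uuid)  # submission_uuid assumed in scope
    context = self.get_student_submission_context(student_username, submission)
    html = self.render_assessment('openassessmentblock/staff_area/oa_student_info.html', context)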
Example #20
    def test_peer_assessment_available(self, xblock):
        # Make a submission, so we get to peer assessment
        xblock.create_submission(xblock.get_student_item_dict(), (u"𝒀?", "?𝒔. 𝑴𝒂𝒌𝒆 𝒕𝒉𝒆𝒔𝒆 𝒚𝒐𝒖𝒓 𝒑𝒓𝒊𝒎𝒂𝒓𝒚 𝒂𝒄𝒕𝒊𝒐𝒏 𝒊𝒕𝒆𝒎𝒔."))

        # Create a submission from another user so we have something to assess
        other_student = copy.deepcopy(xblock.get_student_item_dict())
        other_student["student_id"] = "Tyler"
        submission = xblock.create_submission(
            other_student,
            (
                u"ησω, αη¢ιєηт ρєσρℓє ƒσυη∂ тнєιя ¢ℓσтнєѕ ﻭσт ¢ℓєαηєя",
                u" ιƒ тнєу ωαѕнє∂ тнєм αт α ¢єятαιη ѕρσт ιη тнє яινєя.",
            ),
        )

        # We should pull the other student's submission
        expected_context = {
            "graded": 0,
            "rubric_criteria": xblock.rubric_criteria,
            "must_grade": 5,
            "review_num": 1,
            "peer_submission": create_submission_dict(submission, xblock.prompts),
            "file_upload_type": None,
            "peer_file_url": "",
            "submit_button_text": "submit your assessment & move to response #2",
            "allow_latex": False,
        }
        self._assert_path_and_context(
            xblock, "openassessmentblock/peer/oa_peer_assessment.html", expected_context, workflow_status="peer"
        )
Example #21
    def test_image_and_text_submission_multiple_files(self, xblock):
        """
        Tests that the leaderboard works as expected when multiple files are uploaded
        """
        file_keys = ['foo', 'bar']
        file_descriptions = ['{}-description'.format(file_key) for file_key in file_keys]
        file_names = ['{}-file_name'.format(file_key) for file_key in file_keys]
        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        for file_key in file_keys:
            key = Key(bucket, 'submissions_attachments/{}'.format(file_key))
            key.set_contents_from_string("How d'ya do?")

        files_url_and_description = [
            (api.get_download_url(file_key), file_descriptions[idx], file_names[idx], False)
            for idx, file_key in enumerate(file_keys)
        ]

        # Create an image and text submission
        submission = prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2'))
        submission[u'file_keys'] = file_keys
        submission[u'files_descriptions'] = file_descriptions
        submission[u'files_name'] = file_names
        self._create_submissions_and_scores(xblock, [
            (submission, 1)
        ])
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [
            {'score': 1, 'files': files_url_and_description, 'submission': create_submission_dict(
                {'answer': submission},
                xblock.prompts
            )}
        ])
Example #22
    def get_student_submission_context(self, student_username, submission):
        """
        Get a context dict for rendering a student submission and associated rubric (for staff grading).
        Includes submission (populating submitted file information if relevant), rubric_criteria,
        and student_username.

        Args:
            student_username (unicode): The username of the student to report.
            submission (object): A submission, as returned by the submission_api.

        Returns:
            A context dict for rendering a student submission and associated rubric (for staff grading).
        """
        context = {
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'student_username': student_username,
        }

        if submission:
            context["file_upload_type"] = self.file_upload_type
            context["staff_file_url"] = self.get_download_url_from_submission(submission)

        if self.rubric_feedback_prompt is not None:
            context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        if self.rubric_feedback_default_text is not None:
            context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

        return context
Example #23
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            template_path (string), tuple of context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as sub_api

        # Retrieve top scores from the submissions API
        # Since this uses the read-replica and caches the results,
        # there will be some delay in the request latency.
        scores = sub_api.get_top_submissions(student_item_dict['course_id'],
                                             student_item_dict['item_id'],
                                             student_item_dict['item_type'],
                                             self.leaderboard_show)
        for score in scores:
            score['files'] = []
            if 'file_keys' in score['content']:
                file_keys = score['content'].get('file_keys', [])
                descriptions = score['content'].get('files_descriptions', [])
                for idx, key in enumerate(file_keys):
                    file_download_url = self._get_file_download_url(key)
                    if file_download_url:
                        file_description = descriptions[idx] if idx < len(
                            descriptions) else ''
                        score['files'].append(
                            (file_download_url, file_description))

            elif 'file_key' in score['content']:
                file_download_url = self._get_file_download_url(
                    score['content']['file_key'])
                if file_download_url:
                    score['files'].append((file_download_url, ''))
            if 'text' in score['content'] or 'parts' in score['content']:
                submission = {'answer': score.pop('content')}
                score['submission'] = create_submission_dict(
                    submission, self.prompts)
            elif isinstance(score['content'], six.string_types):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score['submission'] = ""

            score.pop('content', None)

        context = {
            'topscores': scores,
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            'file_upload_type': self.file_upload_type,
            'xblock_id': self.get_xblock_id()
        }

        return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
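For context, a hedged sketch of the caller side: the (template path, context) pair returned above is rendered into the leaderboard fragment. The rendering call is an assumption, not part of the excerpt:

    # Hypothetical caller (assumed): render the template path returned above with its context.
    path, context = self.render_leaderboard_complete(self.get_student_item_dict())
    html = self.render_assessment(path, context)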
Example #24
    def test_turbo_grade_past_due(self, xblock, workflow_status):
        xblock.create_submission(
            xblock.get_student_item_dict(),
            (u"ı ƃoʇ ʇɥıs pɹǝss ɐʇ ɐ ʇɥɹıɟʇ sʇoɹǝ ɟoɹ ouǝ poןןɐɹ.",
             u"∀up ʇɥᴉs ɔɥɐᴉɹ ɟoɹ ʇʍo pollɐɹs˙"))

        # Try to continue grading after the due date has passed
        # Continued grading should still be available,
        # but since there are no other submissions, we're in the waiting state.
        expected_context = {
            'graded': 0,
            'must_grade': 5,
            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            'review_num': 1,
            'rubric_criteria': xblock.rubric_criteria,
            'submit_button_text':
            'Submit your assessment & review another response',
            'allow_latex': False,
        }
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
            expected_context,
            continue_grading=True,
            workflow_status=workflow_status,
            graded_enough=True,
            was_graded_enough=True,
        )

        # Create a submission from another student.
        # We should now be able to continue grading that submission
        other_student_item = copy.deepcopy(xblock.get_student_item_dict())
        other_student_item['student_id'] = "Tyler"
        submission = xblock.create_submission(
            other_student_item, (u"Other submission 1", u"Other submission 2"))

        expected_context = {
            'graded': 0,
            'must_grade': 5,
            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            'peer_submission': create_submission_dict(submission,
                                                      xblock.prompts),
            'file_upload_type': None,
            'peer_file_url': '',
            'review_num': 1,
            'rubric_criteria': xblock.rubric_criteria,
            'submit_button_text':
            'Submit your assessment & review another response',
            'allow_latex': False,
        }
        self._assert_path_and_context(
            xblock,
            'openassessmentblock/peer/oa_peer_turbo_mode.html',
            expected_context,
            continue_grading=True,
            workflow_status='done',
            graded_enough=True,
            was_graded_enough=True,
        )
Example #25
 def _parse_answer_dict(self, answer):
     """
     Helper to parse answer as a fully-qualified dict.
     """
     parts = answer.get('parts', [])
     if parts and isinstance(parts[0], dict):
         if isinstance(parts[0].get('text'), six.string_types):
             return create_submission_dict({'answer': answer}, self.prompts)
Example #26
 def _parse_answer_dict(self, answer):
     """
     Helper to parse answer as a fully-qualified dict.
     """
     parts = answer.get('parts', [])
     if parts and isinstance(parts[0], dict):
         if isinstance(parts[0].get('text'), basestring):
             return create_submission_dict({'answer': answer}, self.prompts)
Example #27
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            template_path (string), tuple of context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as sub_api

        # Retrieve top scores from the submissions API
        # Since this uses the read-replica and caches the results,
        # there will be some delay in the request latency.
        scores = sub_api.get_top_submissions(student_item_dict['course_id'],
                                             student_item_dict['item_id'],
                                             student_item_dict['item_type'],
                                             self.leaderboard_show)
        for score in scores:
            raw_score_content_answer = score['content']
            answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(
                raw_score_content_answer)
            score['files'] = []
            for uploaded_file in answer.get_file_uploads(missing_blank=True):
                file_download_url = self._get_file_download_url(
                    uploaded_file.key)
                if file_download_url:
                    score['files'].append(
                        file_upload_api.FileDescriptor(
                            download_url=file_download_url,
                            description=uploaded_file.description,
                            name=uploaded_file.name,
                            size=uploaded_file.size,
                            show_delete_button=False)._asdict())
            if 'text' in score['content'] or 'parts' in score['content']:
                submission = {'answer': score.pop('content')}
                score['submission'] = create_submission_dict(
                    submission, self.prompts)
            elif isinstance(score['content'], str):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score['submission'] = ""

            score.pop('content', None)

        context = {
            'topscores': scores,
            'allow_multiple_files': self.allow_multiple_files,
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            'file_upload_type': self.file_upload_type,
            'xblock_id': self.get_xblock_id()
        }

        return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
Example #28
    def test_show_submissions(self, xblock):
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 1 part 1', 'test answer 1 part 2')), 1),
            (prepare_submission_for_serialization(('test answer 2 part 1', 'test answer 2 part 2')), 2)
        ])
        self._assert_scores(xblock, [
            {'score': 2, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )},
            {'score': 1, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 1 part 1', u'test answer 1 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(('test answer 3 part 1', 'test answer 3 part 2')), 0),
            (prepare_submission_for_serialization(('test answer 4 part 1', 'test answer 4 part 2')), 10),
            (prepare_submission_for_serialization(('test answer 5 part 1', 'test answer 5 part 2')), 3),
        ])
        self._assert_scores(xblock, [
            {'score': 10, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 4 part 1', u'test answer 4 part 2'))},
                xblock.prompts
            )},
            {'score': 3, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 5 part 1', u'test answer 5 part 2'))},
                xblock.prompts
            )},
            {'score': 2, 'files': [], 'submission': create_submission_dict(
                {'answer': prepare_submission_for_serialization((u'test answer 2 part 1', u'test answer 2 part 2'))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #29
    def test_show_submissions(self, xblock):
        # Create some submissions (but fewer than the max that can be shown)
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2")), 1),
            (prepare_submission_for_serialization(("test answer 2 part 1", "test answer 2 part 2")), 2)
        ])
        self._assert_scores(xblock, [
            {"score": 2, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )},
            {"score": 1, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 1 part 1", u"test answer 1 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)

        # Since leaderboard results are cached, we need to clear
        # the cache in order to see the new scores.
        cache.clear()

        # Create more submissions than the max
        self._create_submissions_and_scores(xblock, [
            (prepare_submission_for_serialization(("test answer 3 part 1", "test answer 3 part 2")), 0),
            (prepare_submission_for_serialization(("test answer 4 part 1", "test answer 4 part 2")), 10),
            (prepare_submission_for_serialization(("test answer 5 part 1", "test answer 5 part 2")), 3),
        ])
        self._assert_scores(xblock, [
            {"score": 10, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 4 part 1", u"test answer 4 part 2"))},
                xblock.prompts
            )},
            {"score": 3, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 5 part 1", u"test answer 5 part 2"))},
                xblock.prompts
            )},
            {"score": 2, "submission": create_submission_dict(
                {"answer": prepare_submission_for_serialization((u"test answer 2 part 1", u"test answer 2 part 2"))},
                xblock.prompts
            )}
        ])
        self._assert_leaderboard_visible(xblock, True)
Example #30
 def _parse_answer_string(self, answer):
     """
     Helper to parse answer as a plain string
     """
     return create_submission_dict(
         {'answer': {
             'parts': [{
                 'text': answer
             }]
         }}, self.prompts)
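The two _parse_answer_* helpers above cover the dict-based and legacy plain-string answer formats; a rough sketch of the kind of dispatcher that would sit in front of them (assumed, not shown in these excerpts):

    # Hypothetical dispatcher (assumed): route a raw saved answer to the matching parser.
    def _parse_saved_answer(self, raw_answer):
        if isinstance(raw_answer, dict):
            return self._parse_answer_dict(raw_answer)
        return self._parse_answer_string(raw_answer)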
Example #31
    def test_turbo_grade_past_due(self, xblock, workflow_status):
        xblock.create_submission(
            xblock.get_student_item_dict(),
            (u"ı ƃoʇ ʇɥıs pɹǝss ɐʇ ɐ ʇɥɹıɟʇ sʇoɹǝ ɟoɹ ouǝ poןןɐɹ.", u"∀up ʇɥᴉs ɔɥɐᴉɹ ɟoɹ ʇʍo pollɐɹs˙"),
        )

        # Try to continue grading after the due date has passed
        # Continued grading should still be available,
        # but since there are no other submissions, we're in the waiting state.
        expected_context = {
            "graded": 0,
            "must_grade": 5,
            "peer_due": dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            "review_num": 1,
            "rubric_criteria": xblock.rubric_criteria,
            "submit_button_text": "Submit your assessment & review another response",
            "allow_latex": False,
        }
        self._assert_path_and_context(
            xblock,
            "openassessmentblock/peer/oa_peer_turbo_mode_waiting.html",
            expected_context,
            continue_grading=True,
            workflow_status=workflow_status,
            graded_enough=True,
            was_graded_enough=True,
        )

        # Create a submission from another student.
        # We should now be able to continue grading that submission
        other_student_item = copy.deepcopy(xblock.get_student_item_dict())
        other_student_item["student_id"] = "Tyler"
        submission = xblock.create_submission(other_student_item, (u"Other submission 1", u"Other submission 2"))

        expected_context = {
            "graded": 0,
            "must_grade": 5,
            "peer_due": dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            "peer_submission": create_submission_dict(submission, xblock.prompts),
            "file_upload_type": None,
            "peer_file_url": "",
            "review_num": 1,
            "rubric_criteria": xblock.rubric_criteria,
            "submit_button_text": "Submit your assessment & review another response",
            "allow_latex": False,
        }
        self._assert_path_and_context(
            xblock,
            "openassessmentblock/peer/oa_peer_turbo_mode.html",
            expected_context,
            continue_grading=True,
            workflow_status="done",
            graded_enough=True,
            was_graded_enough=True,
        )
Example #32
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            template_path (string), tuple of context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as sub_api

        # Retrieve top scores from the submissions API
        # Since this uses the read-replica and caches the results,
        # there will be some delay in the request latency.
        scores = sub_api.get_top_submissions(
            student_item_dict['course_id'],
            student_item_dict['item_id'],
            student_item_dict['item_type'],
            self.leaderboard_show
        )
        for score in scores:
            score['files'] = []
            if 'file_keys' in score['content']:
                file_keys = score['content'].get('file_keys', [])
                descriptions = score['content'].get('files_descriptions', [])
                for idx, key in enumerate(file_keys):
                    file_download_url = self._get_file_download_url(key)
                    if file_download_url:
                        file_description = descriptions[idx] if idx < len(descriptions) else ''
                        score['files'].append((file_download_url, file_description))

            elif 'file_key' in score['content']:
                file_download_url = self._get_file_download_url(score['content']['file_key'])
                if file_download_url:
                    score['files'].append((file_download_url, ''))
            if 'text' in score['content'] or 'parts' in score['content']:
                submission = {'answer': score.pop('content')}
                score['submission'] = create_submission_dict(submission, self.prompts)
            elif isinstance(score['content'], basestring):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score['submission'] = ""

            score.pop('content', None)

        context = {'topscores': scores,
                   'allow_latex': self.allow_latex,
                   'prompts_type': self.prompts_type,
                   'file_upload_type': self.file_upload_type,
                   'xblock_id': self.get_xblock_id()}

        return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
Example #33
    def get_student_submission_context(self, student_username, submission):
        """
        Get a context dict for rendering a student submission and associated rubric (for staff grading).
        Includes submission (populating submitted file information if relevant), rubric_criteria,
        and student_username.

        Args:
            student_username (unicode): The username of the student to report.
            submission (object): A submission, as returned by the submission_api.

        Returns:
            A context dict for rendering a student submission and associated rubric (for staff grading).
        """
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))  # localize for staff user

        context = {
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'student_username': student_username,
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language'],
            "prompts_type": self.prompts_type,
            "is_team_assignment": self.is_team_assignment(),
        }

        if submission:
            context["file_upload_type"] = self.file_upload_type
            context["staff_file_urls"] = self.get_download_urls_from_submission(submission)
            if self.should_use_user_state(context["staff_file_urls"]):
                logger.info(u"Checking student module for upload info for user: {username} in block: {block}".format(
                    username=student_username,
                    block=str(self.location)
                ))
                context['staff_file_urls'] = self.get_files_info_from_user_state(student_username)

                # This particular check is for the cases affected by the incorrect filenum bug
                # and gets all the upload URLs if feature enabled.
                if self.should_get_all_files_urls(context['staff_file_urls']):
                    logger.info(
                        u"Retrieving all uploaded files by user:{username} in block:{block}".format(
                            username=student_username,
                            block=str(self.location)
                        ))
                    context['staff_file_urls'] = self.get_all_upload_urls_for_user(student_username)

        if self.rubric_feedback_prompt is not None:
            context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        if self.rubric_feedback_default_text is not None:
            context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

        context['xblock_id'] = self.get_xblock_id()
        return context
Example #34
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            template_path (string), tuple of context (dict)
        """

        # Retrieve top scores from the submissions API
        # Since this uses the read-replica and caches the results,
        # there will be some delay in the request latency.
        scores = sub_api.get_top_submissions(student_item_dict['course_id'],
                                             student_item_dict['item_id'],
                                             student_item_dict['item_type'],
                                             self.leaderboard_show)
        for score in scores:
            score['files'] = []
            if 'file_keys' in score['content']:
                for key in score['content']['file_keys']:
                    url = ''
                    try:
                        url = file_upload_api.get_download_url(key)
                    except FileUploadError:
                        pass
                    if url:
                        score['files'].append(url)
            elif 'file_key' in score['content']:
                score['files'].append(
                    file_upload_api.get_download_url(
                        score['content']['file_key']))
            if 'text' in score['content'] or 'parts' in score['content']:
                submission = {'answer': score.pop('content')}
                score['submission'] = create_submission_dict(
                    submission, self.prompts)
            elif isinstance(score['content'], basestring):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score['submission'] = ""

            score.pop('content', None)

        context = {
            'topscores': scores,
            'allow_latex': self.allow_latex,
            'file_upload_type': self.file_upload_type,
            'xblock_id': self.get_xblock_id()
        }

        return 'openassessmentblock/leaderboard/oa_leaderboard_show.html', context
Example #35
 def test_open_no_deadline(self, xblock):
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'file_upload_type': None,
             'saved_response': create_submission_dict({
                 'answer': prepare_submission_for_serialization(
                     ("", "")
                 )
             }, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'allow_latex': False,
         }
     )
Example #36
 def test_open_unanswered(self, xblock):
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html',
         {
             'file_upload_type': None,
             'saved_response': create_submission_dict({
                 'answer': prepare_submission_for_serialization(
                     ("", "")
                 )
             }, xblock.prompts),
             'save_status': 'This response has not been saved.',
             'submit_enabled': False,
             'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
             'allow_latex': False,
         }
     )
Example #37
 def test_closed_submitted(self, xblock):
     submission = xblock.create_submission(
         xblock.get_student_item_dict(),
         ('A man must have a code', 'A man must have an umbrella too.')
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response_submitted.html',
         {
             'submission_due': dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
             'student_submission': create_submission_dict(submission, xblock.prompts),
             'file_upload_type': None,
             'has_peer': False,
             'has_self': True,
             'allow_latex': False,
         }
     )
Example #38
    def test_image_and_text_submission_multiple_files(self, xblock):
        """
        Tests that the leaderboard works as expected when multiple files are uploaded
        """
        file_keys = ['foo', 'bar']
        file_descriptions = [
            '{}-description'.format(file_key) for file_key in file_keys
        ]
        files_names = [
            '{}-file_name'.format(file_key) for file_key in file_keys
        ]
        conn = boto3.client("s3")
        conn.create_bucket(Bucket="mybucket")
        for file_key in file_keys:
            conn.put_object(
                Bucket="mybucket",
                Key="submissions_attachments/{}".format(file_key),
                Body=b"How d'ya do?",
            )

        files_url_and_description = [{
            'download_url': api.get_download_url(file_key),
            'description': file_descriptions[idx],
            'name': files_names[idx],
            'show_delete_button': False
        } for idx, file_key in enumerate(file_keys)]

        # Create an image and text submission
        submission = prepare_submission_for_serialization(
            ('test answer 1 part 1', 'test answer 1 part 2'))
        submission['file_keys'] = file_keys
        submission['files_descriptions'] = file_descriptions
        submission['files_names'] = files_names
        submission['files_sizes'] = []
        self._create_submissions_and_scores(xblock, [(submission, 1)])
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [{
            'score':
            1,
            'files':
            files_url_and_description,
            'submission':
            create_submission_dict({'answer': submission}, xblock.prompts)
        }])
Example #39
 def test_open_no_deadline(self, xblock):
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response.html', {
             'allow_file_upload':
             False,
             'saved_response':
             create_submission_dict(
                 {'answer': prepare_submission_for_serialization(
                     ("", ""))}, xblock.prompts),
             'save_status':
             'This response has not been saved.',
             'submit_enabled':
             False,
             'has_peer':
             True,
             'has_self':
             False,
             'allow_latex':
             False,
         })
Example #40
 def test_open_submitted(self, xblock):
     submission = xblock.create_submission(
         xblock.get_student_item_dict(),
         ('A man must have a code', 'A man must have an umbrella too.')
     )
     self._assert_path_and_context(
         xblock, 'openassessmentblock/response/oa_response_submitted.html',
         {
             'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
             'student_submission': create_submission_dict(submission, xblock.prompts),
             'text_response': 'required',
             'file_upload_response': None,
             'file_upload_type': None,
             'peer_incomplete': True,
             'self_incomplete': True,
             'allow_latex': False,
             'user_timezone': None,
             'user_language': None
         }
     )
Example #41
    def test_closed_submitted(self, xblock):
        submission = xblock.create_submission(
            xblock.get_student_item_dict(),
            ('A man must have a code', 'A man must have an umbrella too.')
        )
        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response_submitted.html',
            {
                'submission_due': dt.datetime(2014, 4, 5).replace(tzinfo=pytz.utc),
                'student_submission': create_submission_dict(submission, xblock.prompts),
                'file_upload_type': None,
                'has_peer': False,
                'has_self': True,
                'allow_latex': False,
            }
        )
Ejemplo n.º 42
0
    def test_image_and_text_submission(self, xblock):
        # Create a file and get the download URL
        conn = boto.connect_s3()
        bucket = conn.create_bucket('mybucket')
        key = Key(bucket)
        key.key = "submissions_attachments/foo"
        key.set_contents_from_string("How d'ya do?")
        download_url = api.get_download_url("foo")
        # Create an image and text submission
        submission = prepare_submission_for_serialization(("test answer 1 part 1", "test answer 1 part 2"))
        submission[u"file_key"] = "foo"
        self._create_submissions_and_scores(xblock, [
            (submission, 1)
        ])
        self.maxDiff = None
        # Expect that we retrieve both the text and the download URL for the file
        self._assert_scores(xblock, [
            {"file": download_url, "score": 1, "submission": create_submission_dict(
                {"answer": submission},
                xblock.prompts
            )}
        ])
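
The fixture above uses the legacy boto API. A roughly equivalent setup with boto3, in the style of the newer tests earlier in this listing, is sketched below; it assumes the same moto-mocked S3 backend and the same file upload api module.

# Sketch of the same S3 fixture with boto3 instead of legacy boto
# (assumes moto is mocking S3, as in the newer tests above).
import boto3

conn = boto3.client("s3")
conn.create_bucket(Bucket="mybucket")
conn.put_object(
    Bucket="mybucket",
    Key="submissions_attachments/foo",
    Body=b"How d'ya do?",
)
download_url = api.get_download_url("foo")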
Ejemplo n.º 43
0
    def test_peer_assessment_available(self, xblock):
        # Make a submission, so we get to peer assessment
        xblock.create_submission(
            xblock.get_student_item_dict(),
            (u"𝒀?", "?𝒔. 𝑴𝒂𝒌𝒆 𝒕𝒉𝒆𝒔𝒆 𝒚𝒐𝒖𝒓 𝒑𝒓𝒊𝒎𝒂𝒓𝒚 𝒂𝒄𝒕𝒊𝒐𝒏 𝒊𝒕𝒆𝒎𝒔."),
        )

        # Create a submission from another user so we have something to assess
        other_student = copy.deepcopy(xblock.get_student_item_dict())
        other_student['student_id'] = 'Tyler'
        submission = xblock.create_submission(
            other_student,
            (
                u"ησω, αη¢ιєηт ρєσρℓє ƒσυη∂ тнєιя ¢ℓσтнєѕ ﻭσт ¢ℓєαηєя",
                u" ιƒ тнєу ωαѕнє∂ тнєм αт α ¢єятαιη ѕρσт ιη тнє яινєя."
            )
        )

        # We should pull the other student's submission
        expected_context = {
            'graded': 0,
            'rubric_criteria': xblock.rubric_criteria,
            'must_grade': 5,
            'review_num': 1,
            'peer_submission': create_submission_dict(submission, xblock.prompts),
            'file_upload_type': None,
            'peer_file_urls': [],
            'submit_button_text': 'submit your assessment & move to response #2',
            'allow_latex': False,
            'track_changes': '',
            'user_timezone': pytz.utc,
            'user_language': 'en'
        }
        self._assert_path_and_context(
            xblock, 'openassessmentblock/peer/oa_peer_assessment.html',
            expected_context,
            workflow_status='peer',
        )
Ejemplo n.º 44
0
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            A tuple of the template path (string) and the context (dict).
        """

        # Retrieve top scores from the submissions API
        # Since this uses the read replica and caches the results,
        # recently created scores may take some time to appear here.
        scores = sub_api.get_top_submissions(
            student_item_dict['course_id'],
            student_item_dict['item_id'],
            student_item_dict['item_type'],
            self.leaderboard_show
        )
        for score in scores:
            if 'file_key' in score['content']:
                score['file'] = file_upload_api.get_download_url(score['content']['file_key'])
            if 'text' in score['content'] or 'parts' in score['content']:
                submission = {'answer': score.pop('content')}
                score['submission'] = create_submission_dict(submission, self.prompts)
            elif isinstance(score['content'], basestring):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score['submission'] = ""

            score.pop('content', None)

        context = {
            'topscores': scores,
            'allow_latex': self.allow_latex,
        }
        return ('openassessmentblock/leaderboard/oa_leaderboard_show.html', context)
Ejemplo n.º 45
0
    def render_leaderboard_complete(self, student_item_dict):
        """
        Render the leaderboard complete state.

        Args:
            student_item_dict (dict): The student item

        Returns:
            A tuple of the template path (string) and the context (dict).
        """

        # Retrieve top scores from the submissions API
        # Since this uses the read replica and caches the results,
        # recently created scores may take some time to appear here.
        scores = sub_api.get_top_submissions(
            student_item_dict["course_id"],
            student_item_dict["item_id"],
            student_item_dict["item_type"],
            self.leaderboard_show,
        )
        for score in scores:
            if "file_key" in score["content"]:
                score["file"] = file_upload_api.get_download_url(score["content"]["file_key"])
            if "text" in score["content"] or "parts" in score["content"]:
                submission = {"answer": score.pop("content")}
                score["submission"] = create_submission_dict(submission, self.prompts)
            elif isinstance(score["content"], basestring):
                pass
            # Currently, we do not handle non-text submissions.
            else:
                score["submission"] = ""

            score.pop("content", None)

        context = {"topscores": scores, "allow_latex": self.allow_latex}

        return "openassessmentblock/leaderboard/oa_leaderboard_show.html", context
Ejemplo n.º 46
0
    def get_student_submission_context(self, student_username, submission):
        """
        Get a context dict for rendering a student submission and associated rubric (for staff grading).
        Includes submission (populating submitted file information if relevant), rubric_criteria,
        and student_username.

        Args:
            student_username (unicode): The username of the student to report.
            submission (object): A submission, as returned by the submission_api.

        Returns:
            A context dict for rendering a student submission and associated rubric (for staff grading).
        """
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))  # localize for staff user

        context = {
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'student_username': student_username,
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language'],
            "prompts_type": self.prompts_type,
        }

        if submission:
            context["file_upload_type"] = self.file_upload_type
            context["staff_file_urls"] = self.get_download_urls_from_submission(submission)

        if self.rubric_feedback_prompt is not None:
            context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

        if self.rubric_feedback_default_text is not None:
            context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

        context['xblock_id'] = self.get_xblock_id()
        return context
Ejemplo n.º 47
0
    def test_open_graded(self, xblock):
        # Create a submission
        submission = xblock.create_submission(
            xblock.get_student_item_dict(),
            ('A man must have a code', 'A man must have an umbrella too.')
        )

        # Simulate the user receiving a grade
        xblock.get_workflow_info = Mock(return_value={
            'status': 'done',
            'submission_uuid': submission['uuid']
        })

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response_graded.html',
            {
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'student_submission': create_submission_dict(submission, xblock.prompts),
                'allow_file_upload': False,
                'has_peer': True,
                'has_self': True,
                'allow_latex': False,
            }
        )
Ejemplo n.º 48
0
    def training_path_and_context(self):
        """
        Return the template path and context used to render the student training step.

        Returns:
            tuple of `(path, context)` where `path` is the path to the template and
                `context` is a dict.

        """
        # Retrieve the status of the workflow.
        # If no submissions have been created yet, the status will be None.
        workflow_status = self.get_workflow_info().get('status')
        problem_closed, reason, start_date, due_date = self.is_closed(step="student-training")

        context = {}
        template = 'openassessmentblock/student_training/student_training_unavailable.html'

        # add allow_latex field to the context
        context['allow_latex'] = self.allow_latex

        if not workflow_status:
            return template, context

        # If the student has completed the training step, then show that the step is complete.
        # We put this condition first so that if a student has completed the step, it *always*
        # shows as complete.
        # We're assuming here that the training step always precedes the other assessment steps
        # (peer/self) -- we may need to make this more flexible later.
        if workflow_status == 'cancelled':
            template = 'openassessmentblock/student_training/student_training_cancelled.html'
        elif workflow_status and workflow_status != "training":
            template = 'openassessmentblock/student_training/student_training_complete.html'

        # If the problem is closed, then do not allow students to access the training step
        elif problem_closed and reason == 'start':
            context['training_start'] = start_date
            template = 'openassessmentblock/student_training/student_training_unavailable.html'
        elif problem_closed and reason == 'due':
            context['training_due'] = due_date
            template = 'openassessmentblock/student_training/student_training_closed.html'

        # If we're on the training step, show the student an example
        # We do this last so we can avoid querying the student training API if possible.
        else:
            training_module = self.get_assessment_module('student-training')
            if not training_module:
                return template, context

            if due_date < DISTANT_FUTURE:
                context['training_due'] = due_date

            # Report progress in the student training workflow (completed X out of Y)
            context['training_num_available'] = len(training_module["examples"])
            context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
            context['training_num_current'] = context['training_num_completed'] + 1

            # Retrieve the example essay for the student to submit
            # This will contain the essay text, the rubric, and the options the instructor selected.
            examples = convert_training_examples_list_to_dict(training_module["examples"])
            example = student_training.get_training_example(
                self.submission_uuid,
                {
                    'prompt': self.prompt,
                    'criteria': self.rubric_criteria_with_labels
                },
                examples
            )
            if example:
                context['training_essay'] = create_submission_dict({'answer': example['answer']}, self.prompts)
                context['training_rubric'] = {
                    'criteria': example['rubric']['criteria'],
                    'points_possible': example['rubric']['points_possible']
                }
                template = 'openassessmentblock/student_training/student_training.html'
            else:
                logger.error(
                    "No training example was returned from the API for student "
                    "with Submission UUID {}".format(self.submission_uuid)
                )
                template = "openassessmentblock/student_training/student_training_error.html"

        return template, context
Ejemplo n.º 49
0
    def training_path_and_context(self):
        """
        Return the template path and context used to render the student training step.

        Returns:
            tuple of `(path, context)` where `path` is the path to the template and
                `context` is a dict.

        """
        # Retrieve the status of the workflow.
        # If no submissions have been created yet, the status will be None.
        workflow_status = self.get_workflow_info().get('status')
        problem_closed, reason, start_date, due_date = self.is_closed(
            step="student-training")

        context = {}
        template = 'openassessmentblock/student_training/student_training_unavailable.html'

        # add allow_latex field to the context
        context['allow_latex'] = self.allow_latex

        if not workflow_status:
            return template, context

        # If the student has completed the training step, then show that the step is complete.
        # We put this condition first so that if a student has completed the step, it *always*
        # shows as complete.
        # We're assuming here that the training step always precedes the other assessment steps
        # (peer/self) -- we may need to make this more flexible later.
        if workflow_status == 'cancelled':
            template = 'openassessmentblock/student_training/student_training_cancelled.html'
        elif workflow_status and workflow_status != "training":
            template = 'openassessmentblock/student_training/student_training_complete.html'

        # If the problem is closed, then do not allow students to access the training step
        elif problem_closed and reason == 'start':
            context['training_start'] = start_date
            template = 'openassessmentblock/student_training/student_training_unavailable.html'
        elif problem_closed and reason == 'due':
            context['training_due'] = due_date
            template = 'openassessmentblock/student_training/student_training_closed.html'

        # If we're on the training step, show the student an example
        # We do this last so we can avoid querying the student training API if possible.
        else:
            training_module = self.get_assessment_module('student-training')
            if not training_module:
                return template, context

            if due_date < DISTANT_FUTURE:
                context['training_due'] = due_date

            # Report progress in the student training workflow (completed X out of Y)
            context['training_num_available'] = len(training_module["examples"])
            context['training_num_completed'] = student_training.get_num_completed(self.submission_uuid)
            context['training_num_current'] = context['training_num_completed'] + 1

            # Retrieve the example essay for the student to submit
            # This will contain the essay text, the rubric, and the options the instructor selected.
            examples = convert_training_examples_list_to_dict(
                training_module["examples"])
            example = student_training.get_training_example(
                self.submission_uuid, {
                    'prompt': self.prompt,
                    'criteria': self.rubric_criteria_with_labels
                }, examples)
            if example:
                context['training_essay'] = create_submission_dict(
                    {'answer': example['answer']}, self.prompts)
                context['training_rubric'] = {
                    'criteria': example['rubric']['criteria'],
                    'points_possible': example['rubric']['points_possible']
                }
                template = 'openassessmentblock/student_training/student_training.html'
            else:
                logger.error(
                    "No training example was returned from the API for student "
                    "with Submission UUID {}".format(self.submission_uuid))
                template = "openassessmentblock/student_training/student_training_error.html"

        return template, context
Ejemplo n.º 50
0
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_username (unicode): The username of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        anonymous_user_id = None
        submissions = None
        student_item = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['file_url'] = file_api.get_download_url(file_key)
                except file_exceptions.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page.  "
                        u"The learner username is '{student_username}', and the file key is {file_key}"
                    ).format(student_username=student_username, file_key=file_key)
                    logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
        if workflow_cancellation:
            workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
            'submission': create_submission_dict(submission, self.prompts) if submission else None,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_area/student_info.html'
        return path, context
Ejemplo n.º 51
0
    def test_create_submission_dict(self, input_submission, input_prompts, output):
        self.assertEqual(create_submission_dict(input_submission, input_prompts), output)
Ejemplo n.º 52
0
    def _parse_answer_string(self, answer):
        """
        Helper to parse answer as a plain string.
        """
        return create_submission_dict({'answer': {'parts': [{'text': answer}]}}, self.prompts)
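
For orientation across these examples: create_submission_dict is consistently called with a dict whose 'answer' contains 'parts', plus the block's prompts. The sketch below illustrates the shape the helper is assumed to produce, pairing each answer part with its prompt; it is an illustration of the assumed contract, not the library's actual implementation.

# Rough sketch of the assumed contract of create_submission_dict: each answer
# part gets paired with the corresponding prompt. Illustrative only; not the
# library's real implementation.
import copy

def create_submission_dict_sketch(submission, prompts):
    result = copy.deepcopy(submission)
    parts = result['answer'].get('parts', [])
    result['answer']['parts'] = [
        {'prompt': prompt, 'text': part.get('text', '')}
        for prompt, part in zip(prompts, parts)
    ]
    return result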
Ejemplo n.º 53
0
    def test_turbo_grade_past_due(self, xblock, workflow_status):
        xblock.create_submission(
            xblock.get_student_item_dict(),
            (
                u"ı ƃoʇ ʇɥıs pɹǝss ɐʇ ɐ ʇɥɹıɟʇ sʇoɹǝ ɟoɹ ouǝ poןןɐɹ.",
                u"∀up ʇɥᴉs ɔɥɐᴉɹ ɟoɹ ʇʍo pollɐɹs˙"
            )
        )

        # Try to continue grading after the due date has passed
        # Continued grading should still be available,
        # but since there are no other submissions, we're in the waiting state.
        expected_context = {
            'estimated_time': '20 minutes',
            'graded': 0,
            'must_grade': 5,
            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            'review_num': 1,
            'rubric_criteria': xblock.rubric_criteria,
            'submit_button_text': 'Submit your assessment & review another response',
            'allow_latex': False,
        }
        self._assert_path_and_context(
            xblock, 'openassessmentblock/peer/oa_peer_turbo_mode_waiting.html',
            expected_context,
            continue_grading=True,
            workflow_status=workflow_status,
            graded_enough=True,
            was_graded_enough=True,
        )

        # Create a submission from another student.
        # We should now be able to continue grading that submission
        other_student_item = copy.deepcopy(xblock.get_student_item_dict())
        other_student_item['student_id'] = "Tyler"
        submission = xblock.create_submission(
            other_student_item,
            (u"Other submission 1", u"Other submission 2")
        )

        expected_context = {
            'estimated_time': '20 minutes',
            'graded': 0,
            'must_grade': 5,
            'peer_due': dt.datetime(2000, 1, 1).replace(tzinfo=pytz.utc),
            'peer_submission': create_submission_dict(submission, xblock.prompts),
            'file_upload_type': None,
            'peer_file_url': '',
            'review_num': 1,
            'rubric_criteria': xblock.rubric_criteria,
            'submit_button_text': 'Submit your assessment & review another response',
            'allow_latex': False,
        }
        self._assert_path_and_context(
            xblock, 'openassessmentblock/peer/oa_peer_turbo_mode.html',
            expected_context,
            continue_grading=True,
            workflow_status='done',
            graded_enough=True,
            was_graded_enough=True,
        )
Ejemplo n.º 54
0
    def test_open_saved_response(self, xblock):
        file_uploads = [
            {
                'description': 'file-1',
                'name': 'file-1.pdf',
                'size': 200
            },
            {
                'description': 'file-2',
                'name': 'file-2.pdf',
                'size': 400
            },
        ]

        xblock.file_manager.append_uploads(*file_uploads)

        # Save a response
        payload = json.dumps({
            'submission': ('A man must have a code', 'A man must have an umbrella too.')
        })
        resp = self.request(xblock, 'save_submission', payload, response_format='json')
        self.assertTrue(resp['success'])

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': None,
                'file_upload_type': None,
                'saved_response': create_submission_dict(
                    {'answer': prepare_submission_for_serialization(
                        ('A man must have a code', 'A man must have an umbrella too.'))},
                    xblock.prompts
                ),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text',
                'enable_delete_files': True,
            }
        )

        # pylint: disable=protected-access
        actual_file_uploads = [
            upload._to_dict() for upload in xblock.file_manager.get_uploads()
        ]
        expected_file_uploads = [
            api.FileUpload(
                description='file-1',
                name='file-1.pdf',
                size=200,
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict(),
            api.FileUpload(
                description='file-2',
                name='file-2.pdf',
                size=400,
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict(),
        ]

        for expected, actual in zip(expected_file_uploads, actual_file_uploads):
            # We can't consistently determine the values of an XBlock's item_id
            expected.pop('item_id')
            actual.pop('item_id')

        self.assertEqual(expected_file_uploads, actual_file_uploads)
Ejemplo n.º 55
0
    def test_open_saved_response_misaligned_file_data(self, xblock):
        """
        Test the case where the XBlock user state contains a different number of
        file descriptions from file sizes and names.  After rendering the block,
        the list of file names and sizes should be coerced to lists that are of the
        same length as the file descriptions.
        """
        xblock.saved_files_descriptions = json.dumps(["file-1", "file-2"])
        xblock.saved_files_names = json.dumps([])
        xblock.saved_files_sizes = json.dumps([200])

        xblock.file_upload_type = 'pdf-and-image'
        xblock.file_upload_response = 'optional'

        # Save a response
        payload = json.dumps({
            'submission': ('A man must have a code', 'A man must have an umbrella too.')
        })
        resp = self.request(xblock, 'save_submission', payload, response_format='json')
        self.assertTrue(resp['success'])

        self._assert_path_and_context(
            xblock, 'openassessmentblock/response/oa_response.html',
            {
                'text_response': 'required',
                'file_upload_response': 'optional',
                'file_upload_type': 'pdf-and-image',
                'file_urls': [('', 'file-1', None), ('', 'file-2', None)],
                'saved_response': create_submission_dict(
                    {'answer': prepare_submission_for_serialization(
                        ('A man must have a code', 'A man must have an umbrella too.'))},
                    xblock.prompts
                ),
                'save_status': 'This response has been saved but not submitted.',
                'submit_enabled': True,
                'submission_due': dt.datetime(2999, 5, 6).replace(tzinfo=pytz.utc),
                'allow_latex': False,
                'user_timezone': None,
                'user_language': None,
                'prompts_type': 'text',
                'enable_delete_files': True,
            }
        )

        # pylint: disable=protected-access
        actual_file_uploads = [
            upload._to_dict() for upload in xblock.file_manager.get_uploads()
        ]
        # When the file names/sizes lists have a different length than the file
        # descriptions, they are coerced to lists of None values of the same length
        # as the descriptions; hence the name and size attributes below are None
        # (see the standalone sketch after this example).
        expected_file_uploads = [
            api.FileUpload(
                description='file-1',
                name=None,
                size=None,
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict(),
            api.FileUpload(
                description='file-2',
                name=None,
                size=None,
                student_id='Bob',
                course_id='edX/Enchantment_101/April_1',
                item_id=ANY,
                descriptionless=False,
            )._to_dict(),
        ]
        for expected, actual in zip(expected_file_uploads, actual_file_uploads):
            # We can't consistently determine the values of an XBlock's item_id
            expected.pop('item_id')
            actual.pop('item_id')

        self.assertEqual(expected_file_uploads, actual_file_uploads)
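
The coercion described in the comments above can be pictured with a small standalone sketch. It is an assumption about the block's behaviour inferred from the test's expectations, not code taken from the block itself.

# Standalone sketch of the coercion described above: when saved names/sizes do
# not line up with the descriptions, they are replaced by None-lists of the
# same length. Inferred from the test's expectations, not copied from the block.
descriptions = ["file-1", "file-2"]
names = []            # misaligned: empty
sizes = [200]         # misaligned: wrong length
if len(names) != len(descriptions):
    names = [None] * len(descriptions)
if len(sizes) != len(descriptions):
    sizes = [None] * len(descriptions)
assert names == [None, None] and sizes == [None, None]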
Ejemplo n.º 56
0
    def test_create_submission_dict(self, input_submission, input_prompts, output):
        self.assertEqual(create_submission_dict(input_submission, input_prompts), output)