Example #1
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertItemsEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'')
        self.assertEqual(assessment['score_type'], u'SE')
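
Read together, the assertions above outline the flat dict that get_assessment returns. A minimal sketch of that shape, with field names copied from this example (the uuid value is a hypothetical placeholder, and the exact field set may vary across openassessment versions):

import datetime
import pytz

# Sketch of the serialized self-assessment implied by the assertions above.
# Field names come from this page's examples; 'hypothetical-uuid' is a placeholder.
expected = {
    'submission_uuid': 'hypothetical-uuid',
    'points_earned': 8,
    'points_possible': 10,
    'feedback': u'',
    'score_type': u'SE',  # 'SE' marks a self-assessment
    'scored_at': datetime.datetime(2014, 4, 1, tzinfo=pytz.utc),
}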
Example #2
    def test_create_assessment(self):
        # Initially, there should be no submission or self assessment
        self.assertEqual(get_assessment("5"), None)

        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Now there should be a submission, but no self-assessment
        assessment = get_assessment(submission["uuid"])
        self.assertIs(assessment, None)
        self.assertFalse(submitter_is_finished(submission['uuid'], {}))

        # Create a self-assessment for the submission
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.CRITERION_FEEDBACK, self.OVERALL_FEEDBACK, self.RUBRIC,
            scored_at=datetime.datetime(2014, 4, 1).replace(tzinfo=pytz.utc)
        )

        # Self-assessment should be complete
        self.assertTrue(submitter_is_finished(submission['uuid'], {}))

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Check that the assessment we created matches the assessment we retrieved
        # and that both have the correct values
        self.assertCountEqual(assessment, retrieved)
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 8)
        self.assertEqual(assessment['points_possible'], 10)
        self.assertEqual(assessment['feedback'], u'' + self.OVERALL_FEEDBACK)
        self.assertEqual(assessment['score_type'], u'SE')
Example #3
    def test_create_submissions(self):
        """ Tests create submission process. """
        # Create some submissions
        cmd = create_oa_submissions.Command(**{'self_assessment_required': True})
        cmd.handle("test_course", "test_item", "5", 100)
        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)

            answer_dict = submissions[0]['answer']
            self.assertIsInstance(answer_dict['text'], six.string_types)
            self.assertGreater(len(answer_dict['text']), 0)

            # Check that peer and self assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'])

            # Verify that the assessments exist and have content
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            assessment = self_api.get_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
Example #4
    def test_self_assess_handler(self, xblock):
        student_item = xblock.get_student_item_dict()

        # Create a submission for the student
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a self-assessment
        resp = self.request(xblock, 'self_assess', json.dumps(self.ASSESSMENT), response_format='json')
        self.assertTrue(resp['success'])

        # Expect that a self-assessment was created
        assessment = self_api.get_assessment(submission["uuid"])
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 5)
        self.assertEqual(assessment['points_possible'], 6)
        self.assertEqual(assessment['scorer_id'], 'Bob')
        self.assertEqual(assessment['score_type'], 'SE')
        self.assertEqual(assessment['feedback'], u'')

        parts = sorted(assessment['parts'])
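        # NOTE: sorting a list of dicts like this works on Python 2 only; Python 3
        # needs a key, e.g. sorted(assessment['parts'], key=lambda p: p['option']['criterion']['name']).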
        self.assertEqual(len(parts), 2)
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
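
self.ASSESSMENT itself is not shown on this page, but the assertions above plus the payload in the next example suggest a shape like the following (a hypothetical reconstruction; the criterion and option names are the ones the test asserts):

# Hypothetical reconstruction of self.ASSESSMENT, inferred from the parts
# asserted above and from the payload keys used in Example #5 below.
ASSESSMENT = {
    'options_selected': {
        u'Form': u'Fair',
        u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮': u'ﻉซƈﻉɭɭﻉกՇ',
    },
    'criterion_feedback': {},
    'overall_feedback': u'',
}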
Example #5
    def test_self_assess_feedback_only_criterion(self, xblock):
        # Create a submission for the student
        student_item = xblock.get_student_item_dict()
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a self assessment for a rubric with a feedback-only criterion
        assessment_dict = {
            'options_selected': {u'vocabulary': u'good'},
            'criterion_feedback': {
                u'vocabulary': 'Awesome job!',
                u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': 'fairly illegible.'
            },
            'overall_feedback': u''
        }
        resp = self.request(xblock, 'self_assess', json.dumps(assessment_dict), response_format='json')
        self.assertTrue(resp['success'])
        assessment = self_api.get_assessment(submission["uuid"])

        # Check the assessment for the criterion that has options
        self.assertEqual(assessment['parts'][0]['criterion']['name'], 'vocabulary')
        self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)

        # Check the feedback-only criterion score/feedback
        self.assertEqual(assessment['parts'][1]['criterion']['name'], u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
        self.assertIs(assessment['parts'][1]['option'], None)
        self.assertEqual(assessment['parts'][1]['feedback'], u'fairly illegible.')
Example #6
    def test_create_submissions(self):

        # Create some submissions
        cmd = create_oa_submissions.Command(**{'self_assessment_required': True})
        cmd.handle("test_course", "test_item", "5", 100)
        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)

            answer_dict = submissions[0]['answer']
            self.assertIsInstance(answer_dict['text'], basestring)
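            # NOTE: `basestring` exists on Python 2 only; Example #3 above shows the
            # equivalent six.string_types spelling that also works on Python 3.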
            self.assertGreater(len(answer_dict['text']), 0)

            # Check that peer and self assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'])

            # Verify that the assessments exist and have content
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            assessment = self_api.get_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
Example #7
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'],
            u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED,
            self.CRITERION_FEEDBACK,
            self.OVERALL_FEEDBACK,
            self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'],
                u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED,
                self.CRITERION_FEEDBACK,
                self.OVERALL_FEEDBACK,
                self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        six.assertCountEqual(self, assessment, retrieved)
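
Three spellings of the same unordered-membership assertion appear on this page; they differ only by Python version. Note that when both arguments are dicts, iterating them yields keys, so these effectively compare key sets:

# Equivalent assertions, as seen in Examples #1, #2, and #7 respectively:
self.assertItemsEqual(assessment, retrieved)        # Python 2 unittest only
self.assertCountEqual(assessment, retrieved)        # Python 3 rename of the above
six.assertCountEqual(self, assessment, retrieved)   # works on both, via six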
Example #8
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'],
            u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED,
            self.CRITERION_FEEDBACK,
            self.OVERALL_FEEDBACK,
            self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
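
The utcnow().replace(tzinfo=pytz.utc) idiom above produces an aware UTC timestamp, so the >= comparison against the aware scored_at value is valid. An equivalent spelling:

import datetime
import pytz

# Equivalent to datetime.datetime.utcnow().replace(tzinfo=pytz.utc):
before = datetime.datetime.now(tz=pytz.utc)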
Example #9
    def test_self_assess_feedback_only_criterion(self, xblock):
        # Create a submission for the student
        student_item = xblock.get_student_item_dict()
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a self assessment for a rubric with a feedback-only criterion
        assessment_dict = {
            'options_selected': {
                u'vocabulary': u'good'
            },
            'criterion_feedback': {
                u'vocabulary': 'Awesome job!',
                u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞': 'fairly illegible.'
            },
            'overall_feedback': u''
        }
        resp = self.request(xblock,
                            'self_assess',
                            json.dumps(assessment_dict),
                            response_format='json')
        self.assertTrue(resp['success'])
        assessment = self_api.get_assessment(submission["uuid"])

        # Check the assessment for the criterion that has options
        self.assertEqual(assessment['parts'][0]['criterion']['name'],
                         'vocabulary')
        self.assertEqual(assessment['parts'][0]['option']['name'], 'good')
        self.assertEqual(assessment['parts'][0]['option']['points'], 1)

        # Check the feedback-only criterion score/feedback
        self.assertEqual(assessment['parts'][1]['criterion']['name'],
                         u'𝖋𝖊𝖊𝖉𝖇𝖆𝖈𝖐 𝖔𝖓𝖑𝖞')
        self.assertIs(assessment['parts'][1]['option'], None)
        self.assertEqual(assessment['parts'][1]['feedback'],
                         u'fairly illegible.')
Example #10
    def test_self_assess_handler(self, xblock):
        student_item = xblock.get_student_item_dict()

        # Create a submission for the student
        submission = xblock.create_submission(student_item, self.SUBMISSION)

        # Submit a self-assessment
        resp = self.request(xblock,
                            'self_assess',
                            json.dumps(self.ASSESSMENT),
                            response_format='json')
        self.assertTrue(resp['success'])

        # Expect that a self-assessment was created
        assessment = self_api.get_assessment(submission["uuid"])
        self.assertEqual(assessment['submission_uuid'], submission['uuid'])
        self.assertEqual(assessment['points_earned'], 5)
        self.assertEqual(assessment['points_possible'], 6)
        self.assertEqual(assessment['scorer_id'], 'Bob')
        self.assertEqual(assessment['score_type'], 'SE')
        self.assertEqual(assessment['feedback'], u'')

        parts = sorted(assessment['parts'])
        self.assertEqual(len(parts), 2)
        self.assertEqual(parts[0]['option']['criterion']['name'], u'Form')
        self.assertEqual(parts[0]['option']['name'], 'Fair')
        self.assertEqual(parts[1]['option']['criterion']['name'], u'𝓒𝓸𝓷𝓬𝓲𝓼𝓮')
        self.assertEqual(parts[1]['option']['name'], u'ﻉซƈﻉɭɭﻉกՇ')
Example #11
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        context = {}
        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

        if self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria
                context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
                context["self_submission"] = submission
                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
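
A sketch of how this method is presumably consumed (the caller and the render_assessment helper are assumptions, not shown in this example):

def render_self_assessment(block):
    # Hypothetical caller: resolve the template path and context for the
    # self-assessment step, then render it. Assumes `block` exposes the
    # methods used above plus a render_assessment(path, context) helper.
    path, context = block.self_path_and_context()
    return block.render_assessment(path, context)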
Example #12
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        context = {}
        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

        if self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
                context["self_submission"] = submission

                # Determine if file upload is supported for this XBlock.
                context["allow_file_upload"] = self.allow_file_upload
                context['self_file_url'] = self.get_download_url_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
Example #13
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
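
When no submission is found for the requested student, the context above presumably falls through to its defaults, since the assessment APIs are queried with submission_uuid=None. A sketch of that default shape (keys copied from the example):

# Default staff-debug context when the student has no submission
# (a sketch; the empty list stands in for copy.deepcopy(self.rubric_criteria)).
context = {
    'submission': None,
    'peer_assessments': [],
    'submitted_assessments': [],
    'self_assessment': None,
    'example_based_assessment': None,
    'rubric_criteria': [],
}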
Example #14
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get("student_id", "")
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item["student_id"] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]["uuid"]

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
        else:
            peer_assessments = []
            submitted_assessments = []

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        context = {
            "submission": submission,
            "peer_assessments": peer_assessments,
            "submitted_assessments": submitted_assessments,
            "self_assessment": self_assessment,
            "rubric_criteria": copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = "openassessmentblock/staff_debug/student_info.html"
        return path, context
Example #15
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
        else:
            peer_assessments = []
            submitted_assessments = []

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #16
    def test_create_multiple_self_assessments(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Self assess once
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Attempt to self-assess again, which should raise an exception
        with self.assertRaises(SelfAssessmentRequestError):
            create_assessment(
                submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
                self.OPTIONS_SELECTED, self.RUBRIC,
            )

        # Expect that we still have the original assessment
        retrieved = get_assessment(submission["uuid"])
        self.assertItemsEqual(assessment, retrieved)
Example #17
    def test_create_assessment_timestamp(self):
        # Create a submission to self-assess
        submission = create_submission(self.STUDENT_ITEM, "Test answer")

        # Record the current system clock time
        before = datetime.datetime.utcnow().replace(tzinfo=pytz.utc)

        # Create a self-assessment for the submission
        # Do not override the scored_at timestamp, so it should be set to the current time
        assessment = create_assessment(
            submission['uuid'], u'𝖙𝖊𝖘𝖙 𝖚𝖘𝖊𝖗',
            self.OPTIONS_SELECTED, self.RUBRIC,
        )

        # Retrieve the self-assessment
        retrieved = get_assessment(submission["uuid"])

        # Expect that both the created and retrieved assessments have the same
        # timestamp, and it's >= our recorded time.
        self.assertEqual(assessment['scored_at'], retrieved['scored_at'])
        self.assertGreaterEqual(assessment['scored_at'], before)
Example #18
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #19
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as submission_api

        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))

        context = {
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            "xblock_id": self.get_xblock_id(),
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language']
        }

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
        if workflow_status == 'cancelled':
            path = 'openassessmentblock/self/oa_self_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True

        elif self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["self_submission"] = create_submission_dict(submission, self.prompts)
                if self.rubric_feedback_prompt is not None:
                    context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

                if self.rubric_feedback_default_text is not None:
                    context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

                # Determine if file upload is supported for this XBlock and what kind of files can be uploaded.
                context["file_upload_type"] = self.file_upload_type
                context['self_file_urls'] = self.get_download_urls_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
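
Condensed, the branching above picks one template per workflow state; the mapping below paraphrases the example's logic (the state labels are descriptive, not API values):

# Paraphrase of the template selection in self_path_and_context above.
TEMPLATE_BY_STATE = {
    'workflow cancelled': 'openassessmentblock/self/oa_self_cancelled.html',
    'self step complete': 'openassessmentblock/self/oa_self_complete.html',
    'closed past due date': 'openassessmentblock/self/oa_self_closed.html',
    'open, self-assessment pending': 'openassessmentblock/self/oa_self_assessment.html',
    'not yet open / no submission': 'openassessmentblock/self/oa_self_unavailable.html',
}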
Example #20
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')  # pylint: disable=protected-access

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(workflow['submission_uuid'])

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen() if grade_utils else None
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #21
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api

        assessment_steps = self.assessment_steps

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"
        grade_utils = self.runtime._services.get('grade_utils')

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
            'are_grades_frozen': grade_utils.are_grades_frozen()
        })

        if peer_assessments or self_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #22
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
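
The .get(..., '') lookups above tolerate criteria that exist in an assessment but not in the current rubric (or vice versa), as the comments explain. A small illustration with hypothetical criterion names:

# Why .get(..., '') matters: an assessment may reference a criterion that was
# later renamed or removed, so plain indexing would raise KeyError and break
# the grade page. ('vocabulary' and 'renamed criterion' are hypothetical.)
median_scores = {'vocabulary': 2}
median = median_scores.get('renamed criterion', '')  # '' instead of KeyError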
Example #23
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #24
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #25
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        student_item = self.get_student_item_dict()
        scores = {}
        problem_closed = None

        if student_id:
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

            # Get the data we need for instructor override of the student's score
            rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            scores = peer_api.get_data_for_override_score(
                submission_uuid,
                student_item,
                rubric_dict,
            )
            problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'scores': scores,
            'problem_closed': problem_closed,
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #26
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
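A hedged sketch of how a test might drive render_grade_complete; the block fixture and the pre-created submission are assumptions, in the style of the test examples earlier in this collection:

# Hypothetical test-style usage: `block` is an OpenAssessmentBlock whose
# workflow for `submission` has already reached the "done" state.
workflow = block.get_workflow_info(submission_uuid=submission['uuid'])
path, context = block.render_grade_complete(workflow)
assert path == 'openassessmentblock/grade/oa_grade_complete.html'
assert context['score'] == workflow['score']
assert 'grade_details' in context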
Example #27
0
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
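The .get(..., '') lookups at the end are the key detail: criteria that no longer match the assessed rubric render with a blank score instead of raising KeyError. A standalone, runnable sketch of that guard (all data invented for illustration):

# 'Content' was added to the rubric after this submission was assessed.
median_scores = {'Ideas': 4}
max_scores = {'Ideas': 5, 'Content': 5}
rubric_criteria = [{'name': 'Ideas'}, {'name': 'Content'}]

for criterion in rubric_criteria:
    criterion['median_score'] = median_scores.get(criterion['name'], '')
    criterion['total_value'] = max_scores.get(criterion['name'], '')

# 'Content' now carries '' rather than crashing the grade page.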
Example #28
0
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_username (unicode): The username of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        anonymous_user_id = None
        submissions = None
        student_item = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page.  "
                        u"The student username is '{student_username}', and the file key is {file_key}"
                    ).format(student_username=student_username, file_key=file_key)
                    logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
        if workflow_cancellation:
            workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
            'submission': submission,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
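A hedged usage sketch for this staff debug helper; block is an assumed OpenAssessmentBlock instance and the username is illustrative:

path, context = block.get_student_info_path_and_context(u'learner_1')
assert path == 'openassessmentblock/staff_debug/student_info.html'
if context['submission'] is None:
    pass  # no submission yet: the template renders an empty info section
elif context['workflow_cancellation']:
    print('Cancelled by', context['workflow_cancellation']['cancelled_by'])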
Example #29
0
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #30
0
    def add_submission_context(self, submission_uuid, context):
        """
        Add the submission information (self assessment, peer assessments, final grade, etc.)
        to the supplied context for display in the "learner info" portion of staff tools.
        Args:
            submission_uuid (unicode): The uuid of the submission, should NOT be None.
            context: the context to update with additional information
        """
        assessment_steps = self.assessment_steps

        example_based_assessment = None
        example_based_assessment_grade_context = None

        self_assessment = None
        self_assessment_grade_context = None

        peer_assessments = None
        peer_assessments_grade_context = []

        staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        staff_assessment_grade_context = None

        submitted_assessments = None

        grade_details = None

        workflow = self.get_workflow_info(submission_uuid=submission_uuid)
        grade_exists = workflow.get('status') == "done"

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid)
            if grade_exists:
                peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
                peer_assessments_grade_context = [
                    self._assessment_grade_context(peer_assessment)
                    for peer_assessment in peer_assessments
                ]

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
            if grade_exists:
                self_assessment_grade_context = self._assessment_grade_context(self_assessment)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)
            if grade_exists:
                example_based_assessment_grade_context = self._assessment_grade_context(example_based_assessment)

        if grade_exists:
            if staff_assessment:
                staff_assessment_grade_context = self._assessment_grade_context(staff_assessment)

            grade_details = self.grade_details(
                submission_uuid,
                peer_assessments_grade_context,
                self_assessment_grade_context,
                example_based_assessment_grade_context,
                staff_assessment_grade_context,
                is_staff=True,
            )

        workflow_cancellation = self.get_workflow_cancellation_info(submission_uuid)

        context.update({
            'example_based_assessment': [example_based_assessment] if example_based_assessment else None,
            'self_assessment': [self_assessment] if self_assessment else None,
            'peer_assessments': peer_assessments,
            'staff_assessment': [staff_assessment] if staff_assessment else None,
            'submitted_assessments': submitted_assessments,
            'grade_details': grade_details,
            'score': workflow.get('score'),
            'workflow_status': workflow.get('status'),
            'workflow_cancellation': workflow_cancellation,
        })

        if peer_assessments or self_assessment or example_based_assessment or staff_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]
Example #31
0
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """

        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))

        context = {
            'allow_latex': self.allow_latex,
            "xblock_id": self.get_xblock_id(),
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language']
        }

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
        if workflow_status == 'cancelled':
            path = 'openassessmentblock/self/oa_self_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True

        elif self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["self_submission"] = create_submission_dict(submission, self.prompts)
                if self.rubric_feedback_prompt is not None:
                    context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

                if self.rubric_feedback_default_text is not None:
                    context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

                # Determine if file upload is supported for this XBlock and what kind of files can be uploaded.
                context["file_upload_type"] = self.file_upload_type
                context['self_file_urls'] = self.get_download_urls_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
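Every branch above resolves to one of five templates. A hedged, test-style sketch of that contract (the block fixture is an assumption):

path, _context = block.self_path_and_context()
assert path in (
    'openassessmentblock/self/oa_self_unavailable.html',  # before start, or not yet at this step
    'openassessmentblock/self/oa_self_cancelled.html',    # workflow was cancelled
    'openassessmentblock/self/oa_self_complete.html',     # self step already finished
    'openassessmentblock/self/oa_self_closed.html',       # past the due date
    'openassessmentblock/self/oa_self_assessment.html',   # open and awaiting self-assessment
)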
Example #32
0
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of template_path (string), context (dict)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None
        else:
            feedback = None
            peer_assessments = []
            has_submitted_feedback = False

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None  # stays None if neither branch below applies
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
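Unlike Example #27, this older variant indexes the score dicts directly, so a criterion renamed after release raises KeyError here instead of rendering blank. The later defensive form, as a two-line sketch:

# Guarded lookup adopted in Example #27:
criterion["median_score"] = median_scores.get(criterion["name"], '')
criterion["total_value"] = max_scores.get(criterion["name"], '')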