Example #1
    def test_get_old_submission(self):
        # hack in an old-style submission; this can't be created with the ORM (EDUCATOR-1090)
        with transaction.atomic():
            student_item = StudentItem.objects.create()
            connection.cursor().execute("""
                INSERT INTO submissions_submission
                    (id, uuid, attempt_number, submitted_at, created_at, raw_answer, student_item_id, status)
                VALUES (
                    {}, {}, {}, {}, {}, {}, {}, {}
                );""".format(
                    1,
                    "\'deadbeef-1234-5678-9100-1234deadbeef\'",
                    1,
                    "\'2017-07-13 17:56:02.656129\'",
                    "\'2017-07-13 17:56:02.656129\'",
                    "\'{\"parts\":[{\"text\":\"raw answer text\"}]}\'",
                    int(student_item.id),
                    "\'A\'"
                ), []
            )

        with mock.patch.object(
            Submission.objects, 'raw',
            wraps=Submission.objects.raw
        ) as mock_raw:
            _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
            self.assertEqual(1, mock_raw.call_count)

            # On subsequent accesses we still get the submission, but raw() isn't needed
            mock_raw.reset_mock()
            _ = api.get_submission('deadbeef-1234-5678-9100-1234deadbeef')
            self.assertEqual(0, mock_raw.call_count)
Example #2
    def test_load_non_json_answer(self):
        # This should never happen if folks are using the public API.
        # Create a submission with a raw answer that is NOT valid JSON
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_model = Submission.objects.get(uuid=submission['uuid'])
        sub_model.raw_answer = ''
        sub_model.save()

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission(sub_model.uuid)

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission_and_student(sub_model.uuid)
Example #3
    def test_caching(self):
        sub = api.create_submission(STUDENT_ITEM, "Hello World!")

        # The first request to get the submission hits the database...
        with self.assertNumQueries(1):
            db_sub = api.get_submission(sub["uuid"])

        # The next one hits the cache only...
        with self.assertNumQueries(0):
            cached_sub = api.get_submission(sub["uuid"])

        # The data that gets passed back matches the original in both cases
        self.assertEqual(sub, db_sub)
        self.assertEqual(sub, cached_sub)
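
The assertNumQueries checks above only pass if get_submission reads through a cache keyed by submission UUID. A rough sketch of that read-through pattern follows; the key format and timeout are made up for illustration and this is not the submissions app's actual implementation:

# Read-through cache sketch; illustrative only, not the submissions app's real code.
from django.core.cache import cache

def cached_get_submission(uuid, loader, timeout=3600):
    key = "submissions.submission.{}".format(uuid)  # hypothetical cache key format
    value = cache.get(key)
    if value is None:
        # First access: fall through to the database (one query), then prime the cache.
        value = loader(uuid)
        cache.set(key, value, timeout)
    # Subsequent accesses are served from the cache (zero queries).
    return value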
Example #4
    def test_load_non_json_answer(self):
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_model = Submission.objects.get(uuid=submission['uuid'])

        # This should never happen if folks are using the public API.
        # Create a submission with a raw answer that is NOT valid JSON
        query = "UPDATE submissions_submission SET raw_answer = '}' WHERE id = %s"
        connection.cursor().execute(query, [str(sub_model.id)])
        transaction.commit_unless_managed()

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission(sub_model.uuid)

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission_and_student(sub_model.uuid)
Example #5
    def test_load_non_json_answer(self):
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_model = Submission.objects.get(uuid=submission['uuid'])

        # This should never happen if folks are using the public API.
        # Create a submission with a raw answer that is NOT valid JSON
        with transaction.atomic():
            query = "UPDATE submissions_submission SET raw_answer = '}' WHERE id = %s"
            connection.cursor().execute(query, [str(sub_model.id)])

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission(sub_model.uuid)

        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission_and_student(sub_model.uuid)
Example #6
def required_peer_grades(submission_uuid, peer_requirements):
    """
    Given a submission UUID, find how many peer assessments are required.

    Args:
        submission_uuid (str): The UUID of the submission being tracked.
        peer_requirements (dict): Dictionary with the key "must_be_graded_by" indicating
            the number of peer assessments the submission requires,
            and "enable_flexible_grading" indicating whether flexible grading is enabled.

    Returns:
        int
    """

    submission = sub_api.get_submission(submission_uuid)

    must_grade = peer_requirements["must_be_graded_by"]

    if peer_requirements.get("enable_flexible_grading"):

        # find how many days have elapsed since the submission was submitted
        days_elapsed = (timezone.now().date() -
                        submission['submitted_at'].date()).days

        # check if flexible grading applies; if it does, update must_grade
        if days_elapsed >= FLEXIBLE_PEER_GRADING_REQUIRED_SUBMISSION_AGE_IN_DAYS:
            must_grade = int(must_grade *
                             FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE / 100)
            if must_grade == 0:
                must_grade = 1

    return must_grade
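
As a concrete illustration of the reduction applied above: with must_be_graded_by set to 5 and a graded-by percentage of 30, the requirement drops to int(5 * 30 / 100) = 1 once the submission is old enough. A minimal, self-contained sketch; the constant values here are assumptions for illustration, not the project's actual settings:

# Illustration of the flexible-grading reduction in required_peer_grades.
# Both constants below are assumed example values, not the real configuration.
FLEXIBLE_PEER_GRADING_REQUIRED_SUBMISSION_AGE_IN_DAYS = 7
FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE = 30

def flexible_must_grade(must_be_graded_by, days_elapsed):
    # Before the submission is old enough, the full requirement applies.
    if days_elapsed < FLEXIBLE_PEER_GRADING_REQUIRED_SUBMISSION_AGE_IN_DAYS:
        return must_be_graded_by
    # Afterwards the requirement shrinks to a percentage of the original, but never below one.
    return max(1, int(must_be_graded_by * FLEXIBLE_PEER_GRADING_GRADED_BY_PERCENTAGE / 100))

# With these assumed constants: flexible_must_grade(5, 2) == 5, flexible_must_grade(5, 10) == 1.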
Example #7
    def get_submitted_archives(self, request, suffix):
        def get_64_contents(filename):
            with default_storage.open(filename, 'r') as f:
                return b64encode(f.read())

        instructor_fs_path = self.get_instructor_path()

        response = {
            'instructor_archive_name': instructor_fs_path,
            'instructor_archive': get_64_contents(instructor_fs_path),
        }

        if suffix:
            user_id = self.student_submission_dict(anon_student_id=suffix)
            submission = submissions_api.get_submission(
                user_id.get('student_id'))
            answer = submission['answer']
            response.update({
                'user_archive_name':
                answer.get('real_path'),
                'user_archive':
                get_64_contents(answer.get('real_path')),
            })

        return Response(json_body=response)
Example #8
    def get_submission_info(self, submission_uuid, _, suffix=''):  # pylint: disable=unused-argument
        """
        Return a dict representation of a submission in the form
        {
            'text': <list of strings representing the raw response for each prompt>
            'files': <list of:>
                {
                    'download_url': <file url>
                    'description': <file description>
                    'name': <file name>
                }
        }
        """
        try:
            if self.is_team_assignment():
                submission = get_team_submission(submission_uuid)
            else:
                submission = get_submission(submission_uuid)
            answer = OraSubmissionAnswerFactory.parse_submission_raw_answer(
                submission.get('answer'))
        except SubmissionError as err:
            raise JsonHandlerError(404, str(err)) from err
        except VersionNotFoundException as err:
            raise JsonHandlerError(500, str(err)) from err

        return {
            'files': [
                SubmissionDetailFileSerilaizer(file_data).data
                for file_data in self.get_download_urls_from_submission(submission)
            ],
            'text': answer.get_text_responses()
        }
Example #9
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        context = {}
        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(
            step="self-assessment")

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

        if self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(
                workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(
                    self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria
                context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
                context["self_submission"] = submission
                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
Example #10
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        context = {}
        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)

        if self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["estimated_time"] = "20 minutes"  # TODO: Need to configure this.
                context["self_submission"] = submission

                # Determine if file upload is supported for this XBlock.
                context["allow_file_upload"] = self.allow_file_upload
                context['self_file_url'] = self.get_download_url_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
Example #11
    def test_get_submission(self):
        # Test base case that we can create a submission and get it back
        sub_dict1 = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_dict2 = api.get_submission(sub_dict1["uuid"])
        self.assertEqual(sub_dict1, sub_dict2)

        # Test invalid inputs
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission(20)
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission({})

        # Test not found
        with self.assertRaises(api.SubmissionNotFoundError):
            api.get_submission("notarealuuid")
        with self.assertRaises(api.SubmissionNotFoundError):
            api.get_submission("0" * 50)  # This is bigger than our field size
Example #12
 def wrapped_handler(self, data, suffix=""):  # pylint: disable=unused-argument
     submission_uuid = data.get('submission_uuid', None)
     if not submission_uuid:
         raise JsonHandlerError(400,
                                "Body must contain a submission_uuid")
     if validate:
         try:
             if self.is_team_assignment():
                 get_team_submission(submission_uuid)
             else:
                 get_submission(submission_uuid)
         except SubmissionNotFoundError as exc:
             raise JsonHandlerError(404,
                                    "Submission not found") from exc
         except SubmissionRequestError as exc:
             raise JsonHandlerError(
                 400, "Bad submission_uuid provided") from exc
         except (SubmissionInternalError, Exception) as exc:
             raise JsonHandlerError(
                 500, "Internal error getting submission info") from exc
     return handler(self, submission_uuid, data, suffix=suffix)
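
The snippet above is only the inner function of a decorator: handler and validate come from an enclosing scope that is not shown. A runnable sketch of how such a decorator factory could be assembled; the factory name and the stub objects are placeholders for illustration, not the real xblock or submissions classes:

# Illustrative decorator-factory sketch around a wrapped_handler like the one above.
import functools

class JsonHandlerError(Exception):
    # Minimal stand-in for the real JsonHandlerError used in the snippet.
    def __init__(self, status_code, message):
        super().__init__(message)
        self.status_code = status_code

def get_submission(submission_uuid):
    # Stub: the real call goes to the submissions API and raises on bad or missing UUIDs.
    return {'uuid': submission_uuid}

def require_submission_uuid(validate=True):  # hypothetical factory name
    def decorator(handler):
        @functools.wraps(handler)
        def wrapped_handler(self, data, suffix=""):
            submission_uuid = data.get('submission_uuid', None)
            if not submission_uuid:
                raise JsonHandlerError(400, "Body must contain a submission_uuid")
            if validate:
                # The real version maps submission errors to 400/404/500 JsonHandlerErrors.
                get_submission(submission_uuid)
            # Hand the validated UUID through to the wrapped handler.
            return handler(self, submission_uuid, data, suffix=suffix)
        return wrapped_handler
    return decorator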
Example #13
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(
            workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(
            workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(
            student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #14
def get_submission_to_assess(course_id, item_id, scorer_id):
    """
    Get a submission for staff evaluation.

    Retrieves a submission for assessment for the given staff member.

    Args:
        course_id (str): The course that we would like to fetch submissions from.
        item_id (str): The student_item (problem) that we would like to retrieve submissions for.
        scorer_id (str): The user id of the staff member scoring this submission

    Returns:
        dict: A student submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        StaffAssessmentInternalError: Raised when there is an internal error
            retrieving staff workflow information.

    Examples:
        >>> get_submission_to_assess("a_course_id", "an_item_id", "a_scorer_id")
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': { ... }
        }

    """
    student_submission_uuid = StaffWorkflow.get_submission_for_review(course_id, item_id, scorer_id)
    if student_submission_uuid:
        try:
            submission_data = submissions_api.get_submission(student_submission_uuid)
            return submission_data
        except submissions_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {}"
            ).format(student_submission_uuid)
            logger.exception(error_message)
            raise StaffAssessmentInternalError(error_message)
    else:
        logger.info(
            u"No submission found for staff to assess ({}, {})"
            .format(
                course_id,
                item_id,
            )
        )
        return None
Example #15
    def test_get_submission(self):
        # Test base case that we can create a submission and get it back
        sub_dict1 = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        sub_dict2 = api.get_submission(sub_dict1["uuid"])
        self.assertEqual(sub_dict1, sub_dict2)

        # Test invalid inputs
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission(20)
        with self.assertRaises(api.SubmissionRequestError):
            api.get_submission({})

        # Test not found
        with self.assertRaises(api.SubmissionNotFoundError):
            api.get_submission("deadbeef-1234-5678-9100-1234deadbeef")
Example #16
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #17
    def render_self_assessment(self, data, suffix=''):
        context = {}

        assessment_module = self.get_assessment_module('self-assessment')

        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, date = self.is_closed(step="self-assessment")

        if problem_closed:
            if date == 'start':
                context["self_start"] = self.format_datetime_string(date)
            elif date == 'due':
                context["self_due"] = self.format_datetime_string(date)

        workflow = self.get_workflow_info()
        if not workflow:
            return self.render_assessment(path, context)

        try:
            submission = submission_api.get_submission(self.submission_uuid)
            assessment = self_api.get_assessment(
                workflow["submission_uuid"]
            )
        except (submission_api.SubmissionError, self_api.SelfAssessmentRequestError):
            logger.exception(
                u"Could not retrieve self assessment for submission {}"
                .format(workflow["submission_uuid"])
            )
            return self.render_error(_(u"An unexpected error occurred."))
        if workflow["status"] == "self":
            path = 'openassessmentblock/self/oa_self_assessment.html'
            context = {
                "rubric_criteria": self.rubric_criteria,
                "estimated_time": "20 minutes",  # TODO: Need to configure this.
                "self_submission": submission,
            }
        elif assessment is not None:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif date == "due" and problem_closed:
            path = 'openassessmentblock/self/oa_self_closed.html'

        return self.render_assessment(path, context)
Example #18
    def get_user_submission(submission_uuid):
        """Return the most recent submission by user in workflow

        Return the most recent submission.  If no submission is available,
        return None. All submissions are preserved, but only the most recent
        will be returned in this function, since the active workflow will only
        be concerned with the most recent submission.

        Args:
            submission_uuid (str): The uuid for the submission to retrieve.

        Returns:
            (dict): A dictionary representation of a submission to render to
                the front end.

        """
        try:
            return api.get_submission(submission_uuid)
        except api.SubmissionRequestError:
            # This error is actually ok.
            return None
Example #19
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        feedback = peer_api.get_assessment_feedback(self.submission_uuid)
        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(workflow['submission_uuid'])
        peer_assessments = peer_api.get_assessments(student_submission['uuid'])
        self_assessment = self_api.get_assessment(student_submission['uuid'])
        has_submitted_feedback = peer_api.get_assessment_feedback(workflow['submission_uuid']) is not None

        context = {
            'score': workflow['score'],
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
            'has_submitted_feedback': has_submitted_feedback,
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in the XBlock field
        max_scores = peer_api.get_rubric_max_scores(self.submission_uuid)
        median_scores = peer_api.get_assessment_median_scores(student_submission["uuid"])
        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #20
 def test_get_submission_deep_error(self, mock_get):
     # Test deep explosions are wrapped
     with self.assertRaises(api.SubmissionInternalError):
         mock_get.side_effect = DatabaseError("Kaboom!")
         api.get_submission("000000000000000")
Example #21
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """

        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(
            step="self-assessment")
        user_preferences = get_user_preferences(
            self.runtime.service(self, 'user'))

        context = {
            'allow_latex': self.allow_latex,
            "xblock_id": self.get_xblock_id(),
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language']
        }

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
        if workflow_status == 'cancelled':
            path = 'openassessmentblock/self/oa_self_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True

        elif self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(
                workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(
                    self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["self_submission"] = create_submission_dict(
                    submission, self.prompts)
                if self.rubric_feedback_prompt is not None:
                    context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

                if self.rubric_feedback_default_text is not None:
                    context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

                # Determine if file upload is supported for this XBlock and what kind of files can be uploaded.
                context["file_upload_type"] = self.file_upload_type
                context['self_file_urls'] = self.get_download_urls_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
Example #22
def get_submission_to_assess(submission_uuid, graded_by):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        submission_uuid (str): The submission UUID from the student
            requesting a submission for assessment. This is used to explicitly
            avoid giving the student their own submission, and determines the
            associated Peer Workflow.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> get_submission_to_assess("abc123", 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    workflow = PeerWorkflow.get_by_submission_uuid(submission_uuid)
    if not workflow:
        raise PeerAssessmentWorkflowError(
            u"A Peer Assessment Workflow does not exist for the student "
            u"with submission UUID {}".format(submission_uuid)
        )
    peer_submission_uuid = workflow.find_active_assessments()
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise,
    # get the first submission available for over grading ("over-grading").
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_review(graded_by)
    if peer_submission_uuid is None:
        peer_submission_uuid = workflow.get_submission_for_over_grading()
    if peer_submission_uuid:
        try:
            submission_data = sub_api.get_submission(peer_submission_uuid)
            PeerWorkflow.create_item(workflow, peer_submission_uuid)
            _log_workflow(peer_submission_uuid, workflow)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = (
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
            ).format(peer_submission_uuid, workflow.student_id)
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                workflow.student_id,
                workflow.course_id,
                workflow.item_id,
            )
        )
        return None
Example #23
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid,
                               self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment) for
                peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        raw_staff_assessment = staff_api.get_latest_staff_assessment(
            submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(
                raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #24
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Import is placed here to avoid model import at project startup.
        from openassessment.assessment.api import peer as peer_api
        from openassessment.assessment.api import self as self_api
        from openassessment.assessment.api import staff as staff_api
        from submissions import api as sub_api

        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        staff_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            peer_api.get_score(submission_uuid, self.workflow_requirements()["peer"])
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(peer_assessment)
                for peer_assessment in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        raw_staff_assessment = staff_api.get_latest_staff_assessment(submission_uuid)
        if raw_staff_assessment:
            staff_assessment = self._assessment_grade_context(raw_staff_assessment)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'has_submitted_feedback': has_submitted_feedback,
            'student_submission': create_submission_dict(student_submission, self.prompts),
            'peer_assessments': peer_assessments,
            'grade_details': self.grade_details(
                submission_uuid,
                peer_assessments=peer_assessments,
                self_assessment=self_assessment,
                staff_assessment=staff_assessment,
            ),
            'file_upload_type': self.file_upload_type,
            'allow_latex': self.allow_latex,
            'file_urls': self.get_download_urls_from_submission(student_submission),
            'xblock_id': self.get_xblock_id()
        }

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
Example #25
def get_submission_to_assess(
        student_item_dict,
        graded_by,
        over_grading=False):
    """Get a submission to peer evaluate.

    Retrieves a submission for assessment for the given student_item. This will
    not return a submission submitted by the requesting scorer. Submissions are
    returned based on how many assessments are still required, and if there are
    peers actively assessing a particular submission. If there are no
    submissions requiring assessment, a submission may be returned that will be
    'over graded', and the assessment will not be counted towards the overall
    grade.

    Args:
        student_item_dict (dict): The student item information from the student
            requesting a submission for assessment. The dict contains an
            item_id, course_id, and item_type, used to identify the unique
            question for the review, while the student_id is used to explicitly
            avoid giving the student their own submission.
        graded_by (int): The number of assessments a submission
            requires before it has completed the peer assessment process.
        over_grading (bool): Allows over grading to be performed if no submission
            requires assessments. Over grading should only occur if the deadline
            for submissions has passed, but there is still a window for peer
            assessment. Defaults to False.

    Returns:
        dict: A peer submission for assessment. This contains a 'student_item',
            'attempt_number', 'submitted_at', 'created_at', and 'answer' field to be
            used for assessment.

    Raises:
        PeerAssessmentRequestError: Raised when the request parameters are
            invalid for the request.
        PeerAssessmentInternalError: Raised when there is an internal error
            retrieving peer workflow information.
        PeerAssessmentWorkflowError: Raised when an error occurs because this
            function, or the student item, is not in the proper workflow state
            to retrieve a peer submission.

    Examples:
        >>> student_item_dict = dict(
        >>>    item_id="item_1",
        >>>    course_id="course_1",
        >>>    item_type="type_one",
        >>>    student_id="Bob",
        >>> )
        >>> get_submission_to_assess(student_item_dict, 3)
        {
            'student_item': 2,
            'attempt_number': 1,
            'submitted_at': datetime.datetime(2014, 1, 29, 23, 14, 52, 649284, tzinfo=<UTC>),
            'created_at': datetime.datetime(2014, 1, 29, 17, 14, 52, 668850, tzinfo=<UTC>),
            'answer': u'The answer is 42.'
        }

    """
    workflow = _get_latest_workflow(student_item_dict)
    if not workflow:
        raise PeerAssessmentWorkflowError(_(
            u"A Peer Assessment Workflow does not exist for the specified "
            u"student."))
    submission_uuid = _find_active_assessments(workflow)
    # If there is an active assessment for this user, get that submission,
    # otherwise, get the first assessment for review, otherwise, if over grading
    # is turned on, get the first submission available for over grading.
    if submission_uuid is None:
        submission_uuid = _get_submission_for_review(workflow, graded_by)
    if submission_uuid is None and over_grading:
        submission_uuid = _get_submission_for_over_grading(workflow)
    if submission_uuid:
        try:
            submission_data = sub_api.get_submission(submission_uuid)
            _create_peer_workflow_item(workflow, submission_uuid)
            _log_workflow(submission_uuid, student_item_dict, over_grading)
            return submission_data
        except sub_api.SubmissionNotFoundError:
            error_message = _(
                u"Could not find a submission with the uuid {} for student {} "
                u"in the peer workflow."
                .format(submission_uuid, student_item_dict)
            )
            logger.exception(error_message)
            raise PeerAssessmentWorkflowError(error_message)
    else:
        logger.info(
            u"No submission found for {} to assess ({}, {})"
            .format(
                student_item_dict["student_id"],
                student_item_dict["course_id"],
                student_item_dict["item_id"],
            )
        )
        return None
Example #26
 def test_get_submission_deep_error(self, mock_get):
     # Test deep explosions are wrapped
     mock_get.side_effect = DatabaseError("Kaboom!")
     api.get_submission("000000000000000")
Example #27
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = peer_api.get_assessments(submission_uuid)
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_with_feedback(peer_assessments),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                criterion["median_score"] = median_scores[criterion["name"]]
                criterion["total_value"] = max_scores[criterion["name"]]

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
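
As a hedged usage sketch, the `(template_path, context)` pair returned above can be fed straight to a template loader; `render_grade_fragment` and the use of Django's `get_template` here are illustrative assumptions, not part of the example.

from django.template.loader import get_template

def render_grade_fragment(block, workflow):
    # Illustrative only: turn the (path, context) pair into rendered HTML.
    template_path, context = block.render_grade_complete(workflow)
    return get_template(template_path).render(context)
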
Beispiel #35
0
    @mock.patch.object(Submission.objects, 'get')
    def test_get_submission_deep_error(self, mock_get):
        # Test deep explosions are wrapped in SubmissionInternalError; the patch
        # target above is assumed from the ``mock_get`` argument.
        mock_get.side_effect = DatabaseError("Kaboom!")
        with self.assertRaises(api.SubmissionInternalError):
            api.get_submission("000000000000000")
Beispiel #36
0
    def self_path_and_context(self):
        """
        Determine the template path and context to use when rendering the self-assessment step.

        Returns:
            tuple of `(path, context)`, where `path` (str) is the path to the template,
            and `context` (dict) is the template context.

        Raises:
            SubmissionError: Error occurred while retrieving the current submission.
            SelfAssessmentRequestError: Error occurred while checking if we had a self-assessment.
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as submission_api

        path = 'openassessmentblock/self/oa_self_unavailable.html'
        problem_closed, reason, start_date, due_date = self.is_closed(step="self-assessment")
        user_preferences = get_user_preferences(self.runtime.service(self, 'user'))

        context = {
            'allow_latex': self.allow_latex,
            'prompts_type': self.prompts_type,
            "xblock_id": self.get_xblock_id(),
            'user_timezone': user_preferences['user_timezone'],
            'user_language': user_preferences['user_language']
        }

        # We display the due date whether the problem is open or closed.
        # If no date is set, it defaults to the distant future, in which
        # case we don't display the date.
        if due_date < DISTANT_FUTURE:
            context['self_due'] = due_date

        # If we haven't submitted yet, `workflow` will be an empty dict,
        # and `workflow_status` will be None.
        workflow = self.get_workflow_info()
        workflow_status = workflow.get('status')
        self_complete = workflow.get('status_details', {}).get('self', {}).get('complete', False)
        if workflow_status == 'cancelled':
            path = 'openassessmentblock/self/oa_self_cancelled.html'
            # Sets the XBlock boolean to signal to Message that it WAS able to grab a submission
            self.no_peers = True

        elif self_complete:
            path = 'openassessmentblock/self/oa_self_complete.html'
        elif workflow_status == 'self' or problem_closed:
            assessment = self_api.get_assessment(workflow.get("submission_uuid"))

            if assessment is not None:
                path = 'openassessmentblock/self/oa_self_complete.html'
            elif problem_closed:
                if reason == 'start':
                    context["self_start"] = start_date
                    path = 'openassessmentblock/self/oa_self_unavailable.html'
                elif reason == 'due':
                    path = 'openassessmentblock/self/oa_self_closed.html'
            else:
                submission = submission_api.get_submission(self.submission_uuid)
                context["rubric_criteria"] = self.rubric_criteria_with_labels
                context["self_submission"] = create_submission_dict(submission, self.prompts)
                if self.rubric_feedback_prompt is not None:
                    context["rubric_feedback_prompt"] = self.rubric_feedback_prompt

                if self.rubric_feedback_default_text is not None:
                    context['rubric_feedback_default_text'] = self.rubric_feedback_default_text

                # Determine if file upload is supported for this XBlock and what kind of files can be uploaded.
                context["file_upload_type"] = self.file_upload_type
                context['self_file_urls'] = self.get_download_urls_from_submission(submission)

                path = 'openassessmentblock/self/oa_self_assessment.html'
        else:
            # No submission yet or in peer assessment
            path = 'openassessmentblock/self/oa_self_unavailable.html'

        return path, context
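
For orientation, here is a sketch of the workflow dict shape the method above reads; only the keys actually accessed (`status`, `submission_uuid`, and `status_details` -> `self` -> `complete`) come from the code, and the values are illustrative.

# Illustrative only: the keys self_path_and_context() expects from get_workflow_info().
example_workflow = {
    "status": "self",
    "submission_uuid": "deadbeef-1234-5678-9100-1234deadbeef",
    "status_details": {"self": {"complete": False}},
}
self_complete = example_workflow.get("status_details", {}).get("self", {}).get("complete", False)
assert self_complete is False
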
Beispiel #37
0
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid)
            )

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid)
            )

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'allow_latex': self.allow_latex,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(criterion["name"], '')
                criterion["total_value"] = max_scores.get(criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
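
The defensive `.get(..., '')` lookups above are easiest to see with a tiny worked example; the criterion names used here are purely illustrative.

# Illustrative only: a criterion in the current rubric that no stored assessment
# was scored against renders as a blank instead of raising KeyError.
median_scores = {"Ideas": 4}
max_scores = {"Ideas": 5}
rubric_criteria = [{"name": "Ideas"}, {"name": "Content"}]
for criterion in rubric_criteria:
    criterion["median_score"] = median_scores.get(criterion["name"], '')
    criterion["total_value"] = max_scores.get(criterion["name"], '')
assert rubric_criteria[1] == {"name": "Content", "median_score": '', "total_value": ''}
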
Beispiel #38
0
    def render_grade_complete(self, workflow):
        """
        Render the grade complete state.

        Args:
            workflow (dict): The serialized Workflow model.

        Returns:
            tuple of context (dict), template_path (string)
        """
        # Peer specific stuff...
        assessment_steps = self.assessment_steps
        submission_uuid = workflow['submission_uuid']

        example_based_assessment = None
        self_assessment = None
        feedback = None
        peer_assessments = []
        has_submitted_feedback = False

        if "peer-assessment" in assessment_steps:
            feedback = peer_api.get_assessment_feedback(submission_uuid)
            peer_assessments = [
                self._assessment_grade_context(asmnt)
                for asmnt in peer_api.get_assessments(submission_uuid)
            ]
            has_submitted_feedback = feedback is not None

        if "self-assessment" in assessment_steps:
            self_assessment = self._assessment_grade_context(
                self_api.get_assessment(submission_uuid))

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = self._assessment_grade_context(
                ai_api.get_latest_assessment(submission_uuid))

        feedback_text = feedback.get('feedback', '') if feedback else ''
        student_submission = sub_api.get_submission(submission_uuid)

        # We retrieve the score from the workflow, which in turn retrieves
        # the score for our current submission UUID.
        # We look up the score by submission UUID instead of student item
        # to ensure that the score always matches the rubric.
        # It's possible for the score to be `None` even if the workflow status is "done"
        # when all the criteria in the rubric are feedback-only (no options).
        score = workflow['score']

        context = {
            'score': score,
            'feedback_text': feedback_text,
            'student_submission': student_submission,
            'peer_assessments': peer_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': self._rubric_criteria_grade_context(peer_assessments, self_assessment),
            'has_submitted_feedback': has_submitted_feedback,
            'allow_file_upload': self.allow_file_upload,
            'file_url': self.get_download_url_from_submission(student_submission)
        }

        # Update the scores we will display to the user
        # Note that we are updating a *copy* of the rubric criteria stored in
        # the XBlock field
        max_scores = peer_api.get_rubric_max_scores(submission_uuid)
        median_scores = None
        if "peer-assessment" in assessment_steps:
            median_scores = peer_api.get_assessment_median_scores(
                submission_uuid)
        elif "self-assessment" in assessment_steps:
            median_scores = self_api.get_assessment_scores_by_criteria(
                submission_uuid)
        elif "example-based-assessment" in assessment_steps:
            median_scores = ai_api.get_assessment_scores_by_criteria(
                submission_uuid)

        if median_scores is not None and max_scores is not None:
            for criterion in context["rubric_criteria"]:
                # Although we prevent course authors from modifying criteria post-release,
                # it's still possible for assessments created by course staff to
                # have criteria that differ from the current problem definition.
                # It's also possible to circumvent the post-release restriction
                # if course authors directly import a course into Studio.
                # If this happens, we simply leave the score blank so that the grade
                # section can render without error.
                criterion["median_score"] = median_scores.get(
                    criterion["name"], '')
                criterion["total_value"] = max_scores.get(
                    criterion["name"], '')

        return ('openassessmentblock/grade/oa_grade_complete.html', context)
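
Finally, a hedged sketch of the `workflow['score']` value these grade contexts carry; the `points_earned`/`points_possible` keys follow the usual submissions-API score shape but should be treated as an assumption here, and the guard reflects the comment above that the score can be `None` when all rubric criteria are feedback-only.

# Illustrative only: guard against a None score before reading point values.
def format_score(score):
    if score is None:
        return "No score available"
    return "{points_earned} / {points_possible}".format(**score)  # keys assumed

assert format_score({"points_earned": 8, "points_possible": 10}) == "8 / 10"
assert format_score(None) == "No score available"
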