Example #1
    def test_two_students(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(SECOND_STUDENT_ITEM, ANSWER_TWO)

        submissions = api.get_submissions(STUDENT_ITEM)
        self.assertEqual(1, len(submissions))
        self._assert_submission(submissions[0], ANSWER_ONE, 1, 1)

        submissions = api.get_submissions(SECOND_STUDENT_ITEM)
        self.assertEqual(1, len(submissions))
        self._assert_submission(submissions[0], ANSWER_TWO, 2, 1)
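The tests in these examples reference module-level fixtures (STUDENT_ITEM, SECOND_STUDENT_ITEM, ANSWER_ONE, ANSWER_TWO) defined outside the snippets shown. A minimal sketch of plausible definitions, inferred from the student-item fields used throughout these examples; the concrete values are hypothetical:

    # Hypothetical fixture definitions; the field names mirror the
    # student_item dicts built elsewhere in these examples, but the
    # values shown here are made up.
    STUDENT_ITEM = dict(
        student_id="student_one",
        course_id="test_course",
        item_id="item_one",
        item_type="openassessment",
    )
    SECOND_STUDENT_ITEM = dict(STUDENT_ITEM, student_id="student_two")
    ANSWER_ONE = u"this is my answer!"
    ANSWER_TWO = u"this is my other answer!"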
Example #2
    def test_get_submissions(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
        submissions = api.get_submissions(STUDENT_ITEM)

        self._assert_submission(submissions[1], ANSWER_ONE, 1, 1)
        self._assert_submission(submissions[0], ANSWER_TWO, 1, 2)
Example #3
def get_submissions_for_student_item(request, course_id, student_id, item_id):
    """Retrieve all submissions associated with the given student item.

    Developer utility for accessing all the submissions associated with a
    student item. The student item is specified by the unique combination of
    course, student, and item.

    Args:
        request (HttpRequest): The request object.
        course_id (str): The course id for this student item.
        student_id (str): The student id for this student item.
        item_id (str): The item id for this student item.

    Returns:
        HttpResponse: The response object for this request. Renders a simple
            development page with all the submissions related to the specified
            student item.

    """
    student_item_dict = dict(
        course_id=course_id,
        student_id=student_id,
        item_id=item_id,
    )
    context = dict(**student_item_dict)
    try:
        submissions = get_submissions(student_item_dict)
        context["submissions"] = submissions
    except SubmissionRequestError:
        context["error"] = "The specified student item was not found."

    return render_to_response('submissions.html', context)
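The dicts returned by get_submissions are read with a consistent set of keys across these examples. A sketch of the apparent shape of one submission record, assembled from the fields accessed in the surrounding snippets; the values are illustrative only, not real API output:

    # Apparent shape of a submission dict, inferred from the keys these
    # examples access ('uuid', 'answer', 'attempt_number', 'submitted_at');
    # the values shown are illustrative.
    submission = {
        'uuid': '0a600160-be7f-41a5-a343-a6e3d8c90ea1',
        'attempt_number': 1,
        'submitted_at': datetime.datetime(2014, 4, 1, tzinfo=pytz.UTC),
        'answer': u"this is my answer!",  # a str here; some blocks store a dict
    }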
Example #4
    def test_create_submissions(self):

        # Create some submissions
        cmd = create_oa_submissions.Command(**{'self_assessment_required': True})
        cmd.handle("test_course", "test_item", "5", 100)
        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)

            answer_dict = submissions[0]['answer']
            self.assertIsInstance(answer_dict['text'], basestring)
            self.assertGreater(len(answer_dict['text']), 0)

            # Check that peer and self assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'])

            # Verify that the assessments exist and have content
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            assessment = self_api.get_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
Example #5
    def clear_student_state(self, user_id, course_id, item_id):
        """
        This xblock method is called (from our LMS runtime, which defines this method signature) to clear student state
        for a given problem. It will cancel the workflow using traditional methods to remove it from the grading pools,
        and pass through to the submissions API to orphan the submission so that the user can create a new one.
        """
        # Note that student_item cannot be constructed using get_student_item_dict, since we're in a staff context
        student_item = {
            'course_id': course_id,
            'student_id': user_id,
            'item_id': item_id,
            'item_type': 'openassessment',
        }
        # There *should* only be one submission, but the logic is easy to extend for multiples so we may as well do it
        submissions = submission_api.get_submissions(student_item)
        for sub in submissions:
            # Remove the submission from grading pools
            self._cancel_workflow(sub['uuid'], "Student state cleared")

            # Tell the submissions API to orphan the submission to prevent it from being accessed
            submission_api.reset_score(
                user_id,
                course_id,
                item_id,
                clear_state=True  # pylint: disable=unexpected-keyword-arg
            )
Example #6
    def test_get_submissions(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO)
        submissions = api.get_submissions(STUDENT_ITEM)

        student_item = self._get_student_item(STUDENT_ITEM)
        self._assert_submission(submissions[1], ANSWER_ONE, student_item.pk, 1)
        self._assert_submission(submissions[0], ANSWER_TWO, student_item.pk, 2)
Example #7
    def get_submission(self, submission_id=None):
        """
        Get the student's most recent submission.
        """
        submissions = submissions_api.get_submissions(
            self.student_submission_id(submission_id))
        if submissions:
            # get_submissions returns submissions newest-first, so the most
            # recent submission is the head of the list
            return submissions[0]
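Several of the tests in this listing pass a second argument to get_submissions to cap how many rows come back. When only the latest submission is needed, as above, the same lookup can presumably be bounded to one result; a minimal sketch, assuming the optional limit argument behaves as in those tests (the method name is hypothetical):

    def get_latest_submission(self, submission_id=None):
        """Sketch: the same lookup as above, bounded to one row via the
        optional limit argument (e.g. get_submissions(STUDENT_ITEM, 1))."""
        submissions = submissions_api.get_submissions(
            self.student_submission_id(submission_id), 1)
        if submissions:
            return submissions[0]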
Example #8
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get('student_id', '')
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]['uuid']

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #9
    def test_get_latest_submission(self):
        past_date = datetime.datetime(2007, 9, 12, 0, 0, 0, 0, pytz.UTC)
        more_recent_date = datetime.datetime(2007, 9, 13, 0, 0, 0, 0, pytz.UTC)
        api.create_submission(STUDENT_ITEM, ANSWER_ONE, more_recent_date)
        api.create_submission(STUDENT_ITEM, ANSWER_TWO, past_date)

        # Test a limit on the submissions
        submissions = api.get_submissions(STUDENT_ITEM, 1)
        self.assertEqual(1, len(submissions))
        self.assertEqual(ANSWER_ONE, submissions[0]["answer"])
        self.assertEqual(more_recent_date.year,
                         submissions[0]["submitted_at"].year)
Example #10
    def get_student_info_path_and_context(self, data):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        """
        student_id = data.params.get("student_id", "")
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item["student_id"] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission = submissions[0]
                submission_uuid = submissions[0]["uuid"]

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)
        else:
            peer_assessments = []
            submitted_assessments = []

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)
        else:
            self_assessment = None

        context = {
            "submission": submission,
            "peer_assessments": peer_assessments,
            "submitted_assessments": submitted_assessments,
            "self_assessment": self_assessment,
            "rubric_criteria": copy.deepcopy(self.rubric_criteria),
        }

        if peer_assessments or self_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = "openassessmentblock/staff_debug/student_info.html"
        return path, context
Example #11
    def test_clear_state(self):
        # Create a submission, give it a score, and verify that score exists
        submission = api.create_submission(STUDENT_ITEM, ANSWER_ONE)
        api.set_score(submission["uuid"], 11, 12)
        score = api.get_score(STUDENT_ITEM)
        self._assert_score(score, 11, 12)
        self.assertEqual(score['submission_uuid'], submission['uuid'])

        # Reset the score with clear_state=True
        # This should set the submission's score to None, and make it unavailable to get_submissions
        api.reset_score(
            STUDENT_ITEM["student_id"],
            STUDENT_ITEM["course_id"],
            STUDENT_ITEM["item_id"],
            clear_state=True,
        )
        score = api.get_score(STUDENT_ITEM)
        self.assertIsNone(score)
        subs = api.get_submissions(STUDENT_ITEM)
        self.assertEqual(subs, [])
Example #12
def get_evaluations_for_student_item(request, course_id, student_id, item_id):  # pylint: disable=unused-argument
    """Retrieve all evaluations associated with the given student item.

    Developer utility for accessing all the evaluations associated with a
    student item. The student item is specified by the unique combination of
    course, student, and item.

    Args:
        request (HttpRequest): The request object.
        course_id (str): The course id for this student item.
        student_id (str): The student id for this student item.
        item_id (str): The item id for this student item.

    Returns:
        HttpResponse: The response object for this request. Renders a simple
            development page with all the evaluations related to the specified
            student item.

    """
    student_item_dict = dict(
        course_id=course_id,
        student_id=student_id,
        item_id=item_id,
    )
    context = dict(**student_item_dict)
    try:
        submissions = get_submissions(student_item_dict)
        evaluations = []
        for submission in submissions:
            submission_evaluations = get_assessments(submission["uuid"])
            for evaluation in submission_evaluations:
                evaluation["submission_uuid"] = submission["uuid"]
                evaluations.append(evaluation)

        context["evaluations"] = evaluations

    except SubmissionRequestError:
        context["error"] = "The specified student item was not found."

    return render(request, 'evaluations.html', context)
Example #13
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff area.

        Args:
            student_username (unicode): The username of the student to report.
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as submission_api

        anonymous_user_id = None
        student_item = None
        submissions = None
        submission = None
        submission_uuid = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission = submissions[0]
            submission_uuid = submission['uuid']

        # This will add submission (which may be None) and username to the context.
        context = self.get_student_submission_context(student_username, submission)

        # Only add the rest of the details to the context if a submission exists.
        if submission_uuid:
            self.add_submission_context(submission_uuid, context)

        path = 'openassessmentblock/staff_area/oa_student_info.html'
        return path, context
Example #14
    def clear_student_state(self, user_id, course_id, item_id,
                            requesting_user_id):
        """
        This xblock method is called (from our LMS runtime, which defines this method signature) to clear student state
        for a given problem. It will cancel the workflow using traditional methods to remove it from the grading pools,
        and pass through to the submissions API to orphan the submission so that the user can create a new one.
        """
        # Import is placed here to avoid model import at project startup.
        from submissions import api as submission_api
        # Note that student_item cannot be constructed using get_student_item_dict, since we're in a staff context
        student_item = {
            'course_id': course_id,
            'student_id': user_id,
            'item_id': item_id,
            'item_type': 'openassessment',
        }
        submissions = submission_api.get_submissions(student_item)

        if self.is_team_assignment():
            self.clear_team_state(user_id, course_id, item_id,
                                  requesting_user_id, submissions)
        else:
            # There *should* only be one submission, but the logic is easy to extend for multiples so we may as well do it
            for sub in submissions:
                # Remove the submission from grading pools
                self._cancel_workflow(sub['uuid'],
                                      "Student state cleared",
                                      requesting_user_id=requesting_user_id)

                # Delete files from the backend
                if 'file_keys' in sub['answer']:
                    for key in sub['answer']['file_keys']:
                        remove_file(key)

                # Tell the submissions API to orphan the submission to prevent it from being accessed
                submission_api.reset_score(user_id,
                                           course_id,
                                           item_id,
                                           clear_state=True)
Example #16
    def get_submission_uuid(self):
        """ Submission UUIDs can live in multiple places depending on the
            submission type, so check each location in turn to find it.

            No submission UUID will be found if a learner has not submitted a
            response.

            Individual submissions will be in the user's context.

            Returns:
                (string) Submission UUID if found
                (None) None if not found
        """
        if self.submission_uuid is not None:
            return self.submission_uuid
        elif self.is_team_assignment():
            try:
                # Query for submissions by the student item
                student_item = self.get_student_item_dict()
                submission_list = get_submissions(student_item)
                if submission_list and submission_list[0]["uuid"] is not None:
                    return submission_list[0]["uuid"]
            except (SubmissionInternalError, SubmissionNotFoundError):
                return None
Example #17
    def clear_student_state(self, *args, **kwargs):
        # pylint: disable=unused-argument
        """
        For a given user, clears submissions and uploaded files for this XBlock.

        Staff users are able to delete a learner's state for a block in LMS. When that capability is
        used, the block's "clear_student_state" function is called if it exists.
        """
        student_id = kwargs['user_id']
        for submission in submissions_api.get_submissions(
                self.get_student_item_dict(student_id)
        ):
            submission_file_sha1 = submission['answer'].get('sha1')
            submission_filename = submission['answer'].get('filename')
            submission_file_path = self.file_storage_path(submission_file_sha1, submission_filename)
            if default_storage.exists(submission_file_path):
                default_storage.delete(submission_file_path)
            submissions_api.reset_score(
                student_id,
                self.block_course_id,
                self.block_id,
                clear_state=True
            )
Example #18
    def test_create_submissions(self):

        # Create some submissions
        cmd = create_oa_submissions.Command()
        cmd.handle("test_course", "test_item", "5")

        self.assertEqual(len(cmd.student_items), 5)
        for student_item in cmd.student_items:

            # Check that the student item was created for the right course / item
            self.assertEqual(student_item['course_id'], 'test_course')
            self.assertEqual(student_item['item_id'], 'test_item')

            # Check that a submission was created
            submissions = sub_api.get_submissions(student_item)
            self.assertEqual(len(submissions), 1)

            answer_dict = submissions[0]['answer']
            self.assertIsInstance(answer_dict['text'], basestring)
            self.assertGreater(len(answer_dict['text']), 0)

            # Check that peer and self assessments were created
            assessments = peer_api.get_assessments(submissions[0]['uuid'],
                                                   scored_only=False)

            # Verify that the assessments exist and have content
            self.assertEqual(len(assessments), cmd.NUM_PEER_ASSESSMENTS)

            for assessment in assessments:
                self.assertGreater(assessment['points_possible'], 0)

            # Check that a self-assessment was created
            assessment = self_api.get_assessment(submissions[0]['uuid'])

            # Verify that the assessment exists and has content
            self.assertIsNot(assessment, None)
            self.assertGreater(assessment['points_possible'], 0)
Example #19
    def comment(self):
        """str: The staff comment."""
        log.error('submission_id {}'.format(self.submission_id))
        submissions = submissions_api.get_submissions(self.submission_id)
        if submissions:
            return submissions[0]['answer']['comment']
Example #20
    def test_set_attempt_number(self):
        api.create_submission(STUDENT_ITEM, ANSWER_ONE, None, 2)
        submissions = api.get_submissions(STUDENT_ITEM)
        student_item = self._get_student_item(STUDENT_ITEM)
        self._assert_submission(submissions[0], ANSWER_ONE, student_item.pk, 2)
Example #21
    def test_unicode_enforcement(self):
        api.create_submission(STUDENT_ITEM, "Testing unicode answers.")
        submissions = api.get_submissions(STUDENT_ITEM, 1)
        self.assertEqual(u"Testing unicode answers.", submissions[0]["answer"])
Example #22
    def test_various_student_items(self, valid_student_item):
        api.create_submission(valid_student_item, ANSWER_ONE)
        student_item = self._get_student_item(valid_student_item)
        submission = api.get_submissions(valid_student_item)[0]
        self._assert_submission(submission, ANSWER_ONE, student_item.pk, 1)
Example #23
    def comment(self):
        """str: The staff comment."""
        submissions = submissions_api.get_submissions(self.submission_id)
        if submissions:
            return submissions[0]['answer']['comment']
Example #26
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps

        if student_id:
            student_item = self.get_student_item_dict()
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(
                            file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(
                submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(
                submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #28
    def get_comment(self):
        """Return the staff comment stored with the most recent submission."""
        submissions = submissions_api.get_submissions(self.submission_id)
        if submissions:
            return submissions[0]['answer']['comment']
Example #29
    def student_view(self, context=None):
        """
        The primary view of the FormulaExerciseXBlock, shown to students when viewing courses.
        """
        context = {}
        self.submitted_expressions = {}

        if self.xblock_id is None:
            self.xblock_id = unicode(self.location.replace(branch=None, version=None))

        if self.newly_created_block:
            self.newly_created_block = (db_service.is_block_in_db(self.xblock_id) is False)

        if self.newly_created_block is True:  # generate question template for newly created XBlock
            self.question_template, self.variables, self.expressions = \
                question_service.generate_question_template()
            db_service.create_question_template(
                self.xblock_id, self.question_template, self.variables, self.expressions)
            self.newly_created_block = False
        else:  # existing question template in dbms
            self.load_data_from_dbms()

        # generate question from template if necessary
        if self.generated_question == "":
            self.generated_question, self.generated_variables = \
                question_service.generate_question(self.question_template, self.variables)

        for expression_name, expression_value in self.expressions.iteritems():
            self.submitted_expressions[expression_name] = ''

        # load submission data to display the previously submitted result
        submissions = sub_api.get_submissions(self.student_item_key, 1)
        if submissions:
            latest_submission = submissions[0]

            # parse the answer
            answer = latest_submission['answer']
            self.generated_question = answer['generated_question']

            if 'variable_values' in answer:  # backward compatibility
                saved_generated_variables = json.loads(answer['variable_values'])
                for var_name, var_value in saved_generated_variables.iteritems():
                    self.generated_variables[var_name] = var_value

            saved_submitted_expressions = json.loads(answer['expression_values'])
            for submitted_expr_name, submitted_expr_val in saved_submitted_expressions.iteritems():
                self.submitted_expressions[submitted_expr_name] = submitted_expr_val

            self.attempt_number = latest_submission['attempt_number']
            if self.attempt_number >= self.max_attempts:
                context['disabled'] = 'disabled'
            else:
                context['disabled'] = ''

        self.serialize_data_to_context(context)

        context['attempt_number'] = self.attempt_number_string
        context['point_string'] = self.point_string
        context['question'] = self.generated_question
        context['xblock_id'] = self.xblock_id
        context['submitted_expressions'] = self.submitted_expressions
        context['show_answer'] = self.show_answer

        frag = Fragment()
        frag.content = loader.render_template('static/html/formula_exercise_block.html', context)
        frag.add_css(self.resource_string("static/css/formula_exercise_block.css"))
        frag.add_javascript(self.resource_string("static/js/src/formula_exercise_block.js"))
        frag.initialize_js('FormulaExerciseXBlock')
        return frag
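The answer dict this view reads back ('generated_question', 'variable_values', 'expression_values') would have been stored earlier by the block's submit handler. A minimal sketch of what that handler might look like, assuming it simply JSON-encodes the values that student_view later decodes with json.loads(); the handler name and payload details are hypothetical:

    # Hypothetical submit handler: stores the answer dict whose keys
    # student_view reads back above. The values are JSON-encoded strings
    # because student_view decodes them with json.loads().
    def submit_expressions(self, submitted_expressions):
        answer = {
            'generated_question': self.generated_question,
            'variable_values': json.dumps(self.generated_variables),
            'expression_values': json.dumps(submitted_expressions),
        }
        return sub_api.create_submission(self.student_item_key, answer)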
Example #31
    def get_student_info_path_and_context(self, student_username):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_username (unicode): The username of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        anonymous_user_id = None
        submissions = None
        student_item = None

        if student_username:
            anonymous_user_id = self.get_anonymous_user_id(student_username, self.course_id)
            student_item = self.get_student_item_dict(anonymous_user_id=anonymous_user_id)

        if anonymous_user_id:
            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

        if submissions:
            submission_uuid = submissions[0]['uuid']
            submission = submissions[0]

            if 'file_key' in submission.get('answer', {}):
                file_key = submission['answer']['file_key']

                try:
                    submission['image_url'] = file_api.get_download_url(file_key)
                except file_api.FileUploadError:
                    # Log the error, but do not prevent the rest of the student info
                    # from being displayed.
                    msg = (
                        u"Could not retrieve image URL for staff debug page.  "
                        u"The student username is '{student_username}', and the file key is {file_key}"
                    ).format(student_username=student_username, file_key=file_key)
                    logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        workflow_cancellation = workflow_api.get_assessment_workflow_cancellation(submission_uuid)
        if workflow_cancellation:
            workflow_cancellation['cancelled_by'] = self.get_username(workflow_cancellation['cancelled_by_id'])

        context = {
            'submission': submission,
            'workflow_cancellation': workflow_cancellation,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context
Example #32
    def get_student_info_path_and_context(self, student_id):
        """
        Get the proper path and context for rendering the student info
        section of the staff debug panel.

        Args:
            student_id (unicode): The ID of the student to report.

        """
        submission_uuid = None
        submission = None
        assessment_steps = self.assessment_steps
        student_item = self.get_student_item_dict()
        scores = {}
        problem_closed = None

        if student_id:
            student_item['student_id'] = student_id

            # If there is a submission available for the requested student, present
            # it. If not, there will be no other information to collect.
            submissions = submission_api.get_submissions(student_item, 1)

            if submissions:
                submission_uuid = submissions[0]['uuid']
                submission = submissions[0]

                if 'file_key' in submission.get('answer', {}):
                    file_key = submission['answer']['file_key']

                    try:
                        submission['image_url'] = file_api.get_download_url(file_key)
                    except file_api.FileUploadError:
                        # Log the error, but do not prevent the rest of the student info
                        # from being displayed.
                        msg = (
                            u"Could not retrieve image URL for staff debug page.  "
                            u"The student ID is '{student_id}', and the file key is {file_key}"
                        ).format(student_id=student_id, file_key=file_key)
                        logger.exception(msg)

        example_based_assessment = None
        self_assessment = None
        peer_assessments = []
        submitted_assessments = []

        if "peer-assessment" in assessment_steps:
            peer_assessments = peer_api.get_assessments(submission_uuid)
            submitted_assessments = peer_api.get_submitted_assessments(submission_uuid, scored_only=False)

            # Get the data we need for instructor override of the student's score
            rubric_dict = create_rubric_dict(self.prompt, self.rubric_criteria_with_labels)
            scores = peer_api.get_data_for_override_score(
                submission_uuid,
                student_item,
                rubric_dict,
            )
            problem_closed, dummy0, dummy1, dummy2 = self.is_closed(step='peer-assessment', course_staff=False)

        if "self-assessment" in assessment_steps:
            self_assessment = self_api.get_assessment(submission_uuid)

        if "example-based-assessment" in assessment_steps:
            example_based_assessment = ai_api.get_latest_assessment(submission_uuid)

        context = {
            'submission': submission,
            'peer_assessments': peer_assessments,
            'submitted_assessments': submitted_assessments,
            'self_assessment': self_assessment,
            'example_based_assessment': example_based_assessment,
            'rubric_criteria': copy.deepcopy(self.rubric_criteria_with_labels),
            'scores': scores,
            'problem_closed': problem_closed,
        }

        if peer_assessments or self_assessment or example_based_assessment:
            max_scores = peer_api.get_rubric_max_scores(submission_uuid)
            for criterion in context["rubric_criteria"]:
                criterion["total_value"] = max_scores[criterion["name"]]

        path = 'openassessmentblock/staff_debug/student_info.html'
        return path, context